Repository: PJLab-ADG/Livox-Mapping Branch: main Commit: 38bc18f15ad8 Files: 73 Total size: 786.6 KB Directory structure: gitextract_r29jj8ts/ ├── .gitignore ├── LICENSE ├── LIO-Livox/ │ ├── CMakeLists.txt │ ├── LICENSE │ ├── README.md │ ├── cmake/ │ │ ├── FindEigen3.cmake │ │ └── FindSuiteSparse.cmake │ ├── config/ │ │ └── horizon_config.yaml │ ├── include/ │ │ ├── Estimator/ │ │ │ └── Estimator.h │ │ ├── IMUIntegrator/ │ │ │ └── IMUIntegrator.h │ │ ├── LidarFeatureExtractor/ │ │ │ └── LidarFeatureExtractor.h │ │ ├── MapManager/ │ │ │ └── Map_Manager.h │ │ ├── segment/ │ │ │ ├── pointsCorrect.hpp │ │ │ └── segment.hpp │ │ ├── sophus/ │ │ │ ├── average.hpp │ │ │ ├── common.hpp │ │ │ ├── example_ensure_handler.cpp │ │ │ ├── formatstring.hpp │ │ │ ├── geometry.hpp │ │ │ ├── interpolate.hpp │ │ │ ├── interpolate_details.hpp │ │ │ ├── num_diff.hpp │ │ │ ├── rotation_matrix.hpp │ │ │ ├── rxso2.hpp │ │ │ ├── rxso3.hpp │ │ │ ├── se2.hpp │ │ │ ├── se3.hpp │ │ │ ├── sim2.hpp │ │ │ ├── sim3.hpp │ │ │ ├── sim_details.hpp │ │ │ ├── so2.hpp │ │ │ ├── so3.hpp │ │ │ ├── test_macros.hpp │ │ │ ├── types.hpp │ │ │ └── velocities.hpp │ │ └── utils/ │ │ ├── ceresfunc.h │ │ ├── math_utils.hpp │ │ └── pcl_utils.hpp │ ├── launch/ │ │ └── livox_odometry.launch │ ├── package.xml │ ├── rviz_cfg/ │ │ └── lio.rviz │ └── src/ │ ├── lio/ │ │ ├── Estimator.cpp │ │ ├── IMUIntegrator.cpp │ │ ├── LidarFeatureExtractor.cpp │ │ ├── Map_Manager.cpp │ │ ├── PoseEstimation.cpp │ │ ├── ScanRegistration.cpp │ │ └── ceresfunc.cpp │ └── segment/ │ ├── pointsCorrect.cpp │ └── segment.cpp ├── README.md ├── SC-PGO/ │ ├── .gitignore │ ├── CMakeLists.txt │ ├── README.md │ ├── include/ │ │ ├── livox_mapping/ │ │ │ ├── common.h │ │ │ └── tic_toc.h │ │ └── scancontext/ │ │ ├── KDTreeVectorOfVectorsAdaptor.h │ │ ├── Scancontext.cpp │ │ ├── Scancontext.h │ │ └── nanoflann.hpp │ ├── launch/ │ │ └── livox_mapping.launch │ ├── package.xml │ ├── rviz_cfg/ │ │ └── livox_mapping.rviz │ ├── src/ │ │ └── 
laserPosegraphOptimization.cpp │ └── utils/ │ ├── moving_object_removal/ │ │ ├── README.md │ │ └── move_object_removal_livox.py │ └── python/ │ ├── bone_table.npy │ ├── jet_table.npy │ ├── makeMergedMap.py │ └── pypcdMyUtils.py └── ad_localization_msgs/ ├── CMakeLists.txt ├── msg/ │ └── NavStateInfo.msg └── package.xml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ .vscode/ __pycache__/ ================================================ FILE: LICENSE ================================================ BSD 3-Clause License Copyright (c) 2021, PJLab-ADG All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: LIO-Livox/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.8.3) project(livox_odometry) SET(CMAKE_BUILD_TYPE "Release") set(CMAKE_CXX_FLAGS "-std=c++11") #SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") SET(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) find_package(catkin REQUIRED COMPONENTS message_generation geometry_msgs nav_msgs sensor_msgs roscpp rospy std_msgs tf tf_conversions rosbag livox_ros_driver eigen_conversions pcl_conversions pcl_ros message_filters std_srvs) find_package(Eigen3 REQUIRED) find_package(PCL REQUIRED) find_package(Ceres REQUIRED) find_package(OpenCV REQUIRED) find_package(SuiteSparse REQUIRED) include_directories(include ${catkin_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIR} ${PCL_INCLUDE_DIRS} ${CERES_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${SUITESPARSE_INCLUDE_DIRS}) ################## ## ROS messages ## ################## generate_messages(DEPENDENCIES std_msgs) catkin_package(CATKIN_DEPENDS geometry_msgs nav_msgs roscpp rospy std_msgs message_runtime pcl_conversions pcl_ros message_filters std_srvs tf_conversions tf eigen_conversions DEPENDS PCL OpenCV INCLUDE_DIRS include) add_executable (ScanRegistration src/lio/ScanRegistration.cpp src/lio/LidarFeatureExtractor.cpp src/segment/segment.cpp src/segment/pointsCorrect.cpp) target_link_libraries(ScanRegistration ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES}) add_executable 
(PoseEstimation src/lio/PoseEstimation.cpp src/lio/Estimator.cpp src/lio/IMUIntegrator.cpp src/lio/ceresfunc.cpp src/lio/Map_Manager.cpp) target_link_libraries(PoseEstimation ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${CERES_LIBRARIES}) ================================================ FILE: LIO-Livox/LICENSE ================================================ BSD 3-Clause License Copyright (c) 2021, LIVOX All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================ FILE: LIO-Livox/README.md ================================================ # LIO-Livox (A Robust LiDAR-Inertial Odometry for Livox LiDAR) This repository implements a robust LiDAR-inertial odometry system for Livox LiDAR. The system uses only a single Livox LiDAR with a built-in IMU. It has a robust initialization module, which is independent of the sensor motion. **It can be initialized with the static state, dynamic state, and the mixture of static and dynamic state.** The system achieves super robust performance. **It can pass through a 4km-tunnel and run on the highway with a very high speed (about 80km/h) using a single Livox Horizon.** Moreover, **it is also robust to dynamic objects**, such as cars, bicycles, and pedestrians. It obtains high precision of localization even in traffic jams. **The mapping result is precise even when most of the FOV is occluded by vehicles.** Videos of the demonstration of the system can be found on Youtube and Bilibili. *NOTE: Images are only used for demonstration, not used in the system.* **Developer**: [Livox](www.livoxtech.com)
## System architecture
The system consists of two ROS nodes: ScanRegistration and PoseEstimation. * The class "LidarFeatureExtractor" of the node "ScanRegistration" extracts corner features, surface features, and irregular features from the raw point cloud. * In the node "PoseEstimation", the main thread aims to estimate sensor poses, while another thread in the class "Estimator" uses the class "MapManager" to build and manage feature maps. The system is mainly designed for car platforms in the large scale outdoor environment. Users can easily run the system with a Livox Horizon LiDAR. The system starts with the node "ScanRegistration", where feature points are extracted. Before the feature extraction, dynamic objects are removed from the raw point cloud, since in urban scenes there are usually many dynamic objects, which affect system robustness and precision. For the dynamic objects filter, we use a fast point cloud segmentation method. The Euclidean clustering is applied to group points into some clusters. The raw point cloud is divided into ground points, background points, and foreground points. Foreground points are considered as dynamic objects, which are excluded from the feature extraction process. Due to the dynamic objects filter, the system obtains high robustness in dynamic scenes. In open scenarios, usually few features can be extracted, leading to degeneracy on certain degrees of freedom. To tackle this problem, we developed a feature extraction process to make the distribution of feature points wide and uniform. A uniform and wide distribution provides more constraints on all 6 degrees of freedom, which is helpful for eliminating degeneracy. Besides, some irregular points also provide information in feature-less scenes. Therefore, we also extract irregular features as a class for the point cloud registration. Feature points are classified into three types, corner features, surface features, and irregular features, according to their local geometry properties.
We first extract points with large curvature and isolated points on each scan line as corner points. Then principal components analysis (PCA) is performed to classify surface features and irregular features, as shown in the following figure. For points at different distances, thresholds are set to different values, in order to make the distribution of points in space as uniform as possible.
In the node "PoseEstimation", the motion distortion of the point cloud is compensated using IMU preintegration or constant velocity model. Then the IMU initialization module is performed. If the initialization is successfully finished, the system will switch to the LIO mode. Otherwise, it runs in the LO mode and initializes IMU states. In the LO mode, we use a frame-to-model point cloud registration to estimate the sensor pose. Inspired by ORB-SLAM3, a maximum a posteriori (MAP) estimation method is adopted to jointly initialize IMU biases, velocities, and the gravity direction. This method doesn't need a careful initialization process. **The system can be initialized with an arbitrary motion.** This method takes into account sensor uncertainty, which obtains the optimum in the sense of maximum posterior probability. It achieves efficient, robust, and accurate performance. After the initialization, a tightly coupled sliding window based sensor fusion module is performed to estimate IMU poses, biases, and velocities within the sliding window. Simultaneously, an extra thread builds and maintains the global map in parallel. ## Prerequisites * [Ubuntu](http://ubuntu.com) (tested on 16.04 and 18.04) * [ROS](http://wiki.ros.org/ROS/Installation) (tested with Kinetic and Melodic) * [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page) * [Ceres Solver](http://ceres-solver.org/installation.html) * [PCL](http://www.pointclouds.org/downloads/linux.html) * [livox_ros_driver](https://github.com/Livox-SDK/livox_ros_driver) * Suitesparse ``` sudo apt-get install libsuitesparse-dev ``` ## Compilation ``` cd ~/catkin_ws/src git clone https://github.com/Livox-SDK/LIO-Livox cd .. 
catkin_make ``` ## Run with bag files: ### Run the launch file: ``` cd ~/catkin_ws source devel/setup.bash roslaunch lio_livox horizon.launch ``` #### Play your bag files: ``` rosbag play YOUR_ROSBAG.bag ``` ## Run with your device: ### Run your LiDAR with livox_ros_driver ``` cd ~/catkin_ws source devel/setup.bash roslaunch livox_ros_driver livox_lidar_msg.launch ``` ### Run the launch file: ``` cd ~/catkin_ws source devel/setup.bash roslaunch lio_livox horizon.launch ``` ## Notes: The current version of the system is only adopted for Livox Horizon. In theory, it should be able to run directly with a Livox Avia, but we haven't done enough tests. Besides, the system doesn't provide an interface for the Livox mid series. If you want to use mid-40 or mid-70, you can try [livox_mapping](https://github.com/Livox-SDK/livox_mapping). The topic of point cloud messages is /livox/lidar and its type is livox_ros_driver/CustomMsg. \ The topic of IMU messages is /livox/imu and its type is sensor_msgs/Imu. There are some parameters in launch files: * IMU_Mode: choose IMU information fusion strategy, there are 3 modes: - 0 - without using IMU information, pure LiDAR odometry, motion distortion is removed using a constant velocity model - 1 - using IMU preintegration to remove motion distortion - 2 - tightly coupling IMU and LiDAR information * Extrinsic_Tlb: extrinsic parameter between LiDAR and IMU, which uses SE3 form. If you want to use an external IMU, you need to calibrate your own sensor suite and change this parameter to your extrinsic parameter. 
There are also some parameters in the config file: * Use_seg: choose the segmentation mode for dynamic objects filtering, there are 2 modes: - 0 - without using the segmentation method, you can choose this mode if there is few dynamic objects in your data - 1 - using the segmentation method to remove dynamic objects ## Acknowledgements Thanks for following work: * [LOAM](https://github.com/cuitaixiang/LOAM_NOTED) (LOAM: Lidar Odometry and Mapping in Real-time) * [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono) (VINS-Mono: A Robust and Versatile Monocular Visual-Inertial State Estimator) * [LIO-mapping](https://github.com/hyye/lio-mapping) (Tightly Coupled 3D Lidar Inertial Odometry and Mapping) * [ORB-SLAM3](https://github.com/UZ-SLAMLab/ORB_SLAM3) (ORB-SLAM3: An Accurate Open-Source Library for Visual, Visual-Inertial and Multi-Map SLAM) * [LiLi-OM](https://github.com/KIT-ISAS/lili-om) (Towards High-Performance Solid-State-LiDAR-Inertial Odometry and Mapping) * [MSCKF_VIO](https://github.com/KumarRobotics/msckf_vio) (Robust Stereo Visual Inertial Odometry for Fast Autonomous Flight) * [horizon_highway_slam](https://github.com/Livox-SDK/horizon_highway_slam) * [livox_mapping](https://github.com/Livox-SDK/livox_mapping) * [livox_horizon_loam](https://github.com/Livox-SDK/livox_horizon_loam) ## Support You can get support from Livox with the following methods: * Send email to [cs@livoxtech.com](cs@livoxtech.com) with a clear description of your problem and your setup * Report issues on github ================================================ FILE: LIO-Livox/cmake/FindEigen3.cmake ================================================ # - Try to find Eigen3 lib # # This module supports requiring a minimum version, e.g. you can do # find_package(Eigen3 3.1.2) # to require version 3.1.2 or newer of Eigen3. 
# # Once done this will define # # EIGEN3_FOUND - system has eigen lib with correct version # EIGEN3_INCLUDE_DIR - the eigen include directory # EIGEN3_VERSION - eigen version # Copyright (c) 2006, 2007 Montel Laurent, # Copyright (c) 2008, 2009 Gael Guennebaud, # Copyright (c) 2009 Benoit Jacob # Redistribution and use is allowed according to the terms of the 2-clause BSD license. if(NOT Eigen3_FIND_VERSION) if(NOT Eigen3_FIND_VERSION_MAJOR) set(Eigen3_FIND_VERSION_MAJOR 2) endif(NOT Eigen3_FIND_VERSION_MAJOR) if(NOT Eigen3_FIND_VERSION_MINOR) set(Eigen3_FIND_VERSION_MINOR 91) endif(NOT Eigen3_FIND_VERSION_MINOR) if(NOT Eigen3_FIND_VERSION_PATCH) set(Eigen3_FIND_VERSION_PATCH 0) endif(NOT Eigen3_FIND_VERSION_PATCH) set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}") endif(NOT Eigen3_FIND_VERSION) macro(_eigen3_check_version) file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header) string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}") set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}") string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}") set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}") string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}") set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}") set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION}) if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) set(EIGEN3_VERSION_OK FALSE) else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) set(EIGEN3_VERSION_OK TRUE) endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) if(NOT EIGEN3_VERSION_OK) message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, " "but at least version ${Eigen3_FIND_VERSION} is required") endif(NOT 
EIGEN3_VERSION_OK) endmacro(_eigen3_check_version) if (EIGEN3_INCLUDE_DIR) # in cache already _eigen3_check_version() set(EIGEN3_FOUND ${EIGEN3_VERSION_OK}) else (EIGEN3_INCLUDE_DIR) # specific additional paths for some OS if (WIN32) set(EIGEN_ADDITIONAL_SEARCH_PATHS ${EIGEN_ADDITIONAL_SEARCH_PATHS} "C:/Program Files/Eigen/include" "C:/Program Files (x86)/Eigen/include") endif(WIN32) find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library PATHS ${CMAKE_INSTALL_PREFIX}/include ${EIGEN_ADDITIONAL_SEARCH_PATHS} ${KDE4_INCLUDE_DIR} PATH_SUFFIXES eigen3 eigen ) if(EIGEN3_INCLUDE_DIR) _eigen3_check_version() endif(EIGEN3_INCLUDE_DIR) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK) mark_as_advanced(EIGEN3_INCLUDE_DIR) endif(EIGEN3_INCLUDE_DIR) ================================================ FILE: LIO-Livox/cmake/FindSuiteSparse.cmake ================================================ # Ceres Solver - A fast non-linear least squares minimizer # Copyright 2015 Google Inc. All rights reserved. # http://ceres-solver.org/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of Google Inc. nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Author: alexs.mac@gmail.com (Alex Stewart) # # FindSuiteSparse.cmake - Find SuiteSparse libraries & dependencies. # # This module defines the following variables: # # SUITESPARSE_FOUND: TRUE iff SuiteSparse and all dependencies have been found. # SUITESPARSE_INCLUDE_DIRS: Include directories for all SuiteSparse components. # SUITESPARSE_LIBRARIES: Libraries for all SuiteSparse component libraries and # dependencies. # SUITESPARSE_VERSION: Extracted from UFconfig.h (<= v3) or # SuiteSparse_config.h (>= v4). # SUITESPARSE_MAIN_VERSION: Equal to 4 if SUITESPARSE_VERSION = 4.2.1 # SUITESPARSE_SUB_VERSION: Equal to 2 if SUITESPARSE_VERSION = 4.2.1 # SUITESPARSE_SUBSUB_VERSION: Equal to 1 if SUITESPARSE_VERSION = 4.2.1 # # SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION: TRUE iff running # on Ubuntu, SUITESPARSE_VERSION is 3.4.0 and found SuiteSparse is a system # install, in which case found version of SuiteSparse cannot be used to link # a shared library due to a bug (static linking is unaffected). 
# # The following variables control the behaviour of this module: # # SUITESPARSE_INCLUDE_DIR_HINTS: List of additional directories in which to # search for SuiteSparse includes, # e.g: /timbuktu/include. # SUITESPARSE_LIBRARY_DIR_HINTS: List of additional directories in which to # search for SuiteSparse libraries, # e.g: /timbuktu/lib. # # The following variables define the presence / includes & libraries for the # SuiteSparse components searched for, the SUITESPARSE_XX variables are the # union of the variables for all components. # # == Symmetric Approximate Minimum Degree (AMD) # AMD_FOUND # AMD_INCLUDE_DIR # AMD_LIBRARY # # == Constrained Approximate Minimum Degree (CAMD) # CAMD_FOUND # CAMD_INCLUDE_DIR # CAMD_LIBRARY # # == Column Approximate Minimum Degree (COLAMD) # COLAMD_FOUND # COLAMD_INCLUDE_DIR # COLAMD_LIBRARY # # Constrained Column Approximate Minimum Degree (CCOLAMD) # CCOLAMD_FOUND # CCOLAMD_INCLUDE_DIR # CCOLAMD_LIBRARY # # == Sparse Supernodal Cholesky Factorization and Update/Downdate (CHOLMOD) # CHOLMOD_FOUND # CHOLMOD_INCLUDE_DIR # CHOLMOD_LIBRARY # # == Multifrontal Sparse QR (SuiteSparseQR) # SUITESPARSEQR_FOUND # SUITESPARSEQR_INCLUDE_DIR # SUITESPARSEQR_LIBRARY # # == Common configuration for all but CSparse (SuiteSparse version >= 4). # SUITESPARSE_CONFIG_FOUND # SUITESPARSE_CONFIG_INCLUDE_DIR # SUITESPARSE_CONFIG_LIBRARY # # == Common configuration for all but CSparse (SuiteSparse version < 4). # UFCONFIG_FOUND # UFCONFIG_INCLUDE_DIR # # Optional SuiteSparse Dependencies: # # == Serial Graph Partitioning and Fill-reducing Matrix Ordering (METIS) # METIS_FOUND # METIS_LIBRARY # # == Intel Thread Building Blocks (TBB) # TBB_FOUND # TBB_LIBRARY # TBB_MALLOC_FOUND # TBB_MALLOC_LIBRARY # Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when # FindSuiteSparse was invoked. 
macro(SUITESPARSE_RESET_FIND_LIBRARY_PREFIX) if (MSVC) set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}") endif (MSVC) endmacro(SUITESPARSE_RESET_FIND_LIBRARY_PREFIX) # Called if we failed to find SuiteSparse or any of it's required dependencies, # unsets all public (designed to be used externally) variables and reports # error message at priority depending upon [REQUIRED/QUIET/] argument. macro(SUITESPARSE_REPORT_NOT_FOUND REASON_MSG) unset(SUITESPARSE_FOUND) unset(SUITESPARSE_INCLUDE_DIRS) unset(SUITESPARSE_LIBRARIES) unset(SUITESPARSE_VERSION) unset(SUITESPARSE_MAIN_VERSION) unset(SUITESPARSE_SUB_VERSION) unset(SUITESPARSE_SUBSUB_VERSION) # Do NOT unset SUITESPARSE_FOUND_REQUIRED_VARS here, as it is used by # FindPackageHandleStandardArgs() to generate the automatic error message on # failure which highlights which components are missing. suitesparse_reset_find_library_prefix() # Note _FIND_[REQUIRED/QUIETLY] variables defined by FindPackage() # use the camelcase library name, not uppercase. if (SuiteSparse_FIND_QUIETLY) message(STATUS "Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN}) elseif (SuiteSparse_FIND_REQUIRED) message(FATAL_ERROR "Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN}) else() # Neither QUIETLY nor REQUIRED, use no priority which emits a message # but continues configuration and allows generation. message("-- Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN}) endif (SuiteSparse_FIND_QUIETLY) # Do not call return(), s/t we keep processing if not called with REQUIRED # and report all missing components, rather than bailing after failing to find # the first. endmacro(SUITESPARSE_REPORT_NOT_FOUND) # Protect against any alternative find_package scripts for this library having # been called previously (in a client project) which set SUITESPARSE_FOUND, but # not the other variables we require / set here which could cause the search # logic here to fail. 
unset(SUITESPARSE_FOUND) # Handle possible presence of lib prefix for libraries on MSVC, see # also SUITESPARSE_RESET_FIND_LIBRARY_PREFIX(). if (MSVC) # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES # s/t we can set it back before returning. set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}") # The empty string in this list is important, it represents the case when # the libraries have no prefix (shared libraries / DLLs). set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}") endif (MSVC) # Specify search directories for include files and libraries (this is the union # of the search directories for all OSs). Search user-specified hint # directories first if supplied, and search user-installed locations first # so that we prefer user installs to system installs where both exist. list(APPEND SUITESPARSE_CHECK_INCLUDE_DIRS /opt/local/include /opt/local/include/ufsparse # Mac OS X /usr/local/homebrew/include # Mac OS X /usr/local/include /usr/include) list(APPEND SUITESPARSE_CHECK_LIBRARY_DIRS /opt/local/lib /opt/local/lib/ufsparse # Mac OS X /usr/local/homebrew/lib # Mac OS X /usr/local/lib /usr/lib) # Additional suffixes to try appending to each search path. list(APPEND SUITESPARSE_CHECK_PATH_SUFFIXES suitesparse) # Windows/Ubuntu # Wrappers to find_path/library that pass the SuiteSparse search hints/paths. 
# # suitesparse_find_component( [FILES name1 [name2 ...]] # [LIBRARIES name1 [name2 ...]] # [REQUIRED]) macro(suitesparse_find_component COMPONENT) include(CMakeParseArguments) set(OPTIONS REQUIRED) set(MULTI_VALUE_ARGS FILES LIBRARIES) cmake_parse_arguments(SUITESPARSE_FIND_${COMPONENT} "${OPTIONS}" "" "${MULTI_VALUE_ARGS}" ${ARGN}) if (SUITESPARSE_FIND_${COMPONENT}_REQUIRED) list(APPEND SUITESPARSE_FOUND_REQUIRED_VARS ${COMPONENT}_FOUND) endif() set(${COMPONENT}_FOUND TRUE) if (SUITESPARSE_FIND_${COMPONENT}_FILES) find_path(${COMPONENT}_INCLUDE_DIR NAMES ${SUITESPARSE_FIND_${COMPONENT}_FILES} HINTS ${SUITESPARSE_INCLUDE_DIR_HINTS} PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS} PATH_SUFFIXES ${SUITESPARSE_CHECK_PATH_SUFFIXES}) if (${COMPONENT}_INCLUDE_DIR) message(STATUS "Found ${COMPONENT} headers in: " "${${COMPONENT}_INCLUDE_DIR}") mark_as_advanced(${COMPONENT}_INCLUDE_DIR) else() # Specified headers not found. set(${COMPONENT}_FOUND FALSE) if (SUITESPARSE_FIND_${COMPONENT}_REQUIRED) suitesparse_report_not_found( "Did not find ${COMPONENT} header (required SuiteSparse component).") else() message(STATUS "Did not find ${COMPONENT} header (optional " "SuiteSparse component).") # Hide optional vars from CMake GUI even if not found. mark_as_advanced(${COMPONENT}_INCLUDE_DIR) endif() endif() endif() if (SUITESPARSE_FIND_${COMPONENT}_LIBRARIES) find_library(${COMPONENT}_LIBRARY NAMES ${SUITESPARSE_FIND_${COMPONENT}_LIBRARIES} HINTS ${SUITESPARSE_LIBRARY_DIR_HINTS} PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS} PATH_SUFFIXES ${SUITESPARSE_CHECK_PATH_SUFFIXES}) if (${COMPONENT}_LIBRARY) message(STATUS "Found ${COMPONENT} library: ${${COMPONENT}_LIBRARY}") mark_as_advanced(${COMPONENT}_LIBRARY) else () # Specified libraries not found. 
set(${COMPONENT}_FOUND FALSE) if (SUITESPARSE_FIND_${COMPONENT}_REQUIRED) suitesparse_report_not_found( "Did not find ${COMPONENT} library (required SuiteSparse component).") else() message(STATUS "Did not find ${COMPONENT} library (optional SuiteSparse " "dependency)") # Hide optional vars from CMake GUI even if not found. mark_as_advanced(${COMPONENT}_LIBRARY) endif() endif() endif() endmacro() # Given the number of components of SuiteSparse, and to ensure that the # automatic failure message generated by FindPackageHandleStandardArgs() # when not all required components are found is helpful, we maintain a list # of all variables that must be defined for SuiteSparse to be considered found. unset(SUITESPARSE_FOUND_REQUIRED_VARS) # BLAS. find_package(BLAS QUIET) if (NOT BLAS_FOUND) suitesparse_report_not_found( "Did not find BLAS library (required for SuiteSparse).") endif (NOT BLAS_FOUND) list(APPEND SUITESPARSE_FOUND_REQUIRED_VARS BLAS_FOUND) # LAPACK. find_package(LAPACK QUIET) if (NOT LAPACK_FOUND) suitesparse_report_not_found( "Did not find LAPACK library (required for SuiteSparse).") endif (NOT LAPACK_FOUND) list(APPEND SUITESPARSE_FOUND_REQUIRED_VARS LAPACK_FOUND) suitesparse_find_component(AMD REQUIRED FILES amd.h LIBRARIES amd) suitesparse_find_component(CAMD REQUIRED FILES camd.h LIBRARIES camd) suitesparse_find_component(COLAMD REQUIRED FILES colamd.h LIBRARIES colamd) suitesparse_find_component(CCOLAMD REQUIRED FILES ccolamd.h LIBRARIES ccolamd) suitesparse_find_component(CHOLMOD REQUIRED FILES cholmod.h LIBRARIES cholmod) suitesparse_find_component( SUITESPARSEQR REQUIRED FILES SuiteSparseQR.hpp LIBRARIES spqr) if (SUITESPARSEQR_FOUND) # SuiteSparseQR may be compiled with Intel Threading Building Blocks, # we assume that if TBB is installed, SuiteSparseQR was compiled with # support for it, this will do no harm if it wasn't. 
find_package(TBB QUIET)
  if (TBB_FOUND)
    message(STATUS "Found Intel Thread Building Blocks (TBB) library "
      "(${TBB_VERSION}) assuming SuiteSparseQR was compiled "
      "with TBB.")
    # Add the TBB libraries to the SuiteSparseQR libraries (the only
    # libraries to optionally depend on TBB).
    list(APPEND SUITESPARSEQR_LIBRARY ${TBB_LIBRARIES})
  else()
    message(STATUS "Did not find Intel TBB library, assuming SuiteSparseQR was "
      "not compiled with TBB.")
  endif()
endif()

# UFconfig / SuiteSparse_config.
#
# If SuiteSparse version is >= 4 then SuiteSparse_config is required.
# For SuiteSparse 3, UFconfig.h is required.
suitesparse_find_component(
  SUITESPARSE_CONFIG FILES SuiteSparse_config.h LIBRARIES suitesparseconfig)

if (SUITESPARSE_CONFIG_FOUND)
  # SuiteSparse_config (SuiteSparse version >= 4) requires librt library for
  # timing by default when compiled on Linux or Unix, but not on OSX (which
  # does not have librt).
  # Explicit parentheses added for clarity; identical to CMake's documented
  # NOT > AND > OR operator precedence.
  if (CMAKE_SYSTEM_NAME MATCHES "Linux" OR (UNIX AND NOT APPLE))
    suitesparse_find_component(LIBRT LIBRARIES rt)
    if (LIBRT_FOUND)
      message(STATUS "Adding librt: ${LIBRT_LIBRARY} to "
        "SuiteSparse_config libraries (required on Linux & Unix [not OSX] if "
        "SuiteSparse is compiled with timing).")
      list(APPEND SUITESPARSE_CONFIG_LIBRARY ${LIBRT_LIBRARY})
    else()
      message(STATUS "Could not find librt, but found SuiteSparse_config, "
        "assuming that SuiteSparse was compiled without timing.")
    endif()
  endif()
else()
  # Failed to find SuiteSparse_config (>= v4 installs), instead look for
  # UFconfig header which should be present in < v4 installs.
  suitesparse_find_component(UFCONFIG FILES UFconfig.h)
endif()

if (NOT SUITESPARSE_CONFIG_FOUND AND NOT UFCONFIG_FOUND)
  suitesparse_report_not_found(
    "Failed to find either: SuiteSparse_config header & library (should be "
    "present in all SuiteSparse >= v4 installs), or UFconfig header (should "
    "be present in all SuiteSparse < v4 installs).")
endif()

# Extract the SuiteSparse version from the appropriate header (UFconfig.h for
# <= v3, SuiteSparse_config.h for >= v4).
list(APPEND SUITESPARSE_FOUND_REQUIRED_VARS SUITESPARSE_VERSION)

if (UFCONFIG_FOUND)
  # SuiteSparse version <= 3.
  set(SUITESPARSE_VERSION_FILE ${UFCONFIG_INCLUDE_DIR}/UFconfig.h)
  if (NOT EXISTS "${SUITESPARSE_VERSION_FILE}")
    suitesparse_report_not_found(
      "Could not find file: ${SUITESPARSE_VERSION_FILE} containing version "
      "information for <= v3 SuiteSparse installs, but UFconfig was found "
      "(only present in <= v3 installs).")
  else()
    file(READ ${SUITESPARSE_VERSION_FILE} UFCONFIG_CONTENTS)

    string(REGEX MATCH "#define SUITESPARSE_MAIN_VERSION [0-9]+"
      SUITESPARSE_MAIN_VERSION "${UFCONFIG_CONTENTS}")
    string(REGEX REPLACE "#define SUITESPARSE_MAIN_VERSION ([0-9]+)" "\\1"
      SUITESPARSE_MAIN_VERSION "${SUITESPARSE_MAIN_VERSION}")

    string(REGEX MATCH "#define SUITESPARSE_SUB_VERSION [0-9]+"
      SUITESPARSE_SUB_VERSION "${UFCONFIG_CONTENTS}")
    string(REGEX REPLACE "#define SUITESPARSE_SUB_VERSION ([0-9]+)" "\\1"
      SUITESPARSE_SUB_VERSION "${SUITESPARSE_SUB_VERSION}")

    string(REGEX MATCH "#define SUITESPARSE_SUBSUB_VERSION [0-9]+"
      SUITESPARSE_SUBSUB_VERSION "${UFCONFIG_CONTENTS}")
    string(REGEX REPLACE "#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)" "\\1"
      SUITESPARSE_SUBSUB_VERSION "${SUITESPARSE_SUBSUB_VERSION}")

    # This is on a single line s/t CMake does not interpret it as a list of
    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.
    set(SUITESPARSE_VERSION "${SUITESPARSE_MAIN_VERSION}.${SUITESPARSE_SUB_VERSION}.${SUITESPARSE_SUBSUB_VERSION}")
  endif()
endif()

if (SUITESPARSE_CONFIG_FOUND)
  # SuiteSparse version >= 4.
  set(SUITESPARSE_VERSION_FILE
    ${SUITESPARSE_CONFIG_INCLUDE_DIR}/SuiteSparse_config.h)
  if (NOT EXISTS "${SUITESPARSE_VERSION_FILE}")
    suitesparse_report_not_found(
      "Could not find file: ${SUITESPARSE_VERSION_FILE} containing version "
      "information for >= v4 SuiteSparse installs, but SuiteSparse_config was "
      "found (only present in >= v4 installs).")
  else()
    file(READ ${SUITESPARSE_VERSION_FILE} SUITESPARSE_CONFIG_CONTENTS)

    string(REGEX MATCH "#define SUITESPARSE_MAIN_VERSION [0-9]+"
      SUITESPARSE_MAIN_VERSION "${SUITESPARSE_CONFIG_CONTENTS}")
    string(REGEX REPLACE "#define SUITESPARSE_MAIN_VERSION ([0-9]+)" "\\1"
      SUITESPARSE_MAIN_VERSION "${SUITESPARSE_MAIN_VERSION}")

    string(REGEX MATCH "#define SUITESPARSE_SUB_VERSION [0-9]+"
      SUITESPARSE_SUB_VERSION "${SUITESPARSE_CONFIG_CONTENTS}")
    string(REGEX REPLACE "#define SUITESPARSE_SUB_VERSION ([0-9]+)" "\\1"
      SUITESPARSE_SUB_VERSION "${SUITESPARSE_SUB_VERSION}")

    string(REGEX MATCH "#define SUITESPARSE_SUBSUB_VERSION [0-9]+"
      SUITESPARSE_SUBSUB_VERSION "${SUITESPARSE_CONFIG_CONTENTS}")
    string(REGEX REPLACE "#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)" "\\1"
      SUITESPARSE_SUBSUB_VERSION "${SUITESPARSE_SUBSUB_VERSION}")

    # This is on a single line s/t CMake does not interpret it as a list of
    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.
    set(SUITESPARSE_VERSION "${SUITESPARSE_MAIN_VERSION}.${SUITESPARSE_SUB_VERSION}.${SUITESPARSE_SUBSUB_VERSION}")
  endif()
endif()

# METIS (Optional dependency).
suitesparse_find_component(METIS LIBRARIES metis)

# Only mark SuiteSparse as found if all required components and dependencies
# have been found.
set(SUITESPARSE_FOUND TRUE)
foreach(REQUIRED_VAR ${SUITESPARSE_FOUND_REQUIRED_VARS})
  if (NOT ${REQUIRED_VAR})
    set(SUITESPARSE_FOUND FALSE)
  endif()
endforeach()

if (SUITESPARSE_FOUND)
  list(APPEND SUITESPARSE_INCLUDE_DIRS
    ${AMD_INCLUDE_DIR}
    ${CAMD_INCLUDE_DIR}
    ${COLAMD_INCLUDE_DIR}
    ${CCOLAMD_INCLUDE_DIR}
    ${CHOLMOD_INCLUDE_DIR}
    ${SUITESPARSEQR_INCLUDE_DIR})
  # Handle config separately, as otherwise at least one of them will be set
  # to NOTFOUND which would cause any check on SUITESPARSE_INCLUDE_DIRS to fail.
  if (SUITESPARSE_CONFIG_FOUND)
    list(APPEND SUITESPARSE_INCLUDE_DIRS
      ${SUITESPARSE_CONFIG_INCLUDE_DIR})
  endif()
  if (UFCONFIG_FOUND)
    list(APPEND SUITESPARSE_INCLUDE_DIRS
      ${UFCONFIG_INCLUDE_DIR})
  endif()
  # As SuiteSparse includes are often all in the same directory, remove any
  # repetitions.
  list(REMOVE_DUPLICATES SUITESPARSE_INCLUDE_DIRS)

  # Important: The ordering of these libraries is *NOT* arbitrary, as these
  # could potentially be static libraries their link ordering is important.
  list(APPEND SUITESPARSE_LIBRARIES
    ${SUITESPARSEQR_LIBRARY}
    ${CHOLMOD_LIBRARY}
    ${CCOLAMD_LIBRARY}
    ${CAMD_LIBRARY}
    ${COLAMD_LIBRARY}
    ${AMD_LIBRARY}
    ${LAPACK_LIBRARIES}
    ${BLAS_LIBRARIES})
  if (SUITESPARSE_CONFIG_FOUND)
    list(APPEND SUITESPARSE_LIBRARIES
      ${SUITESPARSE_CONFIG_LIBRARY})
  endif()
  if (METIS_FOUND)
    list(APPEND SUITESPARSE_LIBRARIES
      ${METIS_LIBRARY})
  endif()
endif()

# Determine if we are running on Ubuntu with the package install of SuiteSparse
# which is broken and does not support linking a shared library.
set(SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION FALSE)
if (CMAKE_SYSTEM_NAME MATCHES "Linux" AND
    SUITESPARSE_VERSION VERSION_EQUAL 3.4.0)
  find_program(LSB_RELEASE_EXECUTABLE lsb_release)
  if (LSB_RELEASE_EXECUTABLE)
    # Any even moderately recent Ubuntu release (likely to be affected by
    # this bug) should have lsb_release, if it isn't present we are likely
    # on a different Linux distribution (should be fine).
    execute_process(COMMAND ${LSB_RELEASE_EXECUTABLE} -si
      OUTPUT_VARIABLE LSB_DISTRIBUTOR_ID
      OUTPUT_STRIP_TRAILING_WHITESPACE)
    if (LSB_DISTRIBUTOR_ID MATCHES "Ubuntu" AND
        SUITESPARSE_LIBRARIES MATCHES "/usr/lib/libamd")
      # We are on Ubuntu, and the SuiteSparse version matches the broken
      # system install version and is a system install.
      set(SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION TRUE)
      message(STATUS "Found system install of SuiteSparse "
        "${SUITESPARSE_VERSION} running on Ubuntu, which has a known bug "
        "preventing linking of shared libraries (static linking unaffected).")
    endif()
  endif()
endif()

suitesparse_reset_find_library_prefix()

# Handle REQUIRED and QUIET arguments to FIND_PACKAGE
include(FindPackageHandleStandardArgs)
if (SUITESPARSE_FOUND)
  find_package_handle_standard_args(SuiteSparse
    REQUIRED_VARS ${SUITESPARSE_FOUND_REQUIRED_VARS}
    VERSION_VAR SUITESPARSE_VERSION
    FAIL_MESSAGE "Failed to find some/all required components of SuiteSparse.")
else()
  # Do not pass VERSION_VAR to FindPackageHandleStandardArgs() if we failed to
  # find SuiteSparse to avoid a confusing autogenerated failure message
  # that states 'not found (missing: FOO) (found version: x.y.z)'.
find_package_handle_standard_args(SuiteSparse REQUIRED_VARS ${SUITESPARSE_FOUND_REQUIRED_VARS} FAIL_MESSAGE "Failed to find some/all required components of SuiteSparse.") endif (SUITESPARSE_FOUND) ================================================ FILE: LIO-Livox/config/horizon_config.yaml ================================================ %YAML:1.0 # switches Lidar_Type: 0 # 0-horizon Used_Line: 6 # lines used for lio, set to 1~6 Feature_Mode: 0 # 0(false) or 1(true) NumCurvSize: 2 DistanceFaraway: 100 # [m] DistanceFaraway far NumFlat: 3 # nums of one part's flat feature PartNum: 150 # nums of one scan's parts FlatThreshold: 0.02 # cloud curvature threshold of flat feature BreakCornerDis: 1 # break distance of break points LidarNearestDis: 1.0 # if(depth < LidarNearestDis) do not use this point KdTreeCornerOutlierDis: 0.2 # corner filter threshold Use_seg: 1 # use segment algorithm map_skip_frame: 2 ================================================ FILE: LIO-Livox/include/Estimator/Estimator.h ================================================ #ifndef LIO_LIVOX_ESTIMATOR_H #define LIO_LIVOX_ESTIMATOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "MapManager/Map_Manager.h" #include "utils/ceresfunc.h" #include "utils/pcl_utils.hpp" #include "IMUIntegrator/IMUIntegrator.h" #include class Estimator{ typedef pcl::PointXYZINormal PointType; public: /** \brief slide window size */ static const int SLIDEWINDOWSIZE = 10; /** \brief lidar frame struct */ struct LidarFrame{ pcl::PointCloud::Ptr laserCloud; IMUIntegrator imuIntegrator; Eigen::Vector3d P; Eigen::Vector3d V; Eigen::Quaterniond Q; Eigen::Vector3d bg; Eigen::Vector3d ba; Eigen::VectorXf ground_plane_coeff; double timeStamp; LidarFrame(){ P.setZero(); V.setZero(); Q.setIdentity(); bg.setZero(); ba.setZero(); timeStamp = 0; } }; /** \brief point to line feature */ struct FeatureLine{ Eigen::Vector3d pointOri; 
Eigen::Vector3d lineP1; Eigen::Vector3d lineP2; double error; bool valid; FeatureLine(Eigen::Vector3d po, Eigen::Vector3d p1, Eigen::Vector3d p2) :pointOri(std::move(po)), lineP1(std::move(p1)), lineP2(std::move(p2)){ valid = false; error = 0; } double ComputeError(const Eigen::Matrix4d& pose){ Eigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1); double l12 = std::sqrt((lineP1(0) - lineP2(0))*(lineP1(0) - lineP2(0)) + (lineP1(1) - lineP2(1))* (lineP1(1) - lineP2(1)) + (lineP1(2) - lineP2(2))*(lineP1(2) - lineP2(2))); double a012 = std::sqrt( ((P_to_Map(0) - lineP1(0)) * (P_to_Map(1) - lineP2(1)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(1) - lineP1(1))) * ((P_to_Map(0) - lineP1(0)) * (P_to_Map(1) - lineP2(1)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(1) - lineP1(1))) + ((P_to_Map(0) - lineP1(0)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(2) - lineP1(2))) * ((P_to_Map(0) - lineP1(0)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(2) - lineP1(2))) + ((P_to_Map(1) - lineP1(1)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(1) - lineP2(1)) * (P_to_Map(2) - lineP1(2))) * ((P_to_Map(1) - lineP1(1)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(1) - lineP2(1)) * (P_to_Map(2) - lineP1(2)))); error = a012 / l12; } }; /** \brief point to plan feature */ struct FeaturePlan{ Eigen::Vector3d pointOri; double pa; double pb; double pc; double pd; double error; bool valid; FeaturePlan(const Eigen::Vector3d& po, const double& pa_, const double& pb_, const double& pc_, const double& pd_) :pointOri(po), pa(pa_), pb(pb_), pc(pc_), pd(pd_){ valid = false; error = 0; } double ComputeError(const Eigen::Matrix4d& pose){ Eigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1); error = pa * P_to_Map(0) + pb * P_to_Map(1) + pc * P_to_Map(2) + pd; } }; /** \brief point to plan feature */ struct FeaturePlanVec{ Eigen::Vector3d pointOri; Eigen::Vector3d pointProj; Eigen::Matrix3d sqrt_info; double 
error; bool valid; FeaturePlanVec(const Eigen::Vector3d& po, const Eigen::Vector3d& p_proj, Eigen::Matrix3d sqrt_info_) :pointOri(po), pointProj(p_proj), sqrt_info(sqrt_info_) { valid = false; error = 0; } double ComputeError(const Eigen::Matrix4d& pose){ Eigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1); error = (P_to_Map - pointProj).norm(); } }; /** \brief non feature */ struct FeatureNon{ Eigen::Vector3d pointOri; double pa; double pb; double pc; double pd; double error; bool valid; FeatureNon(const Eigen::Vector3d& po, const double& pa_, const double& pb_, const double& pc_, const double& pd_) :pointOri(po), pa(pa_), pb(pb_), pc(pc_), pd(pd_){ valid = false; error = 0; } double ComputeError(const Eigen::Matrix4d& pose){ Eigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1); error = pa * P_to_Map(0) + pb * P_to_Map(1) + pc * P_to_Map(2) + pd; } }; public: /** \brief constructor of Estimator */ Estimator(const float& filter_corner, const float& filter_surf); ~Estimator(); /** \brief Open a independent thread to increment MAP cloud */ [[noreturn]] void threadMapIncrement(); /** \brief construct sharp feature Ceres Costfunctions * \param[in] edges: store costfunctions * \param[in] m4d: lidar pose, represented by matrix 4X4 */ void processPointToLine(std::vector& edges, std::vector& vLineFeatures, const pcl::PointCloud::Ptr& laserCloudCorner, const pcl::PointCloud::Ptr& laserCloudCornerMap, const pcl::KdTreeFLANN::Ptr& kdtree, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d); /** \brief construct Plan feature Ceres Costfunctions * \param[in] edges: store costfunctions * \param[in] m4d: lidar pose, represented by matrix 4X4 */ void processPointToPlan(std::vector& edges, std::vector& vPlanFeatures, const pcl::PointCloud::Ptr& laserCloudSurf, const pcl::PointCloud::Ptr& laserCloudSurfMap, const pcl::KdTreeFLANN::Ptr& kdtree, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d); 
void processPointToPlanVec(std::vector& edges, std::vector& vPlanFeatures, const pcl::PointCloud::Ptr& laserCloudSurf, const pcl::PointCloud::Ptr& laserCloudSurfMap, const pcl::KdTreeFLANN::Ptr& kdtree, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d); void processNonFeatureICP(std::vector& edges, std::vector& vNonFeatures, const pcl::PointCloud::Ptr& laserCloudNonFeature, const pcl::PointCloud::Ptr& laserCloudNonFeatureLocal, const pcl::KdTreeFLANN::Ptr& kdtreeLocal, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d); /** \brief Transform Lidar Pose in slidewindow to double array * \param[in] lidarFrameList: Lidar Poses in slidewindow */ void vector2double(const std::list& lidarFrameList); /** \brief Transform double array to Lidar Pose in slidewindow * \param[in] lidarFrameList: Lidar Poses in slidewindow */ void double2vector(std::list& lidarFrameList); /** \brief estimate lidar pose by matching current lidar cloud with map cloud and tightly coupled IMU message * \param[in] lidarFrameList: multi-frames of lidar cloud and lidar pose * \param[in] exTlb: extrinsic matrix between lidar and IMU * \param[in] gravity: gravity vector */ void EstimateLidarPose(std::list& lidarFrameList, const Eigen::Matrix4d& exTlb, const Eigen::Vector3d& gravity, nav_msgs::Odometry& debugInfo); void Estimate(std::list& lidarFrameList, const Eigen::Matrix4d& exTlb, const Eigen::Vector3d& gravity); pcl::PointCloud::Ptr get_corner_map(){ return map_manager->get_corner_map(); } pcl::PointCloud::Ptr get_surf_map(){ return map_manager->get_surf_map(); } pcl::PointCloud::Ptr get_nonfeature_map(){ return map_manager->get_nonfeature_map(); } pcl::PointCloud::Ptr get_init_ground_cloud(){ return initGroundCloud; } void MapIncrementLocal(const pcl::PointCloud::Ptr& laserCloudCornerStack, const pcl::PointCloud::Ptr& laserCloudSurfStack, const pcl::PointCloud::Ptr& laserCloudNonFeatureStack, const Eigen::Matrix4d& transformTobeMapped); private: /** \brief store map points */ 
MAP_MANAGER* map_manager; int init_ground_count; Eigen::VectorXf init_ground_plane_coeff; double para_PR[SLIDEWINDOWSIZE][6]; double para_VBias[SLIDEWINDOWSIZE][9]; MarginalizationInfo *last_marginalization_info = nullptr; std::vector last_marginalization_parameter_blocks; std::vector::Ptr> laserCloudCornerLast; std::vector::Ptr> laserCloudSurfLast; std::vector::Ptr> laserCloudNonFeatureLast; pcl::PointCloud::Ptr laserCloudCornerFromLocal; pcl::PointCloud::Ptr laserCloudSurfFromLocal; pcl::PointCloud::Ptr laserCloudNonFeatureFromLocal; pcl::PointCloud::Ptr laserCloudCornerForMap; pcl::PointCloud::Ptr laserCloudSurfForMap; pcl::PointCloud::Ptr laserCloudNonFeatureForMap; pcl::PointCloud::Ptr initGroundCloud; Eigen::Matrix4d transformForMap; std::vector::Ptr> laserCloudCornerStack; std::vector::Ptr> laserCloudSurfStack; std::vector::Ptr> laserCloudNonFeatureStack; pcl::KdTreeFLANN::Ptr kdtreeCornerFromLocal; pcl::KdTreeFLANN::Ptr kdtreeSurfFromLocal; pcl::KdTreeFLANN::Ptr kdtreeNonFeatureFromLocal; pcl::VoxelGrid downSizeFilterCorner; pcl::VoxelGrid downSizeFilterSurf; pcl::VoxelGrid downSizeFilterNonFeature; std::mutex mtx_Map; std::thread threadMap; pcl::KdTreeFLANN CornerKdMap[10000]; pcl::KdTreeFLANN SurfKdMap[10000]; pcl::KdTreeFLANN NonFeatureKdMap[10000]; pcl::PointCloud GlobalSurfMap[10000]; pcl::PointCloud GlobalCornerMap[10000]; pcl::PointCloud GlobalNonFeatureMap[10000]; int laserCenWidth_last = 10; int laserCenHeight_last = 5; int laserCenDepth_last = 10; static const int localMapWindowSize = 50; int localMapID = 0; pcl::PointCloud::Ptr localCornerMap[localMapWindowSize]; pcl::PointCloud::Ptr localSurfMap[localMapWindowSize]; pcl::PointCloud::Ptr localNonFeatureMap[localMapWindowSize]; int map_update_ID = 0; int map_skip_frame = 2; //every map_skip_frame frame update map double plan_weight_tan = 0.0; double thres_dist = 1.0; }; #endif //LIO_LIVOX_ESTIMATOR_H ================================================ FILE: 
LIO-Livox/include/IMUIntegrator/IMUIntegrator.h ================================================ #ifndef LIO_LIVOX_IMUINTEGRATOR_H #define LIO_LIVOX_IMUINTEGRATOR_H #include #include #include #include #include #include #include "sophus/so3.hpp" class IMUIntegrator{ public: IMUIntegrator(); /** \brief constructor of IMUIntegrator * \param[in] vIMU: IMU messages need to be integrated */ IMUIntegrator(std::vector vIMU); void Reset(); /** \brief get delta quaternion after IMU integration */ const Eigen::Quaterniond & GetDeltaQ() const; /** \brief get delta displacement after IMU integration */ const Eigen::Vector3d & GetDeltaP() const; /** \brief get delta velocity after IMU integration */ const Eigen::Vector3d & GetDeltaV() const; /** \brief get time span after IMU integration */ const double & GetDeltaTime() const; /** \brief get linearized bias gyr */ const Eigen::Vector3d & GetBiasGyr() const; /** \brief get linearized bias acc */ const Eigen::Vector3d& GetBiasAcc() const; /** \brief get covariance matrix after IMU integration */ const Eigen::Matrix& GetCovariance(); /** \brief get jacobian matrix after IMU integration */ const Eigen::Matrix & GetJacobian() const; /** \brief get average acceleration of IMU messages for initialization */ Eigen::Vector3d GetAverageAcc(); /** \brief push IMU message to the IMU buffer vimuMsg * \param[in] imu: the IMU message need to be pushed */ void PushIMUMsg(const sensor_msgs::ImuConstPtr& imu); void PushIMUMsg(const std::vector& vimu); const std::vector & GetIMUMsg() const; /** \brief only integrate gyro information of each IMU message stored in vimuMsg * \param[in] lastTime: the left time boundary of vimuMsg */ void GyroIntegration(double lastTime); /** \brief pre-integration of IMU messages stored in vimuMsg */ void PreIntegration(double lastTime, const Eigen::Vector3d& bg, const Eigen::Vector3d& ba); /** \brief normal integration of IMU messages stored in vimuMsg */ void Integration(){} public: const double acc_n = 0.08; const 
double gyr_n = 0.004; const double acc_w = 2.0e-4; const double gyr_w = 2.0e-5; constexpr static const double lidar_m = 6e-3; constexpr static const double gnorm = 9.805; enum JacobianOrder { O_P = 0, O_R = 3, O_V = 6, O_BG = 9, O_BA = 12 }; private: std::vector vimuMsg; Eigen::Quaterniond dq; Eigen::Vector3d dp; Eigen::Vector3d dv; Eigen::Vector3d linearized_bg; Eigen::Vector3d linearized_ba; Eigen::Matrix covariance; Eigen::Matrix jacobian; Eigen::Matrix noise; double dtime; }; #endif //LIO_LIVOX_IMUINTEGRATOR_H ================================================ FILE: LIO-Livox/include/LidarFeatureExtractor/LidarFeatureExtractor.h ================================================ #ifndef LIO_LIVOX_LIDARFEATUREEXTRACTOR_H #define LIO_LIVOX_LIDARFEATUREEXTRACTOR_H #include #include #include #include #include #include #include #include #include "opencv2/core.hpp" #include "segment/segment.hpp" class LidarFeatureExtractor{ typedef pcl::PointXYZINormal PointType; public: /** \brief constructor of LidarFeatureExtractor * \param[in] n_scans: lines used to extract lidar features */ LidarFeatureExtractor(int n_scans,int NumCurvSize,float DistanceFaraway,int NumFlat,int PartNum,float FlatThreshold, float BreakCornerDis,float LidarNearestDis,float KdTreeCornerOutlierDis); /** \brief transform float to int */ static uint32_t _float_as_int(float f){ union{uint32_t i; float f;} conv{}; conv.f = f; return conv.i; } /** \brief transform int to float */ static float _int_as_float(uint32_t i){ union{float f; uint32_t i;} conv{}; conv.i = i; return conv.f; } /** \brief Determine whether the point_list is flat * \param[in] point_list: points need to be judged * \param[in] plane_threshold */ bool plane_judge(const std::vector& point_list,const int plane_threshold); /** \brief Detect lidar feature points * \param[in] cloud: original lidar cloud need to be detected * \param[in] pointsLessSharp: less sharp index of cloud * \param[in] pointsLessFlat: less flat index of cloud */ void 
detectFeaturePoint(pcl::PointCloud::Ptr& cloud, std::vector& pointsLessSharp, std::vector& pointsLessFlat); void detectFeaturePoint2(pcl::PointCloud::Ptr& cloud, pcl::PointCloud::Ptr& pointsLessFlat, pcl::PointCloud::Ptr& pointsNonFeature); void detectFeaturePoint3(pcl::PointCloud::Ptr& cloud, std::vector& pointsLessSharp); void FeatureExtract_with_segment(const livox_ros_driver::CustomMsgConstPtr &msg, pcl::PointCloud::Ptr& laserCloud, pcl::PointCloud::Ptr& laserConerFeature, pcl::PointCloud::Ptr& laserSurfFeature, pcl::PointCloud::Ptr& laserNonFeature, sensor_msgs::PointCloud2 &msg2, int Used_Line = 1); /** \brief Detect lidar feature points of CustomMsg * \param[in] msg: original CustomMsg need to be detected * \param[in] laserCloud: transform CustomMsg to pcl point cloud format * \param[in] laserConerFeature: less Coner features extracted from laserCloud * \param[in] laserSurfFeature: less Surf features extracted from laserCloud */ void FeatureExtract(const livox_ros_driver::CustomMsgConstPtr &msg, pcl::PointCloud::Ptr& laserCloud, pcl::PointCloud::Ptr& laserConerFeature, pcl::PointCloud::Ptr& laserSurfFeature, int Used_Line = 1); private: /** \brief lines used to extract lidar features */ const int N_SCANS; /** \brief store original points of each line */ std::vector::Ptr> vlines; /** \brief store corner feature index of each line */ std::vector> vcorner; /** \brief store surf feature index of each line */ std::vector> vsurf; int thNumCurvSize; float thDistanceFaraway; int thNumFlat; int thPartNum; float thFlatThreshold; float thBreakCornerDis; float thLidarNearestDis; }; #endif //LIO_LIVOX_LIDARFEATUREEXTRACTOR_H ================================================ FILE: LIO-Livox/include/MapManager/Map_Manager.h ================================================ #ifndef LIO_LIVOX_MAP_MANAGER_H #define LIO_LIVOX_MAP_MANAGER_H #include #include #include #include #include class MAP_MANAGER{ typedef pcl::PointXYZINormal PointType; public: std::mutex mtx_MapManager; 
/** \brief constructor of MAP_MANAGER */ MAP_MANAGER(const float& filter_corner, const float& filter_surf); static size_t ToIndex(int i, int j, int k); /** \brief transform float to int */ static uint32_t _float_as_int(float f){ union{uint32_t i; float f;} conv{}; conv.f = f; return conv.i; } /** \brief transform int to float */ static float _int_as_float(uint32_t i){ union{float f; uint32_t i;} conv{}; conv.i = i; return conv.f; } /** \brief transform point pi to the MAP coordinate * \param[in] pi: point to be transformed * \param[in] po: point after transfomation * \param[in] _transformTobeMapped: transform matrix between pi and po */ static void pointAssociateToMap(PointType const * const pi, PointType * const po, const Eigen::Matrix4d& _transformTobeMapped); void featureAssociateToMap(const pcl::PointCloud::Ptr& laserCloudCorner, const pcl::PointCloud::Ptr& laserCloudSurf, const pcl::PointCloud::Ptr& laserCloudNonFeature, const pcl::PointCloud::Ptr& laserCloudCornerToMap, const pcl::PointCloud::Ptr& laserCloudSurfToMap, const pcl::PointCloud::Ptr& laserCloudNonFeatureToMap, const Eigen::Matrix4d& transformTobeMapped); /** \brief add new lidar points to the map * \param[in] laserCloudCornerStack: coner feature points that need to be added to map * \param[in] laserCloudSurfStack: surf feature points that need to be added to map * \param[in] transformTobeMapped: transform matrix of the lidar pose */ void MapIncrement(const pcl::PointCloud::Ptr& laserCloudCornerStack, const pcl::PointCloud::Ptr& laserCloudSurfStack, const pcl::PointCloud::Ptr& laserCloudNonFeatureStack, const Eigen::Matrix4d& transformTobeMapped); /** \brief retrieve map points according to the lidar pose * \param[in] laserCloudCornerFromMap: store coner feature points retrieved from map * \param[in] laserCloudSurfFromMap: tore surf feature points retrieved from map * \param[in] transformTobeMapped: transform matrix of the lidar pose */ void MapMove(const Eigen::Matrix4d& transformTobeMapped); 
size_t FindUsedCornerMap(const PointType *p,int a,int b,int c); size_t FindUsedSurfMap(const PointType *p,int a,int b,int c); size_t FindUsedNonFeatureMap(const PointType *p,int a,int b,int c); pcl::KdTreeFLANN getCornerKdMap(int i){ return CornerKdMap_last[i]; } pcl::KdTreeFLANN getSurfKdMap(int i){ return SurfKdMap_last[i]; } pcl::KdTreeFLANN getNonFeatureKdMap(int i){ return NonFeatureKdMap_last[i]; } pcl::PointCloud::Ptr get_corner_map(){ return laserCloudCornerFromMap; } pcl::PointCloud::Ptr get_surf_map(){ return laserCloudSurfFromMap; } pcl::PointCloud::Ptr get_nonfeature_map(){ return laserCloudNonFeatureFromMap; } int get_map_current_pos(){ return currentUpdatePos; } int get_laserCloudCenWidth_last(){ return laserCloudCenWidth_last; } int get_laserCloudCenHeight_last(){ return laserCloudCenHeight_last; } int get_laserCloudCenDepth_last(){ return laserCloudCenDepth_last; } pcl::PointCloud laserCloudSurf_for_match[4851]; pcl::PointCloud laserCloudCorner_for_match[4851]; pcl::PointCloud laserCloudNonFeature_for_match[4851]; private: int laserCloudCenWidth = 10; int laserCloudCenHeight = 5; int laserCloudCenDepth = 10; int laserCloudCenWidth_last = 10; int laserCloudCenHeight_last = 5; int laserCloudCenDepth_last = 10; static const int laserCloudWidth = 21; static const int laserCloudHeight = 11; static const int laserCloudDepth = 21; static const int laserCloudNum = laserCloudWidth * laserCloudHeight * laserCloudDepth;//4851 pcl::PointCloud::Ptr laserCloudCornerArray[laserCloudNum]; pcl::PointCloud::Ptr laserCloudSurfArray[laserCloudNum]; pcl::PointCloud::Ptr laserCloudNonFeatureArray[laserCloudNum]; pcl::PointCloud::Ptr laserCloudCornerArrayStack[laserCloudNum]; pcl::PointCloud::Ptr laserCloudSurfArrayStack[laserCloudNum]; pcl::PointCloud::Ptr laserCloudNonFeatureArrayStack[laserCloudNum]; pcl::VoxelGrid downSizeFilterCorner; pcl::VoxelGrid downSizeFilterSurf; pcl::VoxelGrid downSizeFilterNonFeature; pcl::PointCloud::Ptr laserCloudCornerFromMap; 
pcl::PointCloud::Ptr laserCloudSurfFromMap; pcl::PointCloud::Ptr laserCloudNonFeatureFromMap; pcl::KdTreeFLANN::Ptr laserCloudCornerKdMap[laserCloudNum]; pcl::KdTreeFLANN::Ptr laserCloudSurfKdMap[laserCloudNum]; pcl::KdTreeFLANN::Ptr laserCloudNonFeatureKdMap[laserCloudNum]; pcl::KdTreeFLANN CornerKdMap_copy[laserCloudNum]; pcl::KdTreeFLANN SurfKdMap_copy[laserCloudNum]; pcl::KdTreeFLANN NonFeatureKdMap_copy[laserCloudNum]; pcl::KdTreeFLANN CornerKdMap_last[laserCloudNum]; pcl::KdTreeFLANN SurfKdMap_last[laserCloudNum]; pcl::KdTreeFLANN NonFeatureKdMap_last[laserCloudNum]; static const int localMapWindowSize = 60; pcl::PointCloud::Ptr localCornerMap[localMapWindowSize]; pcl::PointCloud::Ptr localSurfMap[localMapWindowSize]; pcl::PointCloud::Ptr localNonFeatureMap[localMapWindowSize]; int localMapID = 0; int currentUpdatePos = 0; int estimatorPos = 0; }; #endif //LIO_LIVOX_MAP_MANAGER_H ================================================ FILE: LIO-Livox/include/segment/pointsCorrect.hpp ================================================ #ifndef _COMMON_HPP #define _CONNON_HPP #include #include #include #include #include #include #include #include #include #include #include #include using namespace std; typedef struct { Eigen::Matrix3f eigenVectorsPCA; Eigen::Vector3f eigenValuesPCA; std::vector neibors; } SNeiborPCA_cor; int GetNeiborPCA_cor(SNeiborPCA_cor &npca, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN kdtree, pcl::PointXYZ searchPoint, float fSearchRadius); int FilterGndForPos_cor(float* outPoints,float*inPoints,int inNum); int CalGndPos_cor(float *gnd, float *fPoints,int pointNum,float fSearchRadius); int GetRTMatrix_cor(float *RTM, float *v0, float *v1); int CorrectPoints_cor(float *fPoints,int pointNum,float *gndPos); int GetGndPos(float *pos, float *fPoints,int pointNum); #endif ================================================ FILE: LIO-Livox/include/segment/segment.hpp ================================================ #ifndef _SEGMENT_HPP #define _SEGMENT_HPP 
#include #include #include #include #include #include #include #include #include #include #include #include #include "pointsCorrect.hpp" using namespace std; #define SELF_CALI_FRAMES 20 #define GND_IMG_NX 150 #define GND_IMG_NY 400 #define GND_IMG_DX 0.2 #define GND_IMG_DY 0.2 #define GND_IMG_OFFX 40 #define GND_IMG_OFFY 40 #define GND_IMG_NX1 24 #define GND_IMG_NY1 20 #define GND_IMG_DX1 4 #define GND_IMG_DY1 4 #define GND_IMG_OFFX1 40 #define GND_IMG_OFFY1 40 #define DN_SAMPLE_IMG_NX 600 //(GND_IMG_NX) #define DN_SAMPLE_IMG_NY 200 //(GND_IMG_NY) #define DN_SAMPLE_IMG_NZ 100 #define DN_SAMPLE_IMG_DX 0.4 //(GND_IMG_DX) #define DN_SAMPLE_IMG_DY 0.4 //(GND_IMG_DY) #define DN_SAMPLE_IMG_DZ 0.2 #define DN_SAMPLE_IMG_OFFX 40 //(GND_IMG_OFFX) #define DN_SAMPLE_IMG_OFFY 40 //(GND_IMG_OFFY) #define DN_SAMPLE_IMG_OFFZ 2.5//2.5 #define FREE_ANG_NUM 360 #define FREE_PI (3.14159265) #define FREE_DELTA_ANG (FREE_PI*2/FREE_ANG_NUM) typedef struct { Eigen::Matrix3f eigenVectorsPCA; Eigen::Vector3f eigenValuesPCA; std::vector neibors; } SNeiborPCA; typedef struct { // basic int pnum; float xmin; float xmax; float ymin; float ymax; float zmin; float zmax; float zmean; // pca float d0[3]; float d1[3]; float center[3]; float obb[8]; int cls;//类别 } SClusterFeature; int FilterGndForPos(float* outPoints,float*inPoints,int inNum); int CalNomarls(float *nvects, float *fPoints,int pointNum,float fSearchRadius); int CalGndPos(float *gnd, float *fPoints,int pointNum,float fSearchRadius); int GetNeiborPCA(SNeiborPCA &npca, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN kdtree, pcl::PointXYZ searchPoint, float fSearchRadius); int CorrectPoints(float *fPoints,int pointNum,float *gndPos); // 地面上物体分割 int AbvGndSeg(int *pLabel, float *fPoints, int pointNum); int SegBG(int *pLabel, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius); SClusterFeature FindACluster(int *pLabel, int seedId, int labelId, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius, 
float thrHeight); int SegObjects(int *pLabel, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius); int CalFreeRegion(float *pFreeDis, float *fPoints,int *pLabel,int pointNum); int FreeSeg(float *fPoints,int *pLabel,int pointNum); int CompleteObjects(int *pLabel, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius); int ExpandObjects(int *pLabel, float* fPoints, int pointNum, float fSearchRadius); int ExpandBG(int *pLabel, float* fPoints, int pointNum, float fSearchRadius); // 地面分割 int GndSeg(int* pLabel,float *fPoints,int pointNum,float fSearchRadius); class PCSeg { public: PCSeg(); // functions int DoSeg(int *pLabel, float* fPoints1, int pointNum); int GetMainVectors(float*fPoints, int* pLabel, int pointNum); int EncodeFeatures(float *pFeas); int DoBoundaryDetection(float* fPoints1,int *pLabel1,int pointNum); int DoTrafficLineDet(float *fPoints1,int *pLabel1,int pointNum); int CorrectPoints(float *fPoints,int pointNum,float *gndPos); float *PrePoints; int numPrePoints = 0; float gnd_pos[100*6]; int gnd_vet_len = 0; int laneType=0; float lanePosition[2] = {0}; // vars unsigned char *pVImg; float gndPos[6]; int posFlag; // cluster features float pVectors[256*3]; float pCenters[256*3];//重心 int pnum[256]; int objClass[256]; float zs[256]; float pOBBs[256*8]; float CBBox[256*7];//(a,x,y,z,l,w,h) int clusterNum; float *corPoints; int corNum; ~PCSeg(); }; SClusterFeature CalBBox(float *fPoints,int pointNum); SClusterFeature CalOBB(float *fPoints,int pointNum); #endif ================================================ FILE: LIO-Livox/include/sophus/average.hpp ================================================ /// @file /// Calculation of biinvariant means. 
#ifndef SOPHUS_AVERAGE_HPP #define SOPHUS_AVERAGE_HPP #include "common.hpp" #include "rxso2.hpp" #include "rxso3.hpp" #include "se2.hpp" #include "se3.hpp" #include "sim2.hpp" #include "sim3.hpp" #include "so2.hpp" #include "so3.hpp" namespace Sophus { /// Calculates mean iteratively. /// /// Returns ``nullopt`` if it does not converge. /// template optional iterativeMean( SequenceContainer const& foo_Ts_bar, int max_num_iterations) { size_t N = foo_Ts_bar.size(); SOPHUS_ENSURE(N >= 1, "N must be >= 1."); using Group = typename SequenceContainer::value_type; using Scalar = typename Group::Scalar; using Tangent = typename Group::Tangent; // This implements the algorithm in the beginning of Sec. 4.2 in // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf. Group foo_T_average = foo_Ts_bar.front(); Scalar w = Scalar(1. / N); for (int i = 0; i < max_num_iterations; ++i) { Tangent average; setToZero(average); for (Group const& foo_T_bar : foo_Ts_bar) { average += w * (foo_T_average.inverse() * foo_T_bar).log(); } Group foo_T_newaverage = foo_T_average * Group::exp(average); if (squaredNorm( (foo_T_newaverage.inverse() * foo_T_average).log()) < Constants::epsilon()) { return foo_T_newaverage; } foo_T_average = foo_T_newaverage; } // LCOV_EXCL_START return nullopt; // LCOV_EXCL_STOP } #ifdef DOXYGEN_SHOULD_SKIP_THIS /// Mean implementation for any Lie group. template optional average( SequenceContainer const& foo_Ts_bar); #else // Mean implementation for SO(2). template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar) { // This implements rotational part of Proposition 12 from Sec. 6.2 of // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf. size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar)); SOPHUS_ENSURE(N >= 1, "N must be >= 1."); SO2 foo_T_average = foo_Ts_bar.front(); Scalar w = Scalar(1. 
/ N); Scalar average(0); for (SO2 const& foo_T_bar : foo_Ts_bar) { average += w * (foo_T_average.inverse() * foo_T_bar).log(); } return foo_T_average * SO2::exp(average); } // Mean implementation for RxSO(2). template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar) { size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar)); SOPHUS_ENSURE(N >= 1, "N must be >= 1."); RxSO2 foo_T_average = foo_Ts_bar.front(); Scalar w = Scalar(1. / N); Vector2 average(Scalar(0), Scalar(0)); for (RxSO2 const& foo_T_bar : foo_Ts_bar) { average += w * (foo_T_average.inverse() * foo_T_bar).log(); } return foo_T_average * RxSO2::exp(average); } namespace details { template void getQuaternion(T const&); template Eigen::Quaternion getUnitQuaternion(SO3 const& R) { return R.unit_quaternion(); } template Eigen::Quaternion getUnitQuaternion(RxSO3 const& sR) { return sR.so3().unit_quaternion(); } template Eigen::Quaternion averageUnitQuaternion( SequenceContainer const& foo_Ts_bar) { // This: http://stackoverflow.com/a/27410865/1221742 size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar)); SOPHUS_ENSURE(N >= 1, "N must be >= 1."); Eigen::Matrix Q(4, N); int i = 0; Scalar w = Scalar(1. / N); for (auto const& foo_T_bar : foo_Ts_bar) { Q.col(i) = w * details::getUnitQuaternion(foo_T_bar).coeffs(); ++i; } Eigen::Matrix QQt = Q * Q.transpose(); // TODO: Figure out why we can't use SelfAdjointEigenSolver here. 
Eigen::EigenSolver > es(QQt); std::complex max_eigenvalue = es.eigenvalues()[0]; Eigen::Matrix, 4, 1> max_eigenvector = es.eigenvectors().col(0); for (int i = 1; i < 4; i++) { if (std::norm(es.eigenvalues()[i]) > std::norm(max_eigenvalue)) { max_eigenvalue = es.eigenvalues()[i]; max_eigenvector = es.eigenvectors().col(i); } } Eigen::Quaternion quat; quat.coeffs() << // max_eigenvector[0].real(), // max_eigenvector[1].real(), // max_eigenvector[2].real(), // max_eigenvector[3].real(); return quat; } } // namespace details // Mean implementation for SO(3). // // TODO: Detect degenerated cases and return nullopt. template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar) { return SO3(details::averageUnitQuaternion(foo_Ts_bar)); } // Mean implementation for R x SO(3). template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar) { size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar)); SOPHUS_ENSURE(N >= 1, "N must be >= 1."); Scalar scale_sum = Scalar(0); using std::exp; using std::log; for (RxSO3 const& foo_T_bar : foo_Ts_bar) { scale_sum += log(foo_T_bar.scale()); } return RxSO3(exp(scale_sum / Scalar(N)), SO3(details::averageUnitQuaternion(foo_Ts_bar))); } template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) { // TODO: Implement Proposition 12 from Sec. 6.2 of // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf. 
return iterativeMean(foo_Ts_bar, max_num_iterations); } template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) { return iterativeMean(foo_Ts_bar, max_num_iterations); } template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) { return iterativeMean(foo_Ts_bar, max_num_iterations); } template enable_if_t< std::is_same >::value, optional > average(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) { return iterativeMean(foo_Ts_bar, max_num_iterations); } #endif // DOXYGEN_SHOULD_SKIP_THIS } // namespace Sophus #endif // SOPHUS_AVERAGE_HPP ================================================ FILE: LIO-Livox/include/sophus/common.hpp ================================================ /// @file /// Common functionality. #ifndef SOPHUS_COMMON_HPP #define SOPHUS_COMMON_HPP #include #include #include #include #include #include #if !defined(SOPHUS_DISABLE_ENSURES) #include "formatstring.hpp" #endif //!defined(SOPHUS_DISABLE_ENSURES) // following boost's assert.hpp #undef SOPHUS_ENSURE // ENSURES are similar to ASSERTS, but they are always checked for (including in // release builds). At the moment there are no ASSERTS in Sophus which should // only be used for checks which are performance critical. #ifdef __GNUC__ #define SOPHUS_FUNCTION __PRETTY_FUNCTION__ #elif (_MSC_VER >= 1310) #define SOPHUS_FUNCTION __FUNCTION__ #else #define SOPHUS_FUNCTION "unknown" #endif // Make sure this compiles with older versions of Eigen which do not have // EIGEN_DEVICE_FUNC defined. #ifndef EIGEN_DEVICE_FUNC #define EIGEN_DEVICE_FUNC #endif // NVCC on windows has issues with defaulting the Map specialization constructors, so // special case that specific platform case. 
#if defined(_MSC_VER) && defined(__CUDACC__) #define SOPHUS_WINDOW_NVCC_FALLBACK #endif // Make sure this compiles with older versions of Eigen which do not have // EIGEN_DEFAULT_COPY_CONSTRUCTOR defined #ifndef EIGEN_DEFAULT_COPY_CONSTRUCTOR #if EIGEN_HAS_CXX11 && !defined(SOPHUS_WINDOW_NVCC_FALLBACK) #define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) EIGEN_DEVICE_FUNC CLASS(const CLASS&) = default; #else #define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) #endif #ifndef EIGEN_INHERIT_ASSIGNMENT_OPERATORS #error "eigen must have EIGEN_INHERIT_ASSIGNMENT_OPERATORS" #endif #define SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived) #endif #ifndef SOPHUS_INHERIT_ASSIGNMENT_OPERATORS #ifndef EIGEN_INHERIT_ASSIGNMENT_OPERATORS #error "eigen must have EIGEN_INHERIT_ASSIGNMENT_OPERATORS" #endif #define SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) #endif #define SOPHUS_FUNC EIGEN_DEVICE_FUNC #if defined(SOPHUS_DISABLE_ENSURES) #define SOPHUS_ENSURE(expr, ...) ((void)0) #elif defined(SOPHUS_ENABLE_ENSURE_HANDLER) namespace Sophus { void ensureFailed(char const* function, char const* file, int line, char const* description); } #define SOPHUS_ENSURE(expr, ...) \ ((expr) ? ((void)0) \ : ::Sophus::ensureFailed( \ SOPHUS_FUNCTION, __FILE__, __LINE__, \ Sophus::details::FormatString(__VA_ARGS__).c_str())) #else // LCOV_EXCL_START namespace Sophus { template SOPHUS_FUNC void defaultEnsure(char const* function, char const* file, int line, char const* description, Args&&... args) { std::printf("Sophus ensure failed in function '%s', file '%s', line %d.\n", function, file, line); #ifdef __CUDACC__ std::printf("%s", description); #else std::cout << details::FormatString(description, std::forward(args)...) << std::endl; std::abort(); #endif } } // namespace Sophus // LCOV_EXCL_STOP #define SOPHUS_ENSURE(expr, ...) \ ((expr) ? 
((void)0) \
    : Sophus::defaultEnsure(SOPHUS_FUNCTION, __FILE__, __LINE__, \
                            ##__VA_ARGS__))
#endif

namespace Sophus {

// Numeric tolerances and pi; specialised per scalar type below.
template struct Constants {
  SOPHUS_FUNC static Scalar epsilon() { return Scalar(1e-10); }
  SOPHUS_FUNC static Scalar epsilonSqrt() {
    using std::sqrt;
    return sqrt(epsilon());
  }
  SOPHUS_FUNC static Scalar pi() {
    return Scalar(3.141592653589793238462643383279502884);
  }
};

// Single-precision specialisation: looser epsilon (1e-5).
template <> struct Constants {
  SOPHUS_FUNC static float constexpr epsilon() { return static_cast(1e-5); }
  SOPHUS_FUNC static float epsilonSqrt() { return std::sqrt(epsilon()); }
  SOPHUS_FUNC static float constexpr pi() {
    return 3.141592653589793238462643383279502884f;
  }
};

/// Nullopt type of lightweight optional class.
struct nullopt_t {
  explicit constexpr nullopt_t() {}
};

constexpr nullopt_t nullopt{};

/// Lightweight optional implementation which requires ``T`` to have a
/// default constructor.
///
/// TODO: Replace with std::optional once Sophus moves to c++17.
///
template class optional {
 public:
  optional() : is_valid_(false) {}
  optional(nullopt_t) : is_valid_(false) {}
  optional(T const& type) : type_(type), is_valid_(true) {}

  // True iff a value is held.
  explicit operator bool() const { return is_valid_; }

  // Accessors: SOPHUS_ENSURE fires when no value is held.
  T const* operator->() const {
    SOPHUS_ENSURE(is_valid_, "must be valid");
    return &type_;
  }
  T* operator->() {
    SOPHUS_ENSURE(is_valid_, "must be valid");
    return &type_;
  }
  T const& operator*() const {
    SOPHUS_ENSURE(is_valid_, "must be valid");
    return type_;
  }
  T& operator*() {
    SOPHUS_ENSURE(is_valid_, "must be valid");
    return type_;
  }

 private:
  T type_;
  bool is_valid_;
};

template using enable_if_t = typename std::enable_if::type;

// Trait used to validate uniform-random-bit-generator template arguments.
template struct IsUniformRandomBitGenerator {
  static const bool value = std::is_unsigned::value &&
                            std::is_unsigned::value &&
                            std::is_unsigned::value;
};

}  // namespace Sophus

#endif  // SOPHUS_COMMON_HPP

================================================ FILE: LIO-Livox/include/sophus/example_ensure_handler.cpp ================================================

#include "common.hpp"
#include #include namespace Sophus { void ensureFailed(char const* function, char const* file, int line, char const* description) { std::printf("Sophus ensure failed in function '%s', file '%s', line %d.\n", file, function, line); std::printf("Description: %s\n", description); std::abort(); } } // namespace Sophus ================================================ FILE: LIO-Livox/include/sophus/formatstring.hpp ================================================ /// @file /// FormatString functionality #ifndef SOPHUS_FORMATSTRING_HPP #define SOPHUS_FORMATSTRING_HPP #include namespace Sophus { namespace details { // Following: http://stackoverflow.com/a/22759544 template class IsStreamable { private: template static auto test(int) -> decltype(std::declval() << std::declval(), std::true_type()); template static auto test(...) -> std::false_type; public: static bool const value = decltype(test(0))::value; }; template class ArgToStream { public: static void impl(std::stringstream& stream, T&& arg) { stream << std::forward(arg); } }; inline void FormatStream(std::stringstream& stream, char const* text) { stream << text; return; } // Following: http://en.cppreference.com/w/cpp/language/parameter_pack template void FormatStream(std::stringstream& stream, char const* text, T&& arg, Args&&... args) { static_assert(IsStreamable::value, "One of the args has no ostream overload!"); for (; *text != '\0'; ++text) { if (*text == '%') { ArgToStream::impl(stream, std::forward(arg)); FormatStream(stream, text + 1, std::forward(args)...); return; } stream << *text; } stream << "\nFormat-Warning: There are " << sizeof...(Args) + 1 << " args unused."; return; } template std::string FormatString(char const* text, Args&&... 
args) { std::stringstream stream; FormatStream(stream, text, std::forward(args)...); return stream.str(); } inline std::string FormatString() { return std::string(); } } // namespace details } // namespace Sophus #endif //SOPHUS_FORMATSTRING_HPP ================================================ FILE: LIO-Livox/include/sophus/geometry.hpp ================================================ /// @file /// Transformations between poses and hyperplanes. #ifndef GEOMETRY_HPP #define GEOMETRY_HPP #include "se2.hpp" #include "se3.hpp" #include "so2.hpp" #include "so3.hpp" #include "types.hpp" namespace Sophus { /// Takes in a rotation ``R_foo_plane`` and returns the corresponding line /// normal along the y-axis (in reference frame ``foo``). /// template Vector2 normalFromSO2(SO2 const& R_foo_line) { return R_foo_line.matrix().col(1); } /// Takes in line normal in reference frame foo and constructs a corresponding /// rotation matrix ``R_foo_line``. /// /// Precondition: ``normal_foo`` must not be close to zero. /// template SO2 SO2FromNormal(Vector2 normal_foo) { SOPHUS_ENSURE(normal_foo.squaredNorm() > Constants::epsilon(), "%", normal_foo.transpose()); normal_foo.normalize(); return SO2(normal_foo.y(), -normal_foo.x()); } /// Takes in a rotation ``R_foo_plane`` and returns the corresponding plane /// normal along the z-axis /// (in reference frame ``foo``). /// template Vector3 normalFromSO3(SO3 const& R_foo_plane) { return R_foo_plane.matrix().col(2); } /// Takes in plane normal in reference frame foo and constructs a corresponding /// rotation matrix ``R_foo_plane``. /// /// Note: The ``plane`` frame is defined as such that the normal points along /// the positive z-axis. One can specify hints for the x-axis and y-axis /// of the ``plane`` frame. /// /// Preconditions: /// - ``normal_foo``, ``xDirHint_foo``, ``yDirHint_foo`` must not be close to /// zero. /// - ``xDirHint_foo`` and ``yDirHint_foo`` must be approx. perpendicular. 
/// template Matrix3 rotationFromNormal(Vector3 const& normal_foo, Vector3 xDirHint_foo = Vector3(T(1), T(0), T(0)), Vector3 yDirHint_foo = Vector3(T(0), T(1), T(0))) { SOPHUS_ENSURE(xDirHint_foo.dot(yDirHint_foo) < Constants::epsilon(), "xDirHint (%) and yDirHint (%) must be perpendicular.", xDirHint_foo.transpose(), yDirHint_foo.transpose()); using std::abs; using std::sqrt; T const xDirHint_foo_sqr_length = xDirHint_foo.squaredNorm(); T const yDirHint_foo_sqr_length = yDirHint_foo.squaredNorm(); T const normal_foo_sqr_length = normal_foo.squaredNorm(); SOPHUS_ENSURE(xDirHint_foo_sqr_length > Constants::epsilon(), "%", xDirHint_foo.transpose()); SOPHUS_ENSURE(yDirHint_foo_sqr_length > Constants::epsilon(), "%", yDirHint_foo.transpose()); SOPHUS_ENSURE(normal_foo_sqr_length > Constants::epsilon(), "%", normal_foo.transpose()); Matrix3 basis_foo; basis_foo.col(2) = normal_foo; if (abs(xDirHint_foo_sqr_length - T(1)) > Constants::epsilon()) { xDirHint_foo.normalize(); } if (abs(yDirHint_foo_sqr_length - T(1)) > Constants::epsilon()) { yDirHint_foo.normalize(); } if (abs(normal_foo_sqr_length - T(1)) > Constants::epsilon()) { basis_foo.col(2).normalize(); } T abs_x_dot_z = abs(basis_foo.col(2).dot(xDirHint_foo)); T abs_y_dot_z = abs(basis_foo.col(2).dot(yDirHint_foo)); if (abs_x_dot_z < abs_y_dot_z) { // basis_foo.z and xDirHint are far from parallel. basis_foo.col(1) = basis_foo.col(2).cross(xDirHint_foo).normalized(); basis_foo.col(0) = basis_foo.col(1).cross(basis_foo.col(2)); } else { // basis_foo.z and yDirHint are far from parallel. basis_foo.col(0) = yDirHint_foo.cross(basis_foo.col(2)).normalized(); basis_foo.col(1) = basis_foo.col(2).cross(basis_foo.col(0)); } T det = basis_foo.determinant(); // sanity check SOPHUS_ENSURE(abs(det - T(1)) < Constants::epsilon(), "Determinant of basis is not 1, but %. 
Basis is \n%\n", det, basis_foo); return basis_foo; } /// Takes in plane normal in reference frame foo and constructs a corresponding /// rotation matrix ``R_foo_plane``. /// /// See ``rotationFromNormal`` for details. /// template SO3 SO3FromNormal(Vector3 const& normal_foo) { return SO3(rotationFromNormal(normal_foo)); } /// Returns a line (wrt. to frame ``foo``), given a pose of the ``line`` in /// reference frame ``foo``. /// /// Note: The plane is defined by X-axis of the ``line`` frame. /// template Line2 lineFromSE2(SE2 const& T_foo_line) { return Line2(normalFromSO2(T_foo_line.so2()), T_foo_line.translation()); } /// Returns the pose ``T_foo_line``, given a line in reference frame ``foo``. /// /// Note: The line is defined by X-axis of the frame ``line``. /// template SE2 SE2FromLine(Line2 const& line_foo) { T const d = line_foo.offset(); Vector2 const n = line_foo.normal(); SO2 const R_foo_plane = SO2FromNormal(n); return SE2(R_foo_plane, -d * n); } /// Returns a plane (wrt. to frame ``foo``), given a pose of the ``plane`` in /// reference frame ``foo``. /// /// Note: The plane is defined by XY-plane of the frame ``plane``. /// template Plane3 planeFromSE3(SE3 const& T_foo_plane) { return Plane3(normalFromSO3(T_foo_plane.so3()), T_foo_plane.translation()); } /// Returns the pose ``T_foo_plane``, given a plane in reference frame ``foo``. /// /// Note: The plane is defined by XY-plane of the frame ``plane``. /// template SE3 SE3FromPlane(Plane3 const& plane_foo) { T const d = plane_foo.offset(); Vector3 const n = plane_foo.normal(); SO3 const R_foo_plane = SO3FromNormal(n); return SE3(R_foo_plane, -d * n); } /// Takes in a hyperplane and returns unique representation by ensuring that the /// ``offset`` is not negative. 
/// template Eigen::Hyperplane makeHyperplaneUnique( Eigen::Hyperplane const& plane) { if (plane.offset() >= 0) { return plane; } return Eigen::Hyperplane(-plane.normal(), -plane.offset()); } } // namespace Sophus #endif // GEOMETRY_HPP ================================================ FILE: LIO-Livox/include/sophus/interpolate.hpp ================================================ /// @file /// Interpolation for Lie groups. #ifndef SOPHUS_INTERPOLATE_HPP #define SOPHUS_INTERPOLATE_HPP #include #include "interpolate_details.hpp" namespace Sophus { /// This function interpolates between two Lie group elements ``foo_T_bar`` /// and ``foo_T_baz`` with an interpolation factor of ``alpha`` in [0, 1]. /// /// It returns a pose ``foo_T_quiz`` with ``quiz`` being a frame between ``bar`` /// and ``baz``. If ``alpha=0`` it returns ``foo_T_bar``. If it is 1, it returns /// ``foo_T_bar``. /// /// (Since interpolation on Lie groups is inverse-invariant, we can equivalently /// think of the input arguments as being ``bar_T_foo``, ``baz_T_foo`` and the /// return value being ``quiz_T_foo``.) /// /// Precondition: ``p`` must be in [0, 1]. 
/// template enable_if_t::supported, G> interpolate( G const& foo_T_bar, G const& foo_T_baz, Scalar2 p = Scalar2(0.5f)) { using Scalar = typename G::Scalar; Scalar inter_p(p); SOPHUS_ENSURE(inter_p >= Scalar(0) && inter_p <= Scalar(1), "p (%) must in [0, 1]."); return foo_T_bar * G::exp(inter_p * (foo_T_bar.inverse() * foo_T_baz).log()); } } // namespace Sophus #endif // SOPHUS_INTERPOLATE_HPP ================================================ FILE: LIO-Livox/include/sophus/interpolate_details.hpp ================================================ #ifndef SOPHUS_INTERPOLATE_DETAILS_HPP #define SOPHUS_INTERPOLATE_DETAILS_HPP #include "rxso2.hpp" #include "rxso3.hpp" #include "se2.hpp" #include "se3.hpp" #include "sim2.hpp" #include "sim3.hpp" #include "so2.hpp" #include "so3.hpp" namespace Sophus { namespace interp_details { template struct Traits; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(SO2 const& foo_T_bar) { using std::abs; Scalar angle = foo_T_bar.log(); return abs(abs(angle) - Constants::pi()) < Constants::epsilon(); } }; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(RxSO2 const& foo_T_bar) { return Traits>::hasShortestPathAmbiguity(foo_T_bar.so2()); } }; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(SO3 const& foo_T_bar) { using std::abs; Scalar angle = foo_T_bar.logAndTheta().theta; return abs(abs(angle) - Constants::pi()) < Constants::epsilon(); } }; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(RxSO3 const& foo_T_bar) { return Traits>::hasShortestPathAmbiguity(foo_T_bar.so3()); } }; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(SE2 const& foo_T_bar) { return Traits>::hasShortestPathAmbiguity(foo_T_bar.so2()); } }; template struct Traits> { static bool constexpr supported = 
true; static bool hasShortestPathAmbiguity(SE3 const& foo_T_bar) { return Traits>::hasShortestPathAmbiguity(foo_T_bar.so3()); } }; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(Sim2 const& foo_T_bar) { return Traits>::hasShortestPathAmbiguity( foo_T_bar.rxso2().so2()); ; } }; template struct Traits> { static bool constexpr supported = true; static bool hasShortestPathAmbiguity(Sim3 const& foo_T_bar) { return Traits>::hasShortestPathAmbiguity( foo_T_bar.rxso3().so3()); ; } }; } // namespace interp_details } // namespace Sophus #endif // SOPHUS_INTERPOLATE_DETAILS_HPP ================================================ FILE: LIO-Livox/include/sophus/num_diff.hpp ================================================ /// @file /// Numerical differentiation using finite differences #ifndef SOPHUS_NUM_DIFF_HPP #define SOPHUS_NUM_DIFF_HPP #include #include #include #include "types.hpp" namespace Sophus { namespace details { template class Curve { public: template static auto num_diff(Fn curve, Scalar t, Scalar h) -> decltype(curve(t)) { using ReturnType = decltype(curve(t)); static_assert(std::is_floating_point::value, "Scalar must be a floating point type."); static_assert(IsFloatingPoint::value, "ReturnType must be either a floating point scalar, " "vector or matrix."); return (curve(t + h) - curve(t - h)) / (Scalar(2) * h); } }; template class VectorField { public: static Eigen::Matrix num_diff( std::function(Sophus::Vector)> vector_field, Sophus::Vector const& a, Scalar eps) { static_assert(std::is_floating_point::value, "Scalar must be a floating point type."); Eigen::Matrix J; Sophus::Vector h; h.setZero(); for (int i = 0; i < M; ++i) { h[i] = eps; J.col(i) = (vector_field(a + h) - vector_field(a - h)) / (Scalar(2) * eps); h[i] = Scalar(0); } return J; } }; template class VectorField { public: static Eigen::Matrix num_diff( std::function(Scalar)> vector_field, Scalar const& a, Scalar eps) { return 
details::Curve::num_diff(std::move(vector_field), a, eps); } }; } // namespace details /// Calculates the derivative of a curve at a point ``t``. /// /// Here, a curve is a function from a Scalar to a Euclidean space. Thus, it /// returns either a Scalar, a vector or a matrix. /// template auto curveNumDiff(Fn curve, Scalar t, Scalar h = Constants::epsilonSqrt()) -> decltype(details::Curve::num_diff(std::move(curve), t, h)) { return details::Curve::num_diff(std::move(curve), t, h); } /// Calculates the derivative of a vector field at a point ``a``. /// /// Here, a vector field is a function from a vector space to another vector /// space. /// template Eigen::Matrix vectorFieldNumDiff( Fn vector_field, ScalarOrVector const& a, Scalar eps = Constants::epsilonSqrt()) { return details::VectorField::num_diff(std::move(vector_field), a, eps); } } // namespace Sophus #endif // SOPHUS_NUM_DIFF_HPP ================================================ FILE: LIO-Livox/include/sophus/rotation_matrix.hpp ================================================ /// @file /// Rotation matrix helper functions. #ifndef SOPHUS_ROTATION_MATRIX_HPP #define SOPHUS_ROTATION_MATRIX_HPP #include #include #include "types.hpp" namespace Sophus { /// Takes in arbitrary square matrix and returns true if it is /// orthogonal. template SOPHUS_FUNC bool isOrthogonal(Eigen::MatrixBase const& R) { using Scalar = typename D::Scalar; static int const N = D::RowsAtCompileTime; static int const M = D::ColsAtCompileTime; static_assert(N == M, "must be a square matrix"); static_assert(N >= 2, "must have compile time dimension >= 2"); return (R * R.transpose() - Matrix::Identity()).norm() < Constants::epsilon(); } /// Takes in arbitrary square matrix and returns true if it is /// "scaled-orthogonal" with positive determinant. 
/// template SOPHUS_FUNC bool isScaledOrthogonalAndPositive(Eigen::MatrixBase const& sR) { using Scalar = typename D::Scalar; static int const N = D::RowsAtCompileTime; static int const M = D::ColsAtCompileTime; using std::pow; using std::sqrt; Scalar det = sR.determinant(); if (det <= Scalar(0)) { return false; } Scalar scale_sqr = pow(det, Scalar(2. / N)); static_assert(N == M, "must be a square matrix"); static_assert(N >= 2, "must have compile time dimension >= 2"); return (sR * sR.transpose() - scale_sqr * Matrix::Identity()) .template lpNorm() < sqrt(Constants::epsilon()); } /// Takes in arbitrary square matrix (2x2 or larger) and returns closest /// orthogonal matrix with positive determinant. template SOPHUS_FUNC enable_if_t< std::is_floating_point::value, Matrix> makeRotationMatrix(Eigen::MatrixBase const& R) { using Scalar = typename D::Scalar; static int const N = D::RowsAtCompileTime; static int const M = D::ColsAtCompileTime; static_assert(N == M, "must be a square matrix"); static_assert(N >= 2, "must have compile time dimension >= 2"); Eigen::JacobiSVD> svd( R, Eigen::ComputeFullU | Eigen::ComputeFullV); // Determine determinant of orthogonal matrix U*V'. Scalar d = (svd.matrixU() * svd.matrixV().transpose()).determinant(); // Starting from the identity matrix D, set the last entry to d (+1 or // -1), so that det(U*D*V') = 1. Matrix Diag = Matrix::Identity(); Diag(N - 1, N - 1) = d; return svd.matrixU() * Diag * svd.matrixV().transpose(); } } // namespace Sophus #endif // SOPHUS_ROTATION_MATRIX_HPP ================================================ FILE: LIO-Livox/include/sophus/rxso2.hpp ================================================ /// @file /// Direct product R X SO(2) - rotation and scaling in 2d. 
#ifndef SOPHUS_RXSO2_HPP #define SOPHUS_RXSO2_HPP #include "so2.hpp" namespace Sophus { template class RxSO2; using RxSO2d = RxSO2; using RxSO2f = RxSO2; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using ComplexType = Sophus::Vector2; }; template struct traits, Options_>> : traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using ComplexType = Map, Options>; }; template struct traits const, Options_>> : traits const> { static constexpr int Options = Options_; using Scalar = Scalar_; using ComplexType = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// RxSO2 base type - implements RxSO2 class but is storage agnostic /// /// This class implements the group ``R+ x SO(2)``, the direct product of the /// group of positive scalar 2x2 matrices (= isomorph to the positive /// real numbers) and the two-dimensional special orthogonal group SO(2). /// Geometrically, it is the group of rotation and scaling in two dimensions. /// As a matrix groups, R+ x SO(2) consists of matrices of the form ``s * R`` /// where ``R`` is an orthogonal matrix with ``det(R) = 1`` and ``s > 0`` /// being a positive real number. In particular, it has the following form: /// /// | s * cos(theta) s * -sin(theta) | /// | s * sin(theta) s * cos(theta) | /// /// where ``theta`` being the rotation angle. Internally, it is represented by /// the first column of the rotation matrix, or in other words by a non-zero /// complex number. /// /// R+ x SO(2) is not compact, but a commutative group. First it is not compact /// since the scale factor is not bound. Second it is commutative since /// ``sR(alpha, s1) * sR(beta, s2) = sR(beta, s2) * sR(alpha, s1)``, simply /// because ``alpha + beta = beta + alpha`` and ``s1 * s2 = s2 * s1`` with /// ``alpha`` and ``beta`` being rotation angles and ``s1``, ``s2`` being scale /// factors. 
/// /// This class has the explicit class invariant that the scale ``s`` is not /// too close to zero. Strictly speaking, it must hold that: /// /// ``complex().norm() >= Constants::epsilon()``. /// /// In order to obey this condition, group multiplication is implemented with /// saturation such that a product always has a scale which is equal or greater /// this threshold. template class RxSO2Base { public: static constexpr int Options = Eigen::internal::traits::Options; using Scalar = typename Eigen::internal::traits::Scalar; using ComplexType = typename Eigen::internal::traits::ComplexType; using ComplexTemporaryType = Sophus::Vector2; /// Degrees of freedom of manifold, number of dimensions in tangent space /// (one for rotation and one for scaling). static int constexpr DoF = 2; /// Number of internal parameters used (complex number is a tuple). static int constexpr num_parameters = 2; /// Group transformations are 2x2 matrices. static int constexpr N = 2; using Transformation = Matrix; using Point = Vector2; using HomogeneousPoint = Vector3; using Line = ParametrizedLine2; using Tangent = Vector; using Adjoint = Matrix; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with RxSO2 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using RxSO2Product = RxSO2>; template using PointProduct = Vector2>; template using HomogeneousPointProduct = Vector3>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. /// /// For RxSO(2), it simply returns the identity matrix. 
/// SOPHUS_FUNC Adjoint Adj() const { return Adjoint::Identity(); } /// Returns rotation angle. /// SOPHUS_FUNC Scalar angle() const { return SO2(complex()).log(); } /// Returns copy of instance casted to NewScalarType. /// template SOPHUS_FUNC RxSO2 cast() const { return RxSO2(complex().template cast()); } /// This provides unsafe read/write access to internal data. RxSO(2) is /// represented by a complex number (two parameters). When using direct /// write access, the user needs to take care of that the complex number is /// not set close to zero. /// /// Note: The first parameter represents the real part, while the /// second parameter represent the imaginary part. /// SOPHUS_FUNC Scalar* data() { return complex_nonconst().data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { return complex().data(); } /// Returns group inverse. /// SOPHUS_FUNC RxSO2 inverse() const { Scalar squared_scale = complex().squaredNorm(); return RxSO2(complex().x() / squared_scale, -complex().y() / squared_scale); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (scaled rotation matrices) to elements of the tangent /// space (rotation-vector plus logarithm of scale factor). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of RxSO2. /// SOPHUS_FUNC Tangent log() const { using std::log; Tangent theta_sigma; theta_sigma[1] = log(scale()); theta_sigma[0] = SO2(complex()).log(); return theta_sigma; } /// Returns 2x2 matrix representation of the instance. /// /// For RxSO2, the matrix representation is an scaled orthogonal matrix ``sR`` /// with ``det(R)=s^2``, thus a scaled rotation matrix ``R`` with scale /// ``s``. 
/// SOPHUS_FUNC Transformation matrix() const { Transformation sR; // clang-format off sR << complex()[0], -complex()[1], complex()[1], complex()[0]; // clang-format on return sR; } /// Assignment operator. /// SOPHUS_FUNC RxSO2Base& operator=(RxSO2Base const& other) = default; /// Assignment-like operator from OtherDerived. /// template SOPHUS_FUNC RxSO2Base& operator=( RxSO2Base const& other) { complex_nonconst() = other.complex(); return *this; } /// Group multiplication, which is rotation concatenation and scale /// multiplication. /// /// Note: This function performs saturation for products close to zero in /// order to ensure the class invariant. /// template SOPHUS_FUNC RxSO2Product operator*( RxSO2Base const& other) const { using ResultT = ReturnScalar; Scalar lhs_real = complex().x(); Scalar lhs_imag = complex().y(); typename OtherDerived::Scalar const& rhs_real = other.complex().x(); typename OtherDerived::Scalar const& rhs_imag = other.complex().y(); /// complex multiplication typename RxSO2Product::ComplexType result_complex( lhs_real * rhs_real - lhs_imag * rhs_imag, lhs_real * rhs_imag + lhs_imag * rhs_real); const ResultT squared_scale = result_complex.squaredNorm(); if (squared_scale < Constants::epsilon() * Constants::epsilon()) { /// Saturation to ensure class invariant. result_complex.normalize(); result_complex *= Constants::epsilon(); } return RxSO2Product(result_complex); } /// Group action on 2-points. /// /// This function rotates a 2 dimensional point ``p`` by the SO2 element /// ``bar_R_foo`` (= rotation matrix) and scales it by the scale factor ``s``: /// /// ``p_bar = s * (bar_R_foo * p_foo)``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { return matrix() * p; } /// Group action on homogeneous 2-points. See above for more details. 
/// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const auto rsp = *this * p.template head<2>(); return HomogeneousPointProduct(rsp(0), rsp(1), p(2)); } /// Group action on lines. /// /// This function rotates a parameterized line ``l(t) = o + t * d`` by the SO2 /// element and scales it by the scale factor /// /// Origin ``o`` is rotated and scaled /// Direction ``d`` is rotated (preserving it's norm) /// SOPHUS_FUNC Line operator*(Line const& l) const { return Line((*this) * l.origin(), (*this) * l.direction() / scale()); } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SO2's Scalar type. /// /// Note: This function performs saturation for products close to zero in /// order to ensure the class invariant. /// template >::value>::type> SOPHUS_FUNC RxSO2Base& operator*=( RxSO2Base const& other) { *static_cast(this) = *this * other; return *this; } /// Returns internal parameters of RxSO(2). /// /// It returns (c[0], c[1]), with c being the complex number. /// SOPHUS_FUNC Sophus::Vector params() const { return complex(); } /// Sets non-zero complex /// /// Precondition: ``z`` must not be close to zero. SOPHUS_FUNC void setComplex(Vector2 const& z) { SOPHUS_ENSURE(z.squaredNorm() > Constants::epsilon() * Constants::epsilon(), "Scale factor must be greater-equal epsilon."); static_cast(this)->complex_nonconst() = z; } /// Accessor of complex. /// SOPHUS_FUNC ComplexType const& complex() const { return static_cast(this)->complex(); } /// Returns rotation matrix. /// SOPHUS_FUNC Transformation rotationMatrix() const { ComplexTemporaryType norm_quad = complex(); norm_quad.normalize(); return SO2(norm_quad).matrix(); } /// Returns scale. /// SOPHUS_FUNC Scalar scale() const { return complex().norm(); } /// Setter of rotation angle, leaves scale as is. 
/// SOPHUS_FUNC void setAngle(Scalar const& theta) { setSO2(SO2(theta)); } /// Setter of complex using rotation matrix ``R``, leaves scale as is. /// /// Precondition: ``R`` must be orthogonal with determinant of one. /// SOPHUS_FUNC void setRotationMatrix(Transformation const& R) { setSO2(SO2(R)); } /// Sets scale and leaves rotation as is. /// SOPHUS_FUNC void setScale(Scalar const& scale) { using std::sqrt; complex_nonconst().normalize(); complex_nonconst() *= scale; } /// Setter of complex number using scaled rotation matrix ``sR``. /// /// Precondition: The 2x2 matrix must be "scaled orthogonal" /// and have a positive determinant. /// SOPHUS_FUNC void setScaledRotationMatrix(Transformation const& sR) { SOPHUS_ENSURE(isScaledOrthogonalAndPositive(sR), "sR must be scaled orthogonal:\n %", sR); complex_nonconst() = sR.col(0); } /// Setter of SO(2) rotations, leaves scale as is. /// SOPHUS_FUNC void setSO2(SO2 const& so2) { using std::sqrt; Scalar saved_scale = scale(); complex_nonconst() = so2.unit_complex(); complex_nonconst() *= saved_scale; } SOPHUS_FUNC SO2 so2() const { return SO2(complex()); } protected: /// Mutator of complex is private to ensure class invariant. /// SOPHUS_FUNC ComplexType& complex_nonconst() { return static_cast(this)->complex_nonconst(); } }; /// RxSO2 using storage; derived from RxSO2Base. template class RxSO2 : public RxSO2Base> { public: using Base = RxSO2Base>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using ComplexMember = Eigen::Matrix; /// ``Base`` is friend so complex_nonconst can be accessed from ``Base``. friend class RxSO2Base>; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes complex number to identity rotation and /// scale to 1. 
/// SOPHUS_FUNC RxSO2() : complex_(Scalar(1), Scalar(0)) {} /// Copy constructor /// SOPHUS_FUNC RxSO2(RxSO2 const& other) = default; /// Copy-like constructor from OtherDerived. /// template SOPHUS_FUNC RxSO2(RxSO2Base const& other) : complex_(other.complex()) {} /// Constructor from scaled rotation matrix /// /// Precondition: rotation matrix need to be scaled orthogonal with /// determinant of ``s^2``. /// SOPHUS_FUNC explicit RxSO2(Transformation const& sR) { this->setScaledRotationMatrix(sR); } /// Constructor from scale factor and rotation matrix ``R``. /// /// Precondition: Rotation matrix ``R`` must to be orthogonal with determinant /// of 1 and ``scale`` must to be close to zero. /// SOPHUS_FUNC RxSO2(Scalar const& scale, Transformation const& R) : RxSO2((scale * SO2(R).unit_complex()).eval()) {} /// Constructor from scale factor and SO2 /// /// Precondition: ``scale`` must be close to zero. /// SOPHUS_FUNC RxSO2(Scalar const& scale, SO2 const& so2) : RxSO2((scale * so2.unit_complex()).eval()) {} /// Constructor from complex number. /// /// Precondition: complex number must not be close to zero. /// SOPHUS_FUNC explicit RxSO2(Vector2 const& z) : complex_(z) { SOPHUS_ENSURE(complex_.squaredNorm() >= Constants::epsilon() * Constants::epsilon(), "Scale factor must be greater-equal epsilon: % vs %", complex_.squaredNorm(), Constants::epsilon() * Constants::epsilon()); } /// Constructor from complex number. /// /// Precondition: complex number must not be close to zero. /// SOPHUS_FUNC explicit RxSO2(Scalar const& real, Scalar const& imag) : RxSO2(Vector2(real, imag)) {} /// Accessor of complex. /// SOPHUS_FUNC ComplexMember const& complex() const { return complex_; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. 
/// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Group exponential /// /// This functions takes in an element of tangent space (= rotation angle /// plus logarithm of scale) and returns the corresponding element of the /// group RxSO2. /// /// To be more specific, this function computes ``expmat(hat(theta))`` /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the /// hat()-operator of RSO2. /// SOPHUS_FUNC static RxSO2 exp(Tangent const& a) { using std::exp; Scalar const theta = a[0]; Scalar const sigma = a[1]; Scalar s = exp(sigma); Vector2 z = SO2::exp(theta).unit_complex(); z *= s; return RxSO2(z); } /// Returns the ith infinitesimal generators of ``R+ x SO(2)``. /// /// The infinitesimal generators of RxSO2 are: /// /// ``` /// | 0 -1 | /// G_0 = | 1 0 | /// /// | 1 0 | /// G_1 = | 0 1 | /// ``` /// /// Precondition: ``i`` must be 0, or 1. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 && i <= 1, "i should be 0 or 1."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 2-vector representation ``a`` (= rotation angle plus /// logarithm of scale) and returns the corresponding matrix representation /// of Lie algebra element. /// /// Formally, the hat()-operator of RxSO2 is defined as /// /// ``hat(.): R^2 -> R^{2x2}, hat(a) = sum_i a_i * G_i`` (for i=0,1,2) /// /// with ``G_i`` being the ith infinitesimal generator of RxSO2. /// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation A; // clang-format off A << a(1), -a(0), a(0), a(1); // clang-format on return A; } /// Lie bracket /// /// It computes the Lie bracket of RxSO(2). 
To be more specific, it computes /// /// ``[omega_1, omega_2]_rxso2 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of RxSO2. /// SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) { Vector2 res; res.setZero(); return res; } /// Draw uniform sample from RxSO(2) manifold. /// /// The scale factor is drawn uniformly in log2-space from [-1, 1], /// hence the scale is in [0.5, 2)]. /// template static RxSO2 sampleUniform(UniformRandomBitGenerator& generator) { std::uniform_real_distribution uniform(Scalar(-1), Scalar(1)); using std::exp2; return RxSO2(exp2(uniform(generator)), SO2::sampleUniform(generator)); } /// vee-operator /// /// It takes the 2x2-matrix representation ``Omega`` and maps it to the /// corresponding vector representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. /// /// Precondition: ``Omega`` must have the following structure: /// /// | d -x | /// | x d | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { using std::abs; return Tangent(Omega(1, 0), Omega(0, 0)); } protected: SOPHUS_FUNC ComplexMember& complex_nonconst() { return complex_; } ComplexMember complex_; }; } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``RxSO2``; derived from RxSO2Base. /// /// Allows us to wrap RxSO2 objects around POD array (e.g. external z style /// complex). template class Map, Options> : public Sophus::RxSO2Base, Options>> { using Base = Sophus::RxSO2Base, Options>>; public: using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; /// ``Base`` is friend so complex_nonconst can be accessed from ``Base``. 
friend class Sophus::RxSO2Base, Options>>; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : complex_(coeffs) {} /// Accessor of complex. /// SOPHUS_FUNC Map, Options> const& complex() const { return complex_; } protected: SOPHUS_FUNC Map, Options>& complex_nonconst() { return complex_; } Map, Options> complex_; }; /// Specialization of Eigen::Map for ``RxSO2 const``; derived from RxSO2Base. /// /// Allows us to wrap RxSO2 objects around POD array (e.g. external z style /// complex). template class Map const, Options> : public Sophus::RxSO2Base const, Options>> { public: using Base = Sophus::RxSO2Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : complex_(coeffs) {} /// Accessor of complex. /// SOPHUS_FUNC Map const, Options> const& complex() const { return complex_; } protected: Map const, Options> const complex_; }; } // namespace Eigen #endif /// SOPHUS_RXSO2_HPP ================================================ FILE: LIO-Livox/include/sophus/rxso3.hpp ================================================ /// @file /// Direct product R X SO(3) - rotation and scaling in 3d. 
#ifndef SOPHUS_RXSO3_HPP #define SOPHUS_RXSO3_HPP #include "so3.hpp" namespace Sophus { template class RxSO3; using RxSO3d = RxSO3; using RxSO3f = RxSO3; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using QuaternionType = Eigen::Quaternion; }; template struct traits, Options_>> : traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using QuaternionType = Map, Options>; }; template struct traits const, Options_>> : traits const> { static constexpr int Options = Options_; using Scalar = Scalar_; using QuaternionType = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// RxSO3 base type - implements RxSO3 class but is storage agnostic /// /// This class implements the group ``R+ x SO(3)``, the direct product of the /// group of positive scalar 3x3 matrices (= isomorph to the positive /// real numbers) and the three-dimensional special orthogonal group SO(3). /// Geometrically, it is the group of rotation and scaling in three dimensions. /// As a matrix groups, RxSO3 consists of matrices of the form ``s * R`` /// where ``R`` is an orthogonal matrix with ``det(R)=1`` and ``s > 0`` /// being a positive real number. /// /// Internally, RxSO3 is represented by the group of non-zero quaternions. /// In particular, the scale equals the squared(!) norm of the quaternion ``q``, /// ``s = |q|^2``. This is a most compact representation since the degrees of /// freedom (DoF) of RxSO3 (=4) equals the number of internal parameters (=4). /// /// This class has the explicit class invariant that the scale ``s`` is not /// too close to zero. Strictly speaking, it must hold that: /// /// ``quaternion().squaredNorm() >= Constants::epsilon()``. /// /// In order to obey this condition, group multiplication is implemented with /// saturation such that a product always has a scale which is equal or greater /// this threshold. 
template class RxSO3Base { public: static constexpr int Options = Eigen::internal::traits::Options; using Scalar = typename Eigen::internal::traits::Scalar; using QuaternionType = typename Eigen::internal::traits::QuaternionType; using QuaternionTemporaryType = Eigen::Quaternion; /// Degrees of freedom of manifold, number of dimensions in tangent space /// (three for rotation and one for scaling). static int constexpr DoF = 4; /// Number of internal parameters used (quaternion is a 4-tuple). static int constexpr num_parameters = 4; /// Group transformations are 3x3 matrices. static int constexpr N = 3; using Transformation = Matrix; using Point = Vector3; using HomogeneousPoint = Vector4; using Line = ParametrizedLine3; using Tangent = Vector; using Adjoint = Matrix; struct TangentAndTheta { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Tangent tangent; Scalar theta; }; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with RxSO3 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using RxSO3Product = RxSO3>; template using PointProduct = Vector3>; template using HomogeneousPointProduct = Vector4>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. /// /// For RxSO(3), it simply returns the rotation matrix corresponding to ``A``. /// SOPHUS_FUNC Adjoint Adj() const { Adjoint res; res.setIdentity(); res.template topLeftCorner<3, 3>() = rotationMatrix(); return res; } /// Returns copy of instance casted to NewScalarType. 
/// template SOPHUS_FUNC RxSO3 cast() const { return RxSO3(quaternion().template cast()); } /// This provides unsafe read/write access to internal data. RxSO(3) is /// represented by an Eigen::Quaternion (four parameters). When using direct /// write access, the user needs to take care of that the quaternion is not /// set close to zero. /// /// Note: The first three Scalars represent the imaginary parts, while the /// forth Scalar represent the real part. /// SOPHUS_FUNC Scalar* data() { return quaternion_nonconst().coeffs().data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { return quaternion().coeffs().data(); } /// Returns group inverse. /// SOPHUS_FUNC RxSO3 inverse() const { return RxSO3(quaternion().inverse()); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (scaled rotation matrices) to elements of the tangent /// space (rotation-vector plus logarithm of scale factor). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of RxSO3. /// SOPHUS_FUNC Tangent log() const { return logAndTheta().tangent; } /// As above, but also returns ``theta = |omega|``. /// SOPHUS_FUNC TangentAndTheta logAndTheta() const { using std::log; Scalar scale = quaternion().squaredNorm(); TangentAndTheta result; result.tangent[3] = log(scale); auto omega_and_theta = SO3(quaternion()).logAndTheta(); result.tangent.template head<3>() = omega_and_theta.tangent; result.theta = omega_and_theta.theta; return result; } /// Returns 3x3 matrix representation of the instance. /// /// For RxSO3, the matrix representation is an scaled orthogonal matrix ``sR`` /// with ``det(R)=s^3``, thus a scaled rotation matrix ``R`` with scale /// ``s``. 
/// SOPHUS_FUNC Transformation matrix() const { Transformation sR; Scalar const vx_sq = quaternion().vec().x() * quaternion().vec().x(); Scalar const vy_sq = quaternion().vec().y() * quaternion().vec().y(); Scalar const vz_sq = quaternion().vec().z() * quaternion().vec().z(); Scalar const w_sq = quaternion().w() * quaternion().w(); Scalar const two_vx = Scalar(2) * quaternion().vec().x(); Scalar const two_vy = Scalar(2) * quaternion().vec().y(); Scalar const two_vz = Scalar(2) * quaternion().vec().z(); Scalar const two_vx_vy = two_vx * quaternion().vec().y(); Scalar const two_vx_vz = two_vx * quaternion().vec().z(); Scalar const two_vx_w = two_vx * quaternion().w(); Scalar const two_vy_vz = two_vy * quaternion().vec().z(); Scalar const two_vy_w = two_vy * quaternion().w(); Scalar const two_vz_w = two_vz * quaternion().w(); sR(0, 0) = vx_sq - vy_sq - vz_sq + w_sq; sR(1, 0) = two_vx_vy + two_vz_w; sR(2, 0) = two_vx_vz - two_vy_w; sR(0, 1) = two_vx_vy - two_vz_w; sR(1, 1) = -vx_sq + vy_sq - vz_sq + w_sq; sR(2, 1) = two_vx_w + two_vy_vz; sR(0, 2) = two_vx_vz + two_vy_w; sR(1, 2) = -two_vx_w + two_vy_vz; sR(2, 2) = -vx_sq - vy_sq + vz_sq + w_sq; return sR; } /// Assignment operator. /// SOPHUS_FUNC RxSO3Base& operator=(RxSO3Base const& other) = default; /// Assignment-like operator from OtherDerived. /// template SOPHUS_FUNC RxSO3Base& operator=( RxSO3Base const& other) { quaternion_nonconst() = other.quaternion(); return *this; } /// Group multiplication, which is rotation concatenation and scale /// multiplication. /// /// Note: This function performs saturation for products close to zero in /// order to ensure the class invariant. 
/// template SOPHUS_FUNC RxSO3Product operator*( RxSO3Base const& other) const { using ResultT = ReturnScalar; typename RxSO3Product::QuaternionType result_quaternion( quaternion() * other.quaternion()); ResultT scale = result_quaternion.squaredNorm(); if (scale < Constants::epsilon()) { SOPHUS_ENSURE(scale > ResultT(0), "Scale must be greater zero."); /// Saturation to ensure class invariant. result_quaternion.normalize(); result_quaternion.coeffs() *= sqrt(Constants::epsilon()); } return RxSO3Product(result_quaternion); } /// Group action on 3-points. /// /// This function rotates a 3 dimensional point ``p`` by the SO3 element /// ``bar_R_foo`` (= rotation matrix) and scales it by the scale factor /// ``s``: /// /// ``p_bar = s * (bar_R_foo * p_foo)``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { // Follows http:///eigen.tuxfamily.org/bz/show_bug.cgi?id=459 Scalar scale = quaternion().squaredNorm(); PointProduct two_vec_cross_p = quaternion().vec().cross(p); two_vec_cross_p += two_vec_cross_p; return scale * p + (quaternion().w() * two_vec_cross_p + quaternion().vec().cross(two_vec_cross_p)); } /// Group action on homogeneous 3-points. See above for more details. /// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const auto rsp = *this * p.template head<3>(); return HomogeneousPointProduct(rsp(0), rsp(1), rsp(2), p(3)); } /// Group action on lines. /// /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO3 /// element ans scales it by the scale factor: /// /// Origin ``o`` is rotated and scaled /// Direction ``d`` is rotated (preserving it's norm) /// SOPHUS_FUNC Line operator*(Line const& l) const { return Line((*this) * l.origin(), (*this) * l.direction() / quaternion().squaredNorm()); } /// In-place group multiplication. 
This method is only valid if the return /// type of the multiplication is compatible with this SO3's Scalar type. /// /// Note: This function performs saturation for products close to zero in /// order to ensure the class invariant. /// template >::value>::type> SOPHUS_FUNC RxSO3Base& operator*=( RxSO3Base const& other) { *static_cast(this) = *this * other; return *this; } /// Returns internal parameters of RxSO(3). /// /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real), with q being the /// quaternion. /// SOPHUS_FUNC Sophus::Vector params() const { return quaternion().coeffs(); } /// Sets non-zero quaternion /// /// Precondition: ``quat`` must not be close to zero. SOPHUS_FUNC void setQuaternion(Eigen::Quaternion const& quat) { SOPHUS_ENSURE(quat.squaredNorm() > Constants::epsilon() * Constants::epsilon(), "Scale factor must be greater-equal epsilon."); static_cast(this)->quaternion_nonconst() = quat; } /// Accessor of quaternion. /// SOPHUS_FUNC QuaternionType const& quaternion() const { return static_cast(this)->quaternion(); } /// Returns rotation matrix. /// SOPHUS_FUNC Transformation rotationMatrix() const { QuaternionTemporaryType norm_quad = quaternion(); norm_quad.normalize(); return norm_quad.toRotationMatrix(); } /// Returns scale. /// SOPHUS_FUNC Scalar scale() const { return quaternion().squaredNorm(); } /// Setter of quaternion using rotation matrix ``R``, leaves scale as is. /// SOPHUS_FUNC void setRotationMatrix(Transformation const& R) { using std::sqrt; Scalar saved_scale = scale(); quaternion_nonconst() = R; quaternion_nonconst().coeffs() *= sqrt(saved_scale); } /// Sets scale and leaves rotation as is. /// /// Note: This function as a significant computational cost, since it has to /// call the square root twice. /// SOPHUS_FUNC void setScale(Scalar const& scale) { using std::sqrt; quaternion_nonconst().normalize(); quaternion_nonconst().coeffs() *= sqrt(scale); } /// Setter of quaternion using scaled rotation matrix ``sR``. 
/// /// Precondition: The 3x3 matrix must be "scaled orthogonal" /// and have a positive determinant. /// SOPHUS_FUNC void setScaledRotationMatrix(Transformation const& sR) { Transformation squared_sR = sR * sR.transpose(); Scalar squared_scale = Scalar(1. / 3.) * (squared_sR(0, 0) + squared_sR(1, 1) + squared_sR(2, 2)); SOPHUS_ENSURE(squared_scale >= Constants::epsilon() * Constants::epsilon(), "Scale factor must be greater-equal epsilon."); Scalar scale = sqrt(squared_scale); quaternion_nonconst() = sR / scale; quaternion_nonconst().coeffs() *= sqrt(scale); } /// Setter of SO(3) rotations, leaves scale as is. /// SOPHUS_FUNC void setSO3(SO3 const& so3) { using std::sqrt; Scalar saved_scale = scale(); quaternion_nonconst() = so3.unit_quaternion(); quaternion_nonconst().coeffs() *= sqrt(saved_scale); } SOPHUS_FUNC SO3 so3() const { return SO3(quaternion()); } protected: /// Mutator of quaternion is private to ensure class invariant. /// SOPHUS_FUNC QuaternionType& quaternion_nonconst() { return static_cast(this)->quaternion_nonconst(); } }; /// RxSO3 using storage; derived from RxSO3Base. template class RxSO3 : public RxSO3Base> { public: using Base = RxSO3Base>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using QuaternionMember = Eigen::Quaternion; /// ``Base`` is friend so quaternion_nonconst can be accessed from ``Base``. friend class RxSO3Base>; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes quaternion to identity rotation and scale /// to 1. 
/// SOPHUS_FUNC RxSO3() : quaternion_(Scalar(1), Scalar(0), Scalar(0), Scalar(0)) {} /// Copy constructor /// SOPHUS_FUNC RxSO3(RxSO3 const& other) = default; /// Copy-like constructor from OtherDerived /// template SOPHUS_FUNC RxSO3(RxSO3Base const& other) : quaternion_(other.quaternion()) {} /// Constructor from scaled rotation matrix /// /// Precondition: rotation matrix need to be scaled orthogonal with /// determinant of ``s^3``. /// SOPHUS_FUNC explicit RxSO3(Transformation const& sR) { this->setScaledRotationMatrix(sR); } /// Constructor from scale factor and rotation matrix ``R``. /// /// Precondition: Rotation matrix ``R`` must to be orthogonal with determinant /// of 1 and ``scale`` must not be close to zero. /// SOPHUS_FUNC RxSO3(Scalar const& scale, Transformation const& R) : quaternion_(R) { SOPHUS_ENSURE(scale >= Constants::epsilon(), "Scale factor must be greater-equal epsilon."); using std::sqrt; quaternion_.coeffs() *= sqrt(scale); } /// Constructor from scale factor and SO3 /// /// Precondition: ``scale`` must not to be close to zero. /// SOPHUS_FUNC RxSO3(Scalar const& scale, SO3 const& so3) : quaternion_(so3.unit_quaternion()) { SOPHUS_ENSURE(scale >= Constants::epsilon(), "Scale factor must be greater-equal epsilon."); using std::sqrt; quaternion_.coeffs() *= sqrt(scale); } /// Constructor from quaternion /// /// Precondition: quaternion must not be close to zero. /// template SOPHUS_FUNC explicit RxSO3(Eigen::QuaternionBase const& quat) : quaternion_(quat) { static_assert(std::is_same::value, "must be same Scalar type."); SOPHUS_ENSURE(quaternion_.squaredNorm() >= Constants::epsilon(), "Scale factor must be greater-equal epsilon."); } /// Accessor of quaternion. /// SOPHUS_FUNC QuaternionMember const& quaternion() const { return quaternion_; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. 
/// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Group exponential /// /// This functions takes in an element of tangent space (= rotation 3-vector /// plus logarithm of scale) and returns the corresponding element of the /// group RxSO3. /// /// To be more specific, thixs function computes ``expmat(hat(omega))`` /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the /// hat()-operator of RSO3. /// SOPHUS_FUNC static RxSO3 exp(Tangent const& a) { Scalar theta; return expAndTheta(a, &theta); } /// As above, but also returns ``theta = |omega|`` as out-parameter. /// /// Precondition: ``theta`` must not be ``nullptr``. /// SOPHUS_FUNC static RxSO3 expAndTheta(Tangent const& a, Scalar* theta) { SOPHUS_ENSURE(theta != nullptr, "must not be nullptr."); using std::exp; using std::sqrt; Vector3 const omega = a.template head<3>(); Scalar sigma = a[3]; Scalar sqrt_scale = sqrt(exp(sigma)); Eigen::Quaternion quat = SO3::expAndTheta(omega, theta).unit_quaternion(); quat.coeffs() *= sqrt_scale; return RxSO3(quat); } /// Returns the ith infinitesimal generators of ``R+ x SO(3)``. /// /// The infinitesimal generators of RxSO3 are: /// /// ``` /// | 0 0 0 | /// G_0 = | 0 0 -1 | /// | 0 1 0 | /// /// | 0 0 1 | /// G_1 = | 0 0 0 | /// | -1 0 0 | /// /// | 0 -1 0 | /// G_2 = | 1 0 0 | /// | 0 0 0 | /// /// | 1 0 0 | /// G_2 = | 0 1 0 | /// | 0 0 1 | /// ``` /// /// Precondition: ``i`` must be 0, 1, 2 or 3. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 && i <= 3, "i should be in range [0,3]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 4-vector representation ``a`` (= rotation vector plus /// logarithm of scale) and returns the corresponding matrix representation /// of Lie algebra element. 
/// /// Formally, the hat()-operator of RxSO3 is defined as /// /// ``hat(.): R^4 -> R^{3x3}, hat(a) = sum_i a_i * G_i`` (for i=0,1,2,3) /// /// with ``G_i`` being the ith infinitesimal generator of RxSO3. /// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation A; // clang-format off A << a(3), -a(2), a(1), a(2), a(3), -a(0), -a(1), a(0), a(3); // clang-format on return A; } /// Lie bracket /// /// It computes the Lie bracket of RxSO(3). To be more specific, it computes /// /// ``[omega_1, omega_2]_rxso3 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of RxSO3. /// SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) { Vector3 const omega1 = a.template head<3>(); Vector3 const omega2 = b.template head<3>(); Vector4 res; res.template head<3>() = omega1.cross(omega2); res[3] = Scalar(0); return res; } /// Draw uniform sample from RxSO(3) manifold. /// /// The scale factor is drawn uniformly in log2-space from [-1, 1], /// hence the scale is in [0.5, 2]. /// template static RxSO3 sampleUniform(UniformRandomBitGenerator& generator) { std::uniform_real_distribution uniform(Scalar(-1), Scalar(1)); using std::exp2; return RxSO3(exp2(uniform(generator)), SO3::sampleUniform(generator)); } /// vee-operator /// /// It takes the 3x3-matrix representation ``Omega`` and maps it to the /// corresponding vector representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. 
/// /// Precondition: ``Omega`` must have the following structure: /// /// | d -c b | /// | c d -a | /// | -b a d | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { using std::abs; return Tangent(Omega(2, 1), Omega(0, 2), Omega(1, 0), Omega(0, 0)); } protected: SOPHUS_FUNC QuaternionMember& quaternion_nonconst() { return quaternion_; } QuaternionMember quaternion_; }; } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``RxSO3``; derived from RxSO3Base /// /// Allows us to wrap RxSO3 objects around POD array (e.g. external c style /// quaternion). template class Map, Options> : public Sophus::RxSO3Base, Options>> { public: using Base = Sophus::RxSO3Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; /// ``Base`` is friend so quaternion_nonconst can be accessed from ``Base``. friend class Sophus::RxSO3Base, Options>>; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : quaternion_(coeffs) {} /// Accessor of quaternion. /// SOPHUS_FUNC Map, Options> const& quaternion() const { return quaternion_; } protected: SOPHUS_FUNC Map, Options>& quaternion_nonconst() { return quaternion_; } Map, Options> quaternion_; }; /// Specialization of Eigen::Map for ``RxSO3 const``; derived from RxSO3Base. /// /// Allows us to wrap RxSO3 objects around POD array (e.g. external c style /// quaternion). 
template class Map const, Options> : public Sophus::RxSO3Base const, Options>> { public: using Base = Sophus::RxSO3Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : quaternion_(coeffs) {} /// Accessor of quaternion. /// SOPHUS_FUNC Map const, Options> const& quaternion() const { return quaternion_; } protected: Map const, Options> const quaternion_; }; } // namespace Eigen #endif /// SOPHUS_RXSO3_HPP ================================================ FILE: LIO-Livox/include/sophus/se2.hpp ================================================ /// @file /// Special Euclidean group SE(2) - rotation and translation in 2d. #ifndef SOPHUS_SE2_HPP #define SOPHUS_SE2_HPP #include "so2.hpp" namespace Sophus { template class SE2; using SE2d = SE2; using SE2f = SE2; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { using Scalar = Scalar_; using TranslationType = Sophus::Vector2; using SO2Type = Sophus::SO2; }; template struct traits, Options>> : traits> { using Scalar = Scalar_; using TranslationType = Map, Options>; using SO2Type = Map, Options>; }; template struct traits const, Options>> : traits const> { using Scalar = Scalar_; using TranslationType = Map const, Options>; using SO2Type = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// SE2 base type - implements SE2 class but is storage agnostic. /// /// SE(2) is the group of rotations and translation in 2d. It is the /// semi-direct product of SO(2) and the 2d Euclidean vector space. The class /// is represented using a composition of SO2Group for rotation and a 2-vector /// for translation. 
/// /// SE(2) is neither compact, nor a commutative group. /// /// See SO2Group for more details of the rotation representation in 2d. /// template class SE2Base { public: using Scalar = typename Eigen::internal::traits::Scalar; using TranslationType = typename Eigen::internal::traits::TranslationType; using SO2Type = typename Eigen::internal::traits::SO2Type; /// Degrees of freedom of manifold, number of dimensions in tangent space /// (two for translation, three for rotation). static int constexpr DoF = 3; /// Number of internal parameters used (tuple for complex, two for /// translation). static int constexpr num_parameters = 4; /// Group transformations are 3x3 matrices. static int constexpr N = 3; using Transformation = Matrix; using Point = Vector2; using HomogeneousPoint = Vector3; using Line = ParametrizedLine2; using Tangent = Vector; using Adjoint = Matrix; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with SE2 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using SE2Product = SE2>; template using PointProduct = Vector2>; template using HomogeneousPointProduct = Vector3>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. /// SOPHUS_FUNC Adjoint Adj() const { Matrix const& R = so2().matrix(); Transformation res; res.setIdentity(); res.template topLeftCorner<2, 2>() = R; res(0, 2) = translation()[1]; res(1, 2) = -translation()[0]; return res; } /// Returns copy of instance casted to NewScalarType. 
/// template SOPHUS_FUNC SE2 cast() const { return SE2(so2().template cast(), translation().template cast()); } /// Returns derivative of this * exp(x) wrt x at x=0. /// SOPHUS_FUNC Matrix Dx_this_mul_exp_x_at_0() const { Matrix J; Sophus::Vector2 const c = unit_complex(); Scalar o(0); J(0, 0) = o; J(0, 1) = o; J(0, 2) = -c[1]; J(1, 0) = o; J(1, 1) = o; J(1, 2) = c[0]; J(2, 0) = c[0]; J(2, 1) = -c[1]; J(2, 2) = o; J(3, 0) = c[1]; J(3, 1) = c[0]; J(3, 2) = o; return J; } /// Returns group inverse. /// SOPHUS_FUNC SE2 inverse() const { SO2 const invR = so2().inverse(); return SE2(invR, invR * (translation() * Scalar(-1))); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (rigid body transformations) to elements of the /// tangent space (twist). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of SE(2). /// SOPHUS_FUNC Tangent log() const { using std::abs; Tangent upsilon_theta; Scalar theta = so2().log(); upsilon_theta[2] = theta; Scalar halftheta = Scalar(0.5) * theta; Scalar halftheta_by_tan_of_halftheta; Vector2 z = so2().unit_complex(); Scalar real_minus_one = z.x() - Scalar(1.); if (abs(real_minus_one) < Constants::epsilon()) { halftheta_by_tan_of_halftheta = Scalar(1.) - Scalar(1. / 12) * theta * theta; } else { halftheta_by_tan_of_halftheta = -(halftheta * z.y()) / (real_minus_one); } Matrix V_inv; V_inv << halftheta_by_tan_of_halftheta, halftheta, -halftheta, halftheta_by_tan_of_halftheta; upsilon_theta.template head<2>() = V_inv * translation(); return upsilon_theta; } /// Normalize SO2 element /// /// It re-normalizes the SO2 element. /// SOPHUS_FUNC void normalize() { so2().normalize(); } /// Returns 3x3 matrix representation of the instance. 
/// /// It has the following form: /// /// | R t | /// | o 1 | /// /// where ``R`` is a 2x2 rotation matrix, ``t`` a translation 2-vector and /// ``o`` a 2-column vector of zeros. /// SOPHUS_FUNC Transformation matrix() const { Transformation homogenious_matrix; homogenious_matrix.template topLeftCorner<2, 3>() = matrix2x3(); homogenious_matrix.row(2) = Matrix(Scalar(0), Scalar(0), Scalar(1)); return homogenious_matrix; } /// Returns the significant first two rows of the matrix above. /// SOPHUS_FUNC Matrix matrix2x3() const { Matrix matrix; matrix.template topLeftCorner<2, 2>() = rotationMatrix(); matrix.col(2) = translation(); return matrix; } /// Assignment operator. /// SOPHUS_FUNC SE2Base& operator=(SE2Base const& other) = default; /// Assignment-like operator from OtherDerived. /// template SOPHUS_FUNC SE2Base& operator=(SE2Base const& other) { so2() = other.so2(); translation() = other.translation(); return *this; } /// Group multiplication, which is rotation concatenation. /// template SOPHUS_FUNC SE2Product operator*( SE2Base const& other) const { return SE2Product( so2() * other.so2(), translation() + so2() * other.translation()); } /// Group action on 2-points. /// /// This function rotates and translates a two dimensional point ``p`` by the /// SE(2) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body /// transformation): /// /// ``p_bar = bar_R_foo * p_foo + t_bar``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { return so2() * p + translation(); } /// Group action on homogeneous 2-points. See above for more details. /// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const PointProduct tp = so2() * p.template head<2>() + p(2) * translation(); return HomogeneousPointProduct(tp(0), tp(1), p(2)); } /// Group action on lines. 
/// /// This function rotates and translates a parametrized line /// ``l(t) = o + t * d`` by the SE(2) element: /// /// Origin ``o`` is rotated and translated using SE(2) action /// Direction ``d`` is rotated using SO(2) action /// SOPHUS_FUNC Line operator*(Line const& l) const { return Line((*this) * l.origin(), so2() * l.direction()); } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SO2's Scalar type. /// template >::value>::type> SOPHUS_FUNC SE2Base& operator*=(SE2Base const& other) { *static_cast(this) = *this * other; return *this; } /// Returns internal parameters of SE(2). /// /// It returns (c[0], c[1], t[0], t[1]), /// with c being the unit complex number, t the translation 3-vector. /// SOPHUS_FUNC Sophus::Vector params() const { Sophus::Vector p; p << so2().params(), translation(); return p; } /// Returns rotation matrix. /// SOPHUS_FUNC Matrix rotationMatrix() const { return so2().matrix(); } /// Takes in complex number, and normalizes it. /// /// Precondition: The complex number must not be close to zero. /// SOPHUS_FUNC void setComplex(Sophus::Vector2 const& complex) { return so2().setComplex(complex); } /// Sets ``so3`` using ``rotation_matrix``. /// /// Precondition: ``R`` must be orthogonal and ``det(R)=1``. /// SOPHUS_FUNC void setRotationMatrix(Matrix const& R) { SOPHUS_ENSURE(isOrthogonal(R), "R is not orthogonal:\n %", R); SOPHUS_ENSURE(R.determinant() > Scalar(0), "det(R) is not positive: %", R.determinant()); typename SO2Type::ComplexTemporaryType const complex( Scalar(0.5) * (R(0, 0) + R(1, 1)), Scalar(0.5) * (R(1, 0) - R(0, 1))); so2().setComplex(complex); } /// Mutator of SO3 group. /// SOPHUS_FUNC SO2Type& so2() { return static_cast(this)->so2(); } /// Accessor of SO3 group. /// SOPHUS_FUNC SO2Type const& so2() const { return static_cast(this)->so2(); } /// Mutator of translation vector. 
/// SOPHUS_FUNC TranslationType& translation() { return static_cast(this)->translation(); } /// Accessor of translation vector /// SOPHUS_FUNC TranslationType const& translation() const { return static_cast(this)->translation(); } /// Accessor of unit complex number. /// SOPHUS_FUNC typename Eigen::internal::traits::SO2Type::ComplexT const& unit_complex() const { return so2().unit_complex(); } }; /// SE2 using default storage; derived from SE2Base. template class SE2 : public SE2Base> { public: using Base = SE2Base>; static int constexpr DoF = Base::DoF; static int constexpr num_parameters = Base::num_parameters; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using SO2Member = SO2; using TranslationMember = Vector2; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes rigid body motion to the identity. /// SOPHUS_FUNC SE2(); /// Copy constructor /// SOPHUS_FUNC SE2(SE2 const& other) = default; /// Copy-like constructor from OtherDerived /// template SOPHUS_FUNC SE2(SE2Base const& other) : so2_(other.so2()), translation_(other.translation()) { static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from SO3 and translation vector /// template SOPHUS_FUNC SE2(SO2Base const& so2, Eigen::MatrixBase const& translation) : so2_(so2), translation_(translation) { static_assert(std::is_same::value, "must be same Scalar type"); static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from rotation matrix and translation vector /// /// Precondition: Rotation matrix needs to be orthogonal with determinant /// of 1. 
/// SOPHUS_FUNC SE2(typename SO2::Transformation const& rotation_matrix, Point const& translation) : so2_(rotation_matrix), translation_(translation) {} /// Constructor from rotation angle and translation vector. /// SOPHUS_FUNC SE2(Scalar const& theta, Point const& translation) : so2_(theta), translation_(translation) {} /// Constructor from complex number and translation vector /// /// Precondition: ``complex`` must not be close to zero. SOPHUS_FUNC SE2(Vector2 const& complex, Point const& translation) : so2_(complex), translation_(translation) {} /// Constructor from 3x3 matrix /// /// Precondition: Rotation matrix needs to be orthogonal with determinant /// of 1. The last row must be ``(0, 0, 1)``. /// SOPHUS_FUNC explicit SE2(Transformation const& T) : so2_(T.template topLeftCorner<2, 2>().eval()), translation_(T.template block<2, 1>(0, 2)) {} /// This provides unsafe read/write access to internal data. SO(2) is /// represented by a complex number (two parameters). When using direct write /// access, the user needs to take care of that the complex number stays /// normalized. /// SOPHUS_FUNC Scalar* data() { // so2_ and translation_ are layed out sequentially with no padding return so2_.data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { /// so2_ and translation_ are layed out sequentially with no padding return so2_.data(); } /// Accessor of SO3 /// SOPHUS_FUNC SO2Member& so2() { return so2_; } /// Mutator of SO3 /// SOPHUS_FUNC SO2Member const& so2() const { return so2_; } /// Mutator of translation vector /// SOPHUS_FUNC TranslationMember& translation() { return translation_; } /// Accessor of translation vector /// SOPHUS_FUNC TranslationMember const& translation() const { return translation_; } /// Returns derivative of exp(x) wrt. x. 
/// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x( Tangent const& upsilon_theta) { using std::abs; using std::cos; using std::pow; using std::sin; Sophus::Matrix J; Sophus::Vector upsilon = upsilon_theta.template head<2>(); Scalar theta = upsilon_theta[2]; if (abs(theta) < Constants::epsilon()) { Scalar const o(0); Scalar const i(1); // clang-format off J << o, o, o, o, o, i, i, o, -Scalar(0.5) * upsilon[1], o, i, Scalar(0.5) * upsilon[0]; // clang-format on return J; } Scalar const c0 = sin(theta); Scalar const c1 = cos(theta); Scalar const c2 = 1.0 / theta; Scalar const c3 = c0 * c2; Scalar const c4 = -c1 + Scalar(1); Scalar const c5 = c2 * c4; Scalar const c6 = c1 * c2; Scalar const c7 = pow(theta, -2); Scalar const c8 = c0 * c7; Scalar const c9 = c4 * c7; Scalar const o = Scalar(0); J(0, 0) = o; J(0, 1) = o; J(0, 2) = -c0; J(1, 0) = o; J(1, 1) = o; J(1, 2) = c1; J(2, 0) = c3; J(2, 1) = -c5; J(2, 2) = -c3 * upsilon[1] + c6 * upsilon[0] - c8 * upsilon[0] + c9 * upsilon[1]; J(3, 0) = c5; J(3, 1) = c3; J(3, 2) = c3 * upsilon[0] + c6 * upsilon[1] - c8 * upsilon[1] - c9 * upsilon[0]; return J; } /// Returns derivative of exp(x) wrt. x_i at x=0. /// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x_at_0() { Sophus::Matrix J; Scalar const o(0); Scalar const i(1); // clang-format off J << o, o, o, o, o, i, i, o, o, o, i, o; // clang-format on return J; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. /// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Group exponential /// /// This functions takes in an element of tangent space (= twist ``a``) and /// returns the corresponding element of the group SE(2). /// /// The first two components of ``a`` represent the translational part /// ``upsilon`` in the tangent space of SE(2), while the last three components /// of ``a`` represents the rotation vector ``omega``. 
/// To be more specific, this function computes ``expmat(hat(a))`` with /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator /// of SE(2), see below. /// SOPHUS_FUNC static SE2 exp(Tangent const& a) { Scalar theta = a[2]; SO2 so2 = SO2::exp(theta); Scalar sin_theta_by_theta; Scalar one_minus_cos_theta_by_theta; using std::abs; if (abs(theta) < Constants::epsilon()) { Scalar theta_sq = theta * theta; sin_theta_by_theta = Scalar(1.) - Scalar(1. / 6.) * theta_sq; one_minus_cos_theta_by_theta = Scalar(0.5) * theta - Scalar(1. / 24.) * theta * theta_sq; } else { sin_theta_by_theta = so2.unit_complex().y() / theta; one_minus_cos_theta_by_theta = (Scalar(1.) - so2.unit_complex().x()) / theta; } Vector2 trans( sin_theta_by_theta * a[0] - one_minus_cos_theta_by_theta * a[1], one_minus_cos_theta_by_theta * a[0] + sin_theta_by_theta * a[1]); return SE2(so2, trans); } /// Returns closest SE3 given arbitrary 4x4 matrix. /// template static SOPHUS_FUNC enable_if_t::value, SE2> fitToSE2(Matrix3 const& T) { return SE2(SO2::fitToSO2(T.template block<2, 2>(0, 0)), T.template block<2, 1>(0, 2)); } /// Returns the ith infinitesimal generators of SE(2). /// /// The infinitesimal generators of SE(2) are: /// /// ``` /// | 0 0 1 | /// G_0 = | 0 0 0 | /// | 0 0 0 | /// /// | 0 0 0 | /// G_1 = | 0 0 1 | /// | 0 0 0 | /// /// | 0 -1 0 | /// G_2 = | 1 0 0 | /// | 0 0 0 | /// ``` /// /// Precondition: ``i`` must be in 0, 1 or 2. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 || i <= 2, "i should be in range [0,2]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 3-vector representation (= twist) and returns the /// corresponding matrix representation of Lie algebra element. /// /// Formally, the hat()-operator of SE(3) is defined as /// /// ``hat(.): R^3 -> R^{3x33}, hat(a) = sum_i a_i * G_i`` (for i=0,1,2) /// /// with ``G_i`` being the ith infinitesimal generator of SE(2). 
/// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation Omega; Omega.setZero(); Omega.template topLeftCorner<2, 2>() = SO2::hat(a[2]); Omega.col(2).template head<2>() = a.template head<2>(); return Omega; } /// Lie bracket /// /// It computes the Lie bracket of SE(2). To be more specific, it computes /// /// ``[omega_1, omega_2]_se2 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of SE(2). /// SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) { Vector2 upsilon1 = a.template head<2>(); Vector2 upsilon2 = b.template head<2>(); Scalar theta1 = a[2]; Scalar theta2 = b[2]; return Tangent(-theta1 * upsilon2[1] + theta2 * upsilon1[1], theta1 * upsilon2[0] - theta2 * upsilon1[0], Scalar(0)); } /// Construct pure rotation. /// static SOPHUS_FUNC SE2 rot(Scalar const& x) { return SE2(SO2(x), Sophus::Vector2::Zero()); } /// Draw uniform sample from SE(2) manifold. /// /// Translations are drawn component-wise from the range [-1, 1]. /// template static SE2 sampleUniform(UniformRandomBitGenerator& generator) { std::uniform_real_distribution uniform(Scalar(-1), Scalar(1)); return SE2(SO2::sampleUniform(generator), Vector2(uniform(generator), uniform(generator))); } /// Construct a translation only SE(2) instance. /// template static SOPHUS_FUNC SE2 trans(T0 const& x, T1 const& y) { return SE2(SO2(), Vector2(x, y)); } static SOPHUS_FUNC SE2 trans(Vector2 const& xy) { return SE2(SO2(), xy); } /// Construct x-axis translation. /// static SOPHUS_FUNC SE2 transX(Scalar const& x) { return SE2::trans(x, Scalar(0)); } /// Construct y-axis translation. 
/// static SOPHUS_FUNC SE2 transY(Scalar const& y) { return SE2::trans(Scalar(0), y); } /// vee-operator /// /// It takes the 3x3-matrix representation ``Omega`` and maps it to the /// corresponding 3-vector representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. /// /// Precondition: ``Omega`` must have the following structure: /// /// | 0 -d a | /// | d 0 b | /// | 0 0 0 | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { SOPHUS_ENSURE( Omega.row(2).template lpNorm<1>() < Constants::epsilon(), "Omega: \n%", Omega); Tangent upsilon_omega; upsilon_omega.template head<2>() = Omega.col(2).template head<2>(); upsilon_omega[2] = SO2::vee(Omega.template topLeftCorner<2, 2>()); return upsilon_omega; } protected: SO2Member so2_; TranslationMember translation_; }; template SE2::SE2() : translation_(TranslationMember::Zero()) { static_assert(std::is_standard_layout::value, "Assume standard layout for the use of offsetof check below."); static_assert( offsetof(SE2, so2_) + sizeof(Scalar) * SO2::num_parameters == offsetof(SE2, translation_), "This class assumes packed storage and hence will only work " "correctly depending on the compiler (options) - in " "particular when using [this->data(), this-data() + " "num_parameters] to access the raw data in a contiguous fashion."); } } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``SE2``; derived from SE2Base. /// /// Allows us to wrap SE2 objects around POD array. 
template class Map, Options> : public Sophus::SE2Base, Options>> { public: using Base = Sophus::SE2Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : so2_(coeffs), translation_(coeffs + Sophus::SO2::num_parameters) {} /// Mutator of SO3 /// SOPHUS_FUNC Map, Options>& so2() { return so2_; } /// Accessor of SO3 /// SOPHUS_FUNC Map, Options> const& so2() const { return so2_; } /// Mutator of translation vector /// SOPHUS_FUNC Map, Options>& translation() { return translation_; } /// Accessor of translation vector /// SOPHUS_FUNC Map, Options> const& translation() const { return translation_; } protected: Map, Options> so2_; Map, Options> translation_; }; /// Specialization of Eigen::Map for ``SE2 const``; derived from SE2Base. /// /// Allows us to wrap SE2 objects around POD array. 
template class Map const, Options> : public Sophus::SE2Base const, Options>> { public: using Base = Sophus::SE2Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : so2_(coeffs), translation_(coeffs + Sophus::SO2::num_parameters) {} /// Accessor of SO3 /// SOPHUS_FUNC Map const, Options> const& so2() const { return so2_; } /// Accessor of translation vector /// SOPHUS_FUNC Map const, Options> const& translation() const { return translation_; } protected: Map const, Options> const so2_; Map const, Options> const translation_; }; } // namespace Eigen #endif ================================================ FILE: LIO-Livox/include/sophus/se3.hpp ================================================ /// @file /// Special Euclidean group SE(3) - rotation and translation in 3d. #ifndef SOPHUS_SE3_HPP #define SOPHUS_SE3_HPP #include "so3.hpp" namespace Sophus { template class SE3; using SE3d = SE3; using SE3f = SE3; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { using Scalar = Scalar_; using TranslationType = Sophus::Vector3; using SO3Type = Sophus::SO3; }; template struct traits, Options>> : traits> { using Scalar = Scalar_; using TranslationType = Map, Options>; using SO3Type = Map, Options>; }; template struct traits const, Options>> : traits const> { using Scalar = Scalar_; using TranslationType = Map const, Options>; using SO3Type = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// SE3 base type - implements SE3 class but is storage agnostic. /// /// SE(3) is the group of rotations and translation in 3d. It is the /// semi-direct product of SO(3) and the 3d Euclidean vector space. 
The class /// is represented using a composition of SO3 for rotation and a one 3-vector /// for translation. /// /// SE(3) is neither compact, nor a commutative group. /// /// See SO3 for more details of the rotation representation in 3d. /// template class SE3Base { public: using Scalar = typename Eigen::internal::traits::Scalar; using TranslationType = typename Eigen::internal::traits::TranslationType; using SO3Type = typename Eigen::internal::traits::SO3Type; using QuaternionType = typename SO3Type::QuaternionType; /// Degrees of freedom of manifold, number of dimensions in tangent space /// (three for translation, three for rotation). static int constexpr DoF = 6; /// Number of internal parameters used (4-tuple for quaternion, three for /// translation). static int constexpr num_parameters = 7; /// Group transformations are 4x4 matrices. static int constexpr N = 4; using Transformation = Matrix; using Point = Vector3; using HomogeneousPoint = Vector4; using Line = ParametrizedLine3; using Tangent = Vector; using Adjoint = Matrix; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with SE3 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using SE3Product = SE3>; template using PointProduct = Vector3>; template using HomogeneousPointProduct = Vector4>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. 
/// SOPHUS_FUNC Adjoint Adj() const { Sophus::Matrix3 const R = so3().matrix(); Adjoint res; res.block(0, 0, 3, 3) = R; res.block(3, 3, 3, 3) = R; res.block(0, 3, 3, 3) = SO3::hat(translation()) * R; res.block(3, 0, 3, 3) = Matrix3::Zero(3, 3); return res; } /// Extract rotation angle about canonical X-axis /// Scalar angleX() const { return so3().angleX(); } /// Extract rotation angle about canonical Y-axis /// Scalar angleY() const { return so3().angleY(); } /// Extract rotation angle about canonical Z-axis /// Scalar angleZ() const { return so3().angleZ(); } /// Returns copy of instance casted to NewScalarType. /// template SOPHUS_FUNC SE3 cast() const { return SE3(so3().template cast(), translation().template cast()); } /// Returns derivative of this * exp(x) wrt x at x=0. /// SOPHUS_FUNC Matrix Dx_this_mul_exp_x_at_0() const { Matrix J; Eigen::Quaternion const q = unit_quaternion(); Scalar const c0 = Scalar(0.5) * q.w(); Scalar const c1 = Scalar(0.5) * q.z(); Scalar const c2 = -c1; Scalar const c3 = Scalar(0.5) * q.y(); Scalar const c4 = Scalar(0.5) * q.x(); Scalar const c5 = -c4; Scalar const c6 = -c3; Scalar const c7 = q.w() * q.w(); Scalar const c8 = q.x() * q.x(); Scalar const c9 = q.y() * q.y(); Scalar const c10 = -c9; Scalar const c11 = q.z() * q.z(); Scalar const c12 = -c11; Scalar const c13 = Scalar(2) * q.w(); Scalar const c14 = c13 * q.z(); Scalar const c15 = Scalar(2) * q.x(); Scalar const c16 = c15 * q.y(); Scalar const c17 = c13 * q.y(); Scalar const c18 = c15 * q.z(); Scalar const c19 = c7 - c8; Scalar const c20 = c13 * q.x(); Scalar const c21 = Scalar(2) * q.y() * q.z(); J(0, 0) = 0; J(0, 1) = 0; J(0, 2) = 0; J(0, 3) = c0; J(0, 4) = c2; J(0, 5) = c3; J(1, 0) = 0; J(1, 1) = 0; J(1, 2) = 0; J(1, 3) = c1; J(1, 4) = c0; J(1, 5) = c5; J(2, 0) = 0; J(2, 1) = 0; J(2, 2) = 0; J(2, 3) = c6; J(2, 4) = c4; J(2, 5) = c0; J(3, 0) = 0; J(3, 1) = 0; J(3, 2) = 0; J(3, 3) = c5; J(3, 4) = c6; J(3, 5) = c2; J(4, 0) = c10 + c12 + c7 + c8; J(4, 1) = -c14 + c16; J(4, 
2) = c17 + c18; J(4, 3) = 0; J(4, 4) = 0; J(4, 5) = 0; J(5, 0) = c14 + c16; J(5, 1) = c12 + c19 + c9; J(5, 2) = -c20 + c21; J(5, 3) = 0; J(5, 4) = 0; J(5, 5) = 0; J(6, 0) = -c17 + c18; J(6, 1) = c20 + c21; J(6, 2) = c10 + c11 + c19; J(6, 3) = 0; J(6, 4) = 0; J(6, 5) = 0; return J; } /// Returns group inverse. /// SOPHUS_FUNC SE3 inverse() const { SO3 invR = so3().inverse(); return SE3(invR, invR * (translation() * Scalar(-1))); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (rigid body transformations) to elements of the /// tangent space (twist). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of SE(3). /// SOPHUS_FUNC Tangent log() const { // For the derivation of the logarithm of SE(3), see // J. Gallier, D. Xu, "Computing exponentials of skew symmetric matrices // and logarithms of orthogonal matrices", IJRA 2002. // https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf // (Sec. 6., pp. 8) using std::abs; using std::cos; using std::sin; Tangent upsilon_omega; auto omega_and_theta = so3().logAndTheta(); Scalar theta = omega_and_theta.theta; upsilon_omega.template tail<3>() = omega_and_theta.tangent; Matrix3 const Omega = SO3::hat(upsilon_omega.template tail<3>()); if (abs(theta) < Constants::epsilon()) { Matrix3 const V_inv = Matrix3::Identity() - Scalar(0.5) * Omega + Scalar(1. / 12.) * (Omega * Omega); upsilon_omega.template head<3>() = V_inv * translation(); } else { Scalar const half_theta = Scalar(0.5) * theta; Matrix3 const V_inv = (Matrix3::Identity() - Scalar(0.5) * Omega + (Scalar(1) - theta * cos(half_theta) / (Scalar(2) * sin(half_theta))) / (theta * theta) * (Omega * Omega)); upsilon_omega.template head<3>() = V_inv * translation(); } return upsilon_omega; } /// It re-normalizes the SO3 element. 
/// /// Note: Because of the class invariant of SO3, there is typically no need to /// call this function directly. /// SOPHUS_FUNC void normalize() { so3().normalize(); } /// Returns 4x4 matrix representation of the instance. /// /// It has the following form: /// /// | R t | /// | o 1 | /// /// where ``R`` is a 3x3 rotation matrix, ``t`` a translation 3-vector and /// ``o`` a 3-column vector of zeros. /// SOPHUS_FUNC Transformation matrix() const { Transformation homogenious_matrix; homogenious_matrix.template topLeftCorner<3, 4>() = matrix3x4(); homogenious_matrix.row(3) = Matrix(Scalar(0), Scalar(0), Scalar(0), Scalar(1)); return homogenious_matrix; } /// Returns the significant first three rows of the matrix above. /// SOPHUS_FUNC Matrix matrix3x4() const { Matrix matrix; matrix.template topLeftCorner<3, 3>() = rotationMatrix(); matrix.col(3) = translation(); return matrix; } /// Assignment operator. /// SOPHUS_FUNC SE3Base& operator=(SE3Base const& other) = default; /// Assignment-like operator from OtherDerived. /// template SOPHUS_FUNC SE3Base& operator=(SE3Base const& other) { so3() = other.so3(); translation() = other.translation(); return *this; } /// Group multiplication, which is rotation concatenation. /// template SOPHUS_FUNC SE3Product operator*( SE3Base const& other) const { return SE3Product( so3() * other.so3(), translation() + so3() * other.translation()); } /// Group action on 3-points. /// /// This function rotates and translates a three dimensional point ``p`` by /// the SE(3) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body /// transformation): /// /// ``p_bar = bar_R_foo * p_foo + t_bar``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { return so3() * p + translation(); } /// Group action on homogeneous 3-points. See above for more details. 
/// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const PointProduct tp = so3() * p.template head<3>() + p(3) * translation(); return HomogeneousPointProduct(tp(0), tp(1), tp(2), p(3)); } /// Group action on lines. /// /// This function rotates and translates a parametrized line /// ``l(t) = o + t * d`` by the SE(3) element: /// /// Origin is transformed using SE(3) action /// Direction is transformed using rotation part /// SOPHUS_FUNC Line operator*(Line const& l) const { return Line((*this) * l.origin(), so3() * l.direction()); } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SE3's Scalar type. /// template >::value>::type> SOPHUS_FUNC SE3Base& operator*=(SE3Base const& other) { *static_cast(this) = *this * other; return *this; } /// Returns rotation matrix. /// SOPHUS_FUNC Matrix3 rotationMatrix() const { return so3().matrix(); } /// Mutator of SO3 group. /// SOPHUS_FUNC SO3Type& so3() { return static_cast(this)->so3(); } /// Accessor of SO3 group. /// SOPHUS_FUNC SO3Type const& so3() const { return static_cast(this)->so3(); } /// Takes in quaternion, and normalizes it. /// /// Precondition: The quaternion must not be close to zero. /// SOPHUS_FUNC void setQuaternion(Eigen::Quaternion const& quat) { so3().setQuaternion(quat); } /// Sets ``so3`` using ``rotation_matrix``. /// /// Precondition: ``R`` must be orthogonal and ``det(R)=1``. /// SOPHUS_FUNC void setRotationMatrix(Matrix3 const& R) { SOPHUS_ENSURE(isOrthogonal(R), "R is not orthogonal:\n %", R); SOPHUS_ENSURE(R.determinant() > Scalar(0), "det(R) is not positive: %", R.determinant()); so3().setQuaternion(Eigen::Quaternion(R)); } /// Returns internal parameters of SE(3). /// /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real, t[0], t[1], t[2]), /// with q being the unit quaternion, t the translation 3-vector. 
/// SOPHUS_FUNC Sophus::Vector params() const { Sophus::Vector p; p << so3().params(), translation(); return p; } /// Mutator of translation vector. /// SOPHUS_FUNC TranslationType& translation() { return static_cast(this)->translation(); } /// Accessor of translation vector /// SOPHUS_FUNC TranslationType const& translation() const { return static_cast(this)->translation(); } /// Accessor of unit quaternion. /// SOPHUS_FUNC QuaternionType const& unit_quaternion() const { return this->so3().unit_quaternion(); } }; /// SE3 using default storage; derived from SE3Base. template class SE3 : public SE3Base> { using Base = SE3Base>; public: static int constexpr DoF = Base::DoF; static int constexpr num_parameters = Base::num_parameters; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using SO3Member = SO3; using TranslationMember = Vector3; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes rigid body motion to the identity. /// SOPHUS_FUNC SE3(); /// Copy constructor /// SOPHUS_FUNC SE3(SE3 const& other) = default; /// Copy-like constructor from OtherDerived. /// template SOPHUS_FUNC SE3(SE3Base const& other) : so3_(other.so3()), translation_(other.translation()) { static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from SO3 and translation vector /// template SOPHUS_FUNC SE3(SO3Base const& so3, Eigen::MatrixBase const& translation) : so3_(so3), translation_(translation) { static_assert(std::is_same::value, "must be same Scalar type"); static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from rotation matrix and translation vector /// /// Precondition: Rotation matrix needs to be orthogonal with determinant /// of 1. 
/// SOPHUS_FUNC SE3(Matrix3 const& rotation_matrix, Point const& translation) : so3_(rotation_matrix), translation_(translation) {} /// Constructor from quaternion and translation vector. /// /// Precondition: ``quaternion`` must not be close to zero. /// SOPHUS_FUNC SE3(Eigen::Quaternion const& quaternion, Point const& translation) : so3_(quaternion), translation_(translation) {} /// Constructor from 4x4 matrix /// /// Precondition: Rotation matrix needs to be orthogonal with determinant /// of 1. The last row must be ``(0, 0, 0, 1)``. /// SOPHUS_FUNC explicit SE3(Matrix4 const& T) : so3_(T.template topLeftCorner<3, 3>()), translation_(T.template block<3, 1>(0, 3)) { SOPHUS_ENSURE((T.row(3) - Matrix(Scalar(0), Scalar(0), Scalar(0), Scalar(1))) .squaredNorm() < Constants::epsilon(), "Last row is not (0,0,0,1), but (%).", T.row(3)); } /// This provides unsafe read/write access to internal data. SO(3) is /// represented by an Eigen::Quaternion (four parameters). When using direct /// write access, the user needs to take care of that the quaternion stays /// normalized. /// SOPHUS_FUNC Scalar* data() { // so3_ and translation_ are laid out sequentially with no padding return so3_.data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { // so3_ and translation_ are laid out sequentially with no padding return so3_.data(); } /// Mutator of SO3 /// SOPHUS_FUNC SO3Member& so3() { return so3_; } /// Accessor of SO3 /// SOPHUS_FUNC SO3Member const& so3() const { return so3_; } /// Mutator of translation vector /// SOPHUS_FUNC TranslationMember& translation() { return translation_; } /// Accessor of translation vector /// SOPHUS_FUNC TranslationMember const& translation() const { return translation_; } /// Returns derivative of exp(x) wrt. x. 
/// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x( Tangent const& upsilon_omega) { using std::cos; using std::pow; using std::sin; using std::sqrt; Sophus::Matrix J; Sophus::Vector upsilon = upsilon_omega.template head<3>(); Sophus::Vector omega = upsilon_omega.template tail<3>(); Scalar const c0 = omega[0] * omega[0]; Scalar const c1 = omega[1] * omega[1]; Scalar const c2 = omega[2] * omega[2]; Scalar const c3 = c0 + c1 + c2; Scalar const o(0); Scalar const h(0.5); Scalar const i(1); if (c3 < Constants::epsilon()) { Scalar const ux = Scalar(0.5) * upsilon[0]; Scalar const uy = Scalar(0.5) * upsilon[1]; Scalar const uz = Scalar(0.5) * upsilon[2]; /// clang-format off J << o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o, o, i, o, o, o, uz, -uy, o, i, o, -uz, o, ux, o, o, i, uy, -ux, o; /// clang-format on return J; } Scalar const c4 = sqrt(c3); Scalar const c5 = Scalar(1.0) / c4; Scalar const c6 = Scalar(0.5) * c4; Scalar const c7 = sin(c6); Scalar const c8 = c5 * c7; Scalar const c9 = pow(c3, -3.0L / 2.0L); Scalar const c10 = c7 * c9; Scalar const c11 = Scalar(1.0) / c3; Scalar const c12 = cos(c6); Scalar const c13 = Scalar(0.5) * c11 * c12; Scalar const c14 = c7 * c9 * omega[0]; Scalar const c15 = Scalar(0.5) * c11 * c12 * omega[0]; Scalar const c16 = -c14 * omega[1] + c15 * omega[1]; Scalar const c17 = -c14 * omega[2] + c15 * omega[2]; Scalar const c18 = omega[1] * omega[2]; Scalar const c19 = -c10 * c18 + c13 * c18; Scalar const c20 = c5 * omega[0]; Scalar const c21 = Scalar(0.5) * c7; Scalar const c22 = c5 * omega[1]; Scalar const c23 = c5 * omega[2]; Scalar const c24 = -c1; Scalar const c25 = -c2; Scalar const c26 = c24 + c25; Scalar const c27 = sin(c4); Scalar const c28 = -c27 + c4; Scalar const c29 = c28 * c9; Scalar const c30 = cos(c4); Scalar const c31 = -c30 + Scalar(1); Scalar const c32 = c11 * c31; Scalar const c33 = c32 * omega[2]; Scalar const c34 = c29 * omega[0]; Scalar const c35 = c34 * omega[1]; Scalar const c36 = c32 * omega[1]; 
Scalar const c37 = c34 * omega[2]; Scalar const c38 = pow(c3, -5.0L / 2.0L); Scalar const c39 = Scalar(3) * c28 * c38 * omega[0]; Scalar const c40 = c26 * c9; Scalar const c41 = -c20 * c30 + c20; Scalar const c42 = c27 * c9 * omega[0]; Scalar const c43 = c42 * omega[1]; Scalar const c44 = pow(c3, -2); Scalar const c45 = Scalar(2) * c31 * c44 * omega[0]; Scalar const c46 = c45 * omega[1]; Scalar const c47 = c29 * omega[2]; Scalar const c48 = c43 - c46 + c47; Scalar const c49 = Scalar(3) * c0 * c28 * c38; Scalar const c50 = c9 * omega[0] * omega[2]; Scalar const c51 = c41 * c50 - c49 * omega[2]; Scalar const c52 = c9 * omega[0] * omega[1]; Scalar const c53 = c41 * c52 - c49 * omega[1]; Scalar const c54 = c42 * omega[2]; Scalar const c55 = c45 * omega[2]; Scalar const c56 = c29 * omega[1]; Scalar const c57 = -c54 + c55 + c56; Scalar const c58 = Scalar(-2) * c56; Scalar const c59 = Scalar(3) * c28 * c38 * omega[1]; Scalar const c60 = -c22 * c30 + c22; Scalar const c61 = -c18 * c39; Scalar const c62 = c32 + c61; Scalar const c63 = c27 * c9; Scalar const c64 = c1 * c63; Scalar const c65 = Scalar(2) * c31 * c44; Scalar const c66 = c1 * c65; Scalar const c67 = c50 * c60; Scalar const c68 = -c1 * c39 + c52 * c60; Scalar const c69 = c18 * c63; Scalar const c70 = c18 * c65; Scalar const c71 = c34 - c69 + c70; Scalar const c72 = Scalar(-2) * c47; Scalar const c73 = Scalar(3) * c28 * c38 * omega[2]; Scalar const c74 = -c23 * c30 + c23; Scalar const c75 = -c32 + c61; Scalar const c76 = c2 * c63; Scalar const c77 = c2 * c65; Scalar const c78 = c52 * c74; Scalar const c79 = c34 + c69 - c70; Scalar const c80 = -c2 * c39 + c50 * c74; Scalar const c81 = -c0; Scalar const c82 = c25 + c81; Scalar const c83 = c32 * omega[0]; Scalar const c84 = c18 * c29; Scalar const c85 = Scalar(-2) * c34; Scalar const c86 = c82 * c9; Scalar const c87 = c0 * c63; Scalar const c88 = c0 * c65; Scalar const c89 = c9 * omega[1] * omega[2]; Scalar const c90 = c41 * c89; Scalar const c91 = c54 - c55 + c56; 
Scalar const c92 = -c1 * c73 + c60 * c89; Scalar const c93 = -c43 + c46 + c47; Scalar const c94 = -c2 * c59 + c74 * c89; Scalar const c95 = c24 + c81; Scalar const c96 = c9 * c95; J(0, 0) = o; J(0, 1) = o; J(0, 2) = o; J(0, 3) = -c0 * c10 + c0 * c13 + c8; J(0, 4) = c16; J(0, 5) = c17; J(1, 0) = o; J(1, 1) = o; J(1, 2) = o; J(1, 3) = c16; J(1, 4) = -c1 * c10 + c1 * c13 + c8; J(1, 5) = c19; J(2, 0) = o; J(2, 1) = o; J(2, 2) = o; J(2, 3) = c17; J(2, 4) = c19; J(2, 5) = -c10 * c2 + c13 * c2 + c8; J(3, 0) = o; J(3, 1) = o; J(3, 2) = o; J(3, 3) = -c20 * c21; J(3, 4) = -c21 * c22; J(3, 5) = -c21 * c23; J(4, 0) = c26 * c29 + Scalar(1); J(4, 1) = -c33 + c35; J(4, 2) = c36 + c37; J(4, 3) = upsilon[0] * (-c26 * c39 + c40 * c41) + upsilon[1] * (c53 + c57) + upsilon[2] * (c48 + c51); J(4, 4) = upsilon[0] * (-c26 * c59 + c40 * c60 + c58) + upsilon[1] * (c68 + c71) + upsilon[2] * (c62 + c64 - c66 + c67); J(4, 5) = upsilon[0] * (-c26 * c73 + c40 * c74 + c72) + upsilon[1] * (c75 - c76 + c77 + c78) + upsilon[2] * (c79 + c80); J(5, 0) = c33 + c35; J(5, 1) = c29 * c82 + Scalar(1); J(5, 2) = -c83 + c84; J(5, 3) = upsilon[0] * (c53 + c91) + upsilon[1] * (-c39 * c82 + c41 * c86 + c85) + upsilon[2] * (c75 - c87 + c88 + c90); J(5, 4) = upsilon[0] * (c68 + c79) + upsilon[1] * (-c59 * c82 + c60 * c86) + upsilon[2] * (c92 + c93); J(5, 5) = upsilon[0] * (c62 + c76 - c77 + c78) + upsilon[1] * (c72 - c73 * c82 + c74 * c86) + upsilon[2] * (c57 + c94); J(6, 0) = -c36 + c37; J(6, 1) = c83 + c84; J(6, 2) = c29 * c95 + Scalar(1); J(6, 3) = upsilon[0] * (c51 + c93) + upsilon[1] * (c62 + c87 - c88 + c90) + upsilon[2] * (-c39 * c95 + c41 * c96 + c85); J(6, 4) = upsilon[0] * (-c64 + c66 + c67 + c75) + upsilon[1] * (c48 + c92) + upsilon[2] * (c58 - c59 * c95 + c60 * c96); J(6, 5) = upsilon[0] * (c71 + c80) + upsilon[1] * (c91 + c94) + upsilon[2] * (-c73 * c95 + c74 * c96); return J; } /// Returns derivative of exp(x) wrt. x_i at x=0. 
/// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x_at_0() { Sophus::Matrix J; Scalar const o(0); Scalar const h(0.5); Scalar const i(1); // clang-format off J << o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o, o, i, o, o, o, o, o, o, i, o, o, o, o, o, o, i, o, o, o; // clang-format on return J; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. /// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Group exponential /// /// This functions takes in an element of tangent space (= twist ``a``) and /// returns the corresponding element of the group SE(3). /// /// The first three components of ``a`` represent the translational part /// ``upsilon`` in the tangent space of SE(3), while the last three components /// of ``a`` represents the rotation vector ``omega``. /// To be more specific, this function computes ``expmat(hat(a))`` with /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator /// of SE(3), see below. /// SOPHUS_FUNC static SE3 exp(Tangent const& a) { using std::cos; using std::sin; Vector3 const omega = a.template tail<3>(); Scalar theta; SO3 const so3 = SO3::expAndTheta(omega, &theta); Matrix3 const Omega = SO3::hat(omega); Matrix3 const Omega_sq = Omega * Omega; Matrix3 V; if (theta < Constants::epsilon()) { V = so3.matrix(); /// Note: That is an accurate expansion! } else { Scalar theta_sq = theta * theta; V = (Matrix3::Identity() + (Scalar(1) - cos(theta)) / (theta_sq)*Omega + (theta - sin(theta)) / (theta_sq * theta) * Omega_sq); } return SE3(so3, V * a.template head<3>()); } /// Returns closest SE3 given arbirary 4x4 matrix. /// template SOPHUS_FUNC static enable_if_t::value, SE3> fitToSE3(Matrix4 const& T) { return SE3(SO3::fitToSO3(T.template block<3, 3>(0, 0)), T.template block<3, 1>(0, 3)); } /// Returns the ith infinitesimal generators of SE(3). 
/// /// The infinitesimal generators of SE(3) are: /// /// ``` /// | 0 0 0 1 | /// G_0 = | 0 0 0 0 | /// | 0 0 0 0 | /// | 0 0 0 0 | /// /// | 0 0 0 0 | /// G_1 = | 0 0 0 1 | /// | 0 0 0 0 | /// | 0 0 0 0 | /// /// | 0 0 0 0 | /// G_2 = | 0 0 0 0 | /// | 0 0 0 1 | /// | 0 0 0 0 | /// /// | 0 0 0 0 | /// G_3 = | 0 0 -1 0 | /// | 0 1 0 0 | /// | 0 0 0 0 | /// /// | 0 0 1 0 | /// G_4 = | 0 0 0 0 | /// | -1 0 0 0 | /// | 0 0 0 0 | /// /// | 0 -1 0 0 | /// G_5 = | 1 0 0 0 | /// | 0 0 0 0 | /// | 0 0 0 0 | /// ``` /// /// Precondition: ``i`` must be in [0, 5]. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 && i <= 5, "i should be in range [0,5]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 6-vector representation (= twist) and returns the /// corresponding matrix representation of Lie algebra element. /// /// Formally, the hat()-operator of SE(3) is defined as /// /// ``hat(.): R^6 -> R^{4x4}, hat(a) = sum_i a_i * G_i`` (for i=0,...,5) /// /// with ``G_i`` being the ith infinitesimal generator of SE(3). /// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation Omega; Omega.setZero(); Omega.template topLeftCorner<3, 3>() = SO3::hat(a.template tail<3>()); Omega.col(3).template head<3>() = a.template head<3>(); return Omega; } /// Lie bracket /// /// It computes the Lie bracket of SE(3). To be more specific, it computes /// /// ``[omega_1, omega_2]_se3 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of SE(3). 
/// SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) { Vector3 const upsilon1 = a.template head<3>(); Vector3 const upsilon2 = b.template head<3>(); Vector3 const omega1 = a.template tail<3>(); Vector3 const omega2 = b.template tail<3>(); Tangent res; res.template head<3>() = omega1.cross(upsilon2) + upsilon1.cross(omega2); res.template tail<3>() = omega1.cross(omega2); return res; } /// Construct x-axis rotation. /// static SOPHUS_FUNC SE3 rotX(Scalar const& x) { return SE3(SO3::rotX(x), Sophus::Vector3::Zero()); } /// Construct y-axis rotation. /// static SOPHUS_FUNC SE3 rotY(Scalar const& y) { return SE3(SO3::rotY(y), Sophus::Vector3::Zero()); } /// Construct z-axis rotation. /// static SOPHUS_FUNC SE3 rotZ(Scalar const& z) { return SE3(SO3::rotZ(z), Sophus::Vector3::Zero()); } /// Draw uniform sample from SE(3) manifold. /// /// Translations are drawn component-wise from the range [-1, 1]. /// template static SE3 sampleUniform(UniformRandomBitGenerator& generator) { std::uniform_real_distribution uniform(Scalar(-1), Scalar(1)); return SE3(SO3::sampleUniform(generator), Vector3(uniform(generator), uniform(generator), uniform(generator))); } /// Construct a translation only SE3 instance. /// template static SOPHUS_FUNC SE3 trans(T0 const& x, T1 const& y, T2 const& z) { return SE3(SO3(), Vector3(x, y, z)); } static SOPHUS_FUNC SE3 trans(Vector3 const& xyz) { return SE3(SO3(), xyz); } /// Construct x-axis translation. /// static SOPHUS_FUNC SE3 transX(Scalar const& x) { return SE3::trans(x, Scalar(0), Scalar(0)); } /// Construct y-axis translation. /// static SOPHUS_FUNC SE3 transY(Scalar const& y) { return SE3::trans(Scalar(0), y, Scalar(0)); } /// Construct z-axis translation. /// static SOPHUS_FUNC SE3 transZ(Scalar const& z) { return SE3::trans(Scalar(0), Scalar(0), z); } /// vee-operator /// /// It takes 4x4-matrix representation ``Omega`` and maps it to the /// corresponding 6-vector representation of Lie algebra. 
/// /// This is the inverse of the hat()-operator, see above. /// /// Precondition: ``Omega`` must have the following structure: /// /// | 0 -f e a | /// | f 0 -d b | /// | -e d 0 c /// | 0 0 0 0 | . /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { Tangent upsilon_omega; upsilon_omega.template head<3>() = Omega.col(3).template head<3>(); upsilon_omega.template tail<3>() = SO3::vee(Omega.template topLeftCorner<3, 3>()); return upsilon_omega; } protected: SO3Member so3_; TranslationMember translation_; }; template SE3::SE3() : translation_(TranslationMember::Zero()) { static_assert(std::is_standard_layout::value, "Assume standard layout for the use of offsetof check below."); static_assert( offsetof(SE3, so3_) + sizeof(Scalar) * SO3::num_parameters == offsetof(SE3, translation_), "This class assumes packed storage and hence will only work " "correctly depending on the compiler (options) - in " "particular when using [this->data(), this-data() + " "num_parameters] to access the raw data in a contiguous fashion."); } } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``SE3``; derived from SE3Base. /// /// Allows us to wrap SE3 objects around POD array. 
template class Map, Options> : public Sophus::SE3Base, Options>> { public: using Base = Sophus::SE3Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : so3_(coeffs), translation_(coeffs + Sophus::SO3::num_parameters) {} /// Mutator of SO3 /// SOPHUS_FUNC Map, Options>& so3() { return so3_; } /// Accessor of SO3 /// SOPHUS_FUNC Map, Options> const& so3() const { return so3_; } /// Mutator of translation vector /// SOPHUS_FUNC Map>& translation() { return translation_; } /// Accessor of translation vector /// SOPHUS_FUNC Map> const& translation() const { return translation_; } protected: Map, Options> so3_; Map, Options> translation_; }; /// Specialization of Eigen::Map for ``SE3 const``; derived from SE3Base. /// /// Allows us to wrap SE3 objects around POD array. 
template class Map const, Options> : public Sophus::SE3Base const, Options>> { public: using Base = Sophus::SE3Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : so3_(coeffs), translation_(coeffs + Sophus::SO3::num_parameters) {} /// Accessor of SO3 /// SOPHUS_FUNC Map const, Options> const& so3() const { return so3_; } /// Accessor of translation vector /// SOPHUS_FUNC Map const, Options> const& translation() const { return translation_; } protected: Map const, Options> const so3_; Map const, Options> const translation_; }; } // namespace Eigen #endif ================================================ FILE: LIO-Livox/include/sophus/sim2.hpp ================================================ /// @file /// Similarity group Sim(2) - scaling, rotation and translation in 2d. #ifndef SOPHUS_SIM2_HPP #define SOPHUS_SIM2_HPP #include "rxso2.hpp" #include "sim_details.hpp" namespace Sophus { template class Sim2; using Sim2d = Sim2; using Sim2f = Sim2; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { using Scalar = Scalar_; using TranslationType = Sophus::Vector2; using RxSO2Type = Sophus::RxSO2; }; template struct traits, Options>> : traits> { using Scalar = Scalar_; using TranslationType = Map, Options>; using RxSO2Type = Map, Options>; }; template struct traits const, Options>> : traits const> { using Scalar = Scalar_; using TranslationType = Map const, Options>; using RxSO2Type = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// Sim2 base type - implements Sim2 class but is storage agnostic. /// /// Sim(2) is the group of rotations and translation and scaling in 2d. 
It is /// the semi-direct product of R+xSO(2) and the 2d Euclidean vector space. The /// class is represented using a composition of RxSO2 for scaling plus /// rotation and a 2-vector for translation. /// /// Sim(2) is neither compact, nor a commutative group. /// /// See RxSO2 for more details of the scaling + rotation representation in 2d. /// template class Sim2Base { public: using Scalar = typename Eigen::internal::traits::Scalar; using TranslationType = typename Eigen::internal::traits::TranslationType; using RxSO2Type = typename Eigen::internal::traits::RxSO2Type; /// Degrees of freedom of manifold, number of dimensions in tangent space /// (two for translation, one for rotation and one for scaling). static int constexpr DoF = 4; /// Number of internal parameters used (2-tuple for complex number, two for /// translation). static int constexpr num_parameters = 4; /// Group transformations are 3x3 matrices. static int constexpr N = 3; using Transformation = Matrix; using Point = Vector2; using HomogeneousPoint = Vector3; using Line = ParametrizedLine2; using Tangent = Vector; using Adjoint = Matrix; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with SIM2 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using Sim2Product = Sim2>; template using PointProduct = Vector2>; template using HomogeneousPointProduct = Vector3>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. 
/// SOPHUS_FUNC Adjoint Adj() const { Adjoint res; res.setZero(); res.template block<2, 2>(0, 0) = rxso2().matrix(); res(0, 2) = translation()[1]; res(1, 2) = -translation()[0]; res.template block<2, 1>(0, 3) = -translation(); res(2, 2) = Scalar(1); res(3, 3) = Scalar(1); return res; } /// Returns copy of instance casted to NewScalarType. /// template SOPHUS_FUNC Sim2 cast() const { return Sim2(rxso2().template cast(), translation().template cast()); } /// Returns group inverse. /// SOPHUS_FUNC Sim2 inverse() const { RxSO2 invR = rxso2().inverse(); return Sim2(invR, invR * (translation() * Scalar(-1))); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (rigid body transformations) to elements of the /// tangent space (twist). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of Sim(2). /// SOPHUS_FUNC Tangent log() const { /// The derivation of the closed-form Sim(2) logarithm for is done /// analogously to the closed-form solution of the SE(2) logarithm, see /// J. Gallier, D. Xu, "Computing exponentials of skew symmetric matrices /// and logarithms of orthogonal matrices", IJRA 2002. /// https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf /// (Sec. 6., pp. 8) Tangent res; Vector2 const theta_sigma = rxso2().log(); Scalar const theta = theta_sigma[0]; Scalar const sigma = theta_sigma[1]; Matrix2 const Omega = SO2::hat(theta); Matrix2 const W_inv = details::calcWInv(Omega, theta, sigma, scale()); res.segment(0, 2) = W_inv * translation(); res[2] = theta; res[3] = sigma; return res; } /// Returns 3x3 matrix representation of the instance. /// /// It has the following form: /// /// | s*R t | /// | o 1 | /// /// where ``R`` is a 2x2 rotation matrix, ``s`` a scale factor, ``t`` a /// translation 2-vector and ``o`` a 2-column vector of zeros. 
/// SOPHUS_FUNC Transformation matrix() const { Transformation homogenious_matrix; homogenious_matrix.template topLeftCorner<2, 3>() = matrix2x3(); homogenious_matrix.row(2) = Matrix(Scalar(0), Scalar(0), Scalar(1)); return homogenious_matrix; } /// Returns the significant first two rows of the matrix above. /// SOPHUS_FUNC Matrix matrix2x3() const { Matrix matrix; matrix.template topLeftCorner<2, 2>() = rxso2().matrix(); matrix.col(2) = translation(); return matrix; } /// Assignment operator. /// SOPHUS_FUNC Sim2Base& operator=(Sim2Base const& other) = default; /// Assignment-like operator from OtherDerived. /// template SOPHUS_FUNC Sim2Base& operator=( Sim2Base const& other) { rxso2() = other.rxso2(); translation() = other.translation(); return *this; } /// Group multiplication, which is rotation plus scaling concatenation. /// /// Note: That scaling is calculated with saturation. See RxSO2 for /// details. /// template SOPHUS_FUNC Sim2Product operator*( Sim2Base const& other) const { return Sim2Product( rxso2() * other.rxso2(), translation() + rxso2() * other.translation()); } /// Group action on 2-points. /// /// This function rotates, scales and translates a two dimensional point /// ``p`` by the Sim(2) element ``(bar_sR_foo, t_bar)`` (= similarity /// transformation): /// /// ``p_bar = bar_sR_foo * p_foo + t_bar``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { return rxso2() * p + translation(); } /// Group action on homogeneous 2-points. See above for more details. /// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const PointProduct tp = rxso2() * p.template head<2>() + p(2) * translation(); return HomogeneousPointProduct(tp(0), tp(1), p(2)); } /// Group action on lines. 
/// /// This function rotates, scales and translates a parametrized line /// ``l(t) = o + t * d`` by the Sim(2) element: /// /// Origin ``o`` is rotated, scaled and translated /// Direction ``d`` is rotated /// SOPHUS_FUNC Line operator*(Line const& l) const { Line rotatedLine = rxso2() * l; return Line(rotatedLine.origin() + translation(), rotatedLine.direction()); } /// Returns internal parameters of Sim(2). /// /// It returns (c[0], c[1], t[0], t[1]), /// with c being the complex number, t the translation 3-vector. /// SOPHUS_FUNC Sophus::Vector params() const { Sophus::Vector p; p << rxso2().params(), translation(); return p; } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SO2's Scalar type. /// template >::value>::type> SOPHUS_FUNC Sim2Base& operator*=( Sim2Base const& other) { *static_cast(this) = *this * other; return *this; } /// Setter of non-zero complex number. /// /// Precondition: ``z`` must not be close to zero. /// SOPHUS_FUNC void setComplex(Vector2 const& z) { rxso2().setComplex(z); } /// Accessor of complex number. /// SOPHUS_FUNC typename Eigen::internal::traits::RxSO2Type::ComplexType const& complex() const { return rxso2().complex(); } /// Returns Rotation matrix /// SOPHUS_FUNC Matrix2 rotationMatrix() const { return rxso2().rotationMatrix(); } /// Mutator of SO2 group. /// SOPHUS_FUNC RxSO2Type& rxso2() { return static_cast(this)->rxso2(); } /// Accessor of SO2 group. /// SOPHUS_FUNC RxSO2Type const& rxso2() const { return static_cast(this)->rxso2(); } /// Returns scale. /// SOPHUS_FUNC Scalar scale() const { return rxso2().scale(); } /// Setter of complex number using rotation matrix ``R``, leaves scale as is. /// SOPHUS_FUNC void setRotationMatrix(Matrix2& R) { rxso2().setRotationMatrix(R); } /// Sets scale and leaves rotation as is. /// /// Note: This function as a significant computational cost, since it has to /// call the square root twice. 
/// SOPHUS_FUNC void setScale(Scalar const& scale) { rxso2().setScale(scale); } /// Setter of complexnumber using scaled rotation matrix ``sR``. /// /// Precondition: The 2x2 matrix must be "scaled orthogonal" /// and have a positive determinant. /// SOPHUS_FUNC void setScaledRotationMatrix(Matrix2 const& sR) { rxso2().setScaledRotationMatrix(sR); } /// Mutator of translation vector /// SOPHUS_FUNC TranslationType& translation() { return static_cast(this)->translation(); } /// Accessor of translation vector /// SOPHUS_FUNC TranslationType const& translation() const { return static_cast(this)->translation(); } }; /// Sim2 using default storage; derived from Sim2Base. template class Sim2 : public Sim2Base> { public: using Base = Sim2Base>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using RxSo2Member = RxSO2; using TranslationMember = Vector2; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes similarity transform to the identity. /// SOPHUS_FUNC Sim2(); /// Copy constructor /// SOPHUS_FUNC Sim2(Sim2 const& other) = default; /// Copy-like constructor from OtherDerived. /// template SOPHUS_FUNC Sim2(Sim2Base const& other) : rxso2_(other.rxso2()), translation_(other.translation()) { static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from RxSO2 and translation vector /// template SOPHUS_FUNC Sim2(RxSO2Base const& rxso2, Eigen::MatrixBase const& translation) : rxso2_(rxso2), translation_(translation) { static_assert(std::is_same::value, "must be same Scalar type"); static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from complex number and translation vector. /// /// Precondition: complex number must not be close to zero. 
/// template SOPHUS_FUNC Sim2(Vector2 const& complex_number, Eigen::MatrixBase const& translation) : rxso2_(complex_number), translation_(translation) { static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from 3x3 matrix /// /// Precondition: Top-left 2x2 matrix needs to be "scaled-orthogonal" with /// positive determinant. The last row must be ``(0, 0, 1)``. /// SOPHUS_FUNC explicit Sim2(Matrix const& T) : rxso2_((T.template topLeftCorner<2, 2>()).eval()), translation_(T.template block<2, 1>(0, 2)) {} /// This provides unsafe read/write access to internal data. Sim(2) is /// represented by a complex number (two parameters) and a 2-vector. When /// using direct write access, the user needs to take care of that the /// complex number is not set close to zero. /// SOPHUS_FUNC Scalar* data() { // rxso2_ and translation_ are laid out sequentially with no padding return rxso2_.data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { // rxso2_ and translation_ are laid out sequentially with no padding return rxso2_.data(); } /// Accessor of RxSO2 /// SOPHUS_FUNC RxSo2Member& rxso2() { return rxso2_; } /// Mutator of RxSO2 /// SOPHUS_FUNC RxSo2Member const& rxso2() const { return rxso2_; } /// Mutator of translation vector /// SOPHUS_FUNC TranslationMember& translation() { return translation_; } /// Accessor of translation vector /// SOPHUS_FUNC TranslationMember const& translation() const { return translation_; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. /// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Derivative of Lie bracket with respect to first element. /// /// This function returns ``D_a [a, b]`` with ``D_a`` being the /// differential operator with respect to ``a``, ``[a, b]`` being the lie /// bracket of the Lie algebra sim(2). /// See ``lieBracket()`` below. 
/// /// Group exponential /// /// This functions takes in an element of tangent space and returns the /// corresponding element of the group Sim(2). /// /// The first two components of ``a`` represent the translational part /// ``upsilon`` in the tangent space of Sim(2), the following two components /// of ``a`` represents the rotation ``theta`` and the final component /// represents the logarithm of the scaling factor ``sigma``. /// To be more specific, this function computes ``expmat(hat(a))`` with /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator /// of Sim(2), see below. /// SOPHUS_FUNC static Sim2 exp(Tangent const& a) { // For the derivation of the exponential map of Sim(N) see // H. Strasdat, "Local Accuracy and Global Consistency for Efficient Visual // SLAM", PhD thesis, 2012. // http:///hauke.strasdat.net/files/strasdat_thesis_2012.pdf (A.5, pp. 186) Vector2 const upsilon = a.segment(0, 2); Scalar const theta = a[2]; Scalar const sigma = a[3]; RxSO2 rxso2 = RxSO2::exp(a.template tail<2>()); Matrix2 const Omega = SO2::hat(theta); Matrix2 const W = details::calcW(Omega, theta, sigma); return Sim2(rxso2, W * upsilon); } /// Returns the ith infinitesimal generators of Sim(2). /// /// The infinitesimal generators of Sim(2) are: /// /// ``` /// | 0 0 1 | /// G_0 = | 0 0 0 | /// | 0 0 0 | /// /// | 0 0 0 | /// G_1 = | 0 0 1 | /// | 0 0 0 | /// /// | 0 -1 0 | /// G_2 = | 1 0 0 | /// | 0 0 0 | /// /// | 1 0 0 | /// G_3 = | 0 1 0 | /// | 0 0 0 | /// ``` /// /// Precondition: ``i`` must be in [0, 3]. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 || i <= 3, "i should be in range [0,3]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 4-vector representation and returns the corresponding /// matrix representation of Lie algebra element. 
/// /// Formally, the hat()-operator of Sim(2) is defined as /// /// ``hat(.): R^4 -> R^{3x3}, hat(a) = sum_i a_i * G_i`` (for i=0,...,6) /// /// with ``G_i`` being the ith infinitesimal generator of Sim(2). /// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation Omega; Omega.template topLeftCorner<2, 2>() = RxSO2::hat(a.template tail<2>()); Omega.col(2).template head<2>() = a.template head<2>(); Omega.row(2).setZero(); return Omega; } /// Lie bracket /// /// It computes the Lie bracket of Sim(2). To be more specific, it computes /// /// ``[omega_1, omega_2]_sim2 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of Sim(2). /// SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) { Vector2 const upsilon1 = a.template head<2>(); Vector2 const upsilon2 = b.template head<2>(); Scalar const theta1 = a[2]; Scalar const theta2 = b[2]; Scalar const sigma1 = a[3]; Scalar const sigma2 = b[3]; Tangent res; res[0] = -theta1 * upsilon2[1] + theta2 * upsilon1[1] + sigma1 * upsilon2[0] - sigma2 * upsilon1[0]; res[1] = theta1 * upsilon2[0] - theta2 * upsilon1[0] + sigma1 * upsilon2[1] - sigma2 * upsilon1[1]; res[2] = Scalar(0); res[3] = Scalar(0); return res; } /// Draw uniform sample from Sim(2) manifold. /// /// Translations are drawn component-wise from the range [-1, 1]. /// The scale factor is drawn uniformly in log2-space from [-1, 1], /// hence the scale is in [0.5, 2]. 
/// template static Sim2 sampleUniform(UniformRandomBitGenerator& generator) { std::uniform_real_distribution uniform(Scalar(-1), Scalar(1)); return Sim2(RxSO2::sampleUniform(generator), Vector2(uniform(generator), uniform(generator))); } /// vee-operator /// /// It takes the 3x3-matrix representation ``Omega`` and maps it to the /// corresponding 4-vector representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. /// /// Precondition: ``Omega`` must have the following structure: /// /// | d -c a | /// | c d b | /// | 0 0 0 | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { Tangent upsilon_omega_sigma; upsilon_omega_sigma.template head<2>() = Omega.col(2).template head<2>(); upsilon_omega_sigma.template tail<2>() = RxSO2::vee(Omega.template topLeftCorner<2, 2>()); return upsilon_omega_sigma; } protected: RxSo2Member rxso2_; TranslationMember translation_; }; template Sim2::Sim2() : translation_(TranslationMember::Zero()) { static_assert(std::is_standard_layout::value, "Assume standard layout for the use of offsetof check below."); static_assert( offsetof(Sim2, rxso2_) + sizeof(Scalar) * RxSO2::num_parameters == offsetof(Sim2, translation_), "This class assumes packed storage and hence will only work " "correctly depending on the compiler (options) - in " "particular when using [this->data(), this-data() + " "num_parameters] to access the raw data in a contiguous fashion."); } } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``Sim2``; derived from Sim2Base. /// /// Allows us to wrap Sim2 objects around POD array. 
template class Map, Options> : public Sophus::Sim2Base, Options>> { public: using Base = Sophus::Sim2Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : rxso2_(coeffs), translation_(coeffs + Sophus::RxSO2::num_parameters) {} /// Mutator of RxSO2 /// SOPHUS_FUNC Map, Options>& rxso2() { return rxso2_; } /// Accessor of RxSO2 /// SOPHUS_FUNC Map, Options> const& rxso2() const { return rxso2_; } /// Mutator of translation vector /// SOPHUS_FUNC Map, Options>& translation() { return translation_; } /// Accessor of translation vector SOPHUS_FUNC Map, Options> const& translation() const { return translation_; } protected: Map, Options> rxso2_; Map, Options> translation_; }; /// Specialization of Eigen::Map for ``Sim2 const``; derived from Sim2Base. /// /// Allows us to wrap RxSO2 objects around POD array. 
template class Map const, Options> : public Sophus::Sim2Base const, Options>> { public: using Base = Sophus::Sim2Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : rxso2_(coeffs), translation_(coeffs + Sophus::RxSO2::num_parameters) {} /// Accessor of RxSO2 /// SOPHUS_FUNC Map const, Options> const& rxso2() const { return rxso2_; } /// Accessor of translation vector /// SOPHUS_FUNC Map const, Options> const& translation() const { return translation_; } protected: Map const, Options> const rxso2_; Map const, Options> const translation_; }; } // namespace Eigen #endif ================================================ FILE: LIO-Livox/include/sophus/sim3.hpp ================================================ /// @file /// Similarity group Sim(3) - scaling, rotation and translation in 3d. #ifndef SOPHUS_SIM3_HPP #define SOPHUS_SIM3_HPP #include "rxso3.hpp" #include "sim_details.hpp" namespace Sophus { template class Sim3; using Sim3d = Sim3; using Sim3f = Sim3; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { using Scalar = Scalar_; using TranslationType = Sophus::Vector3; using RxSO3Type = Sophus::RxSO3; }; template struct traits, Options>> : traits> { using Scalar = Scalar_; using TranslationType = Map, Options>; using RxSO3Type = Map, Options>; }; template struct traits const, Options>> : traits const> { using Scalar = Scalar_; using TranslationType = Map const, Options>; using RxSO3Type = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// Sim3 base type - implements Sim3 class but is storage agnostic. /// /// Sim(3) is the group of rotations and translation and scaling in 3d. 
It is /// the semi-direct product of R+xSO(3) and the 3d Euclidean vector space. The /// class is represented using a composition of RxSO3 for scaling plus /// rotation and a 3-vector for translation. /// /// Sim(3) is neither compact, nor a commutative group. /// /// See RxSO3 for more details of the scaling + rotation representation in 3d. /// template class Sim3Base { public: using Scalar = typename Eigen::internal::traits::Scalar; using TranslationType = typename Eigen::internal::traits::TranslationType; using RxSO3Type = typename Eigen::internal::traits::RxSO3Type; using QuaternionType = typename RxSO3Type::QuaternionType; /// Degrees of freedom of manifold, number of dimensions in tangent space /// (three for translation, three for rotation and one for scaling). static int constexpr DoF = 7; /// Number of internal parameters used (4-tuple for quaternion, three for /// translation). static int constexpr num_parameters = 7; /// Group transformations are 4x4 matrices. static int constexpr N = 4; using Transformation = Matrix; using Point = Vector3; using HomogeneousPoint = Vector4; using Line = ParametrizedLine3; using Tangent = Vector; using Adjoint = Matrix; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with Sim3 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using Sim3Product = Sim3>; template using PointProduct = Vector3>; template using HomogeneousPointProduct = Vector4>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. 
/// SOPHUS_FUNC Adjoint Adj() const { Matrix3 const R = rxso3().rotationMatrix(); Adjoint res; res.setZero(); res.template block<3, 3>(0, 0) = rxso3().matrix(); res.template block<3, 3>(0, 3) = SO3::hat(translation()) * R; res.template block<3, 1>(0, 6) = -translation(); res.template block<3, 3>(3, 3) = R; res(6, 6) = Scalar(1); return res; } /// Returns copy of instance casted to NewScalarType. /// template SOPHUS_FUNC Sim3 cast() const { return Sim3(rxso3().template cast(), translation().template cast()); } /// Returns group inverse. /// SOPHUS_FUNC Sim3 inverse() const { RxSO3 invR = rxso3().inverse(); return Sim3(invR, invR * (translation() * Scalar(-1))); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (rigid body transformations) to elements of the /// tangent space (twist). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of Sim(3). /// SOPHUS_FUNC Tangent log() const { // The derivation of the closed-form Sim(3) logarithm for is done // analogously to the closed-form solution of the SE(3) logarithm, see // J. Gallier, D. Xu, "Computing exponentials of skew symmetric matrices // and logarithms of orthogonal matrices", IJRA 2002. // https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf // (Sec. 6., pp. 8) Tangent res; auto omega_sigma_and_theta = rxso3().logAndTheta(); Vector3 const omega = omega_sigma_and_theta.tangent.template head<3>(); Scalar sigma = omega_sigma_and_theta.tangent[3]; Matrix3 const Omega = SO3::hat(omega); Matrix3 const W_inv = details::calcWInv( Omega, omega_sigma_and_theta.theta, sigma, scale()); res.segment(0, 3) = W_inv * translation(); res.segment(3, 3) = omega; res[6] = sigma; return res; } /// Returns 4x4 matrix representation of the instance. 
/// /// It has the following form: /// /// | s*R t | /// | o 1 | /// /// where ``R`` is a 3x3 rotation matrix, ``s`` a scale factor, ``t`` a /// translation 3-vector and ``o`` a 3-column vector of zeros. /// SOPHUS_FUNC Transformation matrix() const { Transformation homogenious_matrix; homogenious_matrix.template topLeftCorner<3, 4>() = matrix3x4(); homogenious_matrix.row(3) = Matrix(Scalar(0), Scalar(0), Scalar(0), Scalar(1)); return homogenious_matrix; } /// Returns the significant first three rows of the matrix above. /// SOPHUS_FUNC Matrix matrix3x4() const { Matrix matrix; matrix.template topLeftCorner<3, 3>() = rxso3().matrix(); matrix.col(3) = translation(); return matrix; } /// Assignment operator. /// SOPHUS_FUNC Sim3Base& operator=(Sim3Base const& other) = default; /// Assignment-like operator from OtherDerived. /// template SOPHUS_FUNC Sim3Base& operator=( Sim3Base const& other) { rxso3() = other.rxso3(); translation() = other.translation(); return *this; } /// Group multiplication, which is rotation plus scaling concatenation. /// /// Note: That scaling is calculated with saturation. See RxSO3 for /// details. /// template SOPHUS_FUNC Sim3Product operator*( Sim3Base const& other) const { return Sim3Product( rxso3() * other.rxso3(), translation() + rxso3() * other.translation()); } /// Group action on 3-points. /// /// This function rotates, scales and translates a three dimensional point /// ``p`` by the Sim(3) element ``(bar_sR_foo, t_bar)`` (= similarity /// transformation): /// /// ``p_bar = bar_sR_foo * p_foo + t_bar``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { return rxso3() * p + translation(); } /// Group action on homogeneous 3-points. See above for more details. 
/// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const PointProduct tp = rxso3() * p.template head<3>() + p(3) * translation(); return HomogeneousPointProduct(tp(0), tp(1), tp(2), p(3)); } /// Group action on lines. /// /// This function rotates, scales and translates a parametrized line /// ``l(t) = o + t * d`` by the Sim(3) element: /// /// Origin ``o`` is rotated, scaled and translated /// Direction ``d`` is rotated /// SOPHUS_FUNC Line operator*(Line const& l) const { Line rotatedLine = rxso3() * l; return Line(rotatedLine.origin() + translation(), rotatedLine.direction()); } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SO3's Scalar type. /// template >::value>::type> SOPHUS_FUNC Sim3Base& operator*=( Sim3Base const& other) { *static_cast(this) = *this * other; return *this; } /// Returns internal parameters of Sim(3). /// /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real, t[0], t[1], t[2]), /// with q being the quaternion, t the translation 3-vector. /// SOPHUS_FUNC Sophus::Vector params() const { Sophus::Vector p; p << rxso3().params(), translation(); return p; } /// Setter of non-zero quaternion. /// /// Precondition: ``quat`` must not be close to zero. /// SOPHUS_FUNC void setQuaternion(Eigen::Quaternion const& quat) { rxso3().setQuaternion(quat); } /// Accessor of quaternion. /// SOPHUS_FUNC QuaternionType const& quaternion() const { return rxso3().quaternion(); } /// Returns Rotation matrix /// SOPHUS_FUNC Matrix3 rotationMatrix() const { return rxso3().rotationMatrix(); } /// Mutator of SO3 group. /// SOPHUS_FUNC RxSO3Type& rxso3() { return static_cast(this)->rxso3(); } /// Accessor of SO3 group. /// SOPHUS_FUNC RxSO3Type const& rxso3() const { return static_cast(this)->rxso3(); } /// Returns scale. 
/// SOPHUS_FUNC Scalar scale() const { return rxso3().scale(); } /// Setter of quaternion using rotation matrix ``R``, leaves scale as is. /// SOPHUS_FUNC void setRotationMatrix(Matrix3& R) { rxso3().setRotationMatrix(R); } /// Sets scale and leaves rotation as is. /// /// Note: This function as a significant computational cost, since it has to /// call the square root twice. /// SOPHUS_FUNC void setScale(Scalar const& scale) { rxso3().setScale(scale); } /// Setter of quaternion using scaled rotation matrix ``sR``. /// /// Precondition: The 3x3 matrix must be "scaled orthogonal" /// and have a positive determinant. /// SOPHUS_FUNC void setScaledRotationMatrix(Matrix3 const& sR) { rxso3().setScaledRotationMatrix(sR); } /// Mutator of translation vector /// SOPHUS_FUNC TranslationType& translation() { return static_cast(this)->translation(); } /// Accessor of translation vector /// SOPHUS_FUNC TranslationType const& translation() const { return static_cast(this)->translation(); } }; /// Sim3 using default storage; derived from Sim3Base. template class Sim3 : public Sim3Base> { public: using Base = Sim3Base>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using RxSo3Member = RxSO3; using TranslationMember = Vector3; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes similarity transform to the identity. /// SOPHUS_FUNC Sim3(); /// Copy constructor /// SOPHUS_FUNC Sim3(Sim3 const& other) = default; /// Copy-like constructor from OtherDerived. 
/// template SOPHUS_FUNC Sim3(Sim3Base const& other) : rxso3_(other.rxso3()), translation_(other.translation()) { static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from RxSO3 and translation vector /// template SOPHUS_FUNC Sim3(RxSO3Base const& rxso3, Eigen::MatrixBase const& translation) : rxso3_(rxso3), translation_(translation) { static_assert(std::is_same::value, "must be same Scalar type"); static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from quaternion and translation vector. /// /// Precondition: quaternion must not be close to zero. /// template SOPHUS_FUNC Sim3(Eigen::QuaternionBase const& quaternion, Eigen::MatrixBase const& translation) : rxso3_(quaternion), translation_(translation) { static_assert(std::is_same::value, "must be same Scalar type"); static_assert(std::is_same::value, "must be same Scalar type"); } /// Constructor from 4x4 matrix /// /// Precondition: Top-left 3x3 matrix needs to be "scaled-orthogonal" with /// positive determinant. The last row must be ``(0, 0, 0, 1)``. /// SOPHUS_FUNC explicit Sim3(Matrix const& T) : rxso3_(T.template topLeftCorner<3, 3>()), translation_(T.template block<3, 1>(0, 3)) {} /// This provides unsafe read/write access to internal data. Sim(3) is /// represented by an Eigen::Quaternion (four parameters) and a 3-vector. When /// using direct write access, the user needs to take care of that the /// quaternion is not set close to zero. /// SOPHUS_FUNC Scalar* data() { // rxso3_ and translation_ are laid out sequentially with no padding return rxso3_.data(); } /// Const version of data() above. 
/// SOPHUS_FUNC Scalar const* data() const { // rxso3_ and translation_ are laid out sequentially with no padding return rxso3_.data(); } /// Accessor of RxSO3 /// SOPHUS_FUNC RxSo3Member& rxso3() { return rxso3_; } /// Mutator of RxSO3 /// SOPHUS_FUNC RxSo3Member const& rxso3() const { return rxso3_; } /// Mutator of translation vector /// SOPHUS_FUNC TranslationMember& translation() { return translation_; } /// Accessor of translation vector /// SOPHUS_FUNC TranslationMember const& translation() const { return translation_; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. /// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Group exponential /// /// This functions takes in an element of tangent space and returns the /// corresponding element of the group Sim(3). /// /// The first three components of ``a`` represent the translational part /// ``upsilon`` in the tangent space of Sim(3), the following three components /// of ``a`` represents the rotation vector ``omega`` and the final component /// represents the logarithm of the scaling factor ``sigma``. /// To be more specific, this function computes ``expmat(hat(a))`` with /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator /// of Sim(3), see below. /// SOPHUS_FUNC static Sim3 exp(Tangent const& a) { // For the derivation of the exponential map of Sim(3) see // H. Strasdat, "Local Accuracy and Global Consistency for Efficient Visual // SLAM", PhD thesis, 2012. // http:///hauke.strasdat.net/files/strasdat_thesis_2012.pdf (A.5, pp. 186) Vector3 const upsilon = a.segment(0, 3); Vector3 const omega = a.segment(3, 3); Scalar const sigma = a[6]; Scalar theta; RxSO3 rxso3 = RxSO3::expAndTheta(a.template tail<4>(), &theta); Matrix3 const Omega = SO3::hat(omega); Matrix3 const W = details::calcW(Omega, theta, sigma); return Sim3(rxso3, W * upsilon); } /// Returns the ith infinitesimal generators of Sim(3). 
/// /// The infinitesimal generators of Sim(3) are: /// /// ``` /// | 0 0 0 1 | /// G_0 = | 0 0 0 0 | /// | 0 0 0 0 | /// | 0 0 0 0 | /// /// | 0 0 0 0 | /// G_1 = | 0 0 0 1 | /// | 0 0 0 0 | /// | 0 0 0 0 | /// /// | 0 0 0 0 | /// G_2 = | 0 0 0 0 | /// | 0 0 0 1 | /// | 0 0 0 0 | /// /// | 0 0 0 0 | /// G_3 = | 0 0 -1 0 | /// | 0 1 0 0 | /// | 0 0 0 0 | /// /// | 0 0 1 0 | /// G_4 = | 0 0 0 0 | /// | -1 0 0 0 | /// | 0 0 0 0 | /// /// | 0 -1 0 0 | /// G_5 = | 1 0 0 0 | /// | 0 0 0 0 | /// | 0 0 0 0 | /// /// | 1 0 0 0 | /// G_6 = | 0 1 0 0 | /// | 0 0 1 0 | /// | 0 0 0 0 | /// ``` /// /// Precondition: ``i`` must be in [0, 6]. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 || i <= 6, "i should be in range [0,6]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 7-vector representation and returns the corresponding /// matrix representation of Lie algebra element. /// /// Formally, the hat()-operator of Sim(3) is defined as /// /// ``hat(.): R^7 -> R^{4x4}, hat(a) = sum_i a_i * G_i`` (for i=0,...,6) /// /// with ``G_i`` being the ith infinitesimal generator of Sim(3). /// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& a) { Transformation Omega; Omega.template topLeftCorner<3, 3>() = RxSO3::hat(a.template tail<4>()); Omega.col(3).template head<3>() = a.template head<3>(); Omega.row(3).setZero(); return Omega; } /// Lie bracket /// /// It computes the Lie bracket of Sim(3). To be more specific, it computes /// /// ``[omega_1, omega_2]_sim3 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of Sim(3). 
/// SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) { Vector3 const upsilon1 = a.template head<3>(); Vector3 const upsilon2 = b.template head<3>(); Vector3 const omega1 = a.template segment<3>(3); Vector3 const omega2 = b.template segment<3>(3); Scalar sigma1 = a[6]; Scalar sigma2 = b[6]; Tangent res; res.template head<3>() = SO3::hat(omega1) * upsilon2 + SO3::hat(upsilon1) * omega2 + sigma1 * upsilon2 - sigma2 * upsilon1; res.template segment<3>(3) = omega1.cross(omega2); res[6] = Scalar(0); return res; } /// Draw uniform sample from Sim(3) manifold. /// /// Translations are drawn component-wise from the range [-1, 1]. /// The scale factor is drawn uniformly in log2-space from [-1, 1], /// hence the scale is in [0.5, 2]. /// template static Sim3 sampleUniform(UniformRandomBitGenerator& generator) { std::uniform_real_distribution uniform(Scalar(-1), Scalar(1)); return Sim3(RxSO3::sampleUniform(generator), Vector3(uniform(generator), uniform(generator), uniform(generator))); } /// vee-operator /// /// It takes the 4x4-matrix representation ``Omega`` and maps it to the /// corresponding 7-vector representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. 
/// /// Precondition: ``Omega`` must have the following structure: /// /// | g -f e a | /// | f g -d b | /// | -e d g c | /// | 0 0 0 0 | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { Tangent upsilon_omega_sigma; upsilon_omega_sigma.template head<3>() = Omega.col(3).template head<3>(); upsilon_omega_sigma.template tail<4>() = RxSO3::vee(Omega.template topLeftCorner<3, 3>()); return upsilon_omega_sigma; } protected: RxSo3Member rxso3_; TranslationMember translation_; }; template Sim3::Sim3() : translation_(TranslationMember::Zero()) { static_assert(std::is_standard_layout::value, "Assume standard layout for the use of offsetof check below."); static_assert( offsetof(Sim3, rxso3_) + sizeof(Scalar) * RxSO3::num_parameters == offsetof(Sim3, translation_), "This class assumes packed storage and hence will only work " "correctly depending on the compiler (options) - in " "particular when using [this->data(), this-data() + " "num_parameters] to access the raw data in a contiguous fashion."); } } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``Sim3``; derived from Sim3Base. /// /// Allows us to wrap Sim3 objects around POD array. 
template class Map, Options> : public Sophus::Sim3Base, Options>> { public: using Base = Sophus::Sim3Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : rxso3_(coeffs), translation_(coeffs + Sophus::RxSO3::num_parameters) {} /// Mutator of RxSO3 /// SOPHUS_FUNC Map, Options>& rxso3() { return rxso3_; } /// Accessor of RxSO3 /// SOPHUS_FUNC Map, Options> const& rxso3() const { return rxso3_; } /// Mutator of translation vector /// SOPHUS_FUNC Map, Options>& translation() { return translation_; } /// Accessor of translation vector SOPHUS_FUNC Map, Options> const& translation() const { return translation_; } protected: Map, Options> rxso3_; Map, Options> translation_; }; /// Specialization of Eigen::Map for ``Sim3 const``; derived from Sim3Base. /// /// Allows us to wrap RxSO3 objects around POD array. 
template class Map const, Options> : public Sophus::Sim3Base const, Options>> { using Base = Sophus::Sim3Base const, Options>>; public: using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : rxso3_(coeffs), translation_(coeffs + Sophus::RxSO3::num_parameters) {} /// Accessor of RxSO3 /// SOPHUS_FUNC Map const, Options> const& rxso3() const { return rxso3_; } /// Accessor of translation vector /// SOPHUS_FUNC Map const, Options> const& translation() const { return translation_; } protected: Map const, Options> const rxso3_; Map const, Options> const translation_; }; } // namespace Eigen #endif ================================================ FILE: LIO-Livox/include/sophus/sim_details.hpp ================================================ #ifndef SOPHUS_SIM_DETAILS_HPP #define SOPHUS_SIM_DETAILS_HPP #include "types.hpp" namespace Sophus { namespace details { template Matrix calcW(Matrix const& Omega, Scalar const theta, Scalar const sigma) { using std::abs; using std::cos; using std::exp; using std::sin; static Matrix const I = Matrix::Identity(); static Scalar const one(1); static Scalar const half(0.5); Matrix const Omega2 = Omega * Omega; Scalar const scale = exp(sigma); Scalar A, B, C; if (abs(sigma) < Constants::epsilon()) { C = one; if (abs(theta) < Constants::epsilon()) { A = half; B = Scalar(1. 
/ 6.); } else { Scalar theta_sq = theta * theta; A = (one - cos(theta)) / theta_sq; B = (theta - sin(theta)) / (theta_sq * theta); } } else { C = (scale - one) / sigma; if (abs(theta) < Constants::epsilon()) { Scalar sigma_sq = sigma * sigma; A = ((sigma - one) * scale + one) / sigma_sq; B = (scale * half * sigma_sq + scale - one - sigma * scale) / (sigma_sq * sigma); } else { Scalar theta_sq = theta * theta; Scalar a = scale * sin(theta); Scalar b = scale * cos(theta); Scalar c = theta_sq + sigma * sigma; A = (a * sigma + (one - b) * theta) / (theta * c); B = (C - ((b - one) * sigma + a * theta) / (c)) * one / (theta_sq); } } return A * Omega + B * Omega2 + C * I; } template Matrix calcWInv(Matrix const& Omega, Scalar const theta, Scalar const sigma, Scalar const scale) { using std::abs; using std::cos; using std::sin; static Matrix const I = Matrix::Identity(); static Scalar const half(0.5); static Scalar const one(1); static Scalar const two(2); Matrix const Omega2 = Omega * Omega; Scalar const scale_sq = scale * scale; Scalar const theta_sq = theta * theta; Scalar const sin_theta = sin(theta); Scalar const cos_theta = cos(theta); Scalar a, b, c; if (abs(sigma * sigma) < Constants::epsilon()) { c = one - half * sigma; a = -half; if (abs(theta_sq) < Constants::epsilon()) { b = Scalar(1. 
/ 12.); } else { b = (theta * sin_theta + two * cos_theta - two) / (two * theta_sq * (cos_theta - one)); } } else { Scalar const scale_cu = scale_sq * scale; c = sigma / (scale - one); if (abs(theta_sq) < Constants::epsilon()) { a = (-sigma * scale + scale - one) / ((scale - one) * (scale - one)); b = (scale_sq * sigma - two * scale_sq + scale * sigma + two * scale) / (two * scale_cu - Scalar(6) * scale_sq + Scalar(6) * scale - two); } else { Scalar const s_sin_theta = scale * sin_theta; Scalar const s_cos_theta = scale * cos_theta; a = (theta * s_cos_theta - theta - sigma * s_sin_theta) / (theta * (scale_sq - two * s_cos_theta + one)); b = -scale * (theta * s_sin_theta - theta * sin_theta + sigma * s_cos_theta - scale * sigma + sigma * cos_theta - sigma) / (theta_sq * (scale_cu - two * scale * s_cos_theta - scale_sq + two * s_cos_theta + scale - one)); } } return a * Omega + b * Omega2 + c * I; } } // namespace details } // namespace Sophus #endif ================================================ FILE: LIO-Livox/include/sophus/so2.hpp ================================================ /// @file /// Special orthogonal group SO(2) - rotation in 2d. #ifndef SOPHUS_SO2_HPP #define SOPHUS_SO2_HPP #include #include // Include only the selective set of Eigen headers that we need. // This helps when using Sophus with unusual compilers, like nvcc. 
#include #include "rotation_matrix.hpp" #include "types.hpp" namespace Sophus { template class SO2; using SO2d = SO2; using SO2f = SO2; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using ComplexType = Sophus::Vector2; }; template struct traits, Options_>> : traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using ComplexType = Map, Options>; }; template struct traits const, Options_>> : traits const> { static constexpr int Options = Options_; using Scalar = Scalar_; using ComplexType = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// SO2 base type - implements SO2 class but is storage agnostic. /// /// SO(2) is the group of rotations in 2d. As a matrix group, it is the set of /// matrices which are orthogonal such that ``R * R' = I`` (with ``R'`` being /// the transpose of ``R``) and have a positive determinant. In particular, the /// determinant is 1. Let ``theta`` be the rotation angle, the rotation matrix /// can be written in close form: /// /// | cos(theta) -sin(theta) | /// | sin(theta) cos(theta) | /// /// As a matter of fact, the first column of those matrices is isomorph to the /// set of unit complex numbers U(1). Thus, internally, SO2 is represented as /// complex number with length 1. /// /// SO(2) is a compact and commutative group. First it is compact since the set /// of rotation matrices is a closed and bounded set. Second it is commutative /// since ``R(alpha) * R(beta) = R(beta) * R(alpha)``, simply because ``alpha + /// beta = beta + alpha`` with ``alpha`` and ``beta`` being rotation angles /// (about the same axis). /// /// Class invariant: The 2-norm of ``unit_complex`` must be close to 1. /// Technically speaking, it must hold that: /// /// ``|unit_complex().squaredNorm() - 1| <= Constants::epsilon()``. 
template class SO2Base { public: static constexpr int Options = Eigen::internal::traits::Options; using Scalar = typename Eigen::internal::traits::Scalar; using ComplexT = typename Eigen::internal::traits::ComplexType; using ComplexTemporaryType = Sophus::Vector2; /// Degrees of freedom of manifold, number of dimensions in tangent space (one /// since we only have in-plane rotations). static int constexpr DoF = 1; /// Number of internal parameters used (complex numbers are a tuples). static int constexpr num_parameters = 2; /// Group transformations are 2x2 matrices. static int constexpr N = 2; using Transformation = Matrix; using Point = Vector2; using HomogeneousPoint = Vector3; using Line = ParametrizedLine2; using Tangent = Scalar; using Adjoint = Scalar; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with SO2 operations. template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using SO2Product = SO2>; template using PointProduct = Vector2>; template using HomogeneousPointProduct = Vector3>; /// Adjoint transformation /// /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. /// /// It simply ``1``, since ``SO(2)`` is a commutative group. /// SOPHUS_FUNC Adjoint Adj() const { return Scalar(1); } /// Returns copy of instance casted to NewScalarType. /// template SOPHUS_FUNC SO2 cast() const { return SO2(unit_complex().template cast()); } /// This provides unsafe read/write access to internal data. SO(2) is /// represented by a unit complex number (two parameters). 
When using direct /// write access, the user needs to take care of that the complex number stays /// normalized. /// SOPHUS_FUNC Scalar* data() { return unit_complex_nonconst().data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { return unit_complex().data(); } /// Returns group inverse. /// SOPHUS_FUNC SO2 inverse() const { return SO2(unit_complex().x(), -unit_complex().y()); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (rotation matrices) to elements of the tangent space /// (rotation angles). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of SO(2). /// SOPHUS_FUNC Scalar log() const { using std::atan2; return atan2(unit_complex().y(), unit_complex().x()); } /// It re-normalizes ``unit_complex`` to unit length. /// /// Note: Because of the class invariant, there is typically no need to call /// this function directly. /// SOPHUS_FUNC void normalize() { using std::sqrt; Scalar length = sqrt(unit_complex().x() * unit_complex().x() + unit_complex().y() * unit_complex().y()); SOPHUS_ENSURE(length >= Constants::epsilon(), "Complex number should not be close to zero!"); unit_complex_nonconst().x() /= length; unit_complex_nonconst().y() /= length; } /// Returns 2x2 matrix representation of the instance. /// /// For SO(2), the matrix representation is an orthogonal matrix ``R`` with /// ``det(R)=1``, thus the so-called "rotation matrix". /// SOPHUS_FUNC Transformation matrix() const { Scalar const& real = unit_complex().x(); Scalar const& imag = unit_complex().y(); Transformation R; // clang-format off R << real, -imag, imag, real; // clang-format on return R; } /// Assignment operator /// SOPHUS_FUNC SO2Base& operator=(SO2Base const& other) = default; /// Assignment-like operator from OtherDerived. 
/// template SOPHUS_FUNC SO2Base& operator=(SO2Base const& other) { unit_complex_nonconst() = other.unit_complex(); return *this; } /// Group multiplication, which is rotation concatenation. /// template SOPHUS_FUNC SO2Product operator*( SO2Base const& other) const { using ResultT = ReturnScalar; Scalar const lhs_real = unit_complex().x(); Scalar const lhs_imag = unit_complex().y(); typename OtherDerived::Scalar const& rhs_real = other.unit_complex().x(); typename OtherDerived::Scalar const& rhs_imag = other.unit_complex().y(); // complex multiplication ResultT const result_real = lhs_real * rhs_real - lhs_imag * rhs_imag; ResultT const result_imag = lhs_real * rhs_imag + lhs_imag * rhs_real; ResultT const squared_norm = result_real * result_real + result_imag * result_imag; // We can assume that the squared-norm is close to 1 since we deal with a // unit complex number. Due to numerical precision issues, there might // be a small drift after pose concatenation. Hence, we need to renormalizes // the complex number here. // Since squared-norm is close to 1, we do not need to calculate the costly // square-root, but can use an approximation around 1 (see // http://stackoverflow.com/a/12934750 for details). if (squared_norm != ResultT(1.0)) { ResultT const scale = ResultT(2.0) / (ResultT(1.0) + squared_norm); return SO2Product(result_real * scale, result_imag * scale); } return SO2Product(result_real, result_imag); } /// Group action on 2-points. /// /// This function rotates a 2 dimensional point ``p`` by the SO2 element /// ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { Scalar const& real = unit_complex().x(); Scalar const& imag = unit_complex().y(); return PointProduct(real * p[0] - imag * p[1], imag * p[0] + real * p[1]); } /// Group action on homogeneous 2-points. 
/// /// This function rotates a homogeneous 2 dimensional point ``p`` by the SO2 /// element ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``. /// template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { Scalar const& real = unit_complex().x(); Scalar const& imag = unit_complex().y(); return HomogeneousPointProduct( real * p[0] - imag * p[1], imag * p[0] + real * p[1], p[2]); } /// Group action on lines. /// /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO2 /// element: /// /// Both direction ``d`` and origin ``o`` are rotated as a 2 dimensional point /// SOPHUS_FUNC Line operator*(Line const& l) const { return Line((*this) * l.origin(), (*this) * l.direction()); } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SO2's Scalar type. /// template >::value>::type> SOPHUS_FUNC SO2Base operator*=(SO2Base const& other) { *static_cast(this) = *this * other; return *this; } /// Returns derivative of this * SO2::exp(x) wrt. x at x=0. /// SOPHUS_FUNC Matrix Dx_this_mul_exp_x_at_0() const { return Matrix(-unit_complex()[1], unit_complex()[0]); } /// Returns internal parameters of SO(2). /// /// It returns (c[0], c[1]), with c being the unit complex number. /// SOPHUS_FUNC Sophus::Vector params() const { return unit_complex(); } /// Takes in complex number / tuple and normalizes it. /// /// Precondition: The complex number must not be close to zero. /// SOPHUS_FUNC void setComplex(Point const& complex) { unit_complex_nonconst() = complex; normalize(); } /// Accessor of unit quaternion. /// SOPHUS_FUNC ComplexT const& unit_complex() const { return static_cast(this)->unit_complex(); } private: /// Mutator of unit_complex is private to ensure class invariant. That is /// the complex number must stay close to unit length. 
/// SOPHUS_FUNC ComplexT& unit_complex_nonconst() { return static_cast(this)->unit_complex_nonconst(); } }; /// SO2 using default storage; derived from SO2Base. template class SO2 : public SO2Base> { public: using Base = SO2Base>; static int constexpr DoF = Base::DoF; static int constexpr num_parameters = Base::num_parameters; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using ComplexMember = Vector2; /// ``Base`` is friend so unit_complex_nonconst can be accessed from ``Base``. friend class SO2Base>; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes unit complex number to identity rotation. /// SOPHUS_FUNC SO2() : unit_complex_(Scalar(1), Scalar(0)) {} /// Copy constructor /// SOPHUS_FUNC SO2(SO2 const& other) = default; /// Copy-like constructor from OtherDerived. /// template SOPHUS_FUNC SO2(SO2Base const& other) : unit_complex_(other.unit_complex()) {} /// Constructor from rotation matrix /// /// Precondition: rotation matrix need to be orthogonal with determinant of 1. /// SOPHUS_FUNC explicit SO2(Transformation const& R) : unit_complex_(Scalar(0.5) * (R(0, 0) + R(1, 1)), Scalar(0.5) * (R(1, 0) - R(0, 1))) { SOPHUS_ENSURE(isOrthogonal(R), "R is not orthogonal:\n %", R); SOPHUS_ENSURE(R.determinant() > Scalar(0), "det(R) is not positive: %", R.determinant()); } /// Constructor from pair of real and imaginary number. /// /// Precondition: The pair must not be close to zero. /// SOPHUS_FUNC SO2(Scalar const& real, Scalar const& imag) : unit_complex_(real, imag) { Base::normalize(); } /// Constructor from 2-vector. /// /// Precondition: The vector must not be close to zero. 
/// template SOPHUS_FUNC explicit SO2(Eigen::MatrixBase const& complex) : unit_complex_(complex) { static_assert(std::is_same::value, "must be same Scalar type"); Base::normalize(); } /// Constructor from an rotation angle. /// SOPHUS_FUNC explicit SO2(Scalar theta) { unit_complex_nonconst() = SO2::exp(theta).unit_complex(); } /// Accessor of unit complex number /// SOPHUS_FUNC ComplexMember const& unit_complex() const { return unit_complex_; } /// Group exponential /// /// This functions takes in an element of tangent space (= rotation angle /// ``theta``) and returns the corresponding element of the group SO(2). /// /// To be more specific, this function computes ``expmat(hat(omega))`` /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the /// hat()-operator of SO(2). /// SOPHUS_FUNC static SO2 exp(Tangent const& theta) { using std::cos; using std::sin; return SO2(cos(theta), sin(theta)); } /// Returns derivative of exp(x) wrt. x. /// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x( Tangent const& theta) { using std::cos; using std::sin; return Sophus::Matrix(-sin(theta), cos(theta)); } /// Returns derivative of exp(x) wrt. x_i at x=0. /// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x_at_0() { return Sophus::Matrix(Scalar(0), Scalar(1)); } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. /// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int) { return generator(); } /// Returns the infinitesimal generators of SO(2). /// /// The infinitesimal generators of SO(2) is: /// /// | 0 1 | /// | -1 0 | /// SOPHUS_FUNC static Transformation generator() { return hat(Scalar(1)); } /// hat-operator /// /// It takes in the scalar representation ``theta`` (= rotation angle) and /// returns the corresponding matrix representation of Lie algebra element. /// /// Formally, the hat()-operator of SO(2) is defined as /// /// ``hat(.): R^2 -> R^{2x2}, hat(theta) = theta * G`` /// /// with ``G`` being the infinitesimal generator of SO(2). 
/// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& theta) { Transformation Omega; // clang-format off Omega << Scalar(0), -theta, theta, Scalar(0); // clang-format on return Omega; } /// Returns closed SO2 given arbitrary 2x2 matrix. /// template static SOPHUS_FUNC enable_if_t::value, SO2> fitToSO2(Transformation const& R) { return SO2(makeRotationMatrix(R)); } /// Lie bracket /// /// It returns the Lie bracket of SO(2). Since SO(2) is a commutative group, /// the Lie bracket is simple ``0``. /// SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) { return Scalar(0); } /// Draw uniform sample from SO(2) manifold. /// template static SO2 sampleUniform(UniformRandomBitGenerator& generator) { static_assert(IsUniformRandomBitGenerator::value, "generator must meet the UniformRandomBitGenerator concept"); std::uniform_real_distribution uniform(-Constants::pi(), Constants::pi()); return SO2(uniform(generator)); } /// vee-operator /// /// It takes the 2x2-matrix representation ``Omega`` and maps it to the /// corresponding scalar representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. /// /// Precondition: ``Omega`` must have the following structure: /// /// | 0 -a | /// | a 0 | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { using std::abs; return Omega(1, 0); } protected: /// Mutator of complex number is protected to ensure class invariant. /// SOPHUS_FUNC ComplexMember& unit_complex_nonconst() { return unit_complex_; } ComplexMember unit_complex_; }; } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``SO2``; derived from SO2Base. /// /// Allows us to wrap SO2 objects around POD array (e.g. external c style /// complex number / tuple). 
template class Map, Options> : public Sophus::SO2Base, Options>> { public: using Base = Sophus::SO2Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; /// ``Base`` is friend so unit_complex_nonconst can be accessed from ``Base``. friend class Sophus::SO2Base, Options>>; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : unit_complex_(coeffs) {} /// Accessor of unit complex number. /// SOPHUS_FUNC Map, Options> const& unit_complex() const { return unit_complex_; } protected: /// Mutator of unit_complex is protected to ensure class invariant. /// SOPHUS_FUNC Map, Options>& unit_complex_nonconst() { return unit_complex_; } Map, Options> unit_complex_; }; /// Specialization of Eigen::Map for ``SO2 const``; derived from SO2Base. /// /// Allows us to wrap SO2 objects around POD array (e.g. external c style /// complex number / tuple). template class Map const, Options> : public Sophus::SO2Base const, Options>> { public: using Base = Sophus::SO2Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : unit_complex_(coeffs) {} /// Accessor of unit complex number. /// SOPHUS_FUNC Map const, Options> const& unit_complex() const { return unit_complex_; } protected: /// Mutator of unit_complex is protected to ensure class invariant. 
/// Map const, Options> const unit_complex_; }; } // namespace Eigen #endif // SOPHUS_SO2_HPP ================================================ FILE: LIO-Livox/include/sophus/so3.hpp ================================================ /// @file /// Special orthogonal group SO(3) - rotation in 3d. #ifndef SOPHUS_SO3_HPP #define SOPHUS_SO3_HPP #include "rotation_matrix.hpp" #include "so2.hpp" #include "types.hpp" // Include only the selective set of Eigen headers that we need. // This helps when using Sophus with unusual compilers, like nvcc. #include #include #include namespace Sophus { template class SO3; using SO3d = SO3; using SO3f = SO3; } // namespace Sophus namespace Eigen { namespace internal { template struct traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using QuaternionType = Eigen::Quaternion; }; template struct traits, Options_>> : traits> { static constexpr int Options = Options_; using Scalar = Scalar_; using QuaternionType = Map, Options>; }; template struct traits const, Options_>> : traits const> { static constexpr int Options = Options_; using Scalar = Scalar_; using QuaternionType = Map const, Options>; }; } // namespace internal } // namespace Eigen namespace Sophus { /// SO3 base type - implements SO3 class but is storage agnostic. /// /// SO(3) is the group of rotations in 3d. As a matrix group, it is the set of /// matrices which are orthogonal such that ``R * R' = I`` (with ``R'`` being /// the transpose of ``R``) and have a positive determinant. In particular, the /// determinant is 1. Internally, the group is represented as a unit quaternion. /// Unit quaternion can be seen as members of the special unitary group SU(2). /// SU(2) is a double cover of SO(3). Hence, for every rotation matrix ``R``, /// there exist two unit quaternions: ``(r, v)`` and ``(-r, -v)``, with ``r`` /// the real part and ``v`` being the imaginary 3-vector part of the quaternion. /// /// SO(3) is a compact, but non-commutative group. 
First it is compact since the /// set of rotation matrices is a closed and bounded set. Second it is /// non-commutative since the equation ``R_1 * R_2 = R_2 * R_1`` does not hold /// in general. For example rotating an object by some degrees about its /// ``x``-axis and then by some degrees about its y axis, does not lead to the /// same orientation when rotation first about ``y`` and then about ``x``. /// /// Class invariant: The 2-norm of ``unit_quaternion`` must be close to 1. /// Technically speaking, it must hold that: /// /// ``|unit_quaternion().squaredNorm() - 1| <= Constants::epsilon()``. template class SO3Base { public: static constexpr int Options = Eigen::internal::traits::Options; using Scalar = typename Eigen::internal::traits::Scalar; using QuaternionType = typename Eigen::internal::traits::QuaternionType; using QuaternionTemporaryType = Eigen::Quaternion; /// Degrees of freedom of group, number of dimensions in tangent space. static int constexpr DoF = 3; /// Number of internal parameters used (quaternion is a 4-tuple). static int constexpr num_parameters = 4; /// Group transformations are 3x3 matrices. static int constexpr N = 3; using Transformation = Matrix; using Point = Vector3; using HomogeneousPoint = Vector4; using Line = ParametrizedLine3; using Tangent = Vector; using Adjoint = Matrix; struct TangentAndTheta { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Tangent tangent; Scalar theta; }; /// For binary operations the return type is determined with the /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map /// types, as well as other compatible scalar types such as Ceres::Jet and /// double scalars with SO3 operations. 
template using ReturnScalar = typename Eigen::ScalarBinaryOpTraits< Scalar, typename OtherDerived::Scalar>::ReturnType; template using SO3Product = SO3>; template using PointProduct = Vector3>; template using HomogeneousPointProduct = Vector4>; /// Adjoint transformation // /// This function return the adjoint transformation ``Ad`` of the group /// element ``A`` such that for all ``x`` it holds that /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below. // /// For SO(3), it simply returns the rotation matrix corresponding to ``A``. /// SOPHUS_FUNC Adjoint Adj() const { return matrix(); } /// Extract rotation angle about canonical X-axis /// template SOPHUS_FUNC enable_if_t::value, S> angleX() const { Sophus::Matrix3 R = matrix(); Sophus::Matrix2 Rx = R.template block<2, 2>(1, 1); return SO2(makeRotationMatrix(Rx)).log(); } /// Extract rotation angle about canonical Y-axis /// template SOPHUS_FUNC enable_if_t::value, S> angleY() const { Sophus::Matrix3 R = matrix(); Sophus::Matrix2 Ry; // clang-format off Ry << R(0, 0), R(2, 0), R(0, 2), R(2, 2); // clang-format on return SO2(makeRotationMatrix(Ry)).log(); } /// Extract rotation angle about canonical Z-axis /// template SOPHUS_FUNC enable_if_t::value, S> angleZ() const { Sophus::Matrix3 R = matrix(); Sophus::Matrix2 Rz = R.template block<2, 2>(0, 0); return SO2(makeRotationMatrix(Rz)).log(); } /// Returns copy of instance casted to NewScalarType. /// template SOPHUS_FUNC SO3 cast() const { return SO3(unit_quaternion().template cast()); } /// This provides unsafe read/write access to internal data. SO(3) is /// represented by an Eigen::Quaternion (four parameters). When using direct /// write access, the user needs to take care of that the quaternion stays /// normalized. /// /// Note: The first three Scalars represent the imaginary parts, while the /// forth Scalar represent the real part. 
/// SOPHUS_FUNC Scalar* data() { return unit_quaternion_nonconst().coeffs().data(); } /// Const version of data() above. /// SOPHUS_FUNC Scalar const* data() const { return unit_quaternion().coeffs().data(); } /// Returns derivative of this * SO3::exp(x) wrt. x at x=0. /// SOPHUS_FUNC Matrix Dx_this_mul_exp_x_at_0() const { Matrix J; Eigen::Quaternion const q = unit_quaternion(); Scalar const c0 = Scalar(0.5) * q.w(); Scalar const c1 = Scalar(0.5) * q.z(); Scalar const c2 = -c1; Scalar const c3 = Scalar(0.5) * q.y(); Scalar const c4 = Scalar(0.5) * q.x(); Scalar const c5 = -c4; Scalar const c6 = -c3; J(0, 0) = c0; J(0, 1) = c2; J(0, 2) = c3; J(1, 0) = c1; J(1, 1) = c0; J(1, 2) = c5; J(2, 0) = c6; J(2, 1) = c4; J(2, 2) = c0; J(3, 0) = c5; J(3, 1) = c6; J(3, 2) = c2; return J; } /// Returns internal parameters of SO(3). /// /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real), with q being the /// unit quaternion. /// SOPHUS_FUNC Sophus::Vector params() const { return unit_quaternion().coeffs(); } /// Returns group inverse. /// SOPHUS_FUNC SO3 inverse() const { return SO3(unit_quaternion().conjugate()); } /// Logarithmic map /// /// Computes the logarithm, the inverse of the group exponential which maps /// element of the group (rotation matrices) to elements of the tangent space /// (rotation-vector). /// /// To be specific, this function computes ``vee(logmat(.))`` with /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator /// of SO(3). /// SOPHUS_FUNC Tangent log() const { return logAndTheta().tangent; } /// As above, but also returns ``theta = |omega|``. /// SOPHUS_FUNC TangentAndTheta logAndTheta() const { TangentAndTheta J; using std::abs; using std::atan; using std::sqrt; Scalar squared_n = unit_quaternion().vec().squaredNorm(); Scalar w = unit_quaternion().w(); Scalar two_atan_nbyw_by_n; /// Atan-based log thanks to /// /// C. 
Hertzberg et al.: /// "Integrating Generic Sensor Fusion Algorithms with Sound State /// Representation through Encapsulation of Manifolds" /// Information Fusion, 2011 if (squared_n < Constants::epsilon() * Constants::epsilon()) { // If quaternion is normalized and n=0, then w should be 1; // w=0 should never happen here! SOPHUS_ENSURE(abs(w) >= Constants::epsilon(), "Quaternion (%) should be normalized!", unit_quaternion().coeffs().transpose()); Scalar squared_w = w * w; two_atan_nbyw_by_n = Scalar(2) / w - Scalar(2.0/3.0) * (squared_n) / (w * squared_w); J.theta = Scalar(2) * squared_n / w; } else { Scalar n = sqrt(squared_n); if (abs(w) < Constants::epsilon()) { if (w > Scalar(0)) { two_atan_nbyw_by_n = Constants::pi() / n; } else { two_atan_nbyw_by_n = -Constants::pi() / n; } } else { two_atan_nbyw_by_n = Scalar(2) * atan(n / w) / n; } J.theta = two_atan_nbyw_by_n * n; } J.tangent = two_atan_nbyw_by_n * unit_quaternion().vec(); return J; } /// It re-normalizes ``unit_quaternion`` to unit length. /// /// Note: Because of the class invariant, there is typically no need to call /// this function directly. /// SOPHUS_FUNC void normalize() { Scalar length = unit_quaternion_nonconst().norm(); SOPHUS_ENSURE(length >= Constants::epsilon(), "Quaternion (%) should not be close to zero!", unit_quaternion_nonconst().coeffs().transpose()); unit_quaternion_nonconst().coeffs() /= length; } /// Returns 3x3 matrix representation of the instance. /// /// For SO(3), the matrix representation is an orthogonal matrix ``R`` with /// ``det(R)=1``, thus the so-called "rotation matrix". /// SOPHUS_FUNC Transformation matrix() const { return unit_quaternion().toRotationMatrix(); } /// Assignment operator. /// SOPHUS_FUNC SO3Base& operator=(SO3Base const& other) = default; /// Assignment-like operator from OtherDerived. 
/// template SOPHUS_FUNC SO3Base& operator=(SO3Base const& other) { unit_quaternion_nonconst() = other.unit_quaternion(); return *this; } /// Group multiplication, which is rotation concatenation. /// template SOPHUS_FUNC SO3Product operator*( SO3Base const& other) const { using QuaternionProductType = typename SO3Product::QuaternionType; const QuaternionType& a = unit_quaternion(); const typename OtherDerived::QuaternionType& b = other.unit_quaternion(); /// NOTE: We cannot use Eigen's Quaternion multiplication because it always /// returns a Quaternion of the same Scalar as this object, so it is not /// able to multiple Jets and doubles correctly. return SO3Product(QuaternionProductType( a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x())); } /// Group action on 3-points. /// /// This function rotates a 3 dimensional point ``p`` by the SO3 element /// ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``. /// /// Since SO3 is internally represented by a unit quaternion ``q``, it is /// implemented as ``p_bar = q * p_foo * q^{*}`` /// with ``q^{*}`` being the quaternion conjugate of ``q``. /// /// Geometrically, ``p`` is rotated by angle ``|omega|`` around the /// axis ``omega/|omega|`` with ``omega := vee(log(bar_R_foo))``. /// /// For ``vee``-operator, see below. /// template ::value>::type> SOPHUS_FUNC PointProduct operator*( Eigen::MatrixBase const& p) const { /// NOTE: We cannot use Eigen's Quaternion transformVector because it always /// returns a Vector3 of the same Scalar as this quaternion, so it is not /// able to be applied to Jets and doubles correctly. const QuaternionType& q = unit_quaternion(); PointProduct uv = q.vec().cross(p); uv += uv; return p + q.w() * uv + q.vec().cross(uv); } /// Group action on homogeneous 3-points. 
See above for more details. template ::value>::type> SOPHUS_FUNC HomogeneousPointProduct operator*( Eigen::MatrixBase const& p) const { const auto rp = *this * p.template head<3>(); return HomogeneousPointProduct(rp(0), rp(1), rp(2), p(3)); } /// Group action on lines. /// /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO3 /// element: /// /// Both direction ``d`` and origin ``o`` are rotated as a 3 dimensional point /// SOPHUS_FUNC Line operator*(Line const& l) const { return Line((*this) * l.origin(), (*this) * l.direction()); } /// In-place group multiplication. This method is only valid if the return /// type of the multiplication is compatible with this SO3's Scalar type. /// template >::value>::type> SOPHUS_FUNC SO3Base& operator*=(SO3Base const& other) { *static_cast(this) = *this * other; return *this; } /// Takes in quaternion, and normalizes it. /// /// Precondition: The quaternion must not be close to zero. /// SOPHUS_FUNC void setQuaternion(Eigen::Quaternion const& quaternion) { unit_quaternion_nonconst() = quaternion; normalize(); } /// Accessor of unit quaternion. /// SOPHUS_FUNC QuaternionType const& unit_quaternion() const { return static_cast(this)->unit_quaternion(); } private: /// Mutator of unit_quaternion is private to ensure class invariant. That is /// the quaternion must stay close to unit length. /// SOPHUS_FUNC QuaternionType& unit_quaternion_nonconst() { return static_cast(this)->unit_quaternion_nonconst(); } }; /// SO3 using default storage; derived from SO3Base. 
template class SO3 : public SO3Base> { public: using Base = SO3Base>; static int constexpr DoF = Base::DoF; static int constexpr num_parameters = Base::num_parameters; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using QuaternionMember = Eigen::Quaternion; /// ``Base`` is friend so unit_quaternion_nonconst can be accessed from /// ``Base``. friend class SO3Base>; EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// Default constructor initializes unit quaternion to identity rotation. /// SOPHUS_FUNC SO3() : unit_quaternion_(Scalar(1), Scalar(0), Scalar(0), Scalar(0)) {} /// Copy constructor /// SOPHUS_FUNC SO3(SO3 const& other) = default; /// Copy-like constructor from OtherDerived. /// template SOPHUS_FUNC SO3(SO3Base const& other) : unit_quaternion_(other.unit_quaternion()) {} /// Constructor from rotation matrix /// /// Precondition: rotation matrix needs to be orthogonal with determinant /// of 1. /// SOPHUS_FUNC SO3(Transformation const& R) : unit_quaternion_(R) { SOPHUS_ENSURE(isOrthogonal(R), "R is not orthogonal:\n %", R * R.transpose()); SOPHUS_ENSURE(R.determinant() > Scalar(0), "det(R) is not positive: %", R.determinant()); } /// Constructor from quaternion /// /// Precondition: quaternion must not be close to zero. /// template SOPHUS_FUNC explicit SO3(Eigen::QuaternionBase const& quat) : unit_quaternion_(quat) { static_assert( std::is_same::Scalar, Scalar>::value, "Input must be of same scalar type"); Base::normalize(); } /// Accessor of unit quaternion. /// SOPHUS_FUNC QuaternionMember const& unit_quaternion() const { return unit_quaternion_; } /// Returns derivative of exp(x) wrt. x. 
/// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x( Tangent const& omega) { using std::cos; using std::exp; using std::sin; using std::sqrt; Scalar const c0 = omega[0] * omega[0]; Scalar const c1 = omega[1] * omega[1]; Scalar const c2 = omega[2] * omega[2]; Scalar const c3 = c0 + c1 + c2; if (c3 < Constants::epsilon()) { return Dx_exp_x_at_0(); } Scalar const c4 = sqrt(c3); Scalar const c5 = 1.0 / c4; Scalar const c6 = 0.5 * c4; Scalar const c7 = sin(c6); Scalar const c8 = c5 * c7; Scalar const c9 = pow(c3, -3.0L / 2.0L); Scalar const c10 = c7 * c9; Scalar const c11 = Scalar(1.0) / c3; Scalar const c12 = cos(c6); Scalar const c13 = Scalar(0.5) * c11 * c12; Scalar const c14 = c7 * c9 * omega[0]; Scalar const c15 = Scalar(0.5) * c11 * c12 * omega[0]; Scalar const c16 = -c14 * omega[1] + c15 * omega[1]; Scalar const c17 = -c14 * omega[2] + c15 * omega[2]; Scalar const c18 = omega[1] * omega[2]; Scalar const c19 = -c10 * c18 + c13 * c18; Scalar const c20 = Scalar(0.5) * c5 * c7; Sophus::Matrix J; J(0, 0) = -c0 * c10 + c0 * c13 + c8; J(0, 1) = c16; J(0, 2) = c17; J(1, 0) = c16; J(1, 1) = -c1 * c10 + c1 * c13 + c8; J(1, 2) = c19; J(2, 0) = c17; J(2, 1) = c19; J(2, 2) = -c10 * c2 + c13 * c2 + c8; J(3, 0) = -c20 * omega[0]; J(3, 1) = -c20 * omega[1]; J(3, 2) = -c20 * omega[2]; return J; } /// Returns derivative of exp(x) wrt. x_i at x=0. /// SOPHUS_FUNC static Sophus::Matrix Dx_exp_x_at_0() { Sophus::Matrix J; // clang-format off J << Scalar(0.5), Scalar(0), Scalar(0), Scalar(0), Scalar(0.5), Scalar(0), Scalar(0), Scalar(0), Scalar(0.5), Scalar(0), Scalar(0), Scalar(0); // clang-format on return J; } /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``. /// SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) { return generator(i); } /// Group exponential /// /// This functions takes in an element of tangent space (= rotation vector /// ``omega``) and returns the corresponding element of the group SO(3). 
/// /// To be more specific, this function computes ``expmat(hat(omega))`` /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the /// hat()-operator of SO(3). /// SOPHUS_FUNC static SO3 exp(Tangent const& omega) { Scalar theta; return expAndTheta(omega, &theta); } /// As above, but also returns ``theta = |omega|`` as out-parameter. /// /// Precondition: ``theta`` must not be ``nullptr``. /// SOPHUS_FUNC static SO3 expAndTheta(Tangent const& omega, Scalar* theta) { SOPHUS_ENSURE(theta != nullptr, "must not be nullptr."); using std::abs; using std::cos; using std::sin; using std::sqrt; Scalar theta_sq = omega.squaredNorm(); Scalar imag_factor; Scalar real_factor; if (theta_sq < Constants::epsilon() * Constants::epsilon()) { *theta = Scalar(0); Scalar theta_po4 = theta_sq * theta_sq; imag_factor = Scalar(0.5) - Scalar(1.0 / 48.0) * theta_sq + Scalar(1.0 / 3840.0) * theta_po4; real_factor = Scalar(1) - Scalar(1.0 / 8.0) * theta_sq + Scalar(1.0 / 384.0) * theta_po4; } else { *theta = sqrt(theta_sq); Scalar half_theta = Scalar(0.5) * (*theta); Scalar sin_half_theta = sin(half_theta); imag_factor = sin_half_theta / (*theta); real_factor = cos(half_theta); } SO3 q; q.unit_quaternion_nonconst() = QuaternionMember(real_factor, imag_factor * omega.x(), imag_factor * omega.y(), imag_factor * omega.z()); SOPHUS_ENSURE(abs(q.unit_quaternion().squaredNorm() - Scalar(1)) < Sophus::Constants::epsilon(), "SO3::exp failed! omega: %, real: %, img: %", omega.transpose(), real_factor, imag_factor); return q; } /// Returns closest SO3 given arbitrary 3x3 matrix. /// template static SOPHUS_FUNC enable_if_t::value, SO3> fitToSO3(Transformation const& R) { return SO3(::Sophus::makeRotationMatrix(R)); } /// Returns the ith infinitesimal generators of SO(3). 
/// /// The infinitesimal generators of SO(3) are: /// /// ``` /// | 0 0 0 | /// G_0 = | 0 0 -1 | /// | 0 1 0 | /// /// | 0 0 1 | /// G_1 = | 0 0 0 | /// | -1 0 0 | /// /// | 0 -1 0 | /// G_2 = | 1 0 0 | /// | 0 0 0 | /// ``` /// /// Precondition: ``i`` must be 0, 1 or 2. /// SOPHUS_FUNC static Transformation generator(int i) { SOPHUS_ENSURE(i >= 0 && i <= 2, "i should be in range [0,2]."); Tangent e; e.setZero(); e[i] = Scalar(1); return hat(e); } /// hat-operator /// /// It takes in the 3-vector representation ``omega`` (= rotation vector) and /// returns the corresponding matrix representation of Lie algebra element. /// /// Formally, the hat()-operator of SO(3) is defined as /// /// ``hat(.): R^3 -> R^{3x3}, hat(omega) = sum_i omega_i * G_i`` /// (for i=0,1,2) /// /// with ``G_i`` being the ith infinitesimal generator of SO(3). /// /// The corresponding inverse is the vee()-operator, see below. /// SOPHUS_FUNC static Transformation hat(Tangent const& omega) { Transformation Omega; // clang-format off Omega << Scalar(0), -omega(2), omega(1), omega(2), Scalar(0), -omega(0), -omega(1), omega(0), Scalar(0); // clang-format on return Omega; } /// Lie bracket /// /// It computes the Lie bracket of SO(3). To be more specific, it computes /// /// ``[omega_1, omega_2]_so3 := vee([hat(omega_1), hat(omega_2)])`` /// /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the /// hat()-operator and ``vee(.)`` the vee()-operator of SO3. /// /// For the Lie algebra so3, the Lie bracket is simply the cross product: /// /// ``[omega_1, omega_2]_so3 = omega_1 x omega_2.`` /// SOPHUS_FUNC static Tangent lieBracket(Tangent const& omega1, Tangent const& omega2) { return omega1.cross(omega2); } /// Construct x-axis rotation. /// static SOPHUS_FUNC SO3 rotX(Scalar const& x) { return SO3::exp(Sophus::Vector3(x, Scalar(0), Scalar(0))); } /// Construct y-axis rotation. 
/// static SOPHUS_FUNC SO3 rotY(Scalar const& y) { return SO3::exp(Sophus::Vector3(Scalar(0), y, Scalar(0))); } /// Construct z-axis rotation. /// static SOPHUS_FUNC SO3 rotZ(Scalar const& z) { return SO3::exp(Sophus::Vector3(Scalar(0), Scalar(0), z)); } /// Draw uniform sample from SO(3) manifold. /// Based on: http://planning.cs.uiuc.edu/node198.html /// template static SO3 sampleUniform(UniformRandomBitGenerator& generator) { static_assert(IsUniformRandomBitGenerator::value, "generator must meet the UniformRandomBitGenerator concept"); std::uniform_real_distribution uniform(Scalar(0), Scalar(1)); std::uniform_real_distribution uniform_twopi( Scalar(0), 2 * Constants::pi()); const Scalar u1 = uniform(generator); const Scalar u2 = uniform_twopi(generator); const Scalar u3 = uniform_twopi(generator); const Scalar a = sqrt(1 - u1); const Scalar b = sqrt(u1); return SO3( QuaternionMember(a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3))); } /// vee-operator /// /// It takes the 3x3-matrix representation ``Omega`` and maps it to the /// corresponding vector representation of Lie algebra. /// /// This is the inverse of the hat()-operator, see above. /// /// Precondition: ``Omega`` must have the following structure: /// /// | 0 -c b | /// | c 0 -a | /// | -b a 0 | /// SOPHUS_FUNC static Tangent vee(Transformation const& Omega) { return Tangent(Omega(2, 1), Omega(0, 2), Omega(1, 0)); } SOPHUS_FUNC static Transformation JacobianRInv(Tangent const& w) { Transformation Jrinv = Transformation::Identity(); Scalar theta = w.norm(); Scalar theta2 = theta * theta; Scalar s = ( 1.0 - (1.0+cos(theta))*theta / (2.0*sin(theta)) ); // very small angle if(theta < 0.00001) { return Jrinv; } else { Tangent k = w.normalized(); Transformation K = SO3::hat(k); Jrinv = Transformation::Identity() + 0.5*SO3::hat(w) + ( 1.0 - (1.0+cos(theta))*theta / (2.0*sin(theta)) ) *K*K; } return Jrinv; } protected: /// Mutator of unit_quaternion is protected to ensure class invariant. 
/// SOPHUS_FUNC QuaternionMember& unit_quaternion_nonconst() { return unit_quaternion_; } QuaternionMember unit_quaternion_; }; } // namespace Sophus namespace Eigen { /// Specialization of Eigen::Map for ``SO3``; derived from SO3Base. /// /// Allows us to wrap SO3 objects around POD array (e.g. external c style /// quaternion). template class Map, Options> : public Sophus::SO3Base, Options>> { public: using Base = Sophus::SO3Base, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; /// ``Base`` is friend so unit_quaternion_nonconst can be accessed from /// ``Base``. friend class Sophus::SO3Base, Options>>; // LCOV_EXCL_START SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map); // LCOV_EXCL_STOP using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar* coeffs) : unit_quaternion_(coeffs) {} /// Accessor of unit quaternion. /// SOPHUS_FUNC Map, Options> const& unit_quaternion() const { return unit_quaternion_; } protected: /// Mutator of unit_quaternion is protected to ensure class invariant. /// SOPHUS_FUNC Map, Options>& unit_quaternion_nonconst() { return unit_quaternion_; } Map, Options> unit_quaternion_; }; /// Specialization of Eigen::Map for ``SO3 const``; derived from SO3Base. /// /// Allows us to wrap SO3 objects around POD array (e.g. external c style /// quaternion). 
template class Map const, Options> : public Sophus::SO3Base const, Options>> { public: using Base = Sophus::SO3Base const, Options>>; using Scalar = Scalar_; using Transformation = typename Base::Transformation; using Point = typename Base::Point; using HomogeneousPoint = typename Base::HomogeneousPoint; using Tangent = typename Base::Tangent; using Adjoint = typename Base::Adjoint; using Base::operator*=; using Base::operator*; SOPHUS_FUNC Map(Scalar const* coeffs) : unit_quaternion_(coeffs) {} /// Accessor of unit quaternion. /// SOPHUS_FUNC Map const, Options> const& unit_quaternion() const { return unit_quaternion_; } protected: /// Mutator of unit_quaternion is protected to ensure class invariant. /// Map const, Options> const unit_quaternion_; }; } // namespace Eigen #endif ================================================ FILE: LIO-Livox/include/sophus/test_macros.hpp ================================================ #ifndef SOPUHS_TESTS_MACROS_HPP #define SOPUHS_TESTS_MACROS_HPP #include #include namespace Sophus { namespace details { template class Pretty { public: static std::string impl(Scalar s) { return FormatString("%", s); } }; template class Pretty> { public: static std::string impl(Matrix const& v) { return FormatString("\n%\n", v); } }; template std::string pretty(T const& v) { return Pretty::impl(v); } template void testFailed(bool& passed, char const* func, char const* file, int line, std::string const& msg) { std::cerr << FormatString("Test failed in function %, file %, line %\n", func, file, line); std::cerr << msg << "\n\n"; passed = false; } } // namespace details void processTestResult(bool passed) { if (!passed) { // LCOV_EXCL_START std::cerr << "failed!" << std::endl << std::endl; exit(-1); // LCOV_EXCL_STOP } std::cerr << "passed." << std::endl << std::endl; } } // namespace Sophus #define SOPHUS_STRINGIFY(x) #x /// GenericTests whether condition is true. /// The in-out parameter passed will be set to false if test fails. 
#define SOPHUS_TEST(passed, condition, ...) \ do { \ if (!(condition)) { \ std::string msg = Sophus::details::FormatString( \ "condition ``%`` is false\n", SOPHUS_STRINGIFY(condition)); \ msg += Sophus::details::FormatString(__VA_ARGS__); \ Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \ msg); \ } \ } while (false) /// GenericTests whether left is equal to right given a threshold. /// The in-out parameter passed will be set to false if test fails. #define SOPHUS_TEST_EQUAL(passed, left, right, ...) \ do { \ if (left != right) { \ std::string msg = Sophus::details::FormatString( \ "% (=%) is not equal to % (=%)\n", SOPHUS_STRINGIFY(left), \ Sophus::details::pretty(left), SOPHUS_STRINGIFY(right), \ Sophus::details::pretty(right)); \ msg += Sophus::details::FormatString(__VA_ARGS__); \ Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \ msg); \ } \ } while (false) /// GenericTests whether left is equal to right given a threshold. /// The in-out parameter passed will be set to false if test fails. #define SOPHUS_TEST_NEQ(passed, left, right, ...) \ do { \ if (left == right) { \ std::string msg = Sophus::details::FormatString( \ "% (=%) shoudl not be equal to % (=%)\n", SOPHUS_STRINGIFY(left), \ Sophus::details::pretty(left), SOPHUS_STRINGIFY(right), \ Sophus::details::pretty(right)); \ msg += Sophus::details::FormatString(__VA_ARGS__); \ Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \ msg); \ } \ } while (false) /// GenericTests whether left is approximatly equal to right given a threshold. /// The in-out parameter passed will be set to false if test fails. #define SOPHUS_TEST_APPROX(passed, left, right, thr, ...) 
\ do { \ auto nrm = Sophus::maxMetric((left), (right)); \ if (!(nrm < (thr))) { \ std::string msg = Sophus::details::FormatString( \ "% (=%) is not approx % (=%); % is %; nrm is %\n", \ SOPHUS_STRINGIFY(left), Sophus::details::pretty(left), \ SOPHUS_STRINGIFY(right), Sophus::details::pretty(right), \ SOPHUS_STRINGIFY(thr), Sophus::details::pretty(thr), nrm); \ msg += Sophus::details::FormatString(__VA_ARGS__); \ Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \ msg); \ } \ } while (false) /// GenericTests whether left is NOT approximatly equal to right given a /// threshold. The in-out parameter passed will be set to false if test fails. #define SOPHUS_TEST_NOT_APPROX(passed, left, right, thr, ...) \ do { \ auto nrm = Sophus::maxMetric((left), (right)); \ if (nrm < (thr)) { \ std::string msg = Sophus::details::FormatString( \ "% (=%) is approx % (=%), but it should not!\n % is %; nrm is %\n", \ SOPHUS_STRINGIFY(left), Sophus::details::pretty(left), \ SOPHUS_STRINGIFY(right), Sophus::details::pretty(right), \ SOPHUS_STRINGIFY(thr), Sophus::details::pretty(thr), nrm); \ msg += Sophus::details::FormatString(__VA_ARGS__); \ Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \ msg); \ } \ } while (false) #endif // SOPUHS_TESTS_MACROS_HPP ================================================ FILE: LIO-Livox/include/sophus/types.hpp ================================================ /// @file /// Common type aliases. 
#ifndef SOPHUS_TYPES_HPP #define SOPHUS_TYPES_HPP #include #include "common.hpp" namespace Sophus { template using Vector = Eigen::Matrix; template using Vector2 = Vector; using Vector2f = Vector2; using Vector2d = Vector2; template using Vector3 = Vector; using Vector3f = Vector3; using Vector3d = Vector3; template using Vector4 = Vector; using Vector4f = Vector4; using Vector4d = Vector4; template using Vector6 = Vector; using Vector6f = Vector6; using Vector6d = Vector6; template using Vector7 = Vector; using Vector7f = Vector7; using Vector7d = Vector7; template using Matrix = Eigen::Matrix; template using Matrix2 = Matrix; using Matrix2f = Matrix2; using Matrix2d = Matrix2; template using Matrix3 = Matrix; using Matrix3f = Matrix3; using Matrix3d = Matrix3; template using Matrix4 = Matrix; using Matrix4f = Matrix4; using Matrix4d = Matrix4; template using Matrix6 = Matrix; using Matrix6f = Matrix6; using Matrix6d = Matrix6; template using Matrix7 = Matrix; using Matrix7f = Matrix7; using Matrix7d = Matrix7; template using ParametrizedLine = Eigen::ParametrizedLine; template using ParametrizedLine3 = ParametrizedLine; using ParametrizedLine3f = ParametrizedLine3; using ParametrizedLine3d = ParametrizedLine3; template using ParametrizedLine2 = ParametrizedLine; using ParametrizedLine2f = ParametrizedLine2; using ParametrizedLine2d = ParametrizedLine2; namespace details { template class MaxMetric { public: static Scalar impl(Scalar s0, Scalar s1) { using std::abs; return abs(s0 - s1); } }; template class MaxMetric> { public: static Scalar impl(Matrix const& p0, Matrix const& p1) { return (p0 - p1).template lpNorm(); } }; template class SetToZero { public: static void impl(Scalar& s) { s = Scalar(0); } }; template class SetToZero> { public: static void impl(Matrix& v) { v.setZero(); } }; template class SetElementAt; template class SetElementAt { public: static void impl(Scalar& s, Scalar value, int at) { SOPHUS_ENSURE(at == 0, "is %", at); s = value; } }; template 
class SetElementAt, Scalar> { public: static void impl(Vector& v, Scalar value, int at) { SOPHUS_ENSURE(at >= 0 && at < N, "is %", at); v[at] = value; } }; template class SquaredNorm { public: static Scalar impl(Scalar const& s) { return s * s; } }; template class SquaredNorm> { public: static Scalar impl(Matrix const& s) { return s.squaredNorm(); } }; template class Transpose { public: static Scalar impl(Scalar const& s) { return s; } }; template class Transpose> { public: static Matrix impl(Matrix const& s) { return s.transpose(); } }; } // namespace details /// Returns maximum metric between two points ``p0`` and ``p1``, with ``p0, p1`` /// being matrices or a scalars. /// template auto maxMetric(T const& p0, T const& p1) -> decltype(details::MaxMetric::impl(p0, p1)) { return details::MaxMetric::impl(p0, p1); } /// Sets point ``p`` to zero, with ``p`` being a matrix or a scalar. /// template void setToZero(T& p) { return details::SetToZero::impl(p); } /// Sets ``i``th component of ``p`` to ``value``, with ``p`` being a /// matrix or a scalar. If ``p`` is a scalar, ``i`` must be ``0``. /// template void setElementAt(T& p, Scalar value, int i) { return details::SetElementAt::impl(p, value, i); } /// Returns the squared 2-norm of ``p``, with ``p`` being a vector or a scalar. /// template auto squaredNorm(T const& p) -> decltype(details::SquaredNorm::impl(p)) { return details::SquaredNorm::impl(p); } /// Returns ``p.transpose()`` if ``p`` is a matrix, and simply ``p`` if m is a /// scalar. 
/// template auto transpose(T const& p) -> decltype(details::Transpose::impl(T())) { return details::Transpose::impl(p); } template struct IsFloatingPoint { static bool const value = std::is_floating_point::value; }; template struct IsFloatingPoint> { static bool const value = std::is_floating_point::value; }; template struct GetScalar { using Scalar = Scalar_; }; template struct GetScalar> { using Scalar = Scalar_; }; /// If the Vector type is of fixed size, then IsFixedSizeVector::value will be /// true. template ::type> struct IsFixedSizeVector : std::true_type {}; /// Planes in 3d are hyperplanes. template using Plane3 = Eigen::Hyperplane; using Plane3d = Plane3; using Plane3f = Plane3; /// Lines in 2d are hyperplanes. template using Line2 = Eigen::Hyperplane; using Line2d = Line2; using Line2f = Line2; } // namespace Sophus #endif // SOPHUS_TYPES_HPP ================================================ FILE: LIO-Livox/include/sophus/velocities.hpp ================================================ #ifndef SOPHUS_VELOCITIES_HPP #define SOPHUS_VELOCITIES_HPP #include #include "num_diff.hpp" #include "se3.hpp" namespace Sophus { namespace experimental { // Experimental since the API will certainly change drastically in the future. // Transforms velocity vector by rotation ``foo_R_bar``. // // Note: vel_bar can be either a linear or a rotational velocity vector. // template Vector3 transformVelocity(SO3 const& foo_R_bar, Vector3 const& vel_bar) { // For rotational velocities note that: // // vel_bar = vee(foo_R_bar * hat(vel_bar) * foo_R_bar^T) // = vee(hat(Adj(foo_R_bar) * vel_bar)) // = Adj(foo_R_bar) * vel_bar // = foo_R_bar * vel_bar. // return foo_R_bar * vel_bar; } // Transforms velocity vector by pose ``foo_T_bar``. // // Note: vel_bar can be either a linear or a rotational velocity vector. 
// template Vector3 transformVelocity(SE3 const& foo_T_bar, Vector3 const& vel_bar) { return transformVelocity(foo_T_bar.so3(), vel_bar); } // finite difference approximation of instantanious velocity in frame foo // template Vector3 finiteDifferenceRotationalVelocity( std::function(Scalar)> const& foo_R_bar, Scalar t, Scalar h = Constants::epsilon()) { // https://en.wikipedia.org/w/index.php?title=Angular_velocity&oldid=791867792#Angular_velocity_tensor // // W = dR(t)/dt * R^{-1}(t) Matrix3 dR_dt_in_frame_foo = curveNumDiff( [&foo_R_bar](Scalar t0) -> Matrix3 { return foo_R_bar(t0).matrix(); }, t, h); // velocity tensor Matrix3 W_in_frame_foo = dR_dt_in_frame_foo * (foo_R_bar(t)).inverse().matrix(); return SO3::vee(W_in_frame_foo); } // finite difference approximation of instantanious velocity in frame foo // template Vector3 finiteDifferenceRotationalVelocity( std::function(Scalar)> const& foo_T_bar, Scalar t, Scalar h = Constants::epsilon()) { return finiteDifferenceRotationalVelocity( [&foo_T_bar](Scalar t) -> SO3 { return foo_T_bar(t).so3(); }, t, h); } } // namespace experimental } // namespace Sophus #endif // SOPHUS_VELOCITIES_HPP ================================================ FILE: LIO-Livox/include/utils/ceresfunc.h ================================================ #ifndef LIO_LIVOX_CERESFUNC_H #define LIO_LIVOX_CERESFUNC_H #include #include #include #include #include #include "sophus/so3.hpp" #include "IMUIntegrator/IMUIntegrator.h" #include "math_utils.hpp" const int NUM_THREADS = 4; /** \brief Residual Block Used for marginalization */ struct ResidualBlockInfo { ResidualBlockInfo(ceres::CostFunction *_cost_function, ceres::LossFunction *_loss_function, std::vector _parameter_blocks, std::vector _drop_set) : cost_function(_cost_function), loss_function(_loss_function), parameter_blocks(std::move(_parameter_blocks)), drop_set(std::move(_drop_set)) {} void Evaluate(){ residuals.resize(cost_function->num_residuals()); std::vector block_sizes = 
cost_function->parameter_block_sizes(); raw_jacobians = new double *[block_sizes.size()]; jacobians.resize(block_sizes.size()); for (int i = 0; i < static_cast(block_sizes.size()); i++) { jacobians[i].resize(cost_function->num_residuals(), block_sizes[i]); raw_jacobians[i] = jacobians[i].data(); } cost_function->Evaluate(parameter_blocks.data(), residuals.data(), raw_jacobians); if (loss_function) { double residual_scaling_, alpha_sq_norm_; double sq_norm, rho[3]; sq_norm = residuals.squaredNorm(); loss_function->Evaluate(sq_norm, rho); double sqrt_rho1_ = sqrt(rho[1]); if ((sq_norm == 0.0) || (rho[2] <= 0.0)) { residual_scaling_ = sqrt_rho1_; alpha_sq_norm_ = 0.0; } else { const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1]; const double alpha = 1.0 - sqrt(D); residual_scaling_ = sqrt_rho1_ / (1 - alpha); alpha_sq_norm_ = alpha / sq_norm; } for (int i = 0; i < static_cast(parameter_blocks.size()); i++) { jacobians[i] = sqrt_rho1_ * (jacobians[i] - alpha_sq_norm_ * residuals * (residuals.transpose() * jacobians[i])); } residuals *= residual_scaling_; } } ceres::CostFunction *cost_function; ceres::LossFunction *loss_function; std::vector parameter_blocks; std::vector drop_set; double **raw_jacobians{}; std::vector> jacobians; Eigen::VectorXd residuals; }; struct ThreadsStruct { std::vector sub_factors; Eigen::MatrixXd A; Eigen::VectorXd b; std::unordered_map parameter_block_size; std::unordered_map parameter_block_idx; }; /** \brief Multi-thread to process marginalization */ void* ThreadsConstructA(void* threadsstruct); /** \brief marginalization infomation */ class MarginalizationInfo { public: ~MarginalizationInfo(){ // ROS_WARN("release marginlizationinfo"); for (auto it = parameter_block_data.begin(); it != parameter_block_data.end(); ++it) delete[] it->second; for (int i = 0; i < (int)factors.size(); i++) { delete[] factors[i]->raw_jacobians; delete factors[i]->cost_function; delete factors[i]; } } void addResidualBlockInfo(ResidualBlockInfo 
*residual_block_info){ factors.emplace_back(residual_block_info); std::vector ¶meter_blocks = residual_block_info->parameter_blocks; std::vector parameter_block_sizes = residual_block_info->cost_function->parameter_block_sizes(); for (int i = 0; i < static_cast(residual_block_info->parameter_blocks.size()); i++) { double *addr = parameter_blocks[i]; int size = parameter_block_sizes[i]; parameter_block_size[reinterpret_cast(addr)] = size; } for (int i = 0; i < static_cast(residual_block_info->drop_set.size()); i++) { double *addr = parameter_blocks[residual_block_info->drop_set[i]]; parameter_block_idx[reinterpret_cast(addr)] = 0; } } void preMarginalize(){ for (auto it : factors) { it->Evaluate(); std::vector block_sizes = it->cost_function->parameter_block_sizes(); for (int i = 0; i < static_cast(block_sizes.size()); i++) { long addr = reinterpret_cast(it->parameter_blocks[i]); int size = block_sizes[i]; if (parameter_block_data.find(addr) == parameter_block_data.end()) { double *data = new double[size]; memcpy(data, it->parameter_blocks[i], sizeof(double) * size); parameter_block_data[addr] = data; } } } } void marginalize(){ int pos = 0; for (auto &it : parameter_block_idx) { it.second = pos; pos += parameter_block_size[it.first]; } m = pos; for (const auto &it : parameter_block_size) { if (parameter_block_idx.find(it.first) == parameter_block_idx.end()) { parameter_block_idx[it.first] = pos; pos += it.second; } } n = pos - m; Eigen::MatrixXd A(pos, pos); Eigen::VectorXd b(pos); A.setZero(); b.setZero(); pthread_t tids[NUM_THREADS]; ThreadsStruct threadsstruct[NUM_THREADS]; int i = 0; for (auto it : factors) { threadsstruct[i].sub_factors.push_back(it); i++; i = i % NUM_THREADS; } for (int i = 0; i < NUM_THREADS; i++) { threadsstruct[i].A = Eigen::MatrixXd::Zero(pos,pos); threadsstruct[i].b = Eigen::VectorXd::Zero(pos); threadsstruct[i].parameter_block_size = parameter_block_size; threadsstruct[i].parameter_block_idx = parameter_block_idx; int ret = 
pthread_create( &tids[i], NULL, ThreadsConstructA ,(void*)&(threadsstruct[i])); if (ret != 0) { std::cout<<"pthread_create error"<= 0; i--) { pthread_join( tids[i], NULL ); A += threadsstruct[i].A; b += threadsstruct[i].b; } Eigen::MatrixXd Amm = 0.5 * (A.block(0, 0, m, m) + A.block(0, 0, m, m).transpose()); Eigen::SelfAdjointEigenSolver saes(Amm); Eigen::MatrixXd Amm_inv = saes.eigenvectors() * Eigen::VectorXd((saes.eigenvalues().array() > eps).select(saes.eigenvalues().array().inverse(), 0)).asDiagonal() * saes.eigenvectors().transpose(); Eigen::VectorXd bmm = b.segment(0, m); Eigen::MatrixXd Amr = A.block(0, m, m, n); Eigen::MatrixXd Arm = A.block(m, 0, n, m); Eigen::MatrixXd Arr = A.block(m, m, n, n); Eigen::VectorXd brr = b.segment(m, n); A = Arr - Arm * Amm_inv * Amr; b = brr - Arm * Amm_inv * bmm; Eigen::SelfAdjointEigenSolver saes2(A); Eigen::VectorXd S = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array(), 0)); Eigen::VectorXd S_inv = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array().inverse(), 0)); Eigen::VectorXd S_sqrt = S.cwiseSqrt(); Eigen::VectorXd S_inv_sqrt = S_inv.cwiseSqrt(); linearized_jacobians = S_sqrt.asDiagonal() * saes2.eigenvectors().transpose(); linearized_residuals = S_inv_sqrt.asDiagonal() * saes2.eigenvectors().transpose() * b; } std::vector getParameterBlocks(std::unordered_map &addr_shift){ std::vector keep_block_addr; keep_block_size.clear(); keep_block_idx.clear(); keep_block_data.clear(); for (const auto &it : parameter_block_idx) { if (it.second >= m) { keep_block_size.push_back(parameter_block_size[it.first]); keep_block_idx.push_back(parameter_block_idx[it.first]); keep_block_data.push_back(parameter_block_data[it.first]); keep_block_addr.push_back(addr_shift[it.first]); } } sum_block_size = std::accumulate(std::begin(keep_block_size), std::end(keep_block_size), 0); return keep_block_addr; } std::vector factors; int m, n; std::unordered_map 
parameter_block_size; int sum_block_size; std::unordered_map parameter_block_idx; std::unordered_map parameter_block_data; std::vector keep_block_size; std::vector keep_block_idx; std::vector keep_block_data; Eigen::MatrixXd linearized_jacobians; Eigen::VectorXd linearized_residuals; const double eps = 1e-8; }; /** \brief Ceres Cost Funtion Used for Marginalization */ class MarginalizationFactor : public ceres::CostFunction { public: explicit MarginalizationFactor(MarginalizationInfo* _marginalization_info):marginalization_info(_marginalization_info){ int cnt = 0; for (auto it : marginalization_info->keep_block_size) { mutable_parameter_block_sizes()->push_back(it); cnt += it; } set_num_residuals(marginalization_info->n); }; bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const override{ int n = marginalization_info->n; int m = marginalization_info->m; Eigen::VectorXd dx(n); for (int i = 0; i < static_cast(marginalization_info->keep_block_size.size()); i++) { int size = marginalization_info->keep_block_size[i]; int idx = marginalization_info->keep_block_idx[i] - m; Eigen::VectorXd x = Eigen::Map(parameters[i], size); Eigen::VectorXd x0 = Eigen::Map(marginalization_info->keep_block_data[i], size); if(size == 6){ dx.segment<3>(idx + 0) = x.segment<3>(0) - x0.segment<3>(0); dx.segment<3>(idx + 3) = (Sophus::SO3d::exp(x.segment<3>(3)).inverse() * Sophus::SO3d::exp(x0.segment<3>(3))).log(); }else{ dx.segment(idx, size) = x - x0; } } Eigen::Map(residuals, n) = marginalization_info->linearized_residuals + marginalization_info->linearized_jacobians * dx; if (jacobians) { for (int i = 0; i < static_cast(marginalization_info->keep_block_size.size()); i++) { if (jacobians[i]) { int size = marginalization_info->keep_block_size[i]; int idx = marginalization_info->keep_block_idx[i] - m; Eigen::Map> jacobian(jacobians[i], n, size); jacobian.setZero(); jacobian.leftCols(size) = marginalization_info->linearized_jacobians.middleCols(idx, size); 
} } } return true; } MarginalizationInfo* marginalization_info; }; /** \brief Ceres Cost Funtion between Lidar Pose and Ground */ struct Cost_NavState_PR_Ground { Cost_NavState_PR_Ground(Eigen::VectorXf ground_plane_coeff_):ground_plane_coeff(ground_plane_coeff_) { // std::cout << ground_plane_coeff.transpose() << std::endl; } template bool operator()( const T *pri_, T *residual) const { Eigen::Map> PRi(pri_); Eigen::Matrix Pi = PRi.template segment<3>(0); Sophus::SO3 SO3_Ri = Sophus::SO3::exp(PRi.template segment<3>(3)); Eigen::Map > eResiduals(residual); eResiduals = Eigen::Matrix::Zero(); Eigen::Matrix T_wl = Eigen::Matrix::Identity(); T_wl.topLeftCorner(3,3) = SO3_Ri.matrix(); T_wl.topRightCorner(3,1) = Pi; Eigen::Matrix T_lw = T_wl.inverse(); Eigen::Matrix R_lw = T_lw.topLeftCorner(3,3); Eigen::Matrix t_lw = T_lw.topRightCorner(3,1); Eigen::Matrix ground_plane_coeff_temp = ground_plane_coeff.cast().template segment<4>(0); Eigen::Matrix local_ground_plane; local_ground_plane.template segment<3>(0) = R_lw * init_ground_plane_coeff.cast().template segment<3>(0); local_ground_plane.template segment<1>(3) = init_ground_plane_coeff.cast().template segment<1>(3) - t_lw.transpose() * local_ground_plane.template segment<3>(0); eResiduals = livox_slam_ware::ominus(ground_plane_coeff_temp, local_ground_plane); eResiduals.applyOnTheLeft(sqrt_information.template cast()); return true; } static ceres::CostFunction *Create(Eigen::VectorXf ground_plane_coeff) { return (new ceres::AutoDiffCostFunction( new Cost_NavState_PR_Ground(ground_plane_coeff))); } Eigen::VectorXf ground_plane_coeff; static Eigen::Matrix sqrt_information; static Eigen::VectorXf init_ground_plane_coeff; }; /** \brief Ceres Cost Funtion between Lidar Pose and IMU Preintegration */ struct Cost_NavState_PRV_Bias { Cost_NavState_PRV_Bias(IMUIntegrator& measure_, Eigen::Vector3d& GravityVec_, Eigen::Matrix sqrt_information_): imu_measure(measure_), GravityVec(GravityVec_), 
sqrt_information(std::move(sqrt_information_)){} template bool operator()( const T *pri_, const T *velobiasi_, const T *prj_, const T *velobiasj_, T *residual) const { Eigen::Map> PRi(pri_); Eigen::Matrix Pi = PRi.template segment<3>(0); Sophus::SO3 SO3_Ri = Sophus::SO3::exp(PRi.template segment<3>(3)); Eigen::Map> PRj(prj_); Eigen::Matrix Pj = PRj.template segment<3>(0); Sophus::SO3 SO3_Rj = Sophus::SO3::exp(PRj.template segment<3>(3)); Eigen::Map> velobiasi(velobiasi_); Eigen::Matrix Vi = velobiasi.template segment<3>(0); Eigen::Matrix dbgi = velobiasi.template segment<3>(3) - imu_measure.GetBiasGyr().cast(); Eigen::Matrix dbai = velobiasi.template segment<3>(6) - imu_measure.GetBiasAcc().cast(); Eigen::Map> velobiasj(velobiasj_); Eigen::Matrix Vj = velobiasj.template segment<3>(0); Eigen::Map > eResiduals(residual); eResiduals = Eigen::Matrix::Zero(); T dTij = T(imu_measure.GetDeltaTime()); T dT2 = dTij*dTij; Eigen::Matrix dPij = imu_measure.GetDeltaP().cast(); Eigen::Matrix dVij = imu_measure.GetDeltaV().cast(); Sophus::SO3 dRij = Sophus::SO3(imu_measure.GetDeltaQ().cast()); Sophus::SO3 RiT = SO3_Ri.inverse(); Eigen::Matrix rPij = RiT*(Pj - Pi - Vi*dTij - 0.5*GravityVec.cast()*dT2) - (dPij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BG).cast()*dbgi + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BA).cast()*dbai); Sophus::SO3 dR_dbg = Sophus::SO3::exp( imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_R, IMUIntegrator::O_BG).cast()*dbgi); Sophus::SO3 rRij = (dRij * dR_dbg).inverse() * RiT * SO3_Rj; Eigen::Matrix rPhiij = rRij.log(); Eigen::Matrix rVij = RiT*(Vj - Vi - GravityVec.cast()*dTij) - (dVij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BG).cast()*dbgi + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BA).cast()*dbai); eResiduals.template segment<3>(0) = rPij; eResiduals.template segment<3>(3) = rPhiij; eResiduals.template segment<3>(6) 
= rVij; eResiduals.template segment<6>(9) = velobiasj.template segment<6>(3) - velobiasi.template segment<6>(3); eResiduals.applyOnTheLeft(sqrt_information.template cast()); return true; } static ceres::CostFunction *Create(IMUIntegrator& measure_, Eigen::Vector3d& GravityVec_, Eigen::Matrix sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_NavState_PRV_Bias(measure_, GravityVec_, std::move(sqrt_information_)))); } IMUIntegrator imu_measure; Eigen::Vector3d GravityVec; Eigen::Matrix sqrt_information; }; /** \brief Ceres Cost Funtion between PointCloud Sharp Feature and Map Cloud */ struct Cost_NavState_IMU_Line { Cost_NavState_IMU_Line(Eigen::Vector3d _p, Eigen::Vector3d _vtx1, Eigen::Vector3d _vtx2, const Eigen::Matrix4d& Tbl, Eigen::Matrix sqrt_information_): point(std::move(_p)), vtx1(std::move(_vtx1)), vtx2(std::move(_vtx2)), sqrt_information(std::move(sqrt_information_)){ l12 = std::sqrt((vtx1(0) - vtx2(0))*(vtx1(0) - vtx2(0)) + (vtx1(1) - vtx2(1))* (vtx1(1) - vtx2(1)) + (vtx1(2) - vtx2(2))*(vtx1(2) - vtx2(2))); Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3); qbl = Eigen::Quaterniond(m3d).normalized(); qlb = qbl.conjugate(); Pbl = Tbl.topRightCorner(3,1); Plb = -(qlb * Pbl); } template bool operator()(const T *PRi, T *residual) const { Eigen::Matrix cp{T(point.x()), T(point.y()), T(point.z())}; Eigen::Matrix lpa{T(vtx1.x()), T(vtx1.y()), T(vtx1.z())}; Eigen::Matrix lpb{T(vtx2.x()), T(vtx2.y()), T(vtx2.z())}; Eigen::Map> pri_wb(PRi); Eigen::Quaternion q_wb = Sophus::SO3::exp(pri_wb.template segment<3>(3)).unit_quaternion(); Eigen::Matrix t_wb = pri_wb.template segment<3>(0); Eigen::Quaternion q_wl = q_wb * qbl.cast(); Eigen::Matrix t_wl = q_wb * Pbl.cast() + t_wb; Eigen::Matrix P_to_Map = q_wl * cp + t_wl; T a012 = ceres::sqrt( ((P_to_Map(0) - lpa(0)) * (P_to_Map(1) - lpb(1)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(1) - lpa(1))) * ((P_to_Map(0) - lpa(0)) * (P_to_Map(1) - lpb(1)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(1) - lpa(1))) + ((P_to_Map(0) 
- lpa(0)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(2) - lpa(2))) * ((P_to_Map(0) - lpa(0)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(2) - lpa(2))) + ((P_to_Map(1) - lpa(1)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(1) - lpb(1)) * (P_to_Map(2) - lpa(2))) * ((P_to_Map(1) - lpa(1)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(1) - lpb(1)) * (P_to_Map(2) - lpa(2)))); T ld2 = a012 / T(l12); T _weight = T(1) - T(0.9) * ceres::abs(ld2) / ceres::sqrt( ceres::sqrt( P_to_Map(0) * P_to_Map(0) + P_to_Map(1) * P_to_Map(1) + P_to_Map(2) * P_to_Map(2) )); residual[0] = T(sqrt_information(0)) * _weight * ld2; return true; } static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_, const Eigen::Vector3d& last_point_a_, const Eigen::Vector3d& last_point_b_, const Eigen::Matrix4d& Tbl, Eigen::Matrix sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_NavState_IMU_Line(curr_point_, last_point_a_, last_point_b_, Tbl, std::move(sqrt_information_)))); } Eigen::Vector3d point; Eigen::Vector3d vtx1; Eigen::Vector3d vtx2; double l12; Eigen::Quaterniond qbl, qlb; Eigen::Vector3d Pbl, Plb; Eigen::Matrix sqrt_information; }; /** \brief Ceres Cost Funtion between PointCloud Flat Feature and Map Cloud */ struct Cost_NavState_IMU_Plan { Cost_NavState_IMU_Plan(Eigen::Vector3d _p, double _pa, double _pb, double _pc, double _pd, const Eigen::Matrix4d& Tbl, Eigen::Matrix sqrt_information_): point(std::move(_p)), pa(_pa), pb(_pb), pc(_pc), pd(_pd), sqrt_information(std::move(sqrt_information_)){ Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3); qbl = Eigen::Quaterniond(m3d).normalized(); qlb = qbl.conjugate(); Pbl = Tbl.topRightCorner(3,1); Plb = -(qlb * Pbl); } template bool operator()(const T *PRi, T *residual) const { Eigen::Matrix cp{T(point.x()), T(point.y()), T(point.z())}; Eigen::Map> pri_wb(PRi); Eigen::Quaternion q_wb = Sophus::SO3::exp(pri_wb.template segment<3>(3)).unit_quaternion(); Eigen::Matrix t_wb = pri_wb.template segment<3>(0); 
Eigen::Quaternion q_wl = q_wb * qbl.cast(); Eigen::Matrix t_wl = q_wb * Pbl.cast() + t_wb; Eigen::Matrix P_to_Map = q_wl * cp + t_wl; T pd2 = T(pa) * P_to_Map(0) + T(pb) * P_to_Map(1) + T(pc) * P_to_Map(2) + T(pd); T _weight = T(1) - T(0.9) * ceres::abs(pd2) /ceres::sqrt( ceres::sqrt( P_to_Map(0) * P_to_Map(0) + P_to_Map(1) * P_to_Map(1) + P_to_Map(2) * P_to_Map(2) )); residual[0] = T(sqrt_information(0)) * _weight * pd2; return true; } static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_, const double& pa_, const double& pb_, const double& pc_, const double& pd_, const Eigen::Matrix4d& Tbl, Eigen::Matrix sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_NavState_IMU_Plan(curr_point_, pa_, pb_, pc_, pd_, Tbl, std::move(sqrt_information_)))); } double pa, pb, pc, pd; Eigen::Vector3d point; Eigen::Quaterniond qbl, qlb; Eigen::Vector3d Pbl, Plb; Eigen::Matrix sqrt_information; }; /** \brief Ceres Cost Funtion between PointCloud Flat Feature and Map Cloud */ struct Cost_NavState_IMU_Plan_Vec { Cost_NavState_IMU_Plan_Vec(Eigen::Vector3d _p, Eigen::Vector3d _p_proj, const Eigen::Matrix4d& Tbl, Eigen::Matrix _sqrt_information): point(std::move(_p)), point_proj(std::move(_p_proj)), sqrt_information(std::move(_sqrt_information)){ Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3); qbl = Eigen::Quaterniond(m3d).normalized(); qlb = qbl.conjugate(); Pbl = Tbl.topRightCorner(3,1); Plb = -(qlb * Pbl); } template bool operator()(const T *PRi, T *residual) const { Eigen::Matrix cp{T(point.x()), T(point.y()), T(point.z())}; Eigen::Matrix cp_proj{T(point_proj.x()), T(point_proj.y()), T(point_proj.z())}; Eigen::Map> pri_wb(PRi); Eigen::Quaternion q_wb = Sophus::SO3::exp(pri_wb.template segment<3>(3)).unit_quaternion(); Eigen::Matrix t_wb = pri_wb.template segment<3>(0); Eigen::Quaternion q_wl = q_wb * qbl.cast(); Eigen::Matrix t_wl = q_wb * Pbl.cast() + t_wb; Eigen::Matrix P_to_Map = q_wl * cp + t_wl; Eigen::Map > eResiduals(residual); eResiduals = 
P_to_Map - cp_proj; T _weight = T(1) - T(0.9) * (P_to_Map - cp_proj).norm() /ceres::sqrt( ceres::sqrt( P_to_Map(0) * P_to_Map(0) + P_to_Map(1) * P_to_Map(1) + P_to_Map(2) * P_to_Map(2) )); eResiduals *= _weight; eResiduals.applyOnTheLeft(sqrt_information.template cast()); return true; } static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_, const Eigen::Vector3d& p_proj_, const Eigen::Matrix4d& Tbl, const Eigen::Matrix sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_NavState_IMU_Plan_Vec(curr_point_, p_proj_, Tbl, sqrt_information_))); } Eigen::Vector3d point; Eigen::Vector3d point_proj; Eigen::Quaterniond qbl, qlb; Eigen::Vector3d Pbl, Plb; Eigen::Matrix sqrt_information; }; struct Cost_NonFeature_ICP { Cost_NonFeature_ICP(Eigen::Vector3d _p, double _pa, double _pb, double _pc, double _pd, const Eigen::Matrix4d& Tbl, Eigen::Matrix sqrt_information_): point(std::move(_p)), pa(_pa), pb(_pb), pc(_pc), pd(_pd), sqrt_information(std::move(sqrt_information_)){ Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3); qbl = Eigen::Quaterniond(m3d).normalized(); qlb = qbl.conjugate(); Pbl = Tbl.topRightCorner(3,1); Plb = -(qlb * Pbl); } template bool operator()(const T *PRi, T *residual) const { Eigen::Matrix cp{T(point.x()), T(point.y()), T(point.z())}; Eigen::Map> pri_wb(PRi); Eigen::Quaternion q_wb = Sophus::SO3::exp(pri_wb.template segment<3>(3)).unit_quaternion(); Eigen::Matrix t_wb = pri_wb.template segment<3>(0); Eigen::Quaternion q_wl = q_wb * qbl.cast(); Eigen::Matrix t_wl = q_wb * Pbl.cast() + t_wb; Eigen::Matrix P_to_Map = q_wl * cp + t_wl; T pd2 = T(pa) * P_to_Map(0) + T(pb) * P_to_Map(1) + T(pc) * P_to_Map(2) + T(pd); T _weight = T(1) - T(0.9) * ceres::abs(pd2) /ceres::sqrt( ceres::sqrt( P_to_Map(0) * P_to_Map(0) + P_to_Map(1) * P_to_Map(1) + P_to_Map(2) * P_to_Map(2) )); residual[0] = T(sqrt_information(0)) * _weight * pd2; return true; } static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_, const double& pa_, 
const double& pb_, const double& pc_, const double& pd_, const Eigen::Matrix4d& Tbl, Eigen::Matrix sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_NonFeature_ICP(curr_point_, pa_, pb_, pc_, pd_, Tbl, std::move(sqrt_information_)))); } double pa, pb, pc, pd; Eigen::Vector3d point; Eigen::Quaterniond qbl, qlb; Eigen::Vector3d Pbl, Plb; Eigen::Matrix sqrt_information; }; /** \brief Ceres Cost Funtion for Initial Gravity Direction */ struct Cost_Initial_G { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Cost_Initial_G(Eigen::Vector3d acc_): acc(acc_){} template bool operator()( const T *q, T *residual) const { Eigen::Matrix acc_T = acc.cast(); Eigen::Quaternion q_wg{q[0], q[1], q[2], q[3]}; Eigen::Matrix g_I{T(0), T(0), T(-9.805)}; Eigen::Matrix resi = q_wg * g_I - acc_T; residual[0] = resi[0]; residual[1] = resi[1]; residual[2] = resi[2]; return true; } static ceres::CostFunction *Create(Eigen::Vector3d acc_) { return (new ceres::AutoDiffCostFunction( new Cost_Initial_G(acc_))); } Eigen::Vector3d acc; }; /** \brief Ceres Cost Funtion of IMU Factor in LIO Initialization */ struct Cost_Initialization_IMU { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Cost_Initialization_IMU(IMUIntegrator& measure_, Eigen::Vector3d ri_, Eigen::Vector3d rj_, Eigen::Vector3d dp_, Eigen::Matrix sqrt_information_): imu_measure(measure_), ri(ri_), rj(rj_), dp(dp_), sqrt_information(std::move(sqrt_information_)){} template bool operator()(const T *rwg_, const T *vi_, const T *vj_, const T *ba_, const T *bg_, T *residual) const { Eigen::Matrix G_I{T(0), T(0), T(-9.805)}; Eigen::Map> ba(ba_); Eigen::Map> bg(bg_); Eigen::Matrix dbg = bg - imu_measure.GetBiasGyr().cast(); Eigen::Matrix dba = ba - imu_measure.GetBiasAcc().cast(); Sophus::SO3 SO3_Ri = Sophus::SO3::exp(ri.cast()); Sophus::SO3 SO3_Rj = Sophus::SO3::exp(rj.cast()); Eigen::Matrix dP = dp.cast(); Eigen::Map> rwg(rwg_); Sophus::SO3 SO3_Rwg = Sophus::SO3::exp(rwg); Eigen::Map> vi(vi_); Eigen::Matrix Vi = vi; Eigen::Map> vj(vj_); Eigen::Matrix 
Vj = vj; Eigen::Map > eResiduals(residual); eResiduals = Eigen::Matrix::Zero(); T dTij = T(imu_measure.GetDeltaTime()); T dT2 = dTij*dTij; Eigen::Matrix dPij = imu_measure.GetDeltaP().cast(); Eigen::Matrix dVij = imu_measure.GetDeltaV().cast(); Sophus::SO3 dRij = Sophus::SO3(imu_measure.GetDeltaQ().cast()); Sophus::SO3 RiT = SO3_Ri.inverse(); Eigen::Matrix rPij = RiT*(dP - Vi*dTij - SO3_Rwg*G_I*dT2*T(0.5)) - (dPij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BG).cast()*dbg + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BA).cast()*dba); Sophus::SO3 dR_dbg = Sophus::SO3::exp( imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_R, IMUIntegrator::O_BG).cast()*dbg); Sophus::SO3 rRij = (dRij * dR_dbg).inverse() * RiT * SO3_Rj; Eigen::Matrix rPhiij = rRij.log(); Eigen::Matrix rVij = RiT*(Vj - Vi - SO3_Rwg*G_I*dTij) - (dVij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BG).cast()*dbg + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BA).cast()*dba); eResiduals.template segment<3>(0) = rPij; eResiduals.template segment<3>(3) = rPhiij; eResiduals.template segment<3>(6) = rVij; eResiduals.applyOnTheLeft(sqrt_information.template cast()); return true; } static ceres::CostFunction *Create(IMUIntegrator& measure_, Eigen::Vector3d ri_, Eigen::Vector3d rj_, Eigen::Vector3d dp_, Eigen::Matrix sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_Initialization_IMU(measure_, ri_, rj_, dp_, std::move(sqrt_information_)))); } IMUIntegrator imu_measure; Eigen::Vector3d ri; Eigen::Vector3d rj; Eigen::Vector3d dp; Eigen::Matrix sqrt_information; }; /** \brief Ceres Cost Funtion of IMU Biases and Velocity Prior Factor in LIO Initialization */ struct Cost_Initialization_Prior_bv { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Cost_Initialization_Prior_bv(Eigen::Vector3d prior_, Eigen::Matrix3d sqrt_information_): prior(prior_), 
sqrt_information(std::move(sqrt_information_)){} template bool operator()(const T *bv_, T *residual) const { Eigen::Map> bv(bv_); Eigen::Matrix Bv = bv; Eigen::Matrix prior_T(prior.cast()); Eigen::Matrix prior_Bv = prior_T; Eigen::Map > eResiduals(residual); eResiduals = Eigen::Matrix::Zero(); eResiduals = Bv - prior_Bv; eResiduals.applyOnTheLeft(sqrt_information.template cast()); return true; } static ceres::CostFunction *Create(Eigen::Vector3d prior_, Eigen::Matrix3d sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_Initialization_Prior_bv(prior_, std::move(sqrt_information_)))); } Eigen::Vector3d prior; Eigen::Matrix3d sqrt_information; }; /** \brief Ceres Cost Funtion of Rwg Prior Factor in LIO Initialization */ struct Cost_Initialization_Prior_R { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Cost_Initialization_Prior_R(Eigen::Vector3d prior_, Eigen::Matrix3d sqrt_information_): prior(prior_), sqrt_information(std::move(sqrt_information_)){} template bool operator()( const T *r_wg_, T *residual) const { Eigen::Map> r_wg(r_wg_); Eigen::Matrix R_wg = r_wg; Sophus::SO3 SO3_R_wg = Sophus::SO3::exp(R_wg); Eigen::Matrix prior_T(prior.cast()); Sophus::SO3 prior_R_wg = Sophus::SO3::exp(prior_T); Sophus::SO3 d_R = SO3_R_wg.inverse() * prior_R_wg; Eigen::Matrix d_Phi = d_R.log(); Eigen::Map > eResiduals(residual); eResiduals = Eigen::Matrix::Zero(); eResiduals = d_Phi; eResiduals.applyOnTheLeft(sqrt_information.template cast()); return true; } static ceres::CostFunction *Create(Eigen::Vector3d prior_, Eigen::Matrix3d sqrt_information_) { return (new ceres::AutoDiffCostFunction( new Cost_Initialization_Prior_R(prior_, std::move(sqrt_information_)))); } Eigen::Vector3d prior; Eigen::Matrix3d sqrt_information; }; #endif //LIO_LIVOX_CERESFUNC_H ================================================ FILE: LIO-Livox/include/utils/math_utils.hpp ================================================ /* * COPYRIGHT AND PERMISSION NOTICE * Penn Software MSCKF_VIO * Copyright (C) 2017 
The Trustees of the University of Pennsylvania * All rights reserved. */ // The original file belongs to MSCKF_VIO (https://github.com/KumarRobotics/msckf_vio/) // Some changes have been made to use it in livox_slam_ware #ifndef MATH_UTILS_HPP #define MATH_UTILS_HPP #include #include namespace livox_slam_ware { /* * @brief Create a skew-symmetric matrix from a 3-element vector. * @note Performs the operation: * w -> [ 0 -w3 w2] * [ w3 0 -w1] * [-w2 w1 0] */ inline Eigen::Matrix3d skewSymmetric(const Eigen::Vector3d& w) { Eigen::Matrix3d w_hat; w_hat(0, 0) = 0; w_hat(0, 1) = -w(2); w_hat(0, 2) = w(1); w_hat(1, 0) = w(2); w_hat(1, 1) = 0; w_hat(1, 2) = -w(0); w_hat(2, 0) = -w(1); w_hat(2, 1) = w(0); w_hat(2, 2) = 0; return w_hat; } /* * @brief Normalize the given quaternion to unit quaternion. */ inline void quaternionNormalize(Eigen::Vector4d& q) { double norm = q.norm(); q = q / norm; return; } /* * @brief Perform q1 * q2. * * Format of q1 and q2 is as [x,y,z,w] */ inline Eigen::Vector4d quaternionMultiplication( const Eigen::Vector4d& q1, const Eigen::Vector4d& q2) { Eigen::Matrix4d L; // QXC: Hamilton L(0, 0) = q1(3); L(0, 1) = -q1(2); L(0, 2) = q1(1); L(0, 3) = q1(0); L(1, 0) = q1(2); L(1, 1) = q1(3); L(1, 2) = -q1(0); L(1, 3) = q1(1); L(2, 0) = -q1(1); L(2, 1) = q1(0); L(2, 2) = q1(3); L(2, 3) = q1(2); L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) = q1(3); Eigen::Vector4d q = L * q2; quaternionNormalize(q); return q; } /* * @brief Convert the vector part of a quaternion to a * full quaternion. * @note This function is useful to convert delta quaternion * which is usually a 3x1 vector to a full quaternion. * For more details, check Section 3.2 "Kalman Filter Update" in * "Indirect Kalman Filter for 3D Attitude Estimation: * A Tutorial for quaternion Algebra". 
*/ inline Eigen::Vector4d smallAngleQuaternion( const Eigen::Vector3d& dtheta) { Eigen::Vector3d dq = dtheta / 2.0; Eigen::Vector4d q; double dq_square_norm = dq.squaredNorm(); if (dq_square_norm <= 1) { q.head<3>() = dq; q(3) = std::sqrt(1-dq_square_norm); } else { q.head<3>() = dq; q(3) = 1; q = q / std::sqrt(1+dq_square_norm); } return q; } /* * @brief Convert the vector part of a quaternion to a * full quaternion. * @note This function is useful to convert delta quaternion * which is usually a 3x1 vector to a full quaternion. * For more details, check Section 3.2 "Kalman Filter Update" in * "Indirect Kalman Filter for 3D Attitude Estimation: * A Tutorial for quaternion Algebra". */ inline Eigen::Quaterniond getSmallAngleQuaternion( const Eigen::Vector3d& dtheta) { Eigen::Vector3d dq = dtheta / 2.0; Eigen::Quaterniond q; double dq_square_norm = dq.squaredNorm(); if (dq_square_norm <= 1) { q.x() = dq(0); q.y() = dq(1); q.z() = dq(2); q.w() = std::sqrt(1-dq_square_norm); } else { q.x() = dq(0); q.y() = dq(1); q.z() = dq(2); q.w() = 1; q.normalize(); } return q; } /* * @brief Convert a quaternion to the corresponding rotation matrix * @note Pay attention to the convention used. The function follows the * conversion in "Indirect Kalman Filter for 3D Attitude Estimation: * A Tutorial for Quaternion Algebra", Equation (78). * * The input quaternion should be in the form * [q1, q2, q3, q4(scalar)]^T */ inline Eigen::Matrix3d quaternionToRotation( const Eigen::Vector4d& q) { // QXC: Hamilton const double& qw = q(3); const double& qx = q(0); const double& qy = q(1); const double& qz = q(2); Eigen::Matrix3d R; R(0, 0) = 1-2*(qy*qy+qz*qz); R(0, 1) = 2*(qx*qy-qw*qz); R(0, 2) = 2*(qx*qz+qw*qy); R(1, 0) = 2*(qx*qy+qw*qz); R(1, 1) = 1-2*(qx*qx+qz*qz); R(1, 2) = 2*(qy*qz-qw*qx); R(2, 0) = 2*(qx*qz-qw*qy); R(2, 1) = 2*(qy*qz+qw*qx); R(2, 2) = 1-2*(qx*qx+qy*qy); return R; } /* * @brief Convert a rotation matrix to a quaternion. * @note Pay attention to the convention used. 
The function follows the * conversion in "Indirect Kalman Filter for 3D Attitude Estimation: * A Tutorial for Quaternion Algebra", Equation (78). * * The input quaternion should be in the form * [q1, q2, q3, q4(scalar)]^T */ inline Eigen::Vector4d rotationToQuaternion( const Eigen::Matrix3d& R) { Eigen::Vector4d score; score(0) = R(0, 0); score(1) = R(1, 1); score(2) = R(2, 2); score(3) = R.trace(); int max_row = 0, max_col = 0; score.maxCoeff(&max_row, &max_col); Eigen::Vector4d q = Eigen::Vector4d::Zero(); // QXC: Hamilton if (max_row == 0) { q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0; q(1) = (R(0, 1)+R(1, 0)) / (4*q(0)); q(2) = (R(0, 2)+R(2, 0)) / (4*q(0)); q(3) = (R(2, 1)-R(1, 2)) / (4*q(0)); } else if (max_row == 1) { q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0; q(0) = (R(0, 1)+R(1, 0)) / (4*q(1)); q(2) = (R(1, 2)+R(2, 1)) / (4*q(1)); q(3) = (R(0, 2)-R(2, 0)) / (4*q(1)); } else if (max_row == 2) { q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0; q(0) = (R(0, 2)+R(2, 0)) / (4*q(2)); q(1) = (R(1, 2)+R(2, 1)) / (4*q(2)); q(3) = (R(1, 0)-R(0, 1)) / (4*q(2)); } else { q(3) = std::sqrt(1+R.trace()) / 2.0; q(0) = (R(2, 1)-R(1, 2)) / (4*q(3)); q(1) = (R(0, 2)-R(2, 0)) / (4*q(3)); q(2) = (R(1, 0)-R(0, 1)) / (4*q(3)); } if (q(3) < 0) q = -q; quaternionNormalize(q); return q; } /* * @brief Convert a rotation matrix to a eulerAngles. */ template void R2ypr(const Eigen::Matrix& R, Eigen::Matrix &ypr) { Eigen::Matrix n = R.col(0); Eigen::Matrix o = R.col(1); Eigen::Matrix a = R.col(2); T y = atan2(n(1), n(0)); T p = atan2(-n(2), n(0) * cos(y) + n(1) * sin(y)); T r = atan2(a(0) * sin(y) - a(1) * cos(y), -o(0) * sin(y) + o(1) * cos(y)); ypr(0) = y; ypr(1) = p; ypr(2) = r; } /* * @brief Calculate the error between the two planes. 
*/ template Eigen::Matrix ominus(const Eigen::Matrix& ground_plane_coeff, Eigen::Matrix &local_ground_plane) { Eigen::Matrix n_ground_plane_coeff = ground_plane_coeff.template segment<3>(0); Eigen::Matrix n_local_ground_plane = local_ground_plane.template segment<3>(0); Eigen::Matrix x(1, 0, 0); Eigen::Matrix normal_x = x.template cast(); Eigen::Matrix R = Eigen::Quaternion::FromTwoVectors(n_local_ground_plane, normal_x).toRotationMatrix(); Eigen::Matrix n_ground_plane_coeff_after_rotation = R * n_ground_plane_coeff; Eigen::Matrix result; result(0) = atan2(n_ground_plane_coeff_after_rotation(1), n_ground_plane_coeff_after_rotation(0)); result(1) = atan2(n_ground_plane_coeff_after_rotation(2), n_ground_plane_coeff_after_rotation.template head<2>().norm()); result(2) = -local_ground_plane(3) + ground_plane_coeff(3); return result; } } // end namespace livox_slam_ware #endif // MATH_UTILS_HPP ================================================ FILE: LIO-Livox/include/utils/pcl_utils.hpp ================================================ #ifndef PCL_UTILS_HPP #define PCL_UTILS_HPP #include #include namespace livox_slam_ware { /* * @brief Get the plane coeffs. 
*/ template Eigen::VectorXf get_plane_coeffs (typename pcl::PointCloud::Ptr lidar_cloud) { Eigen::VectorXf coeff; typename pcl::PointCloud::Ptr ground_cloud; ground_cloud.reset(new pcl::PointCloud); for(const auto& p : lidar_cloud->points){ if(std::fabs(p.normal_y + 1.0) < 1e-5) { ground_cloud->push_back(p); } } typename pcl::SampleConsensusModelPlane::Ptr model( new pcl::SampleConsensusModelPlane(ground_cloud));//定义待拟合平面的model,并使用待拟合点云初始化 pcl::RandomSampleConsensus ransac(model);//定义RANSAC算法模型 ransac.setDistanceThreshold(0.05);//设定阈值 ransac.computeModel();//拟合 ransac.getModelCoefficients(coeff);//获取拟合平面参数,对于平面ax+by_cz_d=0,coeff分别按顺序保存a,b,c,d // make the normal upward // 法向量颠倒个方向 if(coeff.head<3>().dot(Eigen::Vector3f::UnitZ()) < 0.0f) { coeff *= -1.0f; } return coeff; } } #endif // PCL_UTILS_HPP ================================================ FILE: LIO-Livox/launch/livox_odometry.launch ================================================ [1.0, 0.0, 0.0, -0.05512, 0.0, 1.0, 0.0, -0.02226, 0.0, 0.0, 1.0, 0.0297, 0.0, 0.0, 0.0, 1.0] ================================================ FILE: LIO-Livox/package.xml ================================================ livox_odometry 0.0.0 This is the modified version of LIO-Livox, where serval new factors are added. 
For original version of LIO-Livox, please refer to: https://github.com/Livox-SDK/LIO-Livox livox BSD livox Siyuan with Xudong catkin message_generation geometry_msgs nav_msgs roscpp rospy std_msgs sensor_msgs tf livox_ros_driver tf_conversions eigen_conversions pcl_conversions pcl_ros message_filters std_srvs message_runtime geometry_msgs nav_msgs sensor_msgs roscpp rospy std_msgs tf livox_ros_driver tf_conversions eigen_conversions pcl_conversions pcl_ros message_filters std_srvs rostest rosbag ================================================ FILE: LIO-Livox/rviz_cfg/lio.rviz ================================================ Panels: - Class: rviz/Displays Help Height: 85 Name: Displays Property Tree Widget: Expanded: - /Global Options1 - /odometry1 - /odometry1/TF1 - /mapping1 - /mapping1/Path1 - /mapping1/PointCloud21 - /Grid1 Splitter Ratio: 0.5166666507720947 Tree Height: 1207 - Class: rviz/Selection Name: Selection - Class: rviz/Tool Properties Expanded: - /2D Pose Estimate1 - /2D Nav Goal1 - /Publish Point1 Name: Tool Properties Splitter Ratio: 0.5886790156364441 - Class: rviz/Views Expanded: - /Current View1 Name: Views Splitter Ratio: 0.49433961510658264 - Class: rviz/Time Experimental: false Name: Time SyncMode: 0 SyncSource: PointCloud2 Preferences: PromptSaveOnExit: true Toolbars: toolButtonStyle: 2 Visualization Manager: Class: "" Displays: - Class: rviz/Group Displays: - Class: rviz/TF Enabled: true Frame Timeout: 5000 Frames: All Enabled: true livox_frame: Value: true world: Value: true Marker Scale: 10 Name: TF Show Arrows: false Show Axes: true Show Names: true Tree: world: livox_frame: {} Update Interval: 0 Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 255; 255 Color Transformer: FlatColor Decay Time: 0 Enabled: true Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 255 Min Color: 0; 0; 0 Min 
Intensity: 0 Name: full_cloud Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 1.5 Size (m): 0.10000000149011612 Style: Points Topic: /livox_full_cloud_mapped Unreliable: false Use Fixed Frame: true Use rainbow: true Value: true Enabled: true Name: odometry - Class: rviz/Group Displays: - Alpha: 1 Buffer Length: 1 Class: rviz/Path Color: 255; 85; 255 Enabled: true Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 Line Style: Lines Line Width: 0.029999999329447746 Name: Path Offset: X: 0 Y: 0 Z: 0 Pose Color: 255; 85; 255 Pose Style: None Radius: 0.029999999329447746 Shaft Diameter: 0.10000000149011612 Shaft Length: 0.10000000149011612 Topic: /livox_odometry_path_mapped Unreliable: false Value: true - Alpha: 0.05000000074505806 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 37.26676559448242 Min Value: -14.624585151672363 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 0; 255; 127 Color Transformer: Intensity Decay Time: 9999 Enabled: true Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 187 Min Color: 0; 0; 0 Min Intensity: 0 Name: PointCloud2 Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 1 Size (m): 0.009999999776482582 Style: Points Topic: /livox_full_cloud_mapped Unreliable: false Use Fixed Frame: true Use rainbow: true Value: true Enabled: true Name: mapping - Alpha: 0.5 Cell Size: 1 Class: rviz/Grid Color: 160; 160; 164 Enabled: true Line Style: Line Width: 0.029999999329447746 Value: Lines Name: Grid Normal Cell Count: 0 Offset: X: 0 Y: 0 Z: 0 Plane: XY Plane Cell Count: 120 Reference Frame: world Value: true Enabled: true Global Options: Background Color: 0; 0; 0 Default Light: true Fixed Frame: world Frame Rate: 30 Name: root Tools: - Class: rviz/Interact Hide Inactive Objects: true - Class: rviz/MoveCamera - Class: rviz/Select - Class: rviz/FocusCamera - Class: rviz/Measure - Class: 
rviz/SetInitialPose Theta std deviation: 0.2617993950843811 Topic: /initialpose X std deviation: 0.5 Y std deviation: 0.5 - Class: rviz/SetGoal Topic: /move_base_simple/goal - Class: rviz/PublishPoint Single click: true Topic: /clicked_point Value: true Views: Current: Class: rviz/Orbit Distance: 167.27032470703125 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: X: -0.31172746419906616 Y: 27.33951187133789 Z: 0.9463337659835815 Focal Shape Fixed Size: true Focal Shape Size: 0.05000000074505806 Invert Z Axis: false Name: Current View Near Clip Distance: 0.009999999776482582 Pitch: 1.5697963237762451 Target Frame: world Value: Orbit (rviz) Yaw: 3.1316657066345215 Saved: ~ Window Geometry: Displays: collapsed: true Height: 1410 Hide Left Dock: true Hide Right Dock: true QMainWindow State: 000000ff00000000fd00000004000000000000016a0000054dfc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006b00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007300000000170000054d000000eb00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006502000001dd00000169000004900000024500000001000002870000054dfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000170000054d000000b900fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b2000000
00000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d00000039fc0100000002fb0000000800540069006d006500000000000000073d000004f900fffffffb0000000800540069006d00650100000000000004500000000000000000000009af0000054d00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730000000000ffffffff0000000000000000 Selection: collapsed: false Time: collapsed: false Tool Properties: collapsed: false Views: collapsed: true Width: 2479 X: 81 Y: 30 ================================================ FILE: LIO-Livox/src/lio/Estimator.cpp ================================================ #include "Estimator/Estimator.h" Estimator::Estimator(const float& filter_corner, const float& filter_surf){ laserCloudCornerFromLocal.reset(new pcl::PointCloud); laserCloudSurfFromLocal.reset(new pcl::PointCloud); laserCloudNonFeatureFromLocal.reset(new pcl::PointCloud); // 初始化起始地面点云 initGroundCloud.reset(new pcl::PointCloud); init_ground_count = 0; laserCloudCornerLast.resize(SLIDEWINDOWSIZE); for(auto& p:laserCloudCornerLast) p.reset(new pcl::PointCloud); laserCloudSurfLast.resize(SLIDEWINDOWSIZE); for(auto& p:laserCloudSurfLast) p.reset(new pcl::PointCloud); laserCloudNonFeatureLast.resize(SLIDEWINDOWSIZE); for(auto& p:laserCloudNonFeatureLast) p.reset(new pcl::PointCloud); laserCloudCornerStack.resize(SLIDEWINDOWSIZE); for(auto& p:laserCloudCornerStack) p.reset(new pcl::PointCloud); laserCloudSurfStack.resize(SLIDEWINDOWSIZE); for(auto& p:laserCloudSurfStack) p.reset(new pcl::PointCloud); laserCloudNonFeatureStack.resize(SLIDEWINDOWSIZE); for(auto& p:laserCloudNonFeatureStack) p.reset(new pcl::PointCloud); laserCloudCornerForMap.reset(new pcl::PointCloud); laserCloudSurfForMap.reset(new pcl::PointCloud); laserCloudNonFeatureForMap.reset(new pcl::PointCloud); transformForMap.setIdentity(); kdtreeCornerFromLocal.reset(new pcl::KdTreeFLANN); kdtreeSurfFromLocal.reset(new pcl::KdTreeFLANN); 
kdtreeNonFeatureFromLocal.reset(new pcl::KdTreeFLANN); for(int i = 0; i < localMapWindowSize; i++){ localCornerMap[i].reset(new pcl::PointCloud); localSurfMap[i].reset(new pcl::PointCloud); localNonFeatureMap[i].reset(new pcl::PointCloud); } downSizeFilterCorner.setLeafSize(filter_corner, filter_corner, filter_corner); downSizeFilterSurf.setLeafSize(filter_surf, filter_surf, filter_surf); downSizeFilterNonFeature.setLeafSize(0.4, 0.4, 0.4); map_manager = new MAP_MANAGER(filter_corner, filter_surf); threadMap = std::thread(&Estimator::threadMapIncrement, this); } Estimator::~Estimator(){ delete map_manager; } [[noreturn]] void Estimator::threadMapIncrement(){ pcl::PointCloud::Ptr laserCloudCorner(new pcl::PointCloud); pcl::PointCloud::Ptr laserCloudSurf(new pcl::PointCloud); pcl::PointCloud::Ptr laserCloudNonFeature(new pcl::PointCloud); pcl::PointCloud::Ptr laserCloudCorner_to_map(new pcl::PointCloud); pcl::PointCloud::Ptr laserCloudSurf_to_map(new pcl::PointCloud); pcl::PointCloud::Ptr laserCloudNonFeature_to_map(new pcl::PointCloud); Eigen::Matrix4d transform; while(true){ std::unique_lock locker(mtx_Map); if(!laserCloudCornerForMap->empty()){ map_update_ID ++; map_manager->featureAssociateToMap(laserCloudCornerForMap, laserCloudSurfForMap, laserCloudNonFeatureForMap, laserCloudCorner, laserCloudSurf, laserCloudNonFeature, transformForMap); laserCloudCornerForMap->clear(); laserCloudSurfForMap->clear(); laserCloudNonFeatureForMap->clear(); transform = transformForMap; locker.unlock(); *laserCloudCorner_to_map += *laserCloudCorner; *laserCloudSurf_to_map += *laserCloudSurf; *laserCloudNonFeature_to_map += *laserCloudNonFeature; laserCloudCorner->clear(); laserCloudSurf->clear(); laserCloudNonFeature->clear(); if(map_update_ID % map_skip_frame == 0){ map_manager->MapIncrement(laserCloudCorner_to_map, laserCloudSurf_to_map, laserCloudNonFeature_to_map, transform); laserCloudCorner_to_map->clear(); laserCloudSurf_to_map->clear(); laserCloudNonFeature_to_map->clear(); 
} }else locker.unlock(); std::chrono::milliseconds dura(2); std::this_thread::sleep_for(dura); } } void Estimator::processPointToLine(std::vector& edges, std::vector& vLineFeatures, const pcl::PointCloud::Ptr& laserCloudCorner, const pcl::PointCloud::Ptr& laserCloudCornerLocal, const pcl::KdTreeFLANN::Ptr& kdtreeLocal, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d){ Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity(); Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose(); Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1); if(!vLineFeatures.empty()){ for(const auto& l : vLineFeatures){ auto* e = Cost_NavState_IMU_Line::Create(l.pointOri, l.lineP1, l.lineP2, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); } return; } PointType _pointOri, _pointSel, _coeff; std::vector _pointSearchInd; std::vector _pointSearchSqDis; std::vector _pointSearchInd2; std::vector _pointSearchSqDis2; Eigen::Matrix< double, 3, 3 > _matA1; _matA1.setZero(); int laserCloudCornerStackNum = laserCloudCorner->points.size(); pcl::PointCloud::Ptr kd_pointcloud(new pcl::PointCloud); int debug_num1 = 0; int debug_num2 = 0; int debug_num12 = 0; int debug_num22 = 0; for (int i = 0; i < laserCloudCornerStackNum; i++) { _pointOri = laserCloudCorner->points[i]; MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d); int id = map_manager->FindUsedCornerMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last); if(id == 5000) continue; if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue; if(GlobalCornerMap[id].points.size() > 100) { CornerKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis); if (_pointSearchSqDis[4] < thres_dist) { debug_num1 ++; float cx = 0; float cy = 0; float cz = 0; for (int j = 0; j < 5; j++) { cx += GlobalCornerMap[id].points[_pointSearchInd[j]].x; cy += GlobalCornerMap[id].points[_pointSearchInd[j]].y; cz += 
GlobalCornerMap[id].points[_pointSearchInd[j]].z; } cx /= 5; cy /= 5; cz /= 5; float a11 = 0; float a12 = 0; float a13 = 0; float a22 = 0; float a23 = 0; float a33 = 0; for (int j = 0; j < 5; j++) { float ax = GlobalCornerMap[id].points[_pointSearchInd[j]].x - cx; float ay = GlobalCornerMap[id].points[_pointSearchInd[j]].y - cy; float az = GlobalCornerMap[id].points[_pointSearchInd[j]].z - cz; a11 += ax * ax; a12 += ax * ay; a13 += ax * az; a22 += ay * ay; a23 += ay * az; a33 += az * az; } a11 /= 5; a12 /= 5; a13 /= 5; a22 /= 5; a23 /= 5; a33 /= 5; _matA1(0, 0) = a11; _matA1(0, 1) = a12; _matA1(0, 2) = a13; _matA1(1, 0) = a12; _matA1(1, 1) = a22; _matA1(1, 2) = a23; _matA1(2, 0) = a13; _matA1(2, 1) = a23; _matA1(2, 2) = a33; Eigen::SelfAdjointEigenSolver saes(_matA1); Eigen::Vector3d unit_direction = saes.eigenvectors().col(2); if (saes.eigenvalues()[2] > 3 * saes.eigenvalues()[1]) { debug_num12 ++; float x1 = cx + 0.1 * unit_direction[0]; float y1 = cy + 0.1 * unit_direction[1]; float z1 = cz + 0.1 * unit_direction[2]; float x2 = cx - 0.1 * unit_direction[0]; float y2 = cy - 0.1 * unit_direction[1]; float z2 = cz - 0.1 * unit_direction[2]; Eigen::Vector3d tripod1(x1, y1, z1); Eigen::Vector3d tripod2(x2, y2, z2); auto* e = Cost_NavState_IMU_Line::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), tripod1, tripod2, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); vLineFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), tripod1, tripod2); vLineFeatures.back().ComputeError(m4d); continue; } } } if(laserCloudCornerLocal->points.size() > 20 ){ kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2); if (_pointSearchSqDis2[4] < thres_dist) { debug_num2 ++; float cx = 0; float cy = 0; float cz = 0; for (int j = 0; j < 5; j++) { cx += laserCloudCornerLocal->points[_pointSearchInd2[j]].x; cy += laserCloudCornerLocal->points[_pointSearchInd2[j]].y; cz += 
// NOTE(review): this chunk was collapsed by extraction and most angle-bracket
// contents were stripped (e.g. "Eigen::SelfAdjointEigenSolver saes" and
// "std::vector& edges" have lost their <...> template arguments). The code is
// left byte-identical; restore template parameters from the upstream file.
//
// Tail of Estimator::processPointToLine (start is above this chunk): fit a line
// to the 5 nearest local-map corner neighbours via the 3x3 covariance built in
// a11..a33; if the neighbourhood is line-like (largest eigenvalue > 3x the
// second), sample two points +/-0.1 along the principal direction (tripod1/2)
// and add a point-to-line residual weighted by 1/lidar_m.
laserCloudCornerLocal->points[_pointSearchInd2[j]].z; } cx /= 5; cy /= 5; cz /= 5; float a11 = 0; float a12 = 0; float a13 = 0; float a22 = 0; float a23 = 0; float a33 = 0; for (int j = 0; j < 5; j++) { float ax = laserCloudCornerLocal->points[_pointSearchInd2[j]].x - cx; float ay = laserCloudCornerLocal->points[_pointSearchInd2[j]].y - cy; float az = laserCloudCornerLocal->points[_pointSearchInd2[j]].z - cz; a11 += ax * ax; a12 += ax * ay; a13 += ax * az; a22 += ay * ay; a23 += ay * az; a33 += az * az; } a11 /= 5; a12 /= 5; a13 /= 5; a22 /= 5; a23 /= 5; a33 /= 5; _matA1(0, 0) = a11; _matA1(0, 1) = a12; _matA1(0, 2) = a13; _matA1(1, 0) = a12; _matA1(1, 1) = a22; _matA1(1, 2) = a23; _matA1(2, 0) = a13; _matA1(2, 1) = a23; _matA1(2, 2) = a33; Eigen::SelfAdjointEigenSolver saes(_matA1); Eigen::Vector3d unit_direction = saes.eigenvectors().col(2); if (saes.eigenvalues()[2] > 3 * saes.eigenvalues()[1]) { debug_num22++; float x1 = cx + 0.1 * unit_direction[0]; float y1 = cy + 0.1 * unit_direction[1]; float z1 = cz + 0.1 * unit_direction[2]; float x2 = cx - 0.1 * unit_direction[0]; float y2 = cy - 0.1 * unit_direction[1]; float z2 = cz - 0.1 * unit_direction[2]; Eigen::Vector3d tripod1(x1, y1, z1); Eigen::Vector3d tripod2(x2, y2, z2); auto* e = Cost_NavState_IMU_Line::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), tripod1, tripod2, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); vLineFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), tripod1, tripod2); vLineFeatures.back().ComputeError(m4d); } } } } } void Estimator::processPointToPlan(std::vector& edges, std::vector& vPlanFeatures, const pcl::PointCloud::Ptr& laserCloudSurf, const pcl::PointCloud::Ptr& laserCloudSurfLocal, const pcl::KdTreeFLANN::Ptr& kdtreeLocal, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d){ Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity(); Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose(); Tbl.topRightCorner(3,1) = -1.0
// Estimator::processPointToPlan (begins mid-line above): build scalar
// point-to-plane residuals for each downsampled surf point. Tbl is the inverse
// of exTlb (presumably lidar->body extrinsic — confirm against header).
// Fast path: if vPlanFeatures is already populated from a previous iteration,
// re-create cost functions from the cached plane parameters and return.
// Otherwise, per point: transform to map frame, pick the global surf sub-map
// cell (id == 5000 means "outside map", skip), take 5 NN, and fit the plane
// a*x+b*y+c*z+d=0 by solving the 5x3 system A*x = -1 (least squares), then
// normalize (pa,pb,pc,pd) by the normal's length.
* Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1); if(!vPlanFeatures.empty()){ for(const auto& p : vPlanFeatures){ auto* e = Cost_NavState_IMU_Plan::Create(p.pointOri, p.pa, p.pb, p.pc, p.pd, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); } return; } PointType _pointOri, _pointSel, _coeff; std::vector _pointSearchInd; std::vector _pointSearchSqDis; std::vector _pointSearchInd2; std::vector _pointSearchSqDis2; Eigen::Matrix< double, 5, 3 > _matA0; _matA0.setZero(); Eigen::Matrix< double, 5, 1 > _matB0; _matB0.setOnes(); _matB0 *= -1; Eigen::Matrix< double, 3, 1 > _matX0; _matX0.setZero(); int laserCloudSurfStackNum = laserCloudSurf->points.size(); int debug_num1 = 0; int debug_num2 = 0; int debug_num12 = 0; int debug_num22 = 0; for (int i = 0; i < laserCloudSurfStackNum; i++) { _pointOri = laserCloudSurf->points[i]; MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d); int id = map_manager->FindUsedSurfMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last); if(id == 5000) continue; if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue; if(GlobalSurfMap[id].points.size() > 50) { SurfKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis); if (_pointSearchSqDis[4] < 1.0) { debug_num1 ++; for (int j = 0; j < 5; j++) { _matA0(j, 0) = GlobalSurfMap[id].points[_pointSearchInd[j]].x; _matA0(j, 1) = GlobalSurfMap[id].points[_pointSearchInd[j]].y; _matA0(j, 2) = GlobalSurfMap[id].points[_pointSearchInd[j]].z; } _matX0 = _matA0.colPivHouseholderQr().solve(_matB0); float pa = _matX0(0, 0); float pb = _matX0(1, 0); float pc = _matX0(2, 0); float pd = 1; float ps = std::sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (std::fabs(pa * GlobalSurfMap[id].points[_pointSearchInd[j]].x + pb * GlobalSurfMap[id].points[_pointSearchInd[j]].y + pc * GlobalSurfMap[id].points[_pointSearchInd[j]].z + pd) >
// Plane acceptance: all 5 neighbours must lie within 0.2 m of the fitted
// plane; if accepted, emit the residual and cache the feature. If the global
// sub-map fails (too few points / neighbours too far / bad fit), fall back to
// the same plane fit against the sliding local map via kdtreeLocal.
// The trailing "void Estimator::processPointToPlanVec(...)" opens the
// vector-valued (3-DoF) point-to-plane variant continued on the next line.
0.2) { planeValid = false; break; } } if (planeValid) { debug_num12 ++; auto* e = Cost_NavState_IMU_Plan::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd); vPlanFeatures.back().ComputeError(m4d); continue; } } } if(laserCloudSurfLocal->points.size() > 20 ){ kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2); if (_pointSearchSqDis2[4] < 1.0) { debug_num2++; for (int j = 0; j < 5; j++) { _matA0(j, 0) = laserCloudSurfLocal->points[_pointSearchInd2[j]].x; _matA0(j, 1) = laserCloudSurfLocal->points[_pointSearchInd2[j]].y; _matA0(j, 2) = laserCloudSurfLocal->points[_pointSearchInd2[j]].z; } _matX0 = _matA0.colPivHouseholderQr().solve(_matB0); float pa = _matX0(0, 0); float pb = _matX0(1, 0); float pc = _matX0(2, 0); float pd = 1; float ps = std::sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (std::fabs(pa * laserCloudSurfLocal->points[_pointSearchInd2[j]].x + pb * laserCloudSurfLocal->points[_pointSearchInd2[j]].y + pc * laserCloudSurfLocal->points[_pointSearchInd2[j]].z + pd) > 0.2) { planeValid = false; break; } } if (planeValid) { debug_num22 ++; auto* e = Cost_NavState_IMU_Plan::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd); vPlanFeatures.back().ComputeError(m4d); } } } } } void Estimator::processPointToPlanVec(std::vector& edges, std::vector& vPlanFeatures, const pcl::PointCloud::Ptr& laserCloudSurf, const pcl::PointCloud::Ptr& laserCloudSurfLocal, const pcl::KdTreeFLANN::Ptr& kdtreeLocal, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d){ Eigen::Matrix4d Tbl =
// Estimator::processPointToPlanVec (continued): same neighbour search and
// plane fit as processPointToPlan, but thresholded by the member thres_dist
// instead of the constant 1.0, and producing a vector-valued residual
// (Cost_NavState_IMU_Plan_Vec) with a cached per-feature sqrt_info matrix.
// Fast path below replays cached features when vPlanFeatures is non-empty.
// NOTE(review): several std::vector/Eigen template arguments were stripped by
// extraction on these lines; code kept byte-identical.
Eigen::Matrix4d::Identity(); Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose(); Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1); if(!vPlanFeatures.empty()){ for(const auto& p : vPlanFeatures){ auto* e = Cost_NavState_IMU_Plan_Vec::Create(p.pointOri, p.pointProj, Tbl, p.sqrt_info); edges.push_back(e); } return; } PointType _pointOri, _pointSel, _coeff; std::vector _pointSearchInd; std::vector _pointSearchSqDis; std::vector _pointSearchInd2; std::vector _pointSearchSqDis2; Eigen::Matrix< double, 5, 3 > _matA0; _matA0.setZero(); Eigen::Matrix< double, 5, 1 > _matB0; _matB0.setOnes(); _matB0 *= -1; Eigen::Matrix< double, 3, 1 > _matX0; _matX0.setZero(); int laserCloudSurfStackNum = laserCloudSurf->points.size(); int debug_num1 = 0; int debug_num2 = 0; int debug_num12 = 0; int debug_num22 = 0; for (int i = 0; i < laserCloudSurfStackNum; i++) { _pointOri = laserCloudSurf->points[i]; MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d); int id = map_manager->FindUsedSurfMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last); if(id == 5000) continue; if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue; if(GlobalSurfMap[id].points.size() > 50) { SurfKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis); if (_pointSearchSqDis[4] < thres_dist) { debug_num1 ++; for (int j = 0; j < 5; j++) { _matA0(j, 0) = GlobalSurfMap[id].points[_pointSearchInd[j]].x; _matA0(j, 1) = GlobalSurfMap[id].points[_pointSearchInd[j]].y; _matA0(j, 2) = GlobalSurfMap[id].points[_pointSearchInd[j]].z; } _matX0 = _matA0.colPivHouseholderQr().solve(_matB0); float pa = _matX0(0, 0); float pb = _matX0(1, 0); float pc = _matX0(2, 0); float pd = 1; float ps = std::sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (std::fabs(pa * GlobalSurfMap[id].points[_pointSearchInd[j]].x + pb *
// Accepted global-map plane: project the map-frame point onto the plane
// (point_proj = p - dist * normal). The sqrt_info construction builds, via an
// SVD-based rotation R_svd aligning e1=(1,0,0) with the plane normal omega, an
// information matrix whose normal direction gets full weight 1/lidar_m and
// whose two tangential directions are down-weighted by plan_weight_tan.
GlobalSurfMap[id].points[_pointSearchInd[j]].y + pc * GlobalSurfMap[id].points[_pointSearchInd[j]].z + pd) > 0.2) { planeValid = false; break; } } if (planeValid) { debug_num12 ++; double dist = pa * _pointSel.x + pb * _pointSel.y + pc * _pointSel.z + pd; Eigen::Vector3d omega(pa, pb, pc); Eigen::Vector3d point_proj = Eigen::Vector3d(_pointSel.x,_pointSel.y,_pointSel.z) - (dist * omega); Eigen::Vector3d e1(1, 0, 0); Eigen::Matrix3d J = e1 * omega.transpose(); Eigen::JacobiSVD svd(J, Eigen::ComputeThinU | Eigen::ComputeThinV); Eigen::Matrix3d R_svd = svd.matrixV() * svd.matrixU().transpose(); Eigen::Matrix3d info = (1.0/IMUIntegrator::lidar_m) * Eigen::Matrix3d::Identity(); info(1, 1) *= plan_weight_tan; info(2, 2) *= plan_weight_tan; Eigen::Matrix3d sqrt_info = info * R_svd.transpose(); auto* e = Cost_NavState_IMU_Plan_Vec::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), point_proj, Tbl, sqrt_info); edges.push_back(e); vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), point_proj, sqrt_info); vPlanFeatures.back().ComputeError(m4d); continue; } } } if(laserCloudSurfLocal->points.size() > 20 ){ kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2); if (_pointSearchSqDis2[4] < thres_dist) { debug_num2++; for (int j = 0; j < 5; j++) { _matA0(j, 0) = laserCloudSurfLocal->points[_pointSearchInd2[j]].x; _matA0(j, 1) = laserCloudSurfLocal->points[_pointSearchInd2[j]].y; _matA0(j, 2) = laserCloudSurfLocal->points[_pointSearchInd2[j]].z; } _matX0 = _matA0.colPivHouseholderQr().solve(_matB0); float pa = _matX0(0, 0); float pb = _matX0(1, 0); float pc = _matX0(2, 0); float pd = 1; float ps = std::sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (std::fabs(pa * laserCloudSurfLocal->points[_pointSearchInd2[j]].x + pb * laserCloudSurfLocal->points[_pointSearchInd2[j]].y + pc * laserCloudSurfLocal->points[_pointSearchInd2[j]].z +
// Local-map branch of the vector-valued variant (identical math to the global
// branch above), then the start of Estimator::processNonFeatureICP: scalar
// plane-ICP residuals for points classified as neither corner nor surf. Its
// fast path replays cached vNonFeatures entries.
pd) > 0.2) { planeValid = false; break; } } if (planeValid) { debug_num22 ++; double dist = pa * _pointSel.x + pb * _pointSel.y + pc * _pointSel.z + pd; Eigen::Vector3d omega(pa, pb, pc); Eigen::Vector3d point_proj = Eigen::Vector3d(_pointSel.x,_pointSel.y,_pointSel.z) - (dist * omega); Eigen::Vector3d e1(1, 0, 0); Eigen::Matrix3d J = e1 * omega.transpose(); Eigen::JacobiSVD svd(J, Eigen::ComputeThinU | Eigen::ComputeThinV); Eigen::Matrix3d R_svd = svd.matrixV() * svd.matrixU().transpose(); Eigen::Matrix3d info = (1.0/IMUIntegrator::lidar_m) * Eigen::Matrix3d::Identity(); info(1, 1) *= plan_weight_tan; info(2, 2) *= plan_weight_tan; Eigen::Matrix3d sqrt_info = info * R_svd.transpose(); auto* e = Cost_NavState_IMU_Plan_Vec::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), point_proj, Tbl, sqrt_info); edges.push_back(e); vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), point_proj, sqrt_info); vPlanFeatures.back().ComputeError(m4d); } } } } } void Estimator::processNonFeatureICP(std::vector& edges, std::vector& vNonFeatures, const pcl::PointCloud::Ptr& laserCloudNonFeature, const pcl::PointCloud::Ptr& laserCloudNonFeatureLocal, const pcl::KdTreeFLANN::Ptr& kdtreeLocal, const Eigen::Matrix4d& exTlb, const Eigen::Matrix4d& m4d){ Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity(); Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose(); Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1); if(!vNonFeatures.empty()){ for(const auto& p : vNonFeatures){ auto* e = Cost_NonFeature_ICP::Create(p.pointOri, p.pa, p.pb, p.pc, p.pd, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); } return; } PointType _pointOri, _pointSel, _coeff; std::vector _pointSearchInd; std::vector _pointSearchSqDis; std::vector _pointSearchInd2; std::vector _pointSearchSqDis2; Eigen::Matrix< double, 5, 3 > _matA0; _matA0.setZero(); Eigen::Matrix< double, 5, 1 > _matB0; _matB0.setOnes(); _matB0 *= -1;
// Estimator::processNonFeatureICP (continued): per non-feature point, global
// sub-map branch requires >100 points in the cell and 5th-NN distance
// < 1 * thres_dist; plane fit and 0.2 m validation mirror processPointToPlan.
Eigen::Matrix< double, 3, 1 > _matX0; _matX0.setZero(); int laserCloudNonFeatureStackNum = laserCloudNonFeature->points.size(); for (int i = 0; i < laserCloudNonFeatureStackNum; i++) { _pointOri = laserCloudNonFeature->points[i]; MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d); int id = map_manager->FindUsedNonFeatureMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last); if(id == 5000) continue; if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue; if(GlobalNonFeatureMap[id].points.size() > 100) { NonFeatureKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis); if (_pointSearchSqDis[4] < 1 * thres_dist) { for (int j = 0; j < 5; j++) { _matA0(j, 0) = GlobalNonFeatureMap[id].points[_pointSearchInd[j]].x; _matA0(j, 1) = GlobalNonFeatureMap[id].points[_pointSearchInd[j]].y; _matA0(j, 2) = GlobalNonFeatureMap[id].points[_pointSearchInd[j]].z; } _matX0 = _matA0.colPivHouseholderQr().solve(_matB0); float pa = _matX0(0, 0); float pb = _matX0(1, 0); float pc = _matX0(2, 0); float pd = 1; float ps = std::sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (std::fabs(pa * GlobalNonFeatureMap[id].points[_pointSearchInd[j]].x + pb * GlobalNonFeatureMap[id].points[_pointSearchInd[j]].y + pc * GlobalNonFeatureMap[id].points[_pointSearchInd[j]].z + pd) > 0.2) { planeValid = false; break; } } if(planeValid) { auto* e = Cost_NonFeature_ICP::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); vNonFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd); vNonFeatures.back().ComputeError(m4d); continue; } } } if(laserCloudNonFeatureLocal->points.size() > 20 ){ kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2); if (_pointSearchSqDis2[4] < 1 * thres_dist) { for
// Local-map fallback of processNonFeatureICP, then two small state<->parameter
// marshalling helpers:
//  - vector2double: pack each frame's state into the raw Ceres parameter
//    blocks para_PR (position + SO3 log of orientation) and para_VBias
//    (velocity, gyro bias, accel bias).
//  - double2vector: the exact inverse (SO3 exp back to a unit quaternion).
// Finally the head of EstimateLidarPose opens; it continues on the next line.
(int j = 0; j < 5; j++) { _matA0(j, 0) = laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].x; _matA0(j, 1) = laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].y; _matA0(j, 2) = laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].z; } _matX0 = _matA0.colPivHouseholderQr().solve(_matB0); float pa = _matX0(0, 0); float pb = _matX0(1, 0); float pc = _matX0(2, 0); float pd = 1; float ps = std::sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (std::fabs(pa * laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].x + pb * laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].y + pc * laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].z + pd) > 0.2) { planeValid = false; break; } } if(planeValid) { auto* e = Cost_NonFeature_ICP::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd, Tbl, Eigen::Matrix(1/IMUIntegrator::lidar_m)); edges.push_back(e); vNonFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z), pa, pb, pc, pd); vNonFeatures.back().ComputeError(m4d); } } } } } void Estimator::vector2double(const std::list& lidarFrameList){ int i = 0; for(const auto& l : lidarFrameList){ Eigen::Map> PR(para_PR[i]); PR.segment<3>(0) = l.P; PR.segment<3>(3) = Sophus::SO3d(l.Q).log(); Eigen::Map> VBias(para_VBias[i]); VBias.segment<3>(0) = l.V; VBias.segment<3>(3) = l.bg; VBias.segment<3>(6) = l.ba; i++; } } void Estimator::double2vector(std::list& lidarFrameList){ int i = 0; for(auto& l : lidarFrameList){ Eigen::Map> PR(para_PR[i]); Eigen::Map> VBias(para_VBias[i]); l.P = PR.segment<3>(0); l.Q = Sophus::SO3d::exp(PR.segment<3>(3)).unit_quaternion(); l.V = VBias.segment<3>(0); l.bg = VBias.segment<3>(3); l.ba = VBias.segment<3>(6); i++; } } void Estimator::EstimateLidarPose(std::list& lidarFrameList, const Eigen::Matrix4d& exTlb, const Eigen::Vector3d& gravity, nav_msgs::Odometry& debugInfo){ Eigen::Matrix3d exRbl =
// Estimator::EstimateLidarPose (continued): top-level per-sweep entry point.
// Splits each frame's cloud into corner / surf / non-feature stacks using the
// normal_z tag written by the feature extractor (1.0 = corner, 2.0 = surf,
// 3.0 = non-feature), voxel-downsamples each stack, and runs the sliding-window
// optimization Estimate() when enough map points exist.
exTlb.topLeftCorner(3,3).transpose(); Eigen::Vector3d exPbl = -1.0 * exRbl * exTlb.topRightCorner(3,1); Eigen::Matrix4d transformTobeMapped = Eigen::Matrix4d::Identity(); transformTobeMapped.topLeftCorner(3,3) = lidarFrameList.back().Q * exRbl; transformTobeMapped.topRightCorner(3,1) = lidarFrameList.back().Q * exPbl + lidarFrameList.back().P; int laserCloudCornerFromMapNum = map_manager->get_corner_map()->points.size(); int laserCloudSurfFromMapNum = map_manager->get_surf_map()->points.size(); int laserCloudCornerFromLocalNum = laserCloudCornerFromLocal->points.size(); int laserCloudSurfFromLocalNum = laserCloudSurfFromLocal->points.size(); int stack_count = 0; for(const auto& l : lidarFrameList){ laserCloudCornerLast[stack_count]->clear(); for(const auto& p : l.laserCloud->points){ if(std::fabs(p.normal_z - 1.0) < 1e-5) laserCloudCornerLast[stack_count]->push_back(p); } laserCloudSurfLast[stack_count]->clear(); for(const auto& p : l.laserCloud->points){ if(std::fabs(p.normal_z - 2.0) < 1e-5) laserCloudSurfLast[stack_count]->push_back(p); } laserCloudNonFeatureLast[stack_count]->clear(); for(const auto& p : l.laserCloud->points){ if(std::fabs(p.normal_z - 3.0) < 1e-5) laserCloudNonFeatureLast[stack_count]->push_back(p); } laserCloudCornerStack[stack_count]->clear(); downSizeFilterCorner.setInputCloud(laserCloudCornerLast[stack_count]); downSizeFilterCorner.filter(*laserCloudCornerStack[stack_count]); laserCloudSurfStack[stack_count]->clear(); downSizeFilterSurf.setInputCloud(laserCloudSurfLast[stack_count]); downSizeFilterSurf.filter(*laserCloudSurfStack[stack_count]); laserCloudNonFeatureStack[stack_count]->clear(); downSizeFilterNonFeature.setInputCloud(laserCloudNonFeatureLast[stack_count]); downSizeFilterNonFeature.filter(*laserCloudNonFeatureStack[stack_count]); stack_count++; } if ( ((laserCloudCornerFromMapNum > 0 && laserCloudSurfFromMapNum > 100) || (laserCloudCornerFromLocalNum > 0 && laserCloudSurfFromLocalNum > 100))) { Estimate(lidarFrameList, exTlb,
// After optimization: recompute the lidar-frame map pose from the (now
// refined) front frame, accumulate ground points (normal_y == -1 tag) for the
// first 10 frames to bootstrap the ground-plane constraint, then hand the
// front frame's downsampled stacks to the map-increment stage under mtx_Map.
gravity); } transformTobeMapped = Eigen::Matrix4d::Identity(); transformTobeMapped.topLeftCorner(3,3) = lidarFrameList.front().Q * exRbl; transformTobeMapped.topRightCorner(3,1) = lidarFrameList.front().Q * exPbl + lidarFrameList.front().P;
// If fewer than ten frames of ground cloud have been collected, keep adding.
for(const auto& l : lidarFrameList) { if (init_ground_count <= 10) { PointType temp_point; for(const auto& p : l.laserCloud->points){ if(std::fabs(p.normal_y + 1.0) < 1e-5) { MAP_MANAGER::pointAssociateToMap(&p, &temp_point, transformTobeMapped); initGroundCloud->push_back(temp_point); } } init_ground_count++; if (init_ground_count > 10) {
// The first time ten frames of ground points have accumulated, fit the
// initial ground plane and install it in the ground cost functor.
init_ground_plane_coeff = livox_slam_ware::get_plane_coeffs(initGroundCloud); Cost_NavState_PR_Ground::init_ground_plane_coeff = init_ground_plane_coeff; } } }
// std::cout << "the size of init_ground_plane_coeff " << init_ground_plane_coeff.size() << std::endl;
std::unique_lock locker(mtx_Map); *laserCloudCornerForMap = *laserCloudCornerStack[0]; *laserCloudSurfForMap = *laserCloudSurfStack[0]; *laserCloudNonFeatureForMap = *laserCloudNonFeatureStack[0]; transformForMap = transformTobeMapped; laserCloudCornerFromLocal->clear(); laserCloudSurfFromLocal->clear(); laserCloudNonFeatureFromLocal->clear(); MapIncrementLocal(laserCloudCornerForMap,laserCloudSurfForMap,laserCloudNonFeatureForMap,transformTobeMapped); locker.unlock(); } void Estimator::Estimate(std::list& lidarFrameList, const Eigen::Matrix4d& exTlb, const Eigen::Vector3d& gravity){ int num_corner_map = 0; int num_surf_map = 0; static uint32_t frame_count = 0; int windowSize = lidarFrameList.size(); Eigen::Matrix4d transformTobeMapped = Eigen::Matrix4d::Identity(); Eigen::Matrix3d exRbl = exTlb.topLeftCorner(3,3).transpose(); Eigen::Vector3d exPbl = -1.0 * exRbl * exTlb.topRightCorner(3,1); kdtreeCornerFromLocal->setInputCloud(laserCloudCornerFromLocal); kdtreeSurfFromLocal->setInputCloud(laserCloudSurfFromLocal); kdtreeNonFeatureFromLocal->setInputCloud(laserCloudNonFeatureFromLocal);
// Estimator::Estimate (continued): the core sliding-window LIO optimization.
// NOTE(review): extraction loss is severe on these lines — loop conditions and
// template arguments between '<' and '>' were eaten (e.g. "for(int iterOpt=0;
// iterOptground_plane_coeff)..." has lost the loop bound, the Ceres problem
// setup, and intermediate code). Code is left byte-identical; restore from
// upstream before building.
// First: snapshot the map-manager's per-cell KD-trees and match clouds under
// its mutex (4851 cells), plus the last map-center indices.
std::unique_lock locker3(map_manager->mtx_MapManager); for(int i = 0; i < 4851; i++){ CornerKdMap[i] = map_manager->getCornerKdMap(i); SurfKdMap[i] = map_manager->getSurfKdMap(i); NonFeatureKdMap[i] = map_manager->getNonFeatureKdMap(i); GlobalSurfMap[i] = map_manager->laserCloudSurf_for_match[i]; GlobalCornerMap[i] = map_manager->laserCloudCorner_for_match[i]; GlobalNonFeatureMap[i] = map_manager->laserCloudNonFeature_for_match[i]; } laserCenWidth_last = map_manager->get_laserCloudCenWidth_last(); laserCenHeight_last = map_manager->get_laserCloudCenHeight_last(); laserCenDepth_last = map_manager->get_laserCloudCenDepth_last(); locker3.unlock();
// store point to line features
std::vector> vLineFeatures(windowSize); for(auto& v : vLineFeatures){ v.reserve(2000); }
// store point to plan features
std::vector> vPlanFeatures(windowSize); for(auto& v : vPlanFeatures){ v.reserve(2000); } std::vector> vNonFeatures(windowSize); for(auto& v : vNonFeatures){ v.reserve(2000); } if(windowSize == SLIDEWINDOWSIZE) { plan_weight_tan = 0.0003; thres_dist = 1.0; } else { plan_weight_tan = 0.0; thres_dist = 25.0; }
// execute optimize process (up to max_iters outer relinearization passes)
const int max_iters = 5; for(int iterOpt=0; iterOptground_plane_coeff), nullptr, para_PR[f]); } }
// add IMU CostFunction (preintegration factor between consecutive frames)
for(int f=1; fimuIntegrator, const_cast(gravity), Eigen::LLT> (frame_curr->imuIntegrator.GetCovariance().inverse()) .matrixL().transpose()), nullptr, para_PR[f-1], para_VBias[f-1], para_PR[f], para_VBias[f]); } if (last_marginalization_info){
// construct new marginalization factor from the previous window's prior
auto *marginalization_factor = new MarginalizationFactor(last_marginalization_info); problem.AddResidualBlock(marginalization_factor, nullptr, last_marginalization_parameter_blocks); } Eigen::Quaterniond q_before_opti = lidarFrameList.back().Q; Eigen::Vector3d t_before_opti = lidarFrameList.back().P; std::vector> edgesLine(windowSize); std::vector> edgesPlan(windowSize); std::vector> edgesNon(windowSize); std::thread threads[3]; for(int f=0; fQ *
// Per frame: compute the map pose, then build corner/surf/non-feature
// residuals concurrently in three threads (one per feature type).
exRbl; transformTobeMapped.topRightCorner(3,1) = frame_curr->Q * exPbl + frame_curr->P; threads[0] = std::thread(&Estimator::processPointToLine, this, std::ref(edgesLine[f]), std::ref(vLineFeatures[f]), std::ref(laserCloudCornerStack[f]), std::ref(laserCloudCornerFromLocal), std::ref(kdtreeCornerFromLocal), std::ref(exTlb), std::ref(transformTobeMapped)); threads[1] = std::thread(&Estimator::processPointToPlanVec, this, std::ref(edgesPlan[f]), std::ref(vPlanFeatures[f]), std::ref(laserCloudSurfStack[f]), std::ref(laserCloudSurfFromLocal), std::ref(kdtreeSurfFromLocal), std::ref(exTlb), std::ref(transformTobeMapped)); threads[2] = std::thread(&Estimator::processNonFeatureICP, this, std::ref(edgesNon[f]), std::ref(vNonFeatures[f]), std::ref(laserCloudNonFeatureStack[f]), std::ref(laserCloudNonFeatureFromLocal), std::ref(kdtreeNonFeatureFromLocal), std::ref(exTlb), std::ref(transformTobeMapped)); threads[0].join(); threads[1].join(); threads[2].join(); } int cntSurf = 0; int cntCorner = 0; int cntNon = 0; if(windowSize == SLIDEWINDOWSIZE) { thres_dist = 1.0; if(iterOpt == 0){ for(int f=0; f 1e-5){ problem.AddResidualBlock(e, loss_function, para_PR[f]); vPlanFeatures[f][cntFtu].valid = true; }else{ vPlanFeatures[f][cntFtu].valid = false; } cntFtu++; cntSurf++; } cntFtu = 0; for (auto &e : edgesNon[f]) { if(std::fabs(vNonFeatures[f][cntFtu].error) > 1e-5){ problem.AddResidualBlock(e, loss_function, para_PR[f]); vNonFeatures[f][cntFtu].valid = true; }else{ vNonFeatures[f][cntFtu].valid = false; } cntFtu++; cntNon++; } } }else{ for(int f=0; f 1e-5){ problem.AddResidualBlock(e, loss_function, para_PR[f]); vLineFeatures[f][cntFtu].valid = true; }else{ vLineFeatures[f][cntFtu].valid = false; } cntFtu++; cntCorner++; } cntFtu = 0; for (auto &e : edgesPlan[f]) { if(std::fabs(vPlanFeatures[f][cntFtu].error) > 1e-5){ problem.AddResidualBlock(e, loss_function, para_PR[f]); vPlanFeatures[f][cntFtu].valid = true; }else{ vPlanFeatures[f][cntFtu].valid = false; } cntFtu++; cntSurf++;
// Second half of residual gating (non-features), then a Ceres solve
// (DENSE_SCHUR + DOGLEG, 10 inner iterations, 6 threads) and write-back of the
// optimized states into lidarFrameList.
} cntFtu = 0; for (auto &e : edgesNon[f]) { if(std::fabs(vNonFeatures[f][cntFtu].error) > 1e-5){ problem.AddResidualBlock(e, loss_function, para_PR[f]); vNonFeatures[f][cntFtu].valid = true; }else{ vNonFeatures[f][cntFtu].valid = false; } cntFtu++; cntNon++; } } } ceres::Solver::Options options; options.linear_solver_type = ceres::DENSE_SCHUR; options.trust_region_strategy_type = ceres::DOGLEG; options.max_num_iterations = 10; options.minimizer_progress_to_stdout = false; options.num_threads = 6; ceres::Solver::Summary summary; ceres::Solve(options, &problem, &summary); double2vector(lidarFrameList); Eigen::Quaterniond q_after_opti = lidarFrameList.back().Q; Eigen::Vector3d t_after_opti = lidarFrameList.back().P; Eigen::Vector3d V_after_opti = lidarFrameList.back().V; double deltaR = (q_before_opti.angularDistance(q_after_opti)) * 180.0 / M_PI; double deltaT = (t_before_opti - t_after_opti).norm(); if (deltaR < 0.05 && deltaT < 0.05 || (iterOpt+1) == max_iters){ ROS_INFO("Frame: %d\n",frame_count++); if(windowSize != SLIDEWINDOWSIZE) break;
// apply marginalization: fold the oldest frame's prior, its IMU factor, the
// ground factor, and its feature residuals into a new MarginalizationInfo,
// dropping parameter blocks para_PR[0]/para_VBias[0].
auto *marginalization_info = new MarginalizationInfo(); if (last_marginalization_info){ std::vector drop_set; for (int i = 0; i < static_cast(last_marginalization_parameter_blocks.size()); i++) { if (last_marginalization_parameter_blocks[i] == para_PR[0] || last_marginalization_parameter_blocks[i] == para_VBias[0]) drop_set.push_back(i); } auto *marginalization_factor = new MarginalizationFactor(last_marginalization_info); auto *residual_block_info = new ResidualBlockInfo(marginalization_factor, nullptr, last_marginalization_parameter_blocks, drop_set); marginalization_info->addResidualBlockInfo(residual_block_info); } auto frame_curr = lidarFrameList.begin(); std::advance(frame_curr, 1); ceres::CostFunction* IMU_Cost = Cost_NavState_PRV_Bias::Create(frame_curr->imuIntegrator, const_cast(gravity), Eigen::LLT> (frame_curr->imuIntegrator.GetCovariance().inverse()) .matrixL().transpose()); auto
// Remaining marginalization plumbing: IMU residual block for the frame being
// marginalized, the ground-constraint residual, and rebuilt feature residuals
// for frame 0 (re-extracted in three threads as above).
*residual_block_info = new ResidualBlockInfo(IMU_Cost, nullptr, std::vector{para_PR[0], para_VBias[0], para_PR[1], para_VBias[1]}, std::vector{0, 1}); marginalization_info->addResidualBlockInfo(residual_block_info);
// marginalize the Ground constraint
if (init_ground_plane_coeff.size()) { ceres::CostFunction* Ground_Cost = Cost_NavState_PR_Ground::Create(lidarFrameList.begin()->ground_plane_coeff); auto *ground_residual_block_info = new ResidualBlockInfo(Ground_Cost, nullptr, std::vector{para_PR[0]}, std::vector{0}); marginalization_info->addResidualBlockInfo(ground_residual_block_info); } int f = 0; transformTobeMapped = Eigen::Matrix4d::Identity(); transformTobeMapped.topLeftCorner(3,3) = frame_curr->Q * exRbl; transformTobeMapped.topRightCorner(3,1) = frame_curr->Q * exPbl + frame_curr->P; edgesLine[f].clear(); edgesPlan[f].clear(); edgesNon[f].clear(); threads[0] = std::thread(&Estimator::processPointToLine, this, std::ref(edgesLine[f]), std::ref(vLineFeatures[f]), std::ref(laserCloudCornerStack[f]), std::ref(laserCloudCornerFromLocal), std::ref(kdtreeCornerFromLocal), std::ref(exTlb), std::ref(transformTobeMapped)); threads[1] = std::thread(&Estimator::processPointToPlanVec, this, std::ref(edgesPlan[f]), std::ref(vPlanFeatures[f]), std::ref(laserCloudSurfStack[f]), std::ref(laserCloudSurfFromLocal), std::ref(kdtreeSurfFromLocal), std::ref(exTlb), std::ref(transformTobeMapped)); threads[2] = std::thread(&Estimator::processNonFeatureICP, this, std::ref(edgesNon[f]), std::ref(vNonFeatures[f]), std::ref(laserCloudNonFeatureStack[f]), std::ref(laserCloudNonFeatureFromLocal), std::ref(kdtreeNonFeatureFromLocal), std::ref(exTlb), std::ref(transformTobeMapped)); threads[0].join(); threads[1].join(); threads[2].join(); int cntFtu = 0; for (auto &e : edgesLine[f]) { if(vLineFeatures[f][cntFtu].valid){ auto *residual_block_info = new ResidualBlockInfo(e, nullptr, std::vector{para_PR[0]}, std::vector{0}); marginalization_info->addResidualBlockInfo(residual_block_info); }
// Finish marginalization (preMarginalize/marginalize), slide the parameter
// addresses one slot left (addr_shift), keep the new prior, and exit the outer
// loop. If the window is not yet full, just clear cached features and iterate.
// The trailing "void Estimator::MapIncrementLocal(...)" opens the local-map
// increment routine continued on the next line.
cntFtu++; } cntFtu = 0; for (auto &e : edgesPlan[f]) { if(vPlanFeatures[f][cntFtu].valid){ auto *residual_block_info = new ResidualBlockInfo(e, nullptr, std::vector{para_PR[0]}, std::vector{0}); marginalization_info->addResidualBlockInfo(residual_block_info); } cntFtu++; } cntFtu = 0; for (auto &e : edgesNon[f]) { if(vNonFeatures[f][cntFtu].valid){ auto *residual_block_info = new ResidualBlockInfo(e, nullptr, std::vector{para_PR[0]}, std::vector{0}); marginalization_info->addResidualBlockInfo(residual_block_info); } cntFtu++; } marginalization_info->preMarginalize(); marginalization_info->marginalize(); std::unordered_map addr_shift; for (int i = 1; i < SLIDEWINDOWSIZE; i++) { addr_shift[reinterpret_cast(para_PR[i])] = para_PR[i - 1]; addr_shift[reinterpret_cast(para_VBias[i])] = para_VBias[i - 1]; } std::vector parameter_blocks = marginalization_info->getParameterBlocks(addr_shift); delete last_marginalization_info; last_marginalization_info = marginalization_info; last_marginalization_parameter_blocks = parameter_blocks; break; } if(windowSize != SLIDEWINDOWSIZE) { for(int f=0; f::Ptr& laserCloudCornerStack, const pcl::PointCloud::Ptr& laserCloudSurfStack, const pcl::PointCloud::Ptr& laserCloudNonFeatureStack, const Eigen::Matrix4d& transformTobeMapped){ int laserCloudCornerStackNum = laserCloudCornerStack->points.size(); int laserCloudSurfStackNum = laserCloudSurfStack->points.size(); int laserCloudNonFeatureStackNum = laserCloudNonFeatureStack->points.size(); PointType pointSel; PointType pointSel2; size_t Id = localMapID % localMapWindowSize; localCornerMap[Id]->clear(); localSurfMap[Id]->clear(); localNonFeatureMap[Id]->clear(); for (int i = 0; i < laserCloudCornerStackNum; i++) { MAP_MANAGER::pointAssociateToMap(&laserCloudCornerStack->points[i], &pointSel, transformTobeMapped); localCornerMap[Id]->push_back(pointSel); } for (int i = 0; i < laserCloudSurfStackNum; i++) { MAP_MANAGER::pointAssociateToMap(&laserCloudSurfStack->points[i], &pointSel2,
transformTobeMapped); localSurfMap[Id]->push_back(pointSel2); } for (int i = 0; i < laserCloudNonFeatureStackNum; i++) { MAP_MANAGER::pointAssociateToMap(&laserCloudNonFeatureStack->points[i], &pointSel2, transformTobeMapped); localNonFeatureMap[Id]->push_back(pointSel2); } for (int i = 0; i < localMapWindowSize; i++) { *laserCloudCornerFromLocal += *localCornerMap[i]; *laserCloudSurfFromLocal += *localSurfMap[i]; *laserCloudNonFeatureFromLocal += *localNonFeatureMap[i]; } pcl::PointCloud::Ptr temp(new pcl::PointCloud()); downSizeFilterCorner.setInputCloud(laserCloudCornerFromLocal); downSizeFilterCorner.filter(*temp); laserCloudCornerFromLocal = temp; pcl::PointCloud::Ptr temp2(new pcl::PointCloud()); downSizeFilterSurf.setInputCloud(laserCloudSurfFromLocal); downSizeFilterSurf.filter(*temp2); laserCloudSurfFromLocal = temp2; pcl::PointCloud::Ptr temp3(new pcl::PointCloud()); downSizeFilterNonFeature.setInputCloud(laserCloudNonFeatureFromLocal); downSizeFilterNonFeature.filter(*temp3); laserCloudNonFeatureFromLocal = temp3; localMapID ++; } ================================================ FILE: LIO-Livox/src/lio/IMUIntegrator.cpp ================================================ #include "IMUIntegrator/IMUIntegrator.h" IMUIntegrator::IMUIntegrator(){ Reset(); noise.setZero(); noise.block<3, 3>(0, 0) = Eigen::Matrix3d::Identity() * gyr_n * gyr_n; noise.block<3, 3>(3, 3) = Eigen::Matrix3d::Identity() * acc_n * acc_n; noise.block<3, 3>(6, 6) = Eigen::Matrix3d::Identity() * gyr_w * gyr_w; noise.block<3, 3>(9, 9) = Eigen::Matrix3d::Identity() * acc_w * acc_w; } /** \brief constructor of IMUIntegrator * \param[in] vIMU: IMU messages need to be integrated */ IMUIntegrator::IMUIntegrator(std::vector vIMU): vimuMsg(std::move(vIMU)){ Reset(); noise.setZero(); noise.block<3, 3>(0, 0) = Eigen::Matrix3d::Identity() * gyr_n * gyr_n; noise.block<3, 3>(3, 3) = Eigen::Matrix3d::Identity() * acc_n * acc_n; noise.block<3, 3>(6, 6) = Eigen::Matrix3d::Identity() * gyr_w * gyr_w; 
// IMUIntegrator continued: Reset() zeroes the preintegrated delta state
// (dq/dp/dv/dtime), covariance, and linearization-point biases; the following
// one-line accessors expose them. GyroIntegration() composes only the rotation
// deltas from the buffered gyro samples (keeping the quaternion in the w>=0
// hemisphere and normalized).
// NOTE(review): some template arguments on these lines were stripped by
// extraction (e.g. "const Eigen::Matrix& ... GetCovariance()"); code kept
// byte-identical.
noise.block<3, 3>(9, 9) = Eigen::Matrix3d::Identity() * acc_w * acc_w; } void IMUIntegrator::Reset(){ dq.setIdentity(); dp.setZero(); dv.setZero(); dtime = 0; covariance.setZero(); jacobian.setIdentity(); linearized_bg.setZero(); linearized_ba.setZero(); } const Eigen::Quaterniond & IMUIntegrator::GetDeltaQ() const {return dq;} const Eigen::Vector3d & IMUIntegrator::GetDeltaP() const {return dp;} const Eigen::Vector3d & IMUIntegrator::GetDeltaV() const {return dv;} const double & IMUIntegrator::GetDeltaTime() const {return dtime;} const Eigen::Vector3d & IMUIntegrator::GetBiasGyr() const {return linearized_bg;} const Eigen::Vector3d& IMUIntegrator::GetBiasAcc() const {return linearized_ba;} const Eigen::Matrix& IMUIntegrator::GetCovariance(){return covariance;} const Eigen::Matrix & IMUIntegrator::GetJacobian() const {return jacobian;} void IMUIntegrator::PushIMUMsg(const sensor_msgs::ImuConstPtr& imu){ vimuMsg.push_back(imu); } void IMUIntegrator::PushIMUMsg(const std::vector& vimu){ vimuMsg.insert(vimuMsg.end(), vimu.begin(), vimu.end()); } const std::vector & IMUIntegrator::GetIMUMsg() const {return vimuMsg;} void IMUIntegrator::GyroIntegration(double lastTime){ double current_time = lastTime; for(auto & imu : vimuMsg){ Eigen::Vector3d gyr; gyr << imu->angular_velocity.x, imu->angular_velocity.y, imu->angular_velocity.z; double dt = imu->header.stamp.toSec() - current_time; ROS_ASSERT(dt >= 0); Eigen::Matrix3d dR = Sophus::SO3d::exp(gyr*dt).matrix(); Eigen::Quaterniond qr(dq*dR); if (qr.w()<0) qr.coeffs() *= -1; dq = qr.normalized(); current_time = imu->header.stamp.toSec(); } } void IMUIntegrator::PreIntegration(double lastTime, const Eigen::Vector3d& bg, const Eigen::Vector3d& ba){ Reset(); linearized_bg = bg; linearized_ba = ba; double current_time = lastTime; for(auto & imu : vimuMsg){ Eigen::Vector3d gyr; gyr << imu->angular_velocity.x, imu->angular_velocity.y, imu->angular_velocity.z; Eigen::Vector3d acc; acc << imu->linear_acceleration.x * gnorm,
// PreIntegration (continued): standard IMU preintegration. Per sample:
// bias-correct gyro/accel (accel scaled by gnorm), form the rotation increment
// dR = exp(gyr*dt) with its right Jacobian Jr (closed form when the angle is
// above 1e-5, identity otherwise), propagate the error-state transition A and
// noise input B matrices, update the bias jacobian (jacobian = A*jacobian) and
// covariance (A*cov*A' + B*noise*B'), then integrate dp/dv/dq. The quaternion
// is kept in the w>=0 hemisphere and renormalized each step.
// GetAverageAcc(): mean of (at most the first 31) scaled accel samples —
// NOTE(review): divides by i, which is 0 if vimuMsg is empty; callers must
// ensure at least one sample.
imu->linear_acceleration.y * gnorm, imu->linear_acceleration.z * gnorm; double dt = imu->header.stamp.toSec() - current_time; if(dt <= 0 ) ROS_WARN("dt <= 0"); gyr -= bg; acc -= ba; double dt2 = dt*dt; Eigen::Vector3d gyr_dt = gyr*dt; Eigen::Matrix3d dR = Sophus::SO3d::exp(gyr_dt).matrix(); Eigen::Matrix3d Jr = Eigen::Matrix3d::Identity(); double gyr_dt_norm = gyr_dt.norm(); if(gyr_dt_norm > 0.00001){ Eigen::Vector3d k = gyr_dt.normalized(); Eigen::Matrix3d K = Sophus::SO3d::hat(k); Jr = Eigen::Matrix3d::Identity() - (1-cos(gyr_dt_norm))/gyr_dt_norm*K + (1-sin(gyr_dt_norm)/gyr_dt_norm)*K*K; } Eigen::Matrix A = Eigen::Matrix::Identity(); A.block<3,3>(0,3) = -0.5*dq.matrix()*Sophus::SO3d::hat(acc)*dt2; A.block<3,3>(0,6) = Eigen::Matrix3d::Identity()*dt; A.block<3,3>(0,12) = -0.5*dq.matrix()*dt2; A.block<3,3>(3,3) = dR.transpose(); A.block<3,3>(3,9) = - Jr*dt; A.block<3,3>(6,3) = -dq.matrix()*Sophus::SO3d::hat(acc)*dt; A.block<3,3>(6,12) = -dq.matrix()*dt; Eigen::Matrix B = Eigen::Matrix::Zero(); B.block<3,3>(0,3) = 0.5*dq.matrix()*dt2; B.block<3,3>(3,0) = Jr*dt; B.block<3,3>(6,3) = dq.matrix()*dt; B.block<3,3>(9,6) = Eigen::Matrix3d::Identity()*dt; B.block<3,3>(12,9) = Eigen::Matrix3d::Identity()*dt; jacobian = A * jacobian; covariance = A * covariance * A.transpose() + B * noise * B.transpose(); dp += dv*dt + 0.5*dq.matrix()*acc*dt2; dv += dq.matrix()*acc*dt; Eigen::Matrix3d m3dR = dq.matrix()*dR; Eigen::Quaterniond qtmp(m3dR); if (qtmp.w()<0) qtmp.coeffs() *= -1; dq = qtmp.normalized(); dtime += dt; current_time = imu->header.stamp.toSec(); } } Eigen::Vector3d IMUIntegrator::GetAverageAcc() { int i = 0; Eigen::Vector3d sum_acc(0, 0, 0); for(auto & imu : vimuMsg){ Eigen::Vector3d acc; acc << imu->linear_acceleration.x * gnorm, imu->linear_acceleration.y * gnorm, imu->linear_acceleration.z * gnorm; sum_acc += acc; i++; if(i > 30) break; } return sum_acc / i; } ================================================ FILE: LIO-Livox/src/lio/LidarFeatureExtractor.cpp
================================================ #include "LidarFeatureExtractor/LidarFeatureExtractor.h" LidarFeatureExtractor::LidarFeatureExtractor(int n_scans,int NumCurvSize,float DistanceFaraway,int NumFlat, int PartNum,float FlatThreshold,float BreakCornerDis,float LidarNearestDis,float KdTreeCornerOutlierDis) :N_SCANS(n_scans), thNumCurvSize(NumCurvSize), thDistanceFaraway(DistanceFaraway), thNumFlat(NumFlat), thPartNum(PartNum), thFlatThreshold(FlatThreshold), thBreakCornerDis(BreakCornerDis), thLidarNearestDis(LidarNearestDis){ vlines.resize(N_SCANS); for(auto & ptr : vlines){ ptr.reset(new pcl::PointCloud()); } vcorner.resize(N_SCANS); vsurf.resize(N_SCANS); } bool LidarFeatureExtractor::plane_judge(const std::vector& point_list,const int plane_threshold) { int num = point_list.size(); float cx = 0; float cy = 0; float cz = 0; for (int j = 0; j < num; j++) { cx += point_list[j].x; cy += point_list[j].y; cz += point_list[j].z; } cx /= num; cy /= num; cz /= num; //mean square error float a11 = 0; float a12 = 0; float a13 = 0; float a22 = 0; float a23 = 0; float a33 = 0; for (int j = 0; j < num; j++) { float ax = point_list[j].x - cx; float ay = point_list[j].y - cy; float az = point_list[j].z - cz; a11 += ax * ax; a12 += ax * ay; a13 += ax * az; a22 += ay * ay; a23 += ay * az; a33 += az * az; } a11 /= num; a12 /= num; a13 /= num; a22 /= num; a23 /= num; a33 /= num; Eigen::Matrix< double, 3, 3 > _matA1; _matA1.setZero(); Eigen::Matrix< double, 3, 1 > _matD1; _matD1.setZero(); Eigen::Matrix< double, 3, 3 > _matV1; _matV1.setZero(); _matA1(0, 0) = a11; _matA1(0, 1) = a12; _matA1(0, 2) = a13; _matA1(1, 0) = a12; _matA1(1, 1) = a22; _matA1(1, 2) = a23; _matA1(2, 0) = a13; _matA1(2, 1) = a23; _matA1(2, 2) = a33; Eigen::JacobiSVD svd(_matA1, Eigen::ComputeThinU | Eigen::ComputeThinV); _matD1 = svd.singularValues(); _matV1 = svd.matrixU(); if (_matD1(0, 0) < plane_threshold * _matD1(1, 0)) { return true; } else{ return false; } } void 
LidarFeatureExtractor::detectFeaturePoint(pcl::PointCloud::Ptr& cloud, std::vector& pointsLessSharp, std::vector& pointsLessFlat){ int CloudFeatureFlag[20000]; float cloudCurvature[20000]; float cloudDepth[20000]; int cloudSortInd[20000]; float cloudReflect[20000]; int reflectSortInd[20000]; int cloudAngle[20000]; pcl::PointCloud::Ptr& laserCloudIn = cloud; int cloudSize = laserCloudIn->points.size(); PointType point; pcl::PointCloud::Ptr _laserCloud(new pcl::PointCloud()); _laserCloud->reserve(cloudSize); for (int i = 0; i < cloudSize; i++) { point.x = laserCloudIn->points[i].x; point.y = laserCloudIn->points[i].y; point.z = laserCloudIn->points[i].z; #ifdef UNDISTORT point.normal_x = laserCloudIn.points[i].normal_x; #else point.normal_x = 1.0; #endif point.intensity = laserCloudIn->points[i].intensity; if (!pcl_isfinite(point.x) || !pcl_isfinite(point.y) || !pcl_isfinite(point.z)) { continue; } _laserCloud->push_back(point); CloudFeatureFlag[i] = 0; } cloudSize = _laserCloud->size(); int debugnum1 = 0; int debugnum2 = 0; int debugnum3 = 0; int debugnum4 = 0; int debugnum5 = 0; int count_num = 1; bool left_surf_flag = false; bool right_surf_flag = false; //---------------------------------------- surf feature extract --------------------------------------------- int scanStartInd = 5; int scanEndInd = cloudSize - 6; int thDistanceFaraway_fea = 0; for (int i = 5; i < cloudSize - 5; i ++ ) { float diffX = 0; float diffY = 0; float diffZ = 0; float dis = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x + _laserCloud->points[i].y * _laserCloud->points[i].y + _laserCloud->points[i].z * _laserCloud->points[i].z); Eigen::Vector3d pt_last(_laserCloud->points[i-1].x, _laserCloud->points[i-1].y, _laserCloud->points[i-1].z); Eigen::Vector3d pt_cur(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z); Eigen::Vector3d pt_next(_laserCloud->points[i+1].x, _laserCloud->points[i+1].y, _laserCloud->points[i+1].z); double angle_last = 
(pt_last-pt_cur).dot(pt_cur) / ((pt_last-pt_cur).norm()*pt_cur.norm()); double angle_next = (pt_next-pt_cur).dot(pt_cur) / ((pt_next-pt_cur).norm()*pt_cur.norm()); if (dis > thDistanceFaraway || (fabs(angle_last) > 0.966 && fabs(angle_next) > 0.966)) { thNumCurvSize = 2; } else { thNumCurvSize = 3; } if(fabs(angle_last) > 0.966 && fabs(angle_next) > 0.966) { cloudAngle[i] = 1; } float diffR = -2 * thNumCurvSize * _laserCloud->points[i].intensity; for (int j = 1; j <= thNumCurvSize; ++j) { diffX += _laserCloud->points[i - j].x + _laserCloud->points[i + j].x; diffY += _laserCloud->points[i - j].y + _laserCloud->points[i + j].y; diffZ += _laserCloud->points[i - j].z + _laserCloud->points[i + j].z; diffR += _laserCloud->points[i - j].intensity + _laserCloud->points[i + j].intensity; } diffX -= 2 * thNumCurvSize * _laserCloud->points[i].x; diffY -= 2 * thNumCurvSize * _laserCloud->points[i].y; diffZ -= 2 * thNumCurvSize * _laserCloud->points[i].z; cloudDepth[i] = dis; cloudCurvature[i] = diffX * diffX + diffY * diffY + diffZ * diffZ;// / (2 * thNumCurvSize * dis + 1e-3); cloudSortInd[i] = i; cloudReflect[i] = diffR; reflectSortInd[i] = i; } for (int j = 0; j < thPartNum; j++) { int sp = scanStartInd + (scanEndInd - scanStartInd) * j / thPartNum; int ep = scanStartInd + (scanEndInd - scanStartInd) * (j + 1) / thPartNum - 1; // sort the curvatures from small to large for (int k = sp + 1; k <= ep; k++) { for (int l = k; l >= sp + 1; l--) { if (cloudCurvature[cloudSortInd[l]] < cloudCurvature[cloudSortInd[l - 1]]) { int temp = cloudSortInd[l - 1]; cloudSortInd[l - 1] = cloudSortInd[l]; cloudSortInd[l] = temp; } } } // sort the reflectivity from small to large for (int k = sp + 1; k <= ep; k++) { for (int l = k; l >= sp + 1; l--) { if (cloudReflect[reflectSortInd[l]] < cloudReflect[reflectSortInd[l - 1]]) { int temp = reflectSortInd[l - 1]; reflectSortInd[l - 1] = reflectSortInd[l]; reflectSortInd[l] = temp; } } } int smallestPickedNum = 1; int sharpestPickedNum = 1; for 
(int k = sp; k <= ep; k++) { int ind = cloudSortInd[k]; if (CloudFeatureFlag[ind] != 0) continue; if (cloudCurvature[ind] < thFlatThreshold * cloudDepth[ind] * thFlatThreshold * cloudDepth[ind]) { CloudFeatureFlag[ind] = 3; for (int l = 1; l <= thNumCurvSize; l++) { float diffX = _laserCloud->points[ind + l].x - _laserCloud->points[ind + l - 1].x; float diffY = _laserCloud->points[ind + l].y - _laserCloud->points[ind + l - 1].y; float diffZ = _laserCloud->points[ind + l].z - _laserCloud->points[ind + l - 1].z; if (diffX * diffX + diffY * diffY + diffZ * diffZ > 0.02 || cloudDepth[ind] > thDistanceFaraway) { break; } CloudFeatureFlag[ind + l] = 1; } for (int l = -1; l >= -thNumCurvSize; l--) { float diffX = _laserCloud->points[ind + l].x - _laserCloud->points[ind + l + 1].x; float diffY = _laserCloud->points[ind + l].y - _laserCloud->points[ind + l + 1].y; float diffZ = _laserCloud->points[ind + l].z - _laserCloud->points[ind + l + 1].z; if (diffX * diffX + diffY * diffY + diffZ * diffZ > 0.02 || cloudDepth[ind] > thDistanceFaraway) { break; } CloudFeatureFlag[ind + l] = 1; } } } for (int k = sp; k <= ep; k++) { int ind = cloudSortInd[k]; if(((CloudFeatureFlag[ind] == 3) && (smallestPickedNum <= thNumFlat)) || ((CloudFeatureFlag[ind] == 3) && (cloudDepth[ind] > thDistanceFaraway)) || cloudAngle[ind] == 1){ smallestPickedNum ++; CloudFeatureFlag[ind] = 2; if(cloudDepth[ind] > thDistanceFaraway) { thDistanceFaraway_fea++; } } int idx = reflectSortInd[k]; if(cloudCurvature[idx] < 0.7 * thFlatThreshold * cloudDepth[idx] * thFlatThreshold * cloudDepth[idx] && sharpestPickedNum <= 3 && cloudReflect[idx] > 20.0){ sharpestPickedNum ++; CloudFeatureFlag[idx] = 300; } } } //---------------------------------------- line feature where surfaces meet ------------------------------------- for (int i = 5; i < cloudSize - 5; i += count_num ) { float depth = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x + _laserCloud->points[i].y * _laserCloud->points[i].y + 
_laserCloud->points[i].z * _laserCloud->points[i].z); //left curvature float ldiffX = _laserCloud->points[i - 4].x + _laserCloud->points[i - 3].x - 4 * _laserCloud->points[i - 2].x + _laserCloud->points[i - 1].x + _laserCloud->points[i].x; float ldiffY = _laserCloud->points[i - 4].y + _laserCloud->points[i - 3].y - 4 * _laserCloud->points[i - 2].y + _laserCloud->points[i - 1].y + _laserCloud->points[i].y; float ldiffZ = _laserCloud->points[i - 4].z + _laserCloud->points[i - 3].z - 4 * _laserCloud->points[i - 2].z + _laserCloud->points[i - 1].z + _laserCloud->points[i].z; float left_curvature = ldiffX * ldiffX + ldiffY * ldiffY + ldiffZ * ldiffZ; if(left_curvature < thFlatThreshold * depth){ std::vector left_list; for(int j = -4; j < 0; j++){ left_list.push_back(_laserCloud->points[i + j]); } left_surf_flag = true; } else{ left_surf_flag = false; } //right curvature float rdiffX = _laserCloud->points[i + 4].x + _laserCloud->points[i + 3].x - 4 * _laserCloud->points[i + 2].x + _laserCloud->points[i + 1].x + _laserCloud->points[i].x; float rdiffY = _laserCloud->points[i + 4].y + _laserCloud->points[i + 3].y - 4 * _laserCloud->points[i + 2].y + _laserCloud->points[i + 1].y + _laserCloud->points[i].y; float rdiffZ = _laserCloud->points[i + 4].z + _laserCloud->points[i + 3].z - 4 * _laserCloud->points[i + 2].z + _laserCloud->points[i + 1].z + _laserCloud->points[i].z; float right_curvature = rdiffX * rdiffX + rdiffY * rdiffY + rdiffZ * rdiffZ; if(right_curvature < thFlatThreshold * depth){ std::vector right_list; for(int j = 1; j < 5; j++){ right_list.push_back(_laserCloud->points[i + j]); } count_num = 4; right_surf_flag = true; } else{ count_num = 1; right_surf_flag = false; } //calculate the included angle if(left_surf_flag && right_surf_flag){ debugnum4 ++; Eigen::Vector3d norm_left(0,0,0); Eigen::Vector3d norm_right(0,0,0); for(int k = 1;k<5;k++){ Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i - k].x - _laserCloud->points[i].x, _laserCloud->points[i - 
k].y - _laserCloud->points[i].y, _laserCloud->points[i - k].z - _laserCloud->points[i].z); tmp.normalize(); norm_left += (k/10.0)* tmp; } for(int k = 1;k<5;k++){ Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i + k].x - _laserCloud->points[i].x, _laserCloud->points[i + k].y - _laserCloud->points[i].y, _laserCloud->points[i + k].z - _laserCloud->points[i].z); tmp.normalize(); norm_right += (k/10.0)* tmp; } //calculate the angle between this group and the previous group double cc = fabs( norm_left.dot(norm_right) / (norm_left.norm()*norm_right.norm()) ); //calculate the maximum distance, the distance cannot be too small Eigen::Vector3d last_tmp = Eigen::Vector3d(_laserCloud->points[i - 4].x - _laserCloud->points[i].x, _laserCloud->points[i - 4].y - _laserCloud->points[i].y, _laserCloud->points[i - 4].z - _laserCloud->points[i].z); Eigen::Vector3d current_tmp = Eigen::Vector3d(_laserCloud->points[i + 4].x - _laserCloud->points[i].x, _laserCloud->points[i + 4].y - _laserCloud->points[i].y, _laserCloud->points[i + 4].z - _laserCloud->points[i].z); double last_dis = last_tmp.norm(); double current_dis = current_tmp.norm(); if(cc < 0.5 && last_dis > 0.05 && current_dis > 0.05 ){ // debugnum5 ++; CloudFeatureFlag[i] = 150; } } } //--------------------------------------------------- break points --------------------------------------------- for(int i = 5; i < cloudSize - 5; i ++){ float diff_left[2]; float diff_right[2]; float depth = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x + _laserCloud->points[i].y * _laserCloud->points[i].y + _laserCloud->points[i].z * _laserCloud->points[i].z); for(int count = 1; count < 3; count++ ){ float diffX1 = _laserCloud->points[i + count].x - _laserCloud->points[i].x; float diffY1 = _laserCloud->points[i + count].y - _laserCloud->points[i].y; float diffZ1 = _laserCloud->points[i + count].z - _laserCloud->points[i].z; diff_right[count - 1] = sqrt(diffX1 * diffX1 + diffY1 * diffY1 + diffZ1 * diffZ1); float diffX2 = 
_laserCloud->points[i - count].x - _laserCloud->points[i].x; float diffY2 = _laserCloud->points[i - count].y - _laserCloud->points[i].y; float diffZ2 = _laserCloud->points[i - count].z - _laserCloud->points[i].z; diff_left[count - 1] = sqrt(diffX2 * diffX2 + diffY2 * diffY2 + diffZ2 * diffZ2); } float depth_right = sqrt(_laserCloud->points[i + 1].x * _laserCloud->points[i + 1].x + _laserCloud->points[i + 1].y * _laserCloud->points[i + 1].y + _laserCloud->points[i + 1].z * _laserCloud->points[i + 1].z); float depth_left = sqrt(_laserCloud->points[i - 1].x * _laserCloud->points[i - 1].x + _laserCloud->points[i - 1].y * _laserCloud->points[i - 1].y + _laserCloud->points[i - 1].z * _laserCloud->points[i - 1].z); if(fabs(diff_right[0] - diff_left[0]) > thBreakCornerDis){ if(diff_right[0] > diff_left[0]){ Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i - 1].x - _laserCloud->points[i].x, _laserCloud->points[i - 1].y - _laserCloud->points[i].y, _laserCloud->points[i - 1].z - _laserCloud->points[i].z); Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z); double left_surf_dis = surf_vector.norm(); //calculate the angle between the laser direction and the surface double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) ); std::vector left_list; double min_dis = 10000; double max_dis = 0; for(int j = 0; j < 4; j++){ //TODO: change the plane window size and add thin rod support left_list.push_back(_laserCloud->points[i - j]); Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i - j].x - _laserCloud->points[i - j - 1].x, _laserCloud->points[i - j].y - _laserCloud->points[i - j - 1].y, _laserCloud->points[i - j].z - _laserCloud->points[i - j - 1].z); if(j == 3) break; double temp_dis = temp_vector.norm(); if(temp_dis < min_dis) min_dis = temp_dis; if(temp_dis > max_dis) max_dis = temp_dis; } bool left_is_plane = plane_judge(left_list,100); 
if( cc < 0.95 ){//(max_dis < 2*min_dis) && left_surf_dis < 0.05 * depth && left_is_plane && if(depth_right > depth_left){ CloudFeatureFlag[i] = 100; } else{ if(depth_right == 0) CloudFeatureFlag[i] = 100; } } } else{ Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i + 1].x - _laserCloud->points[i].x, _laserCloud->points[i + 1].y - _laserCloud->points[i].y, _laserCloud->points[i + 1].z - _laserCloud->points[i].z); Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z); double right_surf_dis = surf_vector.norm(); //calculate the angle between the laser direction and the surface double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) ); std::vector right_list; double min_dis = 10000; double max_dis = 0; for(int j = 0; j < 4; j++){ //TODO: change the plane window size and add thin rod support right_list.push_back(_laserCloud->points[i - j]); Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i + j].x - _laserCloud->points[i + j + 1].x, _laserCloud->points[i + j].y - _laserCloud->points[i + j + 1].y, _laserCloud->points[i + j].z - _laserCloud->points[i + j + 1].z); if(j == 3) break; double temp_dis = temp_vector.norm(); if(temp_dis < min_dis) min_dis = temp_dis; if(temp_dis > max_dis) max_dis = temp_dis; } bool right_is_plane = plane_judge(right_list,100); if( cc < 0.95){ //right_is_plane && (max_dis < 2*min_dis) && right_surf_dis < 0.05 * depth && if(depth_right < depth_left){ CloudFeatureFlag[i] = 100; } else{ if(depth_left == 0) CloudFeatureFlag[i] = 100; } } } } // break points select if(CloudFeatureFlag[i] == 100){ debugnum2++; std::vector front_norms; Eigen::Vector3d norm_front(0,0,0); Eigen::Vector3d norm_back(0,0,0); for(int k = 1;k<4;k++){ float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x + _laserCloud->points[i - k].y * _laserCloud->points[i - k].y + _laserCloud->points[i - k].z * 
_laserCloud->points[i - k].z); if(temp_depth < 1){ continue; } Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i - k].x - _laserCloud->points[i].x, _laserCloud->points[i - k].y - _laserCloud->points[i].y, _laserCloud->points[i - k].z - _laserCloud->points[i].z); tmp.normalize(); front_norms.push_back(tmp); norm_front += (k/6.0)* tmp; } std::vector back_norms; for(int k = 1;k<4;k++){ float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x + _laserCloud->points[i - k].y * _laserCloud->points[i - k].y + _laserCloud->points[i - k].z * _laserCloud->points[i - k].z); if(temp_depth < 1){ continue; } Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i + k].x - _laserCloud->points[i].x, _laserCloud->points[i + k].y - _laserCloud->points[i].y, _laserCloud->points[i + k].z - _laserCloud->points[i].z); tmp.normalize(); back_norms.push_back(tmp); norm_back += (k/6.0)* tmp; } double cc = fabs( norm_front.dot(norm_back) / (norm_front.norm()*norm_back.norm()) ); if(cc < 0.95){ debugnum3++; }else{ CloudFeatureFlag[i] = 101; } } } pcl::PointCloud::Ptr laserCloudCorner(new pcl::PointCloud()); pcl::PointCloud cornerPointsSharp; std::vector pointsLessSharp_ori; int num_surf = 0; int num_corner = 0; //push_back feature for(int i = 5; i < cloudSize - 5; i ++){ float dis = _laserCloud->points[i].x * _laserCloud->points[i].x + _laserCloud->points[i].y * _laserCloud->points[i].y + _laserCloud->points[i].z * _laserCloud->points[i].z; if(dis < thLidarNearestDis*thLidarNearestDis) continue; if(CloudFeatureFlag[i] == 2){ pointsLessFlat.push_back(i); num_surf++; continue; } if(CloudFeatureFlag[i] == 100 || CloudFeatureFlag[i] == 150){ // pointsLessSharp_ori.push_back(i); laserCloudCorner->push_back(_laserCloud->points[i]); } } for(int i = 0; i < laserCloudCorner->points.size();i++){ pointsLessSharp.push_back(pointsLessSharp_ori[i]); num_corner++; } } void LidarFeatureExtractor::FeatureExtract_with_segment(const livox_ros_driver::CustomMsgConstPtr 
&msg, pcl::PointCloud::Ptr& laserCloud, pcl::PointCloud::Ptr& laserConerFeature, pcl::PointCloud::Ptr& laserSurfFeature, pcl::PointCloud::Ptr& laserNonFeature, sensor_msgs::PointCloud2 &msg_seg, const int Used_Line){ laserCloud->clear(); laserConerFeature->clear(); laserSurfFeature->clear(); laserCloud->clear(); laserCloud->reserve(15000*N_SCANS); for(auto & ptr : vlines){ ptr->clear(); } for(auto & v : vcorner){ v.clear(); } for(auto & v : vsurf){ v.clear(); } int dnum = msg->points.size(); int *idtrans = (int*)calloc(dnum, sizeof(int)); float *data=(float*)calloc(dnum*4,sizeof(float)); int point_num = 0; double timeSpan = ros::Time().fromNSec(msg->points.back().offset_time).toSec(); PointType point; for(const auto& p : msg->points){ int line_num = (int)p.line; if(line_num > Used_Line-1) continue; if(p.x < 0.01) continue; if (!pcl_isfinite(p.x) || !pcl_isfinite(p.y) || !pcl_isfinite(p.z)) { continue; } point.x = p.x; point.y = p.y; point.z = p.z; point.intensity = p.reflectivity; point.normal_x = ros::Time().fromNSec(p.offset_time).toSec() /timeSpan; point.normal_y = _int_as_float(line_num); laserCloud->push_back(point); data[point_num*4+0] = point.x; data[point_num*4+1] = point.y; data[point_num*4+2] = point.z; data[point_num*4+3] = point.intensity; point_num++; } PCSeg pcseg; pcseg.DoSeg(idtrans,data,dnum); int estimate_gnd_count = 0; std::size_t cloud_num = laserCloud->size(); for(std::size_t i=0; ipoints[i].normal_y); laserCloud->points[i].normal_z = _int_as_float(i); vlines[line_idx]->push_back(laserCloud->points[i]); } std::thread threads[N_SCANS]; for(int i=0; ipoints[_float_as_int(vlines[i]->points[vcorner[i][j]].normal_z)].normal_z = 1.0; num_corner++; } } detectFeaturePoint2(laserCloud, laserSurfFeature, laserNonFeature); for(std::size_t i=0; ipoints[i].x * laserCloud->points[i].x + laserCloud->points[i].y * laserCloud->points[i].y + laserCloud->points[i].z * laserCloud->points[i].z; if( idtrans[i] > 9 && dis < 50*50){ laserCloud->points[i].normal_z = 0; 
} if (idtrans[i] == 0) { laserCloud->points[i].normal_y = -1.0; } } pcl::PointCloud::Ptr laserConerFeature_filter; laserConerFeature_filter.reset(new pcl::PointCloud()); laserConerFeature.reset(new pcl::PointCloud()); laserSurfFeature.reset(new pcl::PointCloud()); laserNonFeature.reset(new pcl::PointCloud()); for(const auto& p : laserCloud->points){ if(std::fabs(p.normal_z - 1.0) < 1e-5) laserConerFeature->push_back(p); } for(const auto& p : laserCloud->points){ if(std::fabs(p.normal_z - 2.0) < 1e-5) laserSurfFeature->push_back(p); if(std::fabs(p.normal_z - 3.0) < 1e-5) laserNonFeature->push_back(p); } } void LidarFeatureExtractor::detectFeaturePoint2(pcl::PointCloud::Ptr& cloud, pcl::PointCloud::Ptr& pointsLessFlat, pcl::PointCloud::Ptr& pointsNonFeature){ int cloudSize = cloud->points.size(); pointsLessFlat.reset(new pcl::PointCloud()); pointsNonFeature.reset(new pcl::PointCloud()); pcl::KdTreeFLANN::Ptr KdTreeCloud; KdTreeCloud.reset(new pcl::KdTreeFLANN); KdTreeCloud->setInputCloud(cloud); std::vector _pointSearchInd; std::vector _pointSearchSqDis; int num_near = 10; int stride = 1; int interval = 4; for(int i = 5; i < cloudSize - 5; i = i+stride) { if(fabs(cloud->points[i].normal_z - 1.0) < 1e-5) { continue; } double thre1d = 0.5; double thre2d = 0.8; double thre3d = 0.5; double thre3d2 = 0.13; double disti = sqrt(cloud->points[i].x * cloud->points[i].x + cloud->points[i].y * cloud->points[i].y + cloud->points[i].z * cloud->points[i].z); if(disti < 30.0) { thre1d = 0.5; thre2d = 0.8; thre3d2 = 0.07; stride = 14; interval = 4; } else if(disti < 60.0) { stride = 10; interval = 3; } else { stride = 1; interval = 0; } if(disti > 100.0) { num_near = 6; cloud->points[i].normal_z = 3.0; pointsNonFeature->points.push_back(cloud->points[i]); continue; } else if(disti > 60.0) { num_near = 8; } else { num_near = 10; } KdTreeCloud->nearestKSearch(cloud->points[i], num_near, _pointSearchInd, _pointSearchSqDis); if (_pointSearchSqDis[num_near-1] > 5.0 && disti < 90.0) { 
continue; } Eigen::Matrix< double, 3, 3 > _matA1; _matA1.setZero(); float cx = 0; float cy = 0; float cz = 0; for (int j = 0; j < num_near; j++) { cx += cloud->points[_pointSearchInd[j]].x; cy += cloud->points[_pointSearchInd[j]].y; cz += cloud->points[_pointSearchInd[j]].z; } cx /= num_near; cy /= num_near; cz /= num_near; float a11 = 0; float a12 = 0; float a13 = 0; float a22 = 0; float a23 = 0; float a33 = 0; for (int j = 0; j < num_near; j++) { float ax = cloud->points[_pointSearchInd[j]].x - cx; float ay = cloud->points[_pointSearchInd[j]].y - cy; float az = cloud->points[_pointSearchInd[j]].z - cz; a11 += ax * ax; a12 += ax * ay; a13 += ax * az; a22 += ay * ay; a23 += ay * az; a33 += az * az; } a11 /= num_near; a12 /= num_near; a13 /= num_near; a22 /= num_near; a23 /= num_near; a33 /= num_near; _matA1(0, 0) = a11; _matA1(0, 1) = a12; _matA1(0, 2) = a13; _matA1(1, 0) = a12; _matA1(1, 1) = a22; _matA1(1, 2) = a23; _matA1(2, 0) = a13; _matA1(2, 1) = a23; _matA1(2, 2) = a33; Eigen::SelfAdjointEigenSolver saes(_matA1); double a1d = (sqrt(saes.eigenvalues()[2]) - sqrt(saes.eigenvalues()[1])) / sqrt(saes.eigenvalues()[2]); double a2d = (sqrt(saes.eigenvalues()[1]) - sqrt(saes.eigenvalues()[0])) / sqrt(saes.eigenvalues()[2]); double a3d = sqrt(saes.eigenvalues()[0]) / sqrt(saes.eigenvalues()[2]); if(a2d > thre2d || (a3d < thre3d2 && a1d < thre1d)) { for(int k = 1; k < interval; k++) { cloud->points[i-k].normal_z = 2.0; pointsLessFlat->points.push_back(cloud->points[i-k]); cloud->points[i+k].normal_z = 2.0; pointsLessFlat->points.push_back(cloud->points[i+k]); } cloud->points[i].normal_z = 2.0; pointsLessFlat->points.push_back(cloud->points[i]); } else if(a3d > thre3d) { for(int k = 1; k < interval; k++) { cloud->points[i-k].normal_z = 3.0; pointsNonFeature->points.push_back(cloud->points[i-k]); cloud->points[i+k].normal_z = 3.0; pointsNonFeature->points.push_back(cloud->points[i+k]); } cloud->points[i].normal_z = 3.0; 
pointsNonFeature->points.push_back(cloud->points[i]); } } } void LidarFeatureExtractor::detectFeaturePoint3(pcl::PointCloud::Ptr& cloud, std::vector& pointsLessSharp){ int CloudFeatureFlag[20000]; float cloudCurvature[20000]; float cloudDepth[20000]; int cloudSortInd[20000]; float cloudReflect[20000]; int reflectSortInd[20000]; int cloudAngle[20000]; pcl::PointCloud::Ptr& laserCloudIn = cloud; int cloudSize = laserCloudIn->points.size(); PointType point; pcl::PointCloud::Ptr _laserCloud(new pcl::PointCloud()); _laserCloud->reserve(cloudSize); for (int i = 0; i < cloudSize; i++) { point.x = laserCloudIn->points[i].x; point.y = laserCloudIn->points[i].y; point.z = laserCloudIn->points[i].z; point.normal_x = 1.0; point.intensity = laserCloudIn->points[i].intensity; if (!pcl_isfinite(point.x) || !pcl_isfinite(point.y) || !pcl_isfinite(point.z)) { continue; } _laserCloud->push_back(point); CloudFeatureFlag[i] = 0; } cloudSize = _laserCloud->size(); int count_num = 1; bool left_surf_flag = false; bool right_surf_flag = false; //--------------------------------------------------- break points --------------------------------------------- for(int i = 5; i < cloudSize - 5; i ++){ float diff_left[2]; float diff_right[2]; float depth = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x + _laserCloud->points[i].y * _laserCloud->points[i].y + _laserCloud->points[i].z * _laserCloud->points[i].z); for(int count = 1; count < 3; count++ ){ float diffX1 = _laserCloud->points[i + count].x - _laserCloud->points[i].x; float diffY1 = _laserCloud->points[i + count].y - _laserCloud->points[i].y; float diffZ1 = _laserCloud->points[i + count].z - _laserCloud->points[i].z; diff_right[count - 1] = sqrt(diffX1 * diffX1 + diffY1 * diffY1 + diffZ1 * diffZ1); float diffX2 = _laserCloud->points[i - count].x - _laserCloud->points[i].x; float diffY2 = _laserCloud->points[i - count].y - _laserCloud->points[i].y; float diffZ2 = _laserCloud->points[i - count].z - _laserCloud->points[i].z; 
diff_left[count - 1] = sqrt(diffX2 * diffX2 + diffY2 * diffY2 + diffZ2 * diffZ2); } float depth_right = sqrt(_laserCloud->points[i + 1].x * _laserCloud->points[i + 1].x + _laserCloud->points[i + 1].y * _laserCloud->points[i + 1].y + _laserCloud->points[i + 1].z * _laserCloud->points[i + 1].z); float depth_left = sqrt(_laserCloud->points[i - 1].x * _laserCloud->points[i - 1].x + _laserCloud->points[i - 1].y * _laserCloud->points[i - 1].y + _laserCloud->points[i - 1].z * _laserCloud->points[i - 1].z); if(fabs(diff_right[0] - diff_left[0]) > thBreakCornerDis){ if(diff_right[0] > diff_left[0]){ Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i - 1].x - _laserCloud->points[i].x, _laserCloud->points[i - 1].y - _laserCloud->points[i].y, _laserCloud->points[i - 1].z - _laserCloud->points[i].z); Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z); double left_surf_dis = surf_vector.norm(); //calculate the angle between the laser direction and the surface double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) ); std::vector left_list; double min_dis = 10000; double max_dis = 0; for(int j = 0; j < 4; j++){ //TODO: change the plane window size and add thin rod support left_list.push_back(_laserCloud->points[i - j]); Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i - j].x - _laserCloud->points[i - j - 1].x, _laserCloud->points[i - j].y - _laserCloud->points[i - j - 1].y, _laserCloud->points[i - j].z - _laserCloud->points[i - j - 1].z); if(j == 3) break; double temp_dis = temp_vector.norm(); if(temp_dis < min_dis) min_dis = temp_dis; if(temp_dis > max_dis) max_dis = temp_dis; } // bool left_is_plane = plane_judge(left_list,0.3); if(cc < 0.93){//(max_dis < 2*min_dis) && left_surf_dis < 0.05 * depth && left_is_plane && if(depth_right > depth_left){ CloudFeatureFlag[i] = 100; } else{ if(depth_right == 0) CloudFeatureFlag[i] = 100; } } } 
else{ Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i + 1].x - _laserCloud->points[i].x, _laserCloud->points[i + 1].y - _laserCloud->points[i].y, _laserCloud->points[i + 1].z - _laserCloud->points[i].z); Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z); double right_surf_dis = surf_vector.norm(); //calculate the angle between the laser direction and the surface double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) ); std::vector right_list; double min_dis = 10000; double max_dis = 0; for(int j = 0; j < 4; j++){ //TODO: change the plane window size and add thin rod support right_list.push_back(_laserCloud->points[i - j]); Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i + j].x - _laserCloud->points[i + j + 1].x, _laserCloud->points[i + j].y - _laserCloud->points[i + j + 1].y, _laserCloud->points[i + j].z - _laserCloud->points[i + j + 1].z); if(j == 3) break; double temp_dis = temp_vector.norm(); if(temp_dis < min_dis) min_dis = temp_dis; if(temp_dis > max_dis) max_dis = temp_dis; } // bool right_is_plane = plane_judge(right_list,0.3); if(cc < 0.93){ //right_is_plane && (max_dis < 2*min_dis) && right_surf_dis < 0.05 * depth && if(depth_right < depth_left){ CloudFeatureFlag[i] = 100; } else{ if(depth_left == 0) CloudFeatureFlag[i] = 100; } } } } // break points select if(CloudFeatureFlag[i] == 100){ std::vector front_norms; Eigen::Vector3d norm_front(0,0,0); Eigen::Vector3d norm_back(0,0,0); for(int k = 1;k<4;k++){ float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x + _laserCloud->points[i - k].y * _laserCloud->points[i - k].y + _laserCloud->points[i - k].z * _laserCloud->points[i - k].z); if(temp_depth < 1){ continue; } Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i - k].x - _laserCloud->points[i].x, _laserCloud->points[i - k].y - _laserCloud->points[i].y, 
_laserCloud->points[i - k].z - _laserCloud->points[i].z); tmp.normalize(); front_norms.push_back(tmp); norm_front += (k/6.0)* tmp; } std::vector back_norms; for(int k = 1;k<4;k++){ float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x + _laserCloud->points[i - k].y * _laserCloud->points[i - k].y + _laserCloud->points[i - k].z * _laserCloud->points[i - k].z); if(temp_depth < 1){ continue; } Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i + k].x - _laserCloud->points[i].x, _laserCloud->points[i + k].y - _laserCloud->points[i].y, _laserCloud->points[i + k].z - _laserCloud->points[i].z); tmp.normalize(); back_norms.push_back(tmp); norm_back += (k/6.0)* tmp; } double cc = fabs( norm_front.dot(norm_back) / (norm_front.norm()*norm_back.norm()) ); if(cc < 0.93){ }else{ CloudFeatureFlag[i] = 101; } } } pcl::PointCloud::Ptr laserCloudCorner(new pcl::PointCloud()); pcl::PointCloud cornerPointsSharp; std::vector pointsLessSharp_ori; int num_surf = 0; int num_corner = 0; for(int i = 5; i < cloudSize - 5; i ++){ Eigen::Vector3d left_pt = Eigen::Vector3d(_laserCloud->points[i - 1].x, _laserCloud->points[i - 1].y, _laserCloud->points[i - 1].z); Eigen::Vector3d right_pt = Eigen::Vector3d(_laserCloud->points[i + 1].x, _laserCloud->points[i + 1].y, _laserCloud->points[i + 1].z); Eigen::Vector3d cur_pt = Eigen::Vector3d(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z); float dis = _laserCloud->points[i].x * _laserCloud->points[i].x + _laserCloud->points[i].y * _laserCloud->points[i].y + _laserCloud->points[i].z * _laserCloud->points[i].z; double clr = fabs(left_pt.dot(right_pt) / (left_pt.norm()*right_pt.norm())); double cl = fabs(left_pt.dot(cur_pt) / (left_pt.norm()*cur_pt.norm())); double cr = fabs(right_pt.dot(cur_pt) / (right_pt.norm()*cur_pt.norm())); if(clr < 0.999){ CloudFeatureFlag[i] = 200; } if(dis < thLidarNearestDis*thLidarNearestDis) continue; if(CloudFeatureFlag[i] == 100 || CloudFeatureFlag[i] == 
200){ // pointsLessSharp_ori.push_back(i); laserCloudCorner->push_back(_laserCloud->points[i]); } } for(int i = 0; i < laserCloudCorner->points.size();i++){ pointsLessSharp.push_back(pointsLessSharp_ori[i]); num_corner++; } } void LidarFeatureExtractor::FeatureExtract(const livox_ros_driver::CustomMsgConstPtr &msg, pcl::PointCloud::Ptr& laserCloud, pcl::PointCloud::Ptr& laserConerFeature, pcl::PointCloud::Ptr& laserSurfFeature, const int Used_Line){ laserCloud->clear(); laserConerFeature->clear(); laserSurfFeature->clear(); laserCloud->reserve(15000*N_SCANS); for(auto & ptr : vlines){ ptr->clear(); } for(auto & v : vcorner){ v.clear(); } for(auto & v : vsurf){ v.clear(); } double timeSpan = ros::Time().fromNSec(msg->points.back().offset_time).toSec(); PointType point; for(const auto& p : msg->points){ int line_num = (int)p.line; if(line_num > Used_Line-1) continue; if(p.x < 0.01) continue; point.x = p.x; point.y = p.y; point.z = p.z; point.intensity = p.reflectivity; point.normal_x = ros::Time().fromNSec(p.offset_time).toSec() /timeSpan; point.normal_y = _int_as_float(line_num); laserCloud->push_back(point); } std::size_t cloud_num = laserCloud->size(); for(std::size_t i=0; ipoints[i].normal_y); laserCloud->points[i].normal_z = _int_as_float(i); vlines[line_idx]->push_back(laserCloud->points[i]); laserCloud->points[i].normal_z = 0; } std::thread threads[N_SCANS]; for(int i=0; ipoints[_float_as_int(vlines[i]->points[vcorner[i][j]].normal_z)].normal_z = 1.0; } for(int j=0; jpoints[_float_as_int(vlines[i]->points[vsurf[i][j]].normal_z)].normal_z = 2.0; } } for(const auto& p : laserCloud->points){ if(std::fabs(p.normal_z - 1.0) < 1e-5) laserConerFeature->push_back(p); } for(const auto& p : laserCloud->points){ if(std::fabs(p.normal_z - 2.0) < 1e-5) laserSurfFeature->push_back(p); } } ================================================ FILE: LIO-Livox/src/lio/Map_Manager.cpp ================================================ #include "MapManager/Map_Manager.h" #include 
MAP_MANAGER::MAP_MANAGER(const float& filter_corner, const float& filter_surf){ for (int i = 0; i < laserCloudNum; i++) { laserCloudCornerArray[i].reset(new pcl::PointCloud()); laserCloudSurfArray[i].reset(new pcl::PointCloud()); laserCloudNonFeatureArray[i].reset(new pcl::PointCloud()); laserCloudCornerArrayStack[i].reset(new pcl::PointCloud()); laserCloudSurfArrayStack[i].reset(new pcl::PointCloud()); laserCloudNonFeatureArrayStack[i].reset(new pcl::PointCloud()); laserCloudCornerKdMap[i].reset(new pcl::KdTreeFLANN); laserCloudSurfKdMap[i].reset(new pcl::KdTreeFLANN); laserCloudNonFeatureKdMap[i].reset(new pcl::KdTreeFLANN); } for (int i = 0; i < localMapWindowSize; i++) { localCornerMap[i].reset(new pcl::PointCloud()); localSurfMap[i].reset(new pcl::PointCloud()); localNonFeatureMap[i].reset(new pcl::PointCloud()); } laserCloudCornerFromMap.reset(new pcl::PointCloud()); laserCloudSurfFromMap.reset(new pcl::PointCloud()); laserCloudNonFeatureFromMap.reset(new pcl::PointCloud()); downSizeFilterCorner.setLeafSize(0.4, 0.4, 0.4); downSizeFilterSurf.setLeafSize(0.4, 0.4, 0.4); downSizeFilterNonFeature.setLeafSize(0.4, 0.4, 0.4); } size_t MAP_MANAGER::ToIndex(int i, int j, int k) { return i + laserCloudDepth * j + laserCloudDepth * laserCloudWidth * k; } /** \brief transform point pi to the MAP coordinate * \param[in] pi: point to be transformed * \param[in] po: point after transfomation * \param[in] _transformTobeMapped: transform matrix between pi and po */ void MAP_MANAGER::pointAssociateToMap(PointType const * const pi, PointType * const po, const Eigen::Matrix4d& _transformTobeMapped){ Eigen::Vector3d pin, pout; pin.x() = pi->x; pin.y() = pi->y; pin.z() = pi->z; pout = _transformTobeMapped.topLeftCorner(3,3) * pin + _transformTobeMapped.topRightCorner(3,1); po->x = pout.x(); po->y = pout.y(); po->z = pout.z(); po->intensity = pi->intensity; po->normal_z = pi->normal_z; po->normal_y = pi->normal_y; } void MAP_MANAGER::featureAssociateToMap(const 
pcl::PointCloud::Ptr& laserCloudCorner, const pcl::PointCloud::Ptr& laserCloudSurf, const pcl::PointCloud::Ptr& laserCloudNonFeature, const pcl::PointCloud::Ptr& laserCloudCornerToMap, const pcl::PointCloud::Ptr& laserCloudSurfToMap, const pcl::PointCloud::Ptr& laserCloudNonFeatureToMap, const Eigen::Matrix4d& transformTobeMapped){ int laserCloudCornerNum = laserCloudCorner->points.size(); int laserCloudSurfNum = laserCloudSurf->points.size(); int laserCloudNonFeatureNum = laserCloudNonFeature->points.size(); PointType pointSel1,pointSel2,pointSel3; for (int i = 0; i < laserCloudCornerNum; i++) { pointAssociateToMap(&laserCloudCorner->points[i], &pointSel1, transformTobeMapped); laserCloudCornerToMap->push_back(pointSel1); } for (int i = 0; i < laserCloudSurfNum; i++) { pointAssociateToMap(&laserCloudSurf->points[i], &pointSel2, transformTobeMapped); laserCloudSurfToMap->push_back(pointSel2); } for (int i = 0; i < laserCloudNonFeatureNum; i++) { pointAssociateToMap(&laserCloudNonFeature->points[i], &pointSel3, transformTobeMapped); laserCloudNonFeatureToMap->push_back(pointSel3); } } /** \brief add new lidar points to the map * \param[in] laserCloudCornerStack: coner feature points that need to be added to map * \param[in] laserCloudSurfStack: surf feature points that need to be added to map * \param[in] transformTobeMapped: transform matrix of the lidar pose */ void MAP_MANAGER::MapIncrement(const pcl::PointCloud::Ptr& laserCloudCornerStack, const pcl::PointCloud::Ptr& laserCloudSurfStack, const pcl::PointCloud::Ptr& laserCloudNonFeatureStack, const Eigen::Matrix4d& transformTobeMapped){ clock_t t0,t1,t2,t3,t4,t5; t0 = clock(); std::unique_lock locker2(mtx_MapManager); for(int i = 0; i < laserCloudNum; i++){ CornerKdMap_last[i] = *laserCloudCornerKdMap[i]; SurfKdMap_last[i] = *laserCloudSurfKdMap[i]; NonFeatureKdMap_last[i] = *laserCloudNonFeatureKdMap[i]; laserCloudSurf_for_match[i] = *laserCloudSurfArray[i]; laserCloudCorner_for_match[i] = 
*laserCloudCornerArray[i]; laserCloudNonFeature_for_match[i] = *laserCloudNonFeatureArray[i]; } laserCloudCenWidth_last = laserCloudCenWidth; laserCloudCenHeight_last = laserCloudCenHeight; laserCloudCenDepth_last = laserCloudCenDepth; locker2.unlock(); t1 = clock(); MapMove(transformTobeMapped); t2 = clock(); int laserCloudCornerStackNum = laserCloudCornerStack->points.size(); int laserCloudSurfStackNum = laserCloudSurfStack->points.size(); int laserCloudNonFeatureStackNum = laserCloudNonFeatureStack->points.size(); bool CornerChangeFlag[laserCloudNum] = {false}; bool SurfChangeFlag[laserCloudNum] = {false}; bool NonFeatureChangeFlag[laserCloudNum] = {false}; PointType pointSel; for (int i = 0; i < laserCloudCornerStackNum; i++) { pointSel = laserCloudCornerStack->points[i]; int cubeI = int((pointSel.x + 25.0) / 50.0) + laserCloudCenDepth; int cubeJ = int((pointSel.y + 25.0) / 50.0) + laserCloudCenWidth; int cubeK = int((pointSel.z + 25.0) / 50.0) + laserCloudCenHeight; if (pointSel.x + 25.0 < 0) cubeI--; if (pointSel.y + 25.0 < 0) cubeJ--; if (pointSel.z + 25.0 < 0) cubeK--; if (cubeI >= 0 && cubeI < laserCloudDepth && cubeJ >= 0 && cubeJ < laserCloudWidth && cubeK >= 0 && cubeK < laserCloudHeight) { size_t cubeInd = ToIndex(cubeI, cubeJ, cubeK); laserCloudCornerArray[cubeInd]->push_back(pointSel); CornerChangeFlag[cubeInd] = true; } } for (int i = 0; i < laserCloudSurfStackNum; i++) { pointSel = laserCloudSurfStack->points[i]; int cubeI = int((pointSel.x + 25.0) / 50.0) + laserCloudCenDepth; int cubeJ = int((pointSel.y + 25.0) / 50.0) + laserCloudCenWidth; int cubeK = int((pointSel.z + 25.0) / 50.0) + laserCloudCenHeight; if (pointSel.x + 25.0 < 0) cubeI--; if (pointSel.y + 25.0 < 0) cubeJ--; if (pointSel.z + 25.0 < 0) cubeK--; if (cubeI >= 0 && cubeI < laserCloudDepth && cubeJ >= 0 && cubeJ < laserCloudWidth && cubeK >= 0 && cubeK < laserCloudHeight) { size_t cubeInd = ToIndex(cubeI, cubeJ, cubeK); laserCloudSurfArray[cubeInd]->push_back(pointSel); 
SurfChangeFlag[cubeInd] = true; } } for (int i = 0; i < laserCloudNonFeatureStackNum; i++) { pointSel = laserCloudNonFeatureStack->points[i]; int cubeI = int((pointSel.x + 25.0) / 50.0) + laserCloudCenDepth; int cubeJ = int((pointSel.y + 25.0) / 50.0) + laserCloudCenWidth; int cubeK = int((pointSel.z + 25.0) / 50.0) + laserCloudCenHeight; if (pointSel.x + 25.0 < 0) cubeI--; if (pointSel.y + 25.0 < 0) cubeJ--; if (pointSel.z + 25.0 < 0) cubeK--; if (cubeI >= 0 && cubeI < laserCloudDepth && cubeJ >= 0 && cubeJ < laserCloudWidth && cubeK >= 0 && cubeK < laserCloudHeight) { size_t cubeInd = ToIndex(cubeI, cubeJ, cubeK); laserCloudNonFeatureArray[cubeInd]->push_back(pointSel); NonFeatureChangeFlag[cubeInd] = true; } } t3 = clock(); laserCloudCornerFromMap->clear(); laserCloudSurfFromMap->clear(); laserCloudNonFeatureFromMap->clear(); for(int i = 0; i < laserCloudNum; i++){ if(CornerChangeFlag[i]){ if(laserCloudCornerArray[i]->points.size() > 300){ downSizeFilterCorner.setInputCloud(laserCloudCornerArray[i]); laserCloudCornerArrayStack[i]->clear(); downSizeFilterCorner.filter(*laserCloudCornerArrayStack[i]); pcl::PointCloud::Ptr tmp = laserCloudCornerArrayStack[i]; laserCloudCornerArrayStack[i] = laserCloudCornerArray[i]; laserCloudCornerArray[i] = tmp; } laserCloudCornerKdMap[i]->setInputCloud(laserCloudCornerArray[i]); *laserCloudCornerFromMap += *laserCloudCornerKdMap[i]->getInputCloud(); } if(SurfChangeFlag[i]){ if(laserCloudSurfArray[i]->points.size() > 300){ downSizeFilterSurf.setInputCloud(laserCloudSurfArray[i]); laserCloudSurfArrayStack[i]->clear(); downSizeFilterSurf.filter(*laserCloudSurfArrayStack[i]); pcl::PointCloud::Ptr tmp = laserCloudSurfArrayStack[i]; laserCloudSurfArrayStack[i] = laserCloudSurfArray[i]; laserCloudSurfArray[i] = tmp; } laserCloudSurfKdMap[i]->setInputCloud(laserCloudSurfArray[i]); *laserCloudSurfFromMap += *laserCloudSurfKdMap[i]->getInputCloud(); } if(NonFeatureChangeFlag[i]){ if(laserCloudNonFeatureArray[i]->points.size() > 300){ 
downSizeFilterNonFeature.setInputCloud(laserCloudNonFeatureArray[i]); laserCloudNonFeatureArrayStack[i]->clear(); downSizeFilterNonFeature.filter(*laserCloudNonFeatureArrayStack[i]); pcl::PointCloud::Ptr tmp = laserCloudNonFeatureArrayStack[i]; laserCloudNonFeatureArrayStack[i] = laserCloudNonFeatureArray[i]; laserCloudNonFeatureArray[i] = tmp; } laserCloudNonFeatureKdMap[i]->setInputCloud(laserCloudNonFeatureArray[i]); *laserCloudNonFeatureFromMap += *laserCloudNonFeatureKdMap[i]->getInputCloud(); } } t4 = clock(); std::unique_lock locker(mtx_MapManager); for(int i = 0; i < laserCloudNum; i++){ CornerKdMap_copy[i] = *laserCloudCornerKdMap[i]; SurfKdMap_copy[i] = *laserCloudSurfKdMap[i]; NonFeatureKdMap_copy[i] = *laserCloudNonFeatureKdMap[i]; } locker.unlock(); t5 = clock(); currentUpdatePos ++; } /** \brief move the map index if need * \param[in] transformTobeMapped: transform matrix of the lidar pose */ void MAP_MANAGER::MapMove(const Eigen::Matrix4d& transformTobeMapped){ const Eigen::Matrix3d transformTobeMapped_R = transformTobeMapped.topLeftCorner(3, 3); const Eigen::Vector3d transformTobeMapped_t = transformTobeMapped.topRightCorner(3, 1); PointType pointOnYAxis; pointOnYAxis.x = 0.0; pointOnYAxis.y = 0.0; pointOnYAxis.z = 10.0; pointAssociateToMap(&pointOnYAxis, &pointOnYAxis, transformTobeMapped); int centerCubeI = int((transformTobeMapped_t.x() + 25.0) / 50.0) + laserCloudCenDepth; int centerCubeJ = int((transformTobeMapped_t.y() + 25.0) / 50.0) + laserCloudCenWidth; int centerCubeK = int((transformTobeMapped_t.z() + 25.0) / 50.0) + laserCloudCenHeight; if (transformTobeMapped_t.x() + 25.0 < 0) centerCubeI--; if (transformTobeMapped_t.y() + 25.0 < 0) centerCubeJ--; if (transformTobeMapped_t.z() + 25.0 < 0) centerCubeK--; while (centerCubeI < 8) { for (int j = 0; j < laserCloudWidth; j++) { for (int k = 0; k < laserCloudHeight; k++) { int i = laserCloudDepth - 1; pcl::KdTreeFLANN::Ptr laserCloudCubeCornerPointerKd = laserCloudCornerKdMap[ToIndex(i, j, 
k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeSurfPointerKd = laserCloudSurfKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeNonFeaturePointerKd = laserCloudNonFeatureKdMap[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeCornerPointer = laserCloudCornerArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeSurfPointer = laserCloudSurfArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeNonFeaturePointer = laserCloudNonFeatureArray[ToIndex(i, j, k)]; for (; i >= 1; i--) { const size_t index_a = ToIndex(i, j, k); const size_t index_b = ToIndex(i - 1, j, k); laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b]; laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b]; laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b]; laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b]; laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b]; laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b]; } //此时i已经移动至0 laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd; laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd; laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd; laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer; laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer; laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer; laserCloudCubeCornerPointer->clear(); laserCloudCubeSurfPointer->clear(); laserCloudCubeNonFeaturePointer->clear(); } } centerCubeI++; laserCloudCenDepth++; } while (centerCubeI >= laserCloudDepth - 8) { for (int j = 0; j < laserCloudWidth; j++) { for (int k = 0; k < laserCloudHeight; k++) { int i = 0; pcl::KdTreeFLANN::Ptr laserCloudCubeCornerPointerKd = laserCloudCornerKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeSurfPointerKd = laserCloudSurfKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeNonFeaturePointerKd = 
laserCloudNonFeatureKdMap[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeCornerPointer = laserCloudCornerArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeSurfPointer = laserCloudSurfArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeNonFeaturePointer = laserCloudNonFeatureArray[ToIndex(i, j, k)]; for (; i < laserCloudDepth - 1; i++) { const size_t index_a = ToIndex(i, j, k); const size_t index_b = ToIndex(i + 1, j, k); laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b]; laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b]; laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b]; laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b]; laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b]; laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b]; } laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd; laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd; laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd; laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer; laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer; laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer; laserCloudCubeCornerPointer->clear(); laserCloudCubeSurfPointer->clear(); laserCloudCubeNonFeaturePointer->clear(); } } centerCubeI--; laserCloudCenDepth--; } while (centerCubeJ < 8) { for (int i = 0; i < laserCloudDepth; i++) { for (int k = 0; k < laserCloudHeight; k++) { int j = laserCloudWidth - 1; pcl::KdTreeFLANN::Ptr laserCloudCubeCornerPointerKd = laserCloudCornerKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeSurfPointerKd = laserCloudSurfKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeNonFeaturePointerKd = laserCloudNonFeatureKdMap[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeCornerPointer = laserCloudCornerArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr 
laserCloudCubeSurfPointer = laserCloudSurfArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeNonFeaturePointer = laserCloudNonFeatureArray[ToIndex(i, j, k)]; for (; j >= 1; j--) { const size_t index_a = ToIndex(i, j, k); const size_t index_b = ToIndex(i, j - 1, k); laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b]; laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b]; laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b]; laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b]; laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b]; laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b]; } laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd; laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd; laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd; laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer; laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer; laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer; laserCloudCubeCornerPointer->clear(); laserCloudCubeSurfPointer->clear(); laserCloudCubeNonFeaturePointer->clear(); } } centerCubeJ++; laserCloudCenWidth++; } while (centerCubeJ >= laserCloudWidth - 8) { for (int i = 0; i < laserCloudDepth; i++) { for (int k = 0; k < laserCloudHeight; k++) { int j = 0; pcl::KdTreeFLANN::Ptr laserCloudCubeCornerPointerKd = laserCloudCornerKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeSurfPointerKd = laserCloudSurfKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeNonFeaturePointerKd = laserCloudNonFeatureKdMap[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeCornerPointer = laserCloudCornerArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeSurfPointer = laserCloudSurfArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeNonFeaturePointer = laserCloudNonFeatureArray[ToIndex(i, j, k)]; for (; j < 
laserCloudWidth - 1; j++) { const size_t index_a = ToIndex(i, j, k); const size_t index_b = ToIndex(i, j + 1, k); laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b]; laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b]; laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b]; laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b]; laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b]; laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b]; } laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd; laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd; laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd; laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer; laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer; laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer; laserCloudCubeCornerPointer->clear(); laserCloudCubeSurfPointer->clear(); laserCloudCubeNonFeaturePointer->clear(); } } centerCubeJ--; laserCloudCenWidth--; } while (centerCubeK < 8) { for (int i = 0; i < laserCloudDepth; i++) { for (int j = 0; j < laserCloudWidth; j++) { int k = laserCloudHeight - 1; pcl::KdTreeFLANN::Ptr laserCloudCubeCornerPointerKd = laserCloudCornerKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeSurfPointerKd = laserCloudSurfKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeNonFeaturePointerKd = laserCloudNonFeatureKdMap[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeCornerPointer = laserCloudCornerArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeSurfPointer = laserCloudSurfArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeNonFeaturePointer = laserCloudNonFeatureArray[ToIndex(i, j, k)]; for (; k >= 1; k--) { const size_t index_a = ToIndex(i, j, k); const size_t index_b = ToIndex(i, j, k - 1); laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b]; 
laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b]; laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b]; laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b]; laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b]; laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b]; } laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd; laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd; laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd; laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer; laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer; laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer; laserCloudCubeCornerPointer->clear(); laserCloudCubeSurfPointer->clear(); laserCloudCubeNonFeaturePointer->clear(); } } centerCubeK++; laserCloudCenHeight++; } while (centerCubeK >= laserCloudHeight - 8) { for (int i = 0; i < laserCloudDepth; i++) { for (int j = 0; j < laserCloudWidth; j++) { int k = 0; pcl::KdTreeFLANN::Ptr laserCloudCubeCornerPointerKd = laserCloudCornerKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeSurfPointerKd = laserCloudSurfKdMap[ToIndex(i, j, k)]; pcl::KdTreeFLANN::Ptr laserCloudCubeNonFeaturePointerKd = laserCloudNonFeatureKdMap[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeCornerPointer = laserCloudCornerArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeSurfPointer = laserCloudSurfArray[ToIndex(i, j, k)]; pcl::PointCloud::Ptr laserCloudCubeNonFeaturePointer = laserCloudNonFeatureArray[ToIndex(i, j, k)]; for (; k < laserCloudHeight - 1; k++) { const size_t index_a = ToIndex(i, j, k); const size_t index_b = ToIndex(i, j, k + 1); laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b]; laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b]; laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b]; 
laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b]; laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b]; laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b]; } laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd; laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd; laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd; laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer; laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer; laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer; laserCloudCubeCornerPointer->clear(); laserCloudCubeSurfPointer->clear(); laserCloudCubeNonFeaturePointer->clear(); } } centerCubeK--; laserCloudCenHeight--; } } size_t MAP_MANAGER::FindUsedCornerMap(const PointType *p,int a,int b, int c) { int cubeI = int((p->x + 25.0) / 50.0) + c; int cubeJ = int((p->y + 25.0) / 50.0) + a; int cubeK = int((p->z + 25.0) / 50.0) + b; size_t cubeInd = 0; if (p->x + 25.0 < 0) cubeI--; if (p->y + 25.0 < 0) cubeJ--; if (p->z + 25.0 < 0) cubeK--; if (cubeI >= 0 && cubeI < laserCloudDepth && cubeJ >= 0 && cubeJ < laserCloudWidth && cubeK >= 0 && cubeK < laserCloudHeight) { cubeInd = ToIndex(cubeI, cubeJ, cubeK); } else{ cubeInd = 5000; } return cubeInd; } size_t MAP_MANAGER::FindUsedSurfMap(const PointType *p,int a,int b, int c) { int cubeI = int((p->x + 25.0) / 50.0) + c; int cubeJ = int((p->y + 25.0) / 50.0) + a; int cubeK = int((p->z + 25.0) / 50.0) + b; size_t cubeInd = 0; if (p->x + 25.0 < 0) cubeI--; if (p->y + 25.0 < 0) cubeJ--; if (p->z + 25.0 < 0) cubeK--; if (cubeI >= 0 && cubeI < laserCloudDepth && cubeJ >= 0 && cubeJ < laserCloudWidth && cubeK >= 0 && cubeK < laserCloudHeight) { cubeInd = ToIndex(cubeI, cubeJ, cubeK); } else{ cubeInd = 5000; } return cubeInd; } size_t MAP_MANAGER::FindUsedNonFeatureMap(const PointType *p,int a,int b, int c) { int cubeI = int((p->x + 25.0) / 50.0) + c; int cubeJ = 
int((p->y + 25.0) / 50.0) + a; int cubeK = int((p->z + 25.0) / 50.0) + b; size_t cubeInd = 0; if (p->x + 25.0 < 0) cubeI--; if (p->y + 25.0 < 0) cubeJ--; if (p->z + 25.0 < 0) cubeK--; if (cubeI >= 0 && cubeI < laserCloudDepth && cubeJ >= 0 && cubeJ < laserCloudWidth && cubeK >= 0 && cubeK < laserCloudHeight) { cubeInd = ToIndex(cubeI, cubeJ, cubeK); } else{ cubeInd = 5000; } return cubeInd; } ================================================ FILE: LIO-Livox/src/lio/PoseEstimation.cpp ================================================ #include "Estimator/Estimator.h" #include #include #include #include typedef pcl::PointXYZINormal PointType; int WINDOWSIZE; bool LidarIMUInited = false; boost::shared_ptr> lidarFrameList; pcl::PointCloud::Ptr laserCloudFullRes; Estimator* estimator; ros::Publisher pubLaserOdometry; ros::Publisher pubLaserOdometryPath; ros::Publisher pubFullLaserCloud; ros::Publisher pubFullLaserCloudIMU; tf::StampedTransform laserOdometryTrans; tf::TransformBroadcaster* tfBroadcaster; ros::Publisher pubGps; bool newfullCloud = false; Eigen::Matrix4d transformAftMapped = Eigen::Matrix4d::Identity(); std::mutex _mutexLidarQueue; std::queue _lidarMsgQueue; std::mutex _mutexIMUQueue; std::queue _imuMsgQueue; Eigen::Matrix4d exTlb = Eigen::Matrix4d::Identity(); Eigen::Matrix3d exRlb, exRbl; Eigen::Vector3d exPlb, exPbl; Eigen::Vector3d GravityVector; float filter_parameter_corner = 0.2; float filter_parameter_surf = 0.4; int IMU_Mode = 2; sensor_msgs::NavSatFix gps; int pushCount = 0; double startTime = 0; nav_msgs::Path laserOdoPath; // save odometry std::string save_path; std::shared_ptr odo; /** \brief publish odometry infomation * \param[in] newPose: pose to be published * \param[in] timefullCloud: time stamp */ void pubOdometry(const Eigen::Matrix4d& newPose, double& timefullCloud){ nav_msgs::Odometry laserOdometry; Eigen::Matrix3d Rcurr = newPose.topLeftCorner(3, 3); Eigen::Quaterniond newQuat(Rcurr); Eigen::Vector3d newPosition = 
newPose.topRightCorner(3, 1); laserOdometry.header.frame_id = "/world"; laserOdometry.child_frame_id = "/livox_frame"; laserOdometry.header.stamp = ros::Time().fromSec(timefullCloud); laserOdometry.pose.pose.orientation.x = newQuat.x(); laserOdometry.pose.pose.orientation.y = newQuat.y(); laserOdometry.pose.pose.orientation.z = newQuat.z(); laserOdometry.pose.pose.orientation.w = newQuat.w(); laserOdometry.pose.pose.position.x = newPosition.x(); laserOdometry.pose.pose.position.y = newPosition.y(); laserOdometry.pose.pose.position.z = newPosition.z(); pubLaserOdometry.publish(laserOdometry); // saveodometry *odo //<< std::setw(20) << laserOdometry.header.stamp << " " << newPosition.x() << " " << newPosition.y() << " " << newPosition.z() << " " // << gr[0] << " " << gr[1] << " " << gr[2] << " " << 0 << " " << 0 << " " << 0 << " " << 1 << std::endl; Eigen::Vector3d gr; gr.segment<2>(0) = newQuat.toRotationMatrix().block<2, 1>(0, 2); gr[2] = newPosition[2]; geometry_msgs::PoseStamped laserPose; laserPose.header = laserOdometry.header; laserPose.pose = laserOdometry.pose.pose; laserOdoPath.header.stamp = laserOdometry.header.stamp; laserOdoPath.poses.push_back(laserPose); laserOdoPath.header.frame_id = "/world"; pubLaserOdometryPath.publish(laserOdoPath); laserOdometryTrans.frame_id_ = "/world"; laserOdometryTrans.child_frame_id_ = "/livox_frame"; laserOdometryTrans.stamp_ = ros::Time().fromSec(timefullCloud); laserOdometryTrans.setRotation(tf::Quaternion(newQuat.x(), newQuat.y(), newQuat.z(), newQuat.w())); laserOdometryTrans.setOrigin(tf::Vector3(newPosition.x(), newPosition.y(), newPosition.z())); tfBroadcaster->sendTransform(laserOdometryTrans); gps.header.stamp = ros::Time().fromSec(timefullCloud); gps.header.frame_id = "world"; gps.latitude = newPosition.x(); gps.longitude = newPosition.y(); gps.altitude = newPosition.z(); gps.position_covariance = { Rcurr(0, 0), Rcurr(1, 0), Rcurr(2, 0), Rcurr(0, 1), Rcurr(1, 1), Rcurr(2, 1), Rcurr(0, 2), Rcurr(1, 2), Rcurr(2, 
2) }; pubGps.publish(gps); } void fullCallBack(const sensor_msgs::PointCloud2ConstPtr &msg){ // push lidar msg to queue std::unique_lock lock(_mutexLidarQueue); _lidarMsgQueue.push(msg); } void imu_callback(const sensor_msgs::ImuConstPtr &imu_msg){ // push IMU msg to queue std::unique_lock lock(_mutexIMUQueue); _imuMsgQueue.push(imu_msg); } /** \brief get IMU messages in a certain time interval * \param[in] startTime: left boundary of time interval * \param[in] endTime: right boundary of time interval * \param[in] vimuMsg: store IMU messages */ bool fetchImuMsgs(double startTime, double endTime, std::vector &vimuMsg){ std::unique_lock lock(_mutexIMUQueue); double current_time = 0; vimuMsg.clear(); while(true) { if(_imuMsgQueue.empty()) { break; } if(_imuMsgQueue.back()->header.stamp.toSec()header.stamp.toSec()>=endTime) { // std::cout << "imu back time is " << std::setprecision(20) << _imuMsgQueue.back()->header.stamp.toSec() << " " // << "endtime is " << std::setprecision(20) << endTime << std::endl; break; } sensor_msgs::ImuConstPtr& tmpimumsg = _imuMsgQueue.front(); double time = tmpimumsg->header.stamp.toSec(); if(time<=endTime && time>startTime){ vimuMsg.push_back(tmpimumsg); current_time = time; _imuMsgQueue.pop(); if(time == endTime) { break; } } else{ if(time<=startTime){ _imuMsgQueue.pop(); } else{ double dt_1 = endTime - current_time; double dt_2 = time - endTime; ROS_ASSERT(dt_1 >= 0); ROS_ASSERT(dt_2 >= 0); ROS_ASSERT(dt_1 + dt_2 > 0); double w1 = dt_2 / (dt_1 + dt_2); double w2 = dt_1 / (dt_1 + dt_2); sensor_msgs::ImuPtr theLastIMU(new sensor_msgs::Imu); theLastIMU->linear_acceleration.x = w1 * vimuMsg.back()->linear_acceleration.x + w2 * tmpimumsg->linear_acceleration.x; theLastIMU->linear_acceleration.y = w1 * vimuMsg.back()->linear_acceleration.y + w2 * tmpimumsg->linear_acceleration.y; theLastIMU->linear_acceleration.z = w1 * vimuMsg.back()->linear_acceleration.z + w2 * tmpimumsg->linear_acceleration.z; theLastIMU->angular_velocity.x = w1 * 
vimuMsg.back()->angular_velocity.x + w2 * tmpimumsg->angular_velocity.x; theLastIMU->angular_velocity.y = w1 * vimuMsg.back()->angular_velocity.y + w2 * tmpimumsg->angular_velocity.y; theLastIMU->angular_velocity.z = w1 * vimuMsg.back()->angular_velocity.z + w2 * tmpimumsg->angular_velocity.z; theLastIMU->header.stamp.fromSec(endTime); vimuMsg.emplace_back(theLastIMU); break; } } } return !vimuMsg.empty(); } /** \brief Remove Lidar Distortion * \param[in] cloud: lidar cloud need to be undistorted * \param[in] dRlc: delta rotation * \param[in] dtlc: delta displacement */ void RemoveLidarDistortion(pcl::PointCloud::Ptr& cloud, const Eigen::Matrix3d& dRlc, const Eigen::Vector3d& dtlc){ int PointsNum = cloud->points.size(); for (int i = 0; i < PointsNum; i++) { Eigen::Vector3d startP; float s = cloud->points[i].normal_x; Eigen::Quaterniond qlc = Eigen::Quaterniond(dRlc).normalized(); Eigen::Quaterniond delta_qlc = Eigen::Quaterniond::Identity().slerp(s, qlc).normalized(); const Eigen::Vector3d delta_Plc = s * dtlc; startP = delta_qlc * Eigen::Vector3d(cloud->points[i].x,cloud->points[i].y,cloud->points[i].z) + delta_Plc; Eigen::Vector3d _po = dRlc.transpose() * (startP - dtlc); cloud->points[i].x = _po(0); cloud->points[i].y = _po(1); cloud->points[i].z = _po(2); cloud->points[i].normal_x = 1.0; } } /** \brief Convert the lidar point cloud from the odometer to the ground coordinate system * \param[in] cloud: lidar cloud need to be convert * \param[in] Rgo: rotation from odometer to ground */ void ConvertPointCloudFromOdometerToGround(pcl::PointCloud::Ptr& cloud, const Eigen::Matrix3f& Rgo){ int PointsNum = cloud->points.size(); for (int i = 0; i < PointsNum; i++) { Eigen::Vector3f startP{cloud->points[i].x,cloud->points[i].y,cloud->points[i].z}; Eigen::Vector3f _po = Rgo * startP; cloud->points[i].x = _po(0); cloud->points[i].y = _po(1); cloud->points[i].z = _po(2); cloud->points[i].normal_x = 1.0; } } /** \brief Convert the lidar point cloud from the lidar to the IMU 
// ---------------------------------------------------------------------------
// NOTE(review): this region was damaged by extraction: physical newlines were
// collapsed (so each `//` comment inside the long lines below swallows the
// code that originally followed on later lines) and template argument lists
// were stripped — e.g. `std::vector prior_v` was presumably
// `std::vector<Eigen::Vector3d>`, and each `Eigen::Matrix sqrt_information_*`
// a fixed-size square matrix; restore from upstream LIO-Livox before building.
// Code bytes are left unchanged here; only standalone comment lines are added.
//
// ConvertPointFromLidarToIMU: applies the rigid transform `exTbl`
// (rotation = top-left 3x3 block, translation = top-right 3x1 column) to the
// point `pi`, writing the transformed coordinates plus the copied
// intensity / normal_y / normal_z fields into `po`.
//
// TryMAPInitialization (begins on this line): tightly-coupled LIO bootstrap.
// Step 1 — gravity direction: the negated average accelerometer reading of
// the first window frame is rescaled to 9.805 m/s^2 and a one-block Ceres
// problem (QuaternionParameterization + Cost_Initial_G) solves for the
// gravity-aligning rotation q_wg.  `info_g` (= |9.805 - ||acc|||) is computed
// but never read afterwards — dead variable.
// Step 2 — priors: zero priors for rotation correction and both IMU biases,
// and one velocity prior per frame finite-differenced from consecutive frame
// poses (shifted into the IMU frame with exPlb) over their timestamp gap;
// prior_v[0] is copied from prior_v[1] since frame 0 has no predecessor.
coordinate system * \param[in] pi: lidar point need to be convert * \param[in] po: lidar point after convert * \param[in] exTbl: transformation from lidar to imu */ void ConvertPointFromLidarToIMU(PointType const * const pi, PointType * const po, const Eigen::Matrix4d& exTbl){ Eigen::Vector3d pin, pout; pin[0] = pi->x; pin[1] = pi->y; pin[2] = pi->z; pout = exTbl.topLeftCorner(3,3) * pin + exTbl.topRightCorner(3,1); po->x = pout[0]; po->y = pout[1]; po->z = pout[2]; po->intensity = pi->intensity; po->normal_z = pi->normal_z; po->normal_y = pi->normal_y; } bool TryMAPInitialization() { Eigen::Vector3d average_acc = -lidarFrameList->begin()->imuIntegrator.GetAverageAcc(); double info_g = std::fabs(9.805 - average_acc.norm()); average_acc = average_acc * 9.805 / average_acc.norm(); // calculate the initial gravity direction double para_quat[4]; para_quat[0] = 1; para_quat[1] = 0; para_quat[2] = 0; para_quat[3] = 0; ceres::LocalParameterization *quatParam = new ceres::QuaternionParameterization(); ceres::Problem problem_quat; problem_quat.AddParameterBlock(para_quat, 4, quatParam); problem_quat.AddResidualBlock(Cost_Initial_G::Create(average_acc), nullptr, para_quat); ceres::Solver::Options options_quat; ceres::Solver::Summary summary_quat; ceres::Solve(options_quat, &problem_quat, &summary_quat); Eigen::Quaterniond q_wg(para_quat[0], para_quat[1], para_quat[2], para_quat[3]); //build prior factor of LIO initialization Eigen::Vector3d prior_r = Eigen::Vector3d::Zero(); Eigen::Vector3d prior_ba = Eigen::Vector3d::Zero(); Eigen::Vector3d prior_bg = Eigen::Vector3d::Zero(); std::vector prior_v; int v_size = lidarFrameList->size(); for(int i = 0; i < v_size; i++) { prior_v.push_back(Eigen::Vector3d::Zero()); } Sophus::SO3d SO3_R_wg(q_wg.toRotationMatrix()); prior_r = SO3_R_wg.log(); for (int i = 1; i < v_size; i++){ auto iter = lidarFrameList->begin(); auto iter_next = lidarFrameList->begin(); std::advance(iter, i-1); std::advance(iter_next, i); Eigen::Vector3d velo_imu =
// Step 3 — batch MAP problem: parameter blocks are the 3-dof gravity rotation
// correction para_r, the accel/gyro biases para_ba/para_bg, and one 3-dof
// velocity block per frame.  Weighted priors (sqrt-information scales
// 2000 / 1000 / 4000 / 4000 for r / ba / bg / v) anchor each block, then one
// Cost_Initialization_IMU residual per consecutive frame pair ties them to
// the pre-integrated IMU measurement (frame poses converted lidar->IMU via
// exPlb/exRlb, relative rotations taken through the Sophus SO3 log map).
(iter_next->P - iter->P + iter_next->Q*exPlb - iter->Q*exPlb) / (iter_next->timeStamp - iter->timeStamp); prior_v[i] = velo_imu; } prior_v[0] = prior_v[1]; double para_v[v_size][3]; double para_r[3]; double para_ba[3]; double para_bg[3]; for(int i = 0; i < 3; i++) { para_r[i] = 0; para_ba[i] = 0; para_bg[i] = 0; } for(int i = 0; i < v_size; i++) { for(int j = 0; j < 3; j++) { para_v[i][j] = prior_v[i][j]; } } Eigen::Matrix sqrt_information_r = 2000.0 * Eigen::Matrix::Identity(); Eigen::Matrix sqrt_information_ba = 1000.0 * Eigen::Matrix::Identity(); Eigen::Matrix sqrt_information_bg = 4000.0 * Eigen::Matrix::Identity(); Eigen::Matrix sqrt_information_v = 4000.0 * Eigen::Matrix::Identity(); ceres::Problem::Options problem_options; ceres::Problem problem(problem_options); problem.AddParameterBlock(para_r, 3); problem.AddParameterBlock(para_ba, 3); problem.AddParameterBlock(para_bg, 3); for(int i = 0; i < v_size; i++) { problem.AddParameterBlock(para_v[i], 3); } // add CostFunction problem.AddResidualBlock(Cost_Initialization_Prior_R::Create(prior_r, sqrt_information_r), nullptr, para_r); problem.AddResidualBlock(Cost_Initialization_Prior_bv::Create(prior_ba, sqrt_information_ba), nullptr, para_ba); problem.AddResidualBlock(Cost_Initialization_Prior_bv::Create(prior_bg, sqrt_information_bg), nullptr, para_bg); for(int i = 0; i < v_size; i++) { problem.AddResidualBlock(Cost_Initialization_Prior_bv::Create(prior_v[i], sqrt_information_v), nullptr, para_v[i]); } for(int i = 1; i < v_size; i++) { auto iter = lidarFrameList->begin(); auto iter_next = lidarFrameList->begin(); std::advance(iter, i-1); std::advance(iter_next, i); Eigen::Vector3d pi = iter->P + iter->Q*exPlb; Sophus::SO3d SO3_Ri(iter->Q*exRlb); Eigen::Vector3d ri = SO3_Ri.log(); Eigen::Vector3d pj = iter_next->P + iter_next->Q*exPlb; Sophus::SO3d SO3_Rj(iter_next->Q*exRlb); Eigen::Vector3d rj = SO3_Rj.log(); problem.AddResidualBlock(Cost_Initialization_IMU::Create(iter_next->imuIntegrator, ri, rj, pj-pi,
// The IMU residual is whitened with the transposed Cholesky factor of the
// inverse 9x9 pre-integration covariance (`Eigen::LLT>` — its template
// argument was stripped by extraction).  After solving (DENSE_QR, 6 threads)
// the gravity vector is recovered as exp(r_wg) * (0, 0, -9.805).  Sanity
// gates: initialization is rejected when either estimated bias norm exceeds
// 0.5, or (continuing below) when any estimated velocity deviates from its
// finite-difference prior by more than 2.0 m/s.  On success the biases and
// velocities are written back into every frame, pre-integration is redone
// with the refined biases, the list is trimmed to SLIDEWINDOWSIZE, and the
// newest frame's pose is moved from the lidar to the IMU frame.
// NOTE(review): the ROS_WARN string below is split across the original
// physical-line boundary, so no comment line is inserted before the next line.
Eigen::LLT> (iter_next->imuIntegrator.GetCovariance().block<9,9>(0,0).inverse()) .matrixL().transpose()), nullptr, para_r, para_v[i-1], para_v[i], para_ba, para_bg); } ceres::Solver::Options options; options.minimizer_progress_to_stdout = false; options.linear_solver_type = ceres::DENSE_QR; options.num_threads = 6; ceres::Solver::Summary summary; ceres::Solve(options, &problem, &summary); Eigen::Vector3d r_wg(para_r[0], para_r[1], para_r[2]); GravityVector = Sophus::SO3d::exp(r_wg) * Eigen::Vector3d(0, 0, -9.805); Eigen::Vector3d ba_vec(para_ba[0], para_ba[1], para_ba[2]); Eigen::Vector3d bg_vec(para_bg[0], para_bg[1], para_bg[2]); if(ba_vec.norm() > 0.5 || bg_vec.norm() > 0.5) { ROS_WARN("Too Large Biases! Initialization Failed!"); return false; } for(int i = 0; i < v_size; i++) { auto iter = lidarFrameList->begin(); std::advance(iter, i); iter->ba = ba_vec; iter->bg = bg_vec; Eigen::Vector3d bv_vec(para_v[i][0], para_v[i][1], para_v[i][2]); if((bv_vec - prior_v[i]).norm() > 2.0) { ROS_WARN("Too Large Velocity!
Initialization Failed!"); std::cout<<"delta v norm: "<<(bv_vec - prior_v[i]).norm()<V = bv_vec; } for(size_t i = 0; i < v_size - 1; i++){ auto laser_trans_i = lidarFrameList->begin(); auto laser_trans_j = lidarFrameList->begin(); std::advance(laser_trans_i, i); std::advance(laser_trans_j, i+1); laser_trans_j->imuIntegrator.PreIntegration(laser_trans_i->timeStamp, laser_trans_i->bg, laser_trans_i->ba); } // //if IMU success initialized WINDOWSIZE = Estimator::SLIDEWINDOWSIZE; while(lidarFrameList->size() > WINDOWSIZE){ lidarFrameList->pop_front(); } Eigen::Vector3d Pwl = lidarFrameList->back().P; Eigen::Quaterniond Qwl = lidarFrameList->back().Q; lidarFrameList->back().P = Pwl + Qwl*exPlb; lidarFrameList->back().Q = Qwl * exRlb; // std::cout << "\n=============================\n| Initialization Successful |"<<"\n=============================\n" << std::endl; return true; } /** \brief Mapping main thread */ void process(){ double time_last_lidar = -1; double time_curr_lidar = -1; Eigen::Matrix3d delta_Rl = Eigen::Matrix3d::Identity(); Eigen::Vector3d delta_tl = Eigen::Vector3d::Zero(); Eigen::Matrix3d delta_Rb = Eigen::Matrix3d::Identity(); Eigen::Vector3d delta_tb = Eigen::Vector3d::Zero(); std::vector vimuMsg; while(ros::ok()){ newfullCloud = false; laserCloudFullRes.reset(new pcl::PointCloud()); std::unique_lock lock_lidar(_mutexLidarQueue); if(!_lidarMsgQueue.empty()){ // get new lidar msg time_curr_lidar = _lidarMsgQueue.front()->header.stamp.toSec(); pcl::fromROSMsg(*_lidarMsgQueue.front(), *laserCloudFullRes); _lidarMsgQueue.pop(); newfullCloud = true; } lock_lidar.unlock(); if(newfullCloud){ nav_msgs::Odometry debugInfo; debugInfo.pose.pose.position.x = 0; debugInfo.pose.pose.position.y = 0; debugInfo.pose.pose.position.z = 0; if(IMU_Mode > 0 && time_last_lidar > 0){ // get IMU msg int the Specified time interval vimuMsg.clear(); int countFail = 0; while (!fetchImuMsgs(time_last_lidar, time_curr_lidar, vimuMsg)) { countFail++; if (countFail > 100){ break; }
std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) ); } } // this lidar frame init Estimator::LidarFrame lidarFrame; lidarFrame.laserCloud = laserCloudFullRes; lidarFrame.timeStamp = time_curr_lidar; boost::shared_ptr> lidar_list; if(!vimuMsg.empty()){ if(!LidarIMUInited) { // if get IMU msg successfully, use gyro integration to update delta_Rl lidarFrame.imuIntegrator.PushIMUMsg(vimuMsg); lidarFrame.imuIntegrator.GyroIntegration(time_last_lidar); delta_Rb = lidarFrame.imuIntegrator.GetDeltaQ().toRotationMatrix(); delta_Rl = exTlb.topLeftCorner(3, 3) * delta_Rb * exTlb.topLeftCorner(3, 3).transpose(); // predict current lidar pose lidarFrame.P = transformAftMapped.topLeftCorner(3,3) * delta_tb + transformAftMapped.topRightCorner(3,1); Eigen::Matrix3d m3d = transformAftMapped.topLeftCorner(3,3) * delta_Rb; lidarFrame.Q = m3d; lidar_list.reset(new std::list); lidar_list->push_back(lidarFrame); }else{ // if get IMU msg successfully, use pre-integration to update delta lidar pose lidarFrame.imuIntegrator.PushIMUMsg(vimuMsg); lidarFrame.imuIntegrator.PreIntegration(lidarFrameList->back().timeStamp, lidarFrameList->back().bg, lidarFrameList->back().ba); const Eigen::Vector3d& Pwbpre = lidarFrameList->back().P; const Eigen::Quaterniond& Qwbpre = lidarFrameList->back().Q; const Eigen::Vector3d& Vwbpre = lidarFrameList->back().V; const Eigen::Quaterniond& dQ = lidarFrame.imuIntegrator.GetDeltaQ(); const Eigen::Vector3d& dP = lidarFrame.imuIntegrator.GetDeltaP(); const Eigen::Vector3d& dV = lidarFrame.imuIntegrator.GetDeltaV(); double dt = lidarFrame.imuIntegrator.GetDeltaTime(); lidarFrame.Q = Qwbpre * dQ; lidarFrame.P = Pwbpre + Vwbpre*dt + 0.5*GravityVector*dt*dt + Qwbpre*(dP); lidarFrame.V = Vwbpre + GravityVector*dt + Qwbpre*(dV); lidarFrame.bg = lidarFrameList->back().bg; lidarFrame.ba = lidarFrameList->back().ba; Eigen::Quaterniond Qwlpre = Qwbpre * Eigen::Quaterniond(exRbl); Eigen::Vector3d Pwlpre = Qwbpre * exPbl + Pwbpre; Eigen::Quaterniond Qwl = 
lidarFrame.Q * Eigen::Quaterniond(exRbl); Eigen::Vector3d Pwl = lidarFrame.Q * exPbl + lidarFrame.P; delta_Rl = Qwlpre.conjugate() * Qwl; delta_tl = Qwlpre.conjugate() * (Pwl - Pwlpre); delta_Rb = dQ.toRotationMatrix(); delta_tb = dP; lidarFrameList->push_back(lidarFrame); lidarFrameList->pop_front(); lidar_list = lidarFrameList; } }else{ if(LidarIMUInited) { std::cout << "IMU is empty " << std::endl; break; } else{ // predict current lidar pose lidarFrame.P = transformAftMapped.topLeftCorner(3,3) * delta_tb + transformAftMapped.topRightCorner(3,1); Eigen::Matrix3d m3d = transformAftMapped.topLeftCorner(3,3) * delta_Rb; lidarFrame.Q = m3d; lidar_list.reset(new std::list); lidar_list->push_back(lidarFrame); } } // remove lidar distortion RemoveLidarDistortion(laserCloudFullRes, delta_Rl, delta_tl); static bool first_flag = true; Eigen::Matrix3f Rgo; if (first_flag) { lidar_list->back().ground_plane_coeff = livox_slam_ware::get_plane_coeffs(laserCloudFullRes); Eigen::Vector3f v1 = lidar_list->back().ground_plane_coeff.head<3>(); Eigen::Vector3f v2{0,0,1}; Rgo = Eigen::Quaternionf::FromTwoVectors(v1,v2).toRotationMatrix(); std:: cout << "the rotation matrix is " << Rgo << std::endl; first_flag = false; } ConvertPointCloudFromOdometerToGround(laserCloudFullRes, Rgo); // calculate the ground plane parameters lidarFrame.ground_plane_coeff = livox_slam_ware::get_plane_coeffs(laserCloudFullRes); lidar_list->back().ground_plane_coeff = lidarFrame.ground_plane_coeff; // std::cout << "ground_plane_coeff " << std::endl << lidar_list->back().ground_plane_coeff.transpose() << std::endl; // optimize current lidar pose with IMU estimator->EstimateLidarPose(*lidar_list, exTlb, GravityVector, debugInfo); pcl::PointCloud::Ptr laserCloudCornerMap(new pcl::PointCloud()); pcl::PointCloud::Ptr laserCloudSurfMap(new pcl::PointCloud()); Eigen::Matrix4d transformTobeMapped = Eigen::Matrix4d::Identity(); transformTobeMapped.topLeftCorner(3,3) = lidar_list->front().Q * exRbl; 
transformTobeMapped.topRightCorner(3,1) = lidar_list->front().Q * exPbl + lidar_list->front().P; // update delta transformation delta_Rb = transformAftMapped.topLeftCorner(3, 3).transpose() * lidar_list->front().Q.toRotationMatrix(); delta_tb = transformAftMapped.topLeftCorner(3, 3).transpose() * (lidar_list->front().P - transformAftMapped.topRightCorner(3, 1)); Eigen::Matrix3d Rwlpre = transformAftMapped.topLeftCorner(3, 3) * exRbl; Eigen::Vector3d Pwlpre = transformAftMapped.topLeftCorner(3, 3) * exPbl + transformAftMapped.topRightCorner(3, 1); delta_Rl = Rwlpre.transpose() * transformTobeMapped.topLeftCorner(3,3); delta_tl = Rwlpre.transpose() * (transformTobeMapped.topRightCorner(3,1) - Pwlpre); transformAftMapped.topLeftCorner(3,3) = lidar_list->front().Q.toRotationMatrix(); transformAftMapped.topRightCorner(3,1) = lidar_list->front().P; if (LidarIMUInited) { // publish odometry rostopic pubOdometry(transformTobeMapped, lidar_list->front().timeStamp); // publish lidar points int laserCloudFullResNum = lidar_list->front().laserCloud->points.size(); pcl::PointCloud::Ptr laserCloudAfterEstimate(new pcl::PointCloud()); pcl::PointCloud::Ptr laserCloudIMU(new pcl::PointCloud()); laserCloudAfterEstimate->reserve(laserCloudFullResNum); laserCloudIMU->reserve(laserCloudFullResNum); Eigen::Matrix4d exTbl = exTlb.inverse(); for (int i = 0; i < laserCloudFullResNum; i++) { PointType temp_point; auto &p = lidar_list->front().laserCloud->points[i]; // if (std::fabs(p.normal_y + 1.0) < 1e-5) { MAP_MANAGER::pointAssociateToMap(&lidar_list->front().laserCloud->points[i], &temp_point, transformAftMapped); laserCloudAfterEstimate->push_back(temp_point); ConvertPointFromLidarToIMU(&lidar_list->front().laserCloud->points[i], &temp_point, exTbl); // temp_point = lidar_list->front().laserCloud->points[i]; laserCloudIMU->push_back(temp_point); // } } // std::cout << "laserCloudAfterEstimate size = " << laserCloudAfterEstimate->size() << std::endl; sensor_msgs::PointCloud2 
laserCloudMsg, laserCloudIMUMsg; pcl::toROSMsg(*laserCloudAfterEstimate, laserCloudMsg); pcl::toROSMsg(*laserCloudIMU, laserCloudIMUMsg); laserCloudMsg.header.frame_id = "/world"; laserCloudIMUMsg.header.frame_id = "/world"; laserCloudMsg.header.stamp.fromSec(lidar_list->front().timeStamp); laserCloudIMUMsg.header.stamp.fromSec(lidar_list->front().timeStamp); pubFullLaserCloud.publish(laserCloudMsg); pubFullLaserCloudIMU.publish(laserCloudIMUMsg); } // if tightly coupled IMU message, start IMU initialization if(IMU_Mode > 1 && !LidarIMUInited){ // update lidar frame pose lidarFrame.P = transformTobeMapped.topRightCorner(3,1); Eigen::Matrix3d m3d = transformTobeMapped.topLeftCorner(3,3); lidarFrame.Q = m3d; // static int pushCount = 0; if(pushCount == 0){ lidarFrameList->push_back(lidarFrame); lidarFrameList->back().imuIntegrator.Reset(); if(lidarFrameList->size() > WINDOWSIZE) lidarFrameList->pop_front(); }else{ lidarFrameList->back().laserCloud = lidarFrame.laserCloud; lidarFrameList->back().imuIntegrator.PushIMUMsg(vimuMsg); lidarFrameList->back().timeStamp = lidarFrame.timeStamp; lidarFrameList->back().P = lidarFrame.P; lidarFrameList->back().Q = lidarFrame.Q; } pushCount++; if (pushCount >= 3){ pushCount = 0; if(lidarFrameList->size() > 1){ auto iterRight = std::prev(lidarFrameList->end()); auto iterLeft = std::prev(std::prev(lidarFrameList->end())); iterRight->imuIntegrator.PreIntegration(iterLeft->timeStamp, iterLeft->bg, iterLeft->ba); } if (lidarFrameList->size() == int(WINDOWSIZE / 1.5)) { startTime = lidarFrameList->back().timeStamp; } if (!LidarIMUInited && lidarFrameList->size() == WINDOWSIZE && lidarFrameList->front().timeStamp >= startTime){ std::cout<<"**************Start MAP Initialization!!!******************"< vecTlb; ros::param::get("~Extrinsic_Tlb",vecTlb); ros::param::get("~save_path",save_path); // set extrinsic matrix between lidar & IMU Eigen::Matrix3d R; Eigen::Vector3d t; R << vecTlb[0], vecTlb[1], vecTlb[2], vecTlb[4], vecTlb[5], 
vecTlb[6], vecTlb[8], vecTlb[9], vecTlb[10]; t << vecTlb[3], vecTlb[7], vecTlb[11]; Eigen::Quaterniond qr(R); R = qr.normalized().toRotationMatrix(); exTlb.topLeftCorner(3,3) = R; exTlb.topRightCorner(3,1) = t; exRlb = R; exRbl = R.transpose(); exPlb = t; exPbl = -1.0 * exRbl * exPlb; ros::Subscriber subFullCloud = nodeHandler.subscribe("/livox_full_cloud", 10000, fullCallBack); ros::Subscriber sub_imu; if(IMU_Mode > 0) sub_imu = nodeHandler.subscribe("/livox/imu", 200000, imu_callback, ros::TransportHints().unreliable()); if(IMU_Mode < 2) WINDOWSIZE = 1; else WINDOWSIZE = 20; pubFullLaserCloud = nodeHandler.advertise("/livox_full_cloud_mapped", 10); pubFullLaserCloudIMU = nodeHandler.advertise("/livox_full_cloud_imu", 10); pubLaserOdometry = nodeHandler.advertise ("/livox_odometry_mapped", 5); pubLaserOdometryPath = nodeHandler.advertise ("/livox_odometry_path_mapped", 5); pubGps = nodeHandler.advertise("/lidar", 1000); tfBroadcaster = new tf::TransformBroadcaster(); laserCloudFullRes.reset(new pcl::PointCloud); estimator = new Estimator(filter_parameter_corner, filter_parameter_surf); lidarFrameList.reset(new std::list); odo.reset(new std::ofstream(save_path)); std::thread thread_process{process}; std::cout << "finished" << std::endl; ros::spin(); std::cout << "finished ros" << std::endl; return 0; } ================================================ FILE: LIO-Livox/src/lio/ScanRegistration.cpp ================================================ #include "LidarFeatureExtractor/LidarFeatureExtractor.h" typedef pcl::PointXYZINormal PointType; ros::Publisher pubFullLaserCloud; ros::Publisher pubSharpCloud; ros::Publisher pubFlatCloud; ros::Publisher pubNonFeature; LidarFeatureExtractor* lidarFeatureExtractor; pcl::PointCloud::Ptr laserCloud; pcl::PointCloud::Ptr laserConerCloud; pcl::PointCloud::Ptr laserSurfCloud; pcl::PointCloud::Ptr laserNonFeatureCloud; int Lidar_Type = 0; int N_SCANS = 6; bool Feature_Mode = false; bool Use_seg = false; void 
// ---------------------------------------------------------------------------
// NOTE(review): extraction collapsed newlines on this line — the commented-out
// std::cout mid-line originally ended at a physical newline, but as rendered
// its `//` swallows the remainder of the line; every `static_cast(...)` has
// lost its target type (likely <int>/<float>/<bool>) and `pcl::PointCloud`
// its point-type argument.  Restore from upstream before compiling.  Code
// bytes are left unchanged here; only these comment lines are added.
//
// lidarCallBackHorizon: per-scan callback for the Livox Horizon CustomMsg.
// Runs feature extraction — segmentation-assisted when Use_seg is set,
// otherwise the plain path — filling the global corner/surface/non-feature
// clouds, then republishes the full cloud on pubFullLaserCloud with its stamp
// rebuilt from msg->timebase plus the last point's offset_time (nanoseconds,
// via fromNSec).
//
// main (ScanRegistration): loads tuning parameters from the OpenCV-YAML file
// named by the ~config_file parameter, allocates the global clouds, and (on
// the following line) wires the /livox/lidar subscriber and publishers.
// Defects visible even through the mangling, to fix upstream:
//   - `ros::Subscriber customCloud;;` — stray second semicolon;
//   - `return false;` inside `int main` converts to 0, i.e. reports SUCCESS
//     when the config file cannot be opened — should be a nonzero exit code.
lidarCallBackHorizon(const livox_ros_driver::CustomMsgConstPtr &msg) { sensor_msgs::PointCloud2 msg2; if(Use_seg){ lidarFeatureExtractor->FeatureExtract_with_segment(msg, laserCloud, laserConerCloud, laserSurfCloud, laserNonFeatureCloud, msg2,N_SCANS); } else{ lidarFeatureExtractor->FeatureExtract(msg, laserCloud, laserConerCloud, laserSurfCloud,N_SCANS); } sensor_msgs::PointCloud2 laserCloudMsg; pcl::toROSMsg(*laserCloud, laserCloudMsg); laserCloudMsg.header = msg->header; laserCloudMsg.header.stamp.fromNSec(msg->timebase+msg->points.back().offset_time); pubFullLaserCloud.publish(laserCloudMsg); // std::cout << "time in front is " << laserCloudMsg.header.stamp << std::endl; } int main(int argc, char** argv) { ros::init(argc, argv, "ScanRegistration"); ros::NodeHandle nodeHandler("~"); ros::Subscriber customCloud;; std::string config_file; nodeHandler.getParam("config_file", config_file); cv::FileStorage fsSettings(config_file, cv::FileStorage::READ); if (!fsSettings.isOpened()) { std::cout << "config_file error: cannot open " << config_file << std::endl; return false; } Lidar_Type = static_cast(fsSettings["Lidar_Type"]); N_SCANS = static_cast(fsSettings["Used_Line"]); Feature_Mode = static_cast(fsSettings["Feature_Mode"]); Use_seg = static_cast(fsSettings["Use_seg"]); int NumCurvSize = static_cast(fsSettings["NumCurvSize"]); float DistanceFaraway = static_cast(fsSettings["DistanceFaraway"]); int NumFlat = static_cast(fsSettings["NumFlat"]); int PartNum = static_cast(fsSettings["PartNum"]); float FlatThreshold = static_cast(fsSettings["FlatThreshold"]); float BreakCornerDis = static_cast(fsSettings["BreakCornerDis"]); float LidarNearestDis = static_cast(fsSettings["LidarNearestDis"]); float KdTreeCornerOutlierDis = static_cast(fsSettings["KdTreeCornerOutlierDis"]); laserCloud.reset(new pcl::PointCloud); laserConerCloud.reset(new pcl::PointCloud); laserSurfCloud.reset(new pcl::PointCloud); laserNonFeatureCloud.reset(new pcl::PointCloud); customCloud =
nodeHandler.subscribe("/livox/lidar", 100, &lidarCallBackHorizon); pubFullLaserCloud = nodeHandler.advertise("/livox_full_cloud", 10); pubSharpCloud = nodeHandler.advertise("/livox_less_sharp_cloud", 10); pubFlatCloud = nodeHandler.advertise("/livox_less_flat_cloud", 10); pubNonFeature = nodeHandler.advertise("/livox_nonfeature_cloud", 10); lidarFeatureExtractor = new LidarFeatureExtractor(N_SCANS,NumCurvSize,DistanceFaraway,NumFlat,PartNum, FlatThreshold,BreakCornerDis,LidarNearestDis,KdTreeCornerOutlierDis); ros::spin(); return 0; } ================================================ FILE: LIO-Livox/src/lio/ceresfunc.cpp ================================================ #include "utils/ceresfunc.h" void* ThreadsConstructA(void* threadsstruct) { ThreadsStruct* p = ((ThreadsStruct*)threadsstruct); for (auto it : p->sub_factors) { for (int i = 0; i < static_cast(it->parameter_blocks.size()); i++) { int idx_i = p->parameter_block_idx[reinterpret_cast(it->parameter_blocks[i])]; int size_i = p->parameter_block_size[reinterpret_cast(it->parameter_blocks[i])]; Eigen::MatrixXd jacobian_i = it->jacobians[i].leftCols(size_i); for (int j = i; j < static_cast(it->parameter_blocks.size()); j++) { int idx_j = p->parameter_block_idx[reinterpret_cast(it->parameter_blocks[j])]; int size_j = p->parameter_block_size[reinterpret_cast(it->parameter_blocks[j])]; Eigen::MatrixXd jacobian_j = it->jacobians[j].leftCols(size_j); if (i == j) p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j; else { p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j; p->A.block(idx_j, idx_i, size_j, size_i) = p->A.block(idx_i, idx_j, size_i, size_j).transpose(); } } p->b.segment(idx_i, size_i) += jacobian_i.transpose() * it->residuals; } } return threadsstruct; } // Eigen::Vector3d sqrt_information_vec(10, 10, 10); // close Eigen::Vector3d sqrt_information_vec(0.0000205, 0.0000205, 0.0001); // open 0.000205, 0.000205, 0.01 Eigen::Matrix 
Cost_NavState_PR_Ground::sqrt_information = sqrt_information_vec.asDiagonal().inverse(); Eigen::VectorXf Cost_NavState_PR_Ground::init_ground_plane_coeff(4); ================================================ FILE: LIO-Livox/src/segment/pointsCorrect.cpp ================================================ #include "segment/pointsCorrect.hpp" float gnd_pos[6]; int frame_count = 0; int frame_lenth_threshold = 5;//5 frames update int GetNeiborPCA_cor(SNeiborPCA_cor &npca, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN kdtree, pcl::PointXYZ searchPoint, float fSearchRadius) { std::vector k_dis; pcl::PointCloud::Ptr subCloud(new pcl::PointCloud); if(kdtree.radiusSearch(searchPoint,fSearchRadius,npca.neibors,k_dis)>5) { subCloud->width=npca.neibors.size(); subCloud->height=1; subCloud->points.resize(subCloud->width*subCloud->height); for (int pid=0;pidpoints.size();pid++)//搜索半径内的地面点云 sy { subCloud->points[pid].x=cloud->points[npca.neibors[pid]].x; subCloud->points[pid].y=cloud->points[npca.neibors[pid]].y; subCloud->points[pid].z=cloud->points[npca.neibors[pid]].z; } //利用PCA主元分析法获得点云的三个主方向,获取质心,计算协方差,获得协方差矩阵,求取协方差矩阵的特征值和特长向量,特征向量即为主方向。 sy Eigen::Vector4f pcaCentroid; pcl::compute3DCentroid(*subCloud, pcaCentroid); Eigen::Matrix3f covariance; pcl::computeCovarianceMatrixNormalized(*subCloud, pcaCentroid, covariance); Eigen::SelfAdjointEigenSolver eigen_solver(covariance, Eigen::ComputeEigenvectors); npca.eigenVectorsPCA = eigen_solver.eigenvectors(); npca.eigenValuesPCA = eigen_solver.eigenvalues(); float vsum=npca.eigenValuesPCA(0)+npca.eigenValuesPCA(1)+npca.eigenValuesPCA(2); npca.eigenValuesPCA(0)=npca.eigenValuesPCA(0)/(vsum+0.000001);//单位化 sy npca.eigenValuesPCA(1)=npca.eigenValuesPCA(1)/(vsum+0.000001); npca.eigenValuesPCA(2)=npca.eigenValuesPCA(2)/(vsum+0.000001); } else { npca.neibors.clear(); } //std::cout << "in PCA2\n"; return npca.neibors.size(); } int FilterGndForPos_cor(float* outPoints,float*inPoints,int inNum) { int outNum=0; float dx=2; float dy=2; int x_len = 
20; int y_len = 10; int nx=2*x_len/dx; //80 int ny=2*y_len/dy; //10 float offx=-20,offy=-10; float THR=0.4; float *imgMinZ=(float*)calloc(nx*ny,sizeof(float)); float *imgMaxZ=(float*)calloc(nx*ny,sizeof(float)); float *imgSumZ=(float*)calloc(nx*ny,sizeof(float)); float *imgMeanZ=(float*)calloc(nx*ny,sizeof(float)); int *imgNumZ=(int*)calloc(nx*ny,sizeof(int)); int *idtemp = (int*)calloc(inNum,sizeof(int)); for(int ii=0;ii -x_len) && (inPoints[pid*4]-y_len)&&(inPoints[pid*4+1]= nx*ny) continue; imgSumZ[idx+idy*nx] += inPoints[pid*4+2]; imgNumZ[idx+idy*nx] +=1; if(inPoints[pid*4+2]imgMaxZ[idx+idy*nx]){ imgMaxZ[idx+idy*nx]=inPoints[pid*4+2]; } } } for(int pid=0;pid= 60000) break; if(idtemp[pid] > 0 && idtemp[pid] < nx*ny) { imgMeanZ[idtemp[pid]] = float(imgSumZ[idtemp[pid]]/(imgNumZ[idtemp[pid]] + 0.0001)); //最高点与均值高度差小于阈值;点数大于3;均值高度小于1 if((imgMaxZ[idtemp[pid]] - imgMeanZ[idtemp[pid]]) < THR && imgNumZ[idtemp[pid]] > 3 && imgMeanZ[idtemp[pid]] < 2) {// imgMeanZ[idtemp[pid]]<0&& outPoints[outNum*4]=inPoints[pid*4]; outPoints[outNum*4+1]=inPoints[pid*4+1]; outPoints[outNum*4+2]=inPoints[pid*4+2]; outPoints[outNum*4+3]=inPoints[pid*4+3]; outNum++; } } } free(imgMinZ); free(imgMaxZ); free(imgSumZ); free(imgMeanZ); free(imgNumZ); free(idtemp); return outNum; } int CalGndPos_cor(float *gnd, float *fPoints,int pointNum,float fSearchRadius) { // 初始化gnd for(int ii=0;ii<6;ii++) { gnd[ii]=0; } // 转换点云到pcl的格式 pcl::PointCloud::Ptr cloud(new pcl::PointCloud); //去除异常点 if (pointNum <= 3) { return 0; } cloud->width=pointNum; cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=fPoints[pid*4+2]; } pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); int nNum=0; unsigned char* pLabel = (unsigned char*)calloc(pointNum,sizeof(unsigned char)); for(int pid=0;pidpoints[pid].x; searchPoint.y=cloud->points[pid].y; 
searchPoint.z=cloud->points[pid].z; if(GetNeiborPCA_cor(npca,cloud,kdtree,searchPoint,fSearchRadius)>0) { for(int ii=0;ii5000){ //指的是主方向与次方向差异较大。即这一小块接近平面 sy if(npca.eigenVectorsPCA(2,0)>0) //垂直向上? { gnd[0]+=npca.eigenVectorsPCA(0,0); gnd[1]+=npca.eigenVectorsPCA(1,0); gnd[2]+=npca.eigenVectorsPCA(2,0); gnd[3]+=searchPoint.x; gnd[4]+=searchPoint.y; gnd[5]+=searchPoint.z; } else { gnd[0]+=-npca.eigenVectorsPCA(0,0); gnd[1]+=-npca.eigenVectorsPCA(1,0); gnd[2]+=-npca.eigenVectorsPCA(2,0); gnd[3]+=searchPoint.x; gnd[4]+=searchPoint.y; gnd[5]+=searchPoint.z; } nNum++; } } } } free(pLabel); if(nNum>0) { for(int ii=0;ii<6;ii++) { gnd[ii]/=nNum; //平均法向量 & 地面点云的中心 } if(abs(gnd[0])<0.1){ gnd[0]=gnd[0]*(1-abs(gnd[0]))*4.5; } else if(abs(gnd[0])<0.2){ gnd[0]=gnd[0]*(1-abs(gnd[0]))*3.2; } else{ gnd[0]=gnd[0]*(1-abs(gnd[0]))*2.8; } gnd[1] = gnd[1]*2.3; } return nNum; } int GetRTMatrix_cor(float *RTM, float *v0, float *v1) { // 归一化 float nv0=sqrt(v0[0]*v0[0]+v0[1]*v0[1]+v0[2]*v0[2]); v0[0]/=(nv0+0.000001); v0[1]/=(nv0+0.000001); v0[2]/=(nv0+0.000001); float nv1=sqrt(v1[0]*v1[0]+v1[1]*v1[1]+v1[2]*v1[2]); v1[0]/=(nv1+0.000001); v1[1]/=(nv1+0.000001); v1[2]/=(nv1+0.000001); // 叉乘 float v2[3]; v2[0]=v0[1]*v1[2]-v0[2]*v1[1]; v2[1]=v0[2]*v1[0]-v0[0]*v1[2]; v2[2]=v0[0]*v1[1]-v0[1]*v1[0]; // 正余弦 float cosAng=0,sinAng=0; cosAng=v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2]; sinAng=sqrt(1-cosAng*cosAng); // 计算旋转矩阵 RTM[0]=v2[0]*v2[0]*(1-cosAng)+cosAng; RTM[4]=v2[1]*v2[1]*(1-cosAng)+cosAng; RTM[8]=v2[2]*v2[2]*(1-cosAng)+cosAng; RTM[1]=RTM[3]=v2[0]*v2[1]*(1-cosAng); RTM[2]=RTM[6]=v2[0]*v2[2]*(1-cosAng); RTM[5]=RTM[7]=v2[1]*v2[2]*(1-cosAng); RTM[1]+=(v2[2])*sinAng; RTM[2]+=(-v2[1])*sinAng; RTM[3]+=(-v2[2])*sinAng; RTM[5]+=(v2[0])*sinAng; RTM[6]+=(v2[1])*sinAng; RTM[7]+=(-v2[0])*sinAng; return 0; } int CorrectPoints_cor(float *fPoints,int pointNum,float *gndPos) { float RTM[9]; float gndHeight=0; float znorm[3]={0,0,1}; float tmp[3]; GetRTMatrix_cor(RTM,gndPos,znorm); gndHeight = 
RTM[2]*gndPos[3]+RTM[5]*gndPos[4]+RTM[8]*gndPos[5]; for(int pid=0;pid0&&abs(gnd_pos[0]-tmpPos[0])<0.1&&abs(gnd_pos[1]-tmpPos[1])<0.1){//更新法向量 for(int i = 0;i<6;i++){ gnd_pos[i] = (gnd_pos[i]+tmpPos[i])*0.5; } frame_count = 0; } else{ frame_count++; } } else if(tmpPos[5]!=0){ memcpy(gnd_pos,tmpPos,sizeof(tmpPos)); frame_count = 0; } } memcpy(pos,gnd_pos,sizeof(float)*6); free(fPoints3); return 0; } ================================================ FILE: LIO-Livox/src/segment/segment.cpp ================================================ #include "segment/segment.hpp" #define N_FRAME 5 float tmp_gnd_pose[100*6]; int tem_gnd_num = 0; PCSeg::PCSeg() { this->posFlag=0; this->pVImg=(unsigned char*)calloc(DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY*DN_SAMPLE_IMG_NZ,sizeof(unsigned char)); this->corPoints=NULL; } PCSeg::~PCSeg() { if(this->pVImg!=NULL) { free(this->pVImg); } if(this->corPoints!=NULL) { free(this->corPoints); } } int PCSeg::DoSeg(int *pLabel1, float* fPoints1, int pointNum) { // 1 down sampling float *fPoints2=(float*)calloc(pointNum*4,sizeof(float)); int *idtrans1=(int*)calloc(pointNum,sizeof(int)); int *idtrans2=(int*)calloc(pointNum,sizeof(int)); int pntNum=0; if (this->pVImg == NULL) { this->pVImg=(unsigned char*)calloc(DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY*DN_SAMPLE_IMG_NZ,sizeof(unsigned char)); } memset(pVImg,0,sizeof(unsigned char)*DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY*DN_SAMPLE_IMG_NZ);//600*200*30 for(int pid=0;pid -40-190m int iy=(fPoints1[pid*4+1]+DN_SAMPLE_IMG_OFFY)/DN_SAMPLE_IMG_DY; //-40-40m int iz=(fPoints1[pid*4+2]+DN_SAMPLE_IMG_OFFZ)/DN_SAMPLE_IMG_DZ;//认为地面为-1.8? 
-2.5~17.5 idtrans1[pid]=-1; if((ix>=0)&&(ix=0)&&(iy=0)&&(izgndPos,tmpPos,6*sizeof(float)); this->posFlag=1;//(this->posFlag+1)%SELF_CALI_FRAMES; // 3 点云矫正 this->CorrectPoints(fPoints2,pntNum,this->gndPos); if(this->corPoints!=NULL) free(this->corPoints); this->corPoints=(float*)calloc(pntNum*4,sizeof(float)); this->corNum=pntNum; memcpy(this->corPoints,fPoints2,4*pntNum*sizeof(float)); // 4 粗略地面分割for地上分割 int *pLabelGnd=(int*)calloc(pntNum,sizeof(int)); int gnum=GndSeg(pLabelGnd,fPoints2,pntNum,1.0); // 5 地上分割 int agnum = pntNum-gnum; float *fPoints3=(float*)calloc(agnum*4,sizeof(float)); int *idtrans3=(int*)calloc(agnum,sizeof(int)); int *pLabel2=(int*)calloc(pntNum,sizeof(int)); int agcnt=0;//非地面点数量 int gcnt=0;//地面点数量 for(int ii=0;ii= 10)//前景为0 背景为1 物体分类后 >=10 pLabel2[idtrans3[ii]] = 200;//pLabelAg[ii]; //前景 else if (pLabelAg[ii] > 0) pLabel2[idtrans3[ii]] = 1; //背景1 -> 0 else { pLabel2[idtrans3[ii]] = -1; } } for(int pid=0;pid=0) { pLabel1[pid]= pVImg[idtrans1[pid]]; } else { pLabel1[pid] = -1;//未分类 } } free(fPoints2); free(idtrans1); free(idtrans2); free(idtrans3); free(fPoints3); free(pLabelAg); free(pLabelGnd); free(pLabel2); } int PCSeg::GetMainVectors(float*fPoints, int* pLabel, int pointNum) { float *pClusterPoints=(float*)calloc(4*pointNum,sizeof(float)); for(int ii=0;ii<256;ii++) { this->pnum[ii]=0; this->objClass[ii]=-1; } this->clusterNum=0; int clabel=10; for(int ii=10;ii<256;ii++) { int pnum=0; for(int pid=0;pid0) { SClusterFeature cf = CalOBB(pClusterPoints,pnum); if(cf.cls<1) { this->pVectors[clabel*3]=cf.d0[0]; this->pVectors[clabel*3+1]=cf.d0[1]; this->pVectors[clabel*3+2]=cf.d0[2];//朝向向量 this->pCenters[clabel*3]=cf.center[0]; this->pCenters[clabel*3+1]=cf.center[1]; this->pCenters[clabel*3+2]=cf.center[2]; this->pnum[clabel]=cf.pnum; for(int jj=0;jj<8;jj++) this->pOBBs[clabel*8+jj]=cf.obb[jj]; this->objClass[clabel]=cf.cls;//0 or 1 背景 this->zs[clabel]=cf.zmax;//-cf.zmin; if(clabel!=ii) { for(int pid=0;pidclusterNum++; clabel++; } else { for(int 
pid=0;pidclusterNum; memcpy(pFeas+3,this->pCenters,sizeof(float)*3*256);//重心 for(int ii=0;ii<256;ii++)//最大容纳256个障碍物 { pFeas[257*3+ii*3]=this->pnum[ii]; pFeas[257*3+ii*3+1]=this->objClass[ii]; pFeas[257*3+ii*3+2]=this->zs[ii]; } for(int ii=0;ii<256;ii++) { for(int jj=0;jj<4;jj++) { pFeas[257*3+256*3+(ii*4+jj)*3]=this->pOBBs[ii*8+jj*2]; pFeas[257*3+256*3+(ii*4+jj)*3+1]=this->pOBBs[ii*8+jj*2+1]; } } } int FilterGndForPos(float* outPoints,float*inPoints,int inNum) { int outNum=0; float dx=2; float dy=2; int x_len = 40; int y_len = 10; int nx=x_len/dx; int ny=2*y_len/dy; float offx=0,offy=-10; float THR=0.2; float *imgMinZ=(float*)calloc(nx*ny,sizeof(float)); float *imgMaxZ=(float*)calloc(nx*ny,sizeof(float)); float *imgSumZ=(float*)calloc(nx*ny,sizeof(float)); float *imgMeanZ=(float*)calloc(nx*ny,sizeof(float)); int *imgNumZ=(int*)calloc(nx*ny,sizeof(int)); int *idtemp = (int*)calloc(inNum,sizeof(int)); for(int ii=0;ii-y_len)&&(inPoints[pid*4+1]imgMaxZ[idx+idy*nx]){ imgMaxZ[idx+idy*nx]=inPoints[pid*4+2]; } } } for(int pid=0;pid0){ imgMeanZ[idtemp[pid]] = float(imgSumZ[idtemp[pid]]/imgNumZ[idtemp[pid]]); if((imgMaxZ[idtemp[pid]]-imgMinZ[idtemp[pid]])3){ outPoints[outNum*4]=inPoints[pid*4]; outPoints[outNum*4+1]=inPoints[pid*4+1]; outPoints[outNum*4+2]=inPoints[pid*4+2]; outPoints[outNum*4+3]=inPoints[pid*4+3]; outNum++; } } } free(imgMinZ); free(imgMaxZ); free(imgSumZ); free(imgMeanZ); free(imgNumZ); free(idtemp); return outNum; } int CalNomarls(float *nvects, float *fPoints,int pointNum,float fSearchRadius) { // 转换点云到pcl的格式 pcl::PointCloud::Ptr cloud(new pcl::PointCloud); cloud->width=pointNum; cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=fPoints[pid*4+2]; } //调用pcl的kdtree生成方法 pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); //遍历每个点,获得候选地面点 int nNum=0; unsigned char* pLabel = (unsigned 
char*)calloc(pointNum,sizeof(unsigned char)); for(int pid=0;pidpoints[pid].x; searchPoint.y=cloud->points[pid].y; searchPoint.z=cloud->points[pid].z; if(GetNeiborPCA(npca,cloud,kdtree,searchPoint,fSearchRadius)>0) { for(int ii=0;ii0.4) && (npca.eigenValuesPCA[0]<0.01)) { if((npca.eigenVectorsPCA(2,0)>0.9)|(npca.eigenVectorsPCA(2,0)<-0.9)) { if(npca.eigenVectorsPCA(2,0)>0) { nvects[nNum*6]=npca.eigenVectorsPCA(0,0); nvects[nNum*6+1]=npca.eigenVectorsPCA(1,0); nvects[nNum*6+2]=npca.eigenVectorsPCA(2,0); nvects[nNum*6+3]=searchPoint.x; nvects[nNum*6+4]=searchPoint.y; nvects[nNum*6+5]=searchPoint.z; } else { nvects[nNum*6]=-npca.eigenVectorsPCA(0,0); nvects[nNum*6+1]=-npca.eigenVectorsPCA(1,0); nvects[nNum*6+2]=-npca.eigenVectorsPCA(2,0); nvects[nNum*6+3]=searchPoint.x; nvects[nNum*6+4]=searchPoint.y; nvects[nNum*6+5]=searchPoint.z; } nNum++; } } } } } free(pLabel); return nNum; } int CalGndPos(float *gnd, float *fPoints,int pointNum,float fSearchRadius) { // 初始化gnd //float gnd[6]={0};//(vx,vy,vz,x,y,z) for(int ii=0;ii<6;ii++) { gnd[ii]=0; } // 转换点云到pcl的格式 pcl::PointCloud::Ptr cloud(new pcl::PointCloud); //去除异常点 float height = 0; for(int cur = 0;curheight_mean+0.5||fPoints[cur*4+2]width=pointNum; cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { if(fPoints[pid*4+2]<=height_mean+0.5&&fPoints[pid*4+2]>=height_mean-0.5){ cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=fPoints[pid*4+2]; } } //调用pcl的kdtree生成方法 pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); //遍历每个点,获得候选地面点 // float *nvects=(float*)calloc(1000*6,sizeof(float));//最多支持1000个点 int nNum=0; unsigned char* pLabel = (unsigned char*)calloc(pointNum,sizeof(unsigned char)); for(int pid=0;pidpoints[pid].x; searchPoint.y=cloud->points[pid].y; searchPoint.z=cloud->points[pid].z; if(GetNeiborPCA(npca,cloud,kdtree,searchPoint,fSearchRadius)>0) { for(int ii=0;ii0.2) && (npca.eigenValuesPCA[0]<0.1) && 
searchPoint.x>29.9 && searchPoint.x<40)//&&searchPoint.x>19.9 { if((npca.eigenVectorsPCA(2,0)>0.9)|(npca.eigenVectorsPCA(2,0)<-0.9)) { if(npca.eigenVectorsPCA(2,0)>0) { gnd[0]+=npca.eigenVectorsPCA(0,0); gnd[1]+=npca.eigenVectorsPCA(1,0); gnd[2]+=npca.eigenVectorsPCA(2,0); gnd[3]+=searchPoint.x; gnd[4]+=searchPoint.y; gnd[5]+=searchPoint.z; } else { gnd[0]+=-npca.eigenVectorsPCA(0,0); gnd[1]+=-npca.eigenVectorsPCA(1,0); gnd[2]+=-npca.eigenVectorsPCA(2,0); gnd[3]+=searchPoint.x; gnd[4]+=searchPoint.y; gnd[5]+=searchPoint.z; } nNum++; } } } } } free(pLabel); // 估计地面的高度和法相量 if(nNum>0) { for(int ii=0;ii<6;ii++) { gnd[ii]/=nNum; } // gnd[0] *=5.0; } return nNum; } int GetNeiborPCA(SNeiborPCA &npca, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN kdtree, pcl::PointXYZ searchPoint, float fSearchRadius) { std::vector k_dis; pcl::PointCloud::Ptr subCloud(new pcl::PointCloud); if(kdtree.radiusSearch(searchPoint,fSearchRadius,npca.neibors,k_dis)>5) { subCloud->width=npca.neibors.size(); subCloud->height=1; subCloud->points.resize(subCloud->width*subCloud->height); for (int pid=0;pidpoints.size();pid++) { subCloud->points[pid].x=cloud->points[npca.neibors[pid]].x; subCloud->points[pid].y=cloud->points[npca.neibors[pid]].y; subCloud->points[pid].z=cloud->points[npca.neibors[pid]].z; } Eigen::Vector4f pcaCentroid; pcl::compute3DCentroid(*subCloud, pcaCentroid); Eigen::Matrix3f covariance; pcl::computeCovarianceMatrixNormalized(*subCloud, pcaCentroid, covariance); Eigen::SelfAdjointEigenSolver eigen_solver(covariance, Eigen::ComputeEigenvectors); npca.eigenVectorsPCA = eigen_solver.eigenvectors(); npca.eigenValuesPCA = eigen_solver.eigenvalues(); float vsum=npca.eigenValuesPCA(0)+npca.eigenValuesPCA(1)+npca.eigenValuesPCA(2); npca.eigenValuesPCA(0)=npca.eigenValuesPCA(0)/(vsum+0.000001); npca.eigenValuesPCA(1)=npca.eigenValuesPCA(1)/(vsum+0.000001); npca.eigenValuesPCA(2)=npca.eigenValuesPCA(2)/(vsum+0.000001); } else { npca.neibors.clear(); } return npca.neibors.size(); } int 
GetRTMatrix(float *RTM, float *v0, float *v1) // v0 gndpos v1:001垂直向上 { // 归一化 float nv0=sqrt(v0[0]*v0[0]+v0[1]*v0[1]+v0[2]*v0[2]); v0[0]/=(nv0+0.000001); v0[1]/=(nv0+0.000001); v0[2]/=(nv0+0.000001); float nv1=sqrt(v1[0]*v1[0]+v1[1]*v1[1]+v1[2]*v1[2]); v1[0]/=(nv1+0.000001); v1[1]/=(nv1+0.000001); v1[2]/=(nv1+0.000001); // 叉乘 // ^ v2 ^ v0 // | / // | / // |-----> v1 // float v2[3]; // 叉乘的向量代表了v0旋转到v1的旋转轴,因为叉乘结果垂直于v0、v1构成的平面 v2[0]=v0[1]*v1[2]-v0[2]*v1[1]; v2[1]=v0[2]*v1[0]-v0[0]*v1[2]; v2[2]=v0[0]*v1[1]-v0[1]*v1[0]; // 正余弦 float cosAng=0,sinAng=0; cosAng=v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2]; //a.b = |a||b|cos theta sinAng=sqrt(1-cosAng*cosAng); // 计算旋转矩阵 RTM[0]=v2[0]*v2[0]*(1-cosAng)+cosAng; RTM[4]=v2[1]*v2[1]*(1-cosAng)+cosAng; RTM[8]=v2[2]*v2[2]*(1-cosAng)+cosAng; RTM[1]=RTM[3]=v2[0]*v2[1]*(1-cosAng); RTM[2]=RTM[6]=v2[0]*v2[2]*(1-cosAng); RTM[5]=RTM[7]=v2[1]*v2[2]*(1-cosAng); RTM[1]+=(v2[2])*sinAng; RTM[2]+=(-v2[1])*sinAng; RTM[3]+=(-v2[2])*sinAng; RTM[5]+=(v2[0])*sinAng; RTM[6]+=(v2[1])*sinAng; RTM[7]+=(-v2[0])*sinAng; return 0; } int PCSeg::CorrectPoints(float *fPoints,int pointNum,float *gndPos) { float RTM[9]; float gndHeight=0; float znorm[3]={0,0,1}; float tmp[3]; GetRTMatrix(RTM,gndPos,znorm); // RTM 由当前地面法向量gnd,将平面转到以(0,0,1)为法向量的平面 sy gndHeight = RTM[2]*gndPos[3]+RTM[5]*gndPos[4]+RTM[8]*gndPos[5]; for(int pid=0;pid::Ptr cloud(new pcl::PointCloud); cloud->width=pointNum; cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=fPoints[pid*4+2]; } //调用pcl的kdtree生成方法 pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); SegBG(pLabel,cloud,kdtree,0.5); //背景label=1 前景0 SegObjects(pLabel,cloud,kdtree,0.7); FreeSeg(fPoints,pLabel,pointNum); return 0; } int SegBG(int *pLabel, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius) { //从较高的一些点开始从上往下增长,这样能找到一些类似树木、建筑物这样的高大背景 // clock_t t0,t1,tsum=0; 
/* NOTE(review): same extraction corruption (stripped "<...>" spans, e.g. the seed loop and
   the neighbour-acceptance conditions lost their comparisons). Restore from upstream before
   editing. Visible here: (a) the body of SegBG(): points with z>4 are labelled background,
   those with 4<z<6 seed a region growing that expands the background label through radius
   neighbours above z=0.2 (a wider radius is used beyond x=44.8, presumably because the point
   density drops with range — TODO confirm); (b) FindACluster(): grows one cluster from seedId
   under pLabel=labelId while accumulating bbox extents, point count and mean z into an
   SClusterFeature; (c) the head of SegObjects(). */
int pnum=cloud->points.size(); pcl::PointXYZ searchPoint; // Initialize seed points std::vector seeds; for (int pid=0;pidpoints[pid].z>4) { pLabel[pid]=1; if (cloud->points[pid].z<6) { seeds.push_back(pid); } } else { pLabel[pid]=0; } } // Region growing while(seeds.size()>0) { int sid = seeds[seeds.size()-1]; seeds.pop_back(); std::vector k_dis; std::vector k_inds; // t0=clock(); if (cloud->points[sid].x<44.8) kdtree.radiusSearch(sid,fSearchRadius,k_inds,k_dis); else kdtree.radiusSearch(sid,1.5*fSearchRadius,k_inds,k_dis); for(int ii=0;iipoints[k_inds[ii]].z>0.2)// points within 60 cm of the ground do not take part in background segmentation, to keep wrong ground points from causing over-segmentation { seeds.push_back(k_inds[ii]); } } } } return 0; } SClusterFeature FindACluster(int *pLabel, int seedId, int labelId, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius, float thrHeight) { // Initialize the seed std::vector seeds; seeds.push_back(seedId); pLabel[seedId]=labelId; // int cnum=1;// number of points in the current cluster SClusterFeature cf; cf.pnum=1; cf.xmax=-2000; cf.xmin=2000; cf.ymax=-2000; cf.ymin=2000; cf.zmax=-2000; cf.zmin=2000; cf.zmean=0; // Region growing while(seeds.size()>0) // each seed looks up its neighbours, which are then pushed as new seeds sy { int sid=seeds[seeds.size()-1]; seeds.pop_back(); pcl::PointXYZ searchPoint; searchPoint.x=cloud->points[sid].x; searchPoint.y=cloud->points[sid].y; searchPoint.z=cloud->points[sid].z; // Feature statistics { if(searchPoint.x>cf.xmax) cf.xmax=searchPoint.x; if(searchPoint.xcf.ymax) cf.ymax=searchPoint.y; if(searchPoint.ycf.zmax) cf.zmax=searchPoint.z; if(searchPoint.z k_dis; std::vector k_inds; if(searchPoint.x<44.8) kdtree.radiusSearch(searchPoint,fSearchRadius,k_inds,k_dis); else kdtree.radiusSearch(searchPoint,2*fSearchRadius,k_inds,k_dis); for(int ii=0;iipoints[k_inds[ii]].z>thrHeight)// points within 60 cm of the ground do not take part in segmentation { seeds.push_back(k_inds[ii]); } } } } cf.zmean/=(cf.pnum+0.000001); return cf; } int SegObjects(int *pLabel, pcl::PointCloud::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius) { int pnum=cloud->points.size(); int labelId=10;// object labels start from 10 // for every non-background point, if high enough grow one cluster and assign it a label (>10) for(int
/* NOTE(review): same extraction corruption (stripped "<...>" spans; several loop headers and
   the cluster-member loops are truncated). Restore from upstream before editing.
   Visible here: (a) the body of SegObjects(): for each unlabelled point above a height
   threshold, grow a cluster and classify it as background when its bounding box is too large,
   long-but-low, small-but-tall, too sparse, or too high/low on average (isBg codes 2..6);
   (b) a re-segmentation pass that re-grows existing object labels through a temporary label
   array; (c) ExpandObjects(): dilates object labels onto nearby unlabelled points above
   z=0.2; (d) the head of ExpandBG(). */
pid=0;pidpoints[pid].z>0.4)// height threshold { SClusterFeature cf = FindACluster(pLabel,pid,labelId,cloud,kdtree,fSearchRadius,0); int isBg=0; // cluster classification float dx=cf.xmax-cf.xmin; float dy=cf.ymax-cf.ymin; float dz=cf.zmax-cf.zmin; float cx=10000; for(int ii=0;iicloud->points[pid].x) { cx=cloud->points[pid].x; } } if((dx>15)||(dy>15)||((dx>10)&&(dy>10)))// too large { isBg=2; } else if(((dx>6)||(dy>6))&&(cf.zmean < 1.5))// long but too low { isBg = 3;//1;//2; } else if(((dx<1.5)&&(dy<1.5))&&(cf.zmax>2.5))// small but too tall { isBg = 4; } else if(cf.pnum<5 || (cf.pnum<10 && cx<50)) // too few points { isBg=5; } else if((cf.zmean>3)||(cf.zmean<0.3))// too high or too low { isBg = 6;//1; } if(isBg>0) { for(int ii=0;ii::Ptr cloud, pcl::KdTreeFLANN &kdtree, float fSearchRadius) { int pnum=cloud->points.size(); int *tmpLabel = (int*)calloc(pnum,sizeof(int)); for(int pid=0;pid10) for(int pid=0;pid=10) { FindACluster(tmpLabel,pid,pLabel[pid],cloud,kdtree,fSearchRadius,0); } } for(int pid=0;pid=10)) { pLabel[pid]=tmpLabel[pid]; } } free(tmpLabel); return 0; } int ExpandObjects(int *pLabel, float* fPoints, int pointNum, float fSearchRadius) { int *tmpLabel = (int*)calloc(pointNum,sizeof(int)); for(int pid=0;pid::Ptr cloud(new pcl::PointCloud); cloud->width=pointNum; cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=fPoints[pid*4+2]; } // Build the PCL kd-tree pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); std::vector k_dis; std::vector k_inds; for(int pid=0;pidpoints.size();pid++) { if((pLabel[pid]>=10)&&(tmpLabel[pid]==0)) { kdtree.radiusSearch(pid,fSearchRadius,k_inds,k_dis); for(int ii=0;iipoints[k_inds[ii]].z>0.2)) { pLabel[k_inds[ii]]=pLabel[pid]; } } } } free(tmpLabel); return 0; } int ExpandBG(int *pLabel, float* fPoints, int pointNum, float fSearchRadius) { int *tmpLabel = (int*)calloc(pointNum,sizeof(int)); for(int pid=0;pid0)&&(pLabel[pid]<10)) { tmpLabel[pid]=1; } } //
/* NOTE(review): same extraction corruption (stripped "<...>" spans; CalFreeRegion/FreeSeg lost
   most of their loop headers and angle/index arithmetic). Restore from upstream before editing.
   Visible here: (a) the tail of ExpandBG(): dilates background labels onto nearby points above
   z=0.4; (b) CalFreeRegion()/FreeSeg(): build a polar free-space distance profile
   (FREE_ANG_NUM bins) and use it to label points; (c) a grid-based ground extraction: each
   point is binned into a GND_IMG_NX1 x GND_IMG_NY1 cell, the per-cell minimum height is
   tracked, points within 0.5 of the cell minimum are marked ground, followed by absolute
   height limits, a plane fit with outlier rejection (z above mean + 0.1 rejected), and a
   final count. */
Convert the point cloud to PCL format pcl::PointCloud::Ptr cloud(new pcl::PointCloud); cloud->width=pointNum; cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=fPoints[pid*4+2]; } // Build the PCL kd-tree pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); std::vector k_dis; std::vector k_inds; for(int pid=0;pidpoints.size();pid++) { if((tmpLabel[pid]==1)) { kdtree.radiusSearch(pid,fSearchRadius,k_inds,k_dis); for(int ii=0;iipoints[k_inds[ii]].z>0.4)) { pLabel[k_inds[ii]]=pLabel[pid]; } } } } free(tmpLabel); return 0; } int CalFreeRegion(float *pFreeDis, float *fPoints,int *pLabel,int pointNum) { int da=FREE_DELTA_ANG; int thetaId; float dis; for(int ii=0;iidis) { pFreeDis[thetaId]=dis; } } } return 0; } int FreeSeg(float *fPoints,int *pLabel,int pointNum) { float *pFreeDis = (float*)calloc(FREE_ANG_NUM,sizeof(float)); int thetaId; float dis; CalFreeRegion(pFreeDis,fPoints,pLabel,pointNum); for(int pid=0;pid=GND_IMG_NX1 || iy<0 || iy>=GND_IMG_NY1) { tmpLabel1[pid]=-1; continue; } int iid=ix+iy*GND_IMG_NX1; tmpLabel1[pid]=iid; if(pGndImg1[iid]>fPoints[pid*4+2])// track the minimum height per cell { pGndImg1[iid]=fPoints[pid*4+2]; } } int pnum=0; for(int pid=0;pid=0) { if(pGndImg1[tmpLabel1[pid]]+0.5>fPoints[pid*4+2])// relative height limit: mark as ground (1) if the point is within 0.5 of its grid cell's minimum height { {pLabel[pid]=1; pnum++;} } } } free(pGndImg1); free(tmpLabel1); // Absolute height limit for(int pid=0;pid1)//for all cases: even if the in-cell height difference is below 0.5, an absolute height above 1 is still rejected { pLabel[pid]=0; } else if(fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1]<225)// within 10 m, require height below 0.5 { if(fPoints[pid*4+2]>0.5) { pLabel[pid]=0; } } } else { if(fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1]<400)// within 20 m, force points below 0.2 to be ground { if(fPoints[pid*4+2]<0.2) { pLabel[pid]=1; } } } } // Plane fitting and outlier rejection float zMean=0; gnum=0; for(int pid=0;pidzMean+0.1) pLabel[pid]=0; } } // Count the ground points gnum=0; for(int pid=0;pid::Ptr cloud(new pcl::PointCloud); cloud->width=pointNum;
/* NOTE(review): same extraction corruption (stripped "<...>" spans; most of CalOBB's inner
   comparisons and the histogram logic are truncated). Restore from upstream before editing.
   Visible here: (a) the tail of a cluster-orientation routine: it flattens the cluster to
   z=0, runs 2-D PCA on it, and stores the two principal directions (d0, d1), the centroid
   and the point count into an SClusterFeature; (b) CalOBB(): sweeps 180 candidate headings
   in 0.5-degree steps (coef_as/coef_bs are the per-heading cos/sin), projects the points
   onto each heading, and scores the projected extents — apparently searching for the
   best-fitting oriented bounding box (the exact criterion is lost to the corruption;
   TODO confirm upstream). */
cloud->height=1; cloud->points.resize(cloud->width*cloud->height); for (int pid=0;pidpoints.size();pid++) { cloud->points[pid].x=fPoints[pid*4]; cloud->points[pid].y=fPoints[pid*4+1]; cloud->points[pid].z=0;//fPoints[pid*4+2]; } // Build the PCL kd-tree pcl::KdTreeFLANN kdtree; kdtree.setInputCloud (cloud); // Compute eigenvectors and eigenvalues Eigen::Vector4f pcaCentroid; pcl::compute3DCentroid(*cloud, pcaCentroid); Eigen::Matrix3f covariance; pcl::computeCovarianceMatrixNormalized(*cloud, pcaCentroid, covariance); Eigen::SelfAdjointEigenSolver eigen_solver(covariance, Eigen::ComputeEigenvectors); Eigen::Matrix3f eigenVectorsPCA = eigen_solver.eigenvectors(); Eigen::Vector3f eigenValuesPCA = eigen_solver.eigenvalues(); float vsum = eigenValuesPCA(0)+eigenValuesPCA(1)+eigenValuesPCA(2); eigenValuesPCA(0) = eigenValuesPCA(0)/(vsum+0.000001); eigenValuesPCA(1) = eigenValuesPCA(1)/(vsum+0.000001); eigenValuesPCA(2) = eigenValuesPCA(2)/(vsum+0.000001); cf.d0[0]=eigenVectorsPCA(0,2); cf.d0[1]=eigenVectorsPCA(1,2); cf.d0[2]=eigenVectorsPCA(2,2); cf.d1[0]=eigenVectorsPCA(0,1); cf.d1[1]=eigenVectorsPCA(1,1); cf.d1[2]=eigenVectorsPCA(2,1); cf.center[0]=pcaCentroid(0); cf.center[1]=pcaCentroid(1); cf.center[2]=pcaCentroid(2); cf.pnum=pointNum; return cf; } SClusterFeature CalOBB(float *fPoints,int pointNum) { SClusterFeature cf; cf.pnum=pointNum; float *hoff=(float*)calloc(20000,sizeof(float)); int hnum=0; float coef_as[180],coef_bs[180]; for(int ii=0;ii<180;ii++) { coef_as[ii]=cos(ii*0.5/180*FREE_PI); coef_bs[ii]=sin(ii*0.5/180*FREE_PI); } float *rxy=(float*)calloc(pointNum*2,sizeof(float)); float area=-1000;//1000000; int ori=-1; int angid=0; for(int ii=0;ii<180;ii++) { float a_min=10000,a_max=-10000; float b_min=10000,b_max=-10000; for(int pid=0;pidval) a_min=val; if(a_maxval) b_min=val; if(b_maxweights) { weights=hoff[ih]; mh=ih; } } if (mh>0) { if(mh*3weights1) { weights1=hoff[ih]; mh1=ih; } } if(mh1>0) { if(mh1*3area)//(b_max-b_min)*(a_max-a_min)z_max) z_max=fPoints[pid*4+2];
if(fPoints[pid*4+2]15)||(dy>15))// 太大 { cf.cls=1;//bkg } else if((dx>4)&&(dy>4))//太大 { cf.cls=1; } else if((dz<0.5||cf.zmax<1) && (dx>3 || dy>3))//too large { cf.cls=1; } else if(cf.zmax>3 && (dx<0.5 || dy<0.5))//too small { cf.cls=1; } else if(dx<0.5 && dy<0.5)//too small { cf.cls=1; } else if(dz<2&&(dx>6||dy>6||dx/dy>5||dy/dx>5))//too small { //cf.cls=1; } else if((dz<4)&&(dx>3)&&(dy>3))//太大 { //cf.cls=1; } else if(cf.center[0]>45 && (dx>=3 || dy>=3 || dx*dy>2.3))//太da { //cf.cls=1; } else if(dx/dy>5||dy/dx>5) //太窄 { //cf.cls=0; } else if((dz<2.5)&&((dx/dy>3)||(dy/dx>3))) { //cf.cls=0; } else if((dz<2.5)&&(dx>2.5)&&(dy>2.5)) { //cf.cls=1; } free(rxy); free(hoff); return cf; } ================================================ FILE: README.md ================================================ # Livox-Mapping This repository implements an all-in-one and ready-to-use LiDAR-inertial odometry system for Livox LiDAR. The system is developed based on the open-source odometry framework [**LIO-Livox**](https://github.com/Livox-SDK/LIO-Livox) to get the odometry information in our front-end part. Besides, we have added more constraints and features to enhance the mapping performance. At the same time, we provide you one simple lidar-map based localization module in [Livox-Localization](https://github.com/SiyuanHuang95/Livox-Localization). ![Livox_Mapping](images/lio_livox_mapping_down.gif) ## 1. Features We only describe the new features added to our baseline framework [**LIO-Livox**](https://github.com/Livox-SDK/LIO-Livox). For the amazing features developed by LIO-Livox, please check their own repository. ### 1.1 Loop Closure Detection In the LCD part, we use the [**ScanContext**](https://github.com/irapkaist/scancontext) to perform the loop closure detection and then add the loop closure constrains to the optimization solver. With the LCD module, we are able to close the gap and compensate the accumulated translation error of the scan matching in large environments. 
| with Loop Closure | without Loop Closure | | ------------------------------ | ------------------------------------ | | ![with_sc](images/with_sc.gif) | ![without_sc](images/without_sc.gif) | ### 1.2 Ground Constrain To avoid the drifting alongside the Z-axis, the general problem occurred in LOAM (LiDAR Odometry and Mapping) algorithms, we add the the ground constraints ( the plane detection is done by LIO-Livox) to the optimization solver as **[hdl_graph_slam](https://github.com/koide3/hdl_graph_slam)**. | with Ground Constrain | without Ground Constrain | | -------------------------------------- | ------------------------------------------------- | | ![with_ground](images/with_ground.gif) | ![wo_ground](images/without_ground_constrins.gif) | ### 1.3 Map Merging from Multiple Rosbags In practice, we usually collect environment data in multiple times, thus we need one functionality to fuse the map information from multiple rosbags. In this implementation, we use GPS information (optionally) to convert LiDAR scans collected in different time into one coordinate system and then use ScanContext and ICP to find fine matching result. At the same time, we maintain one lifelong optimization vector in our backend to store the previous key-frames. Through this approach, we are able to connect scans in different collections. | ![bags](images/multi_bag.gif) | | -------------------------------------------- | #### 1.4 GPS Factor We also offer the GPS factor to aid the mapping process. This is only one optional choice. Without the GPS information, the whole system could also work. ### 1.5 Compatible with Mapping Editor *interactive_slam* [**interactive_slam**](https://github.com/SMRT-AIST/interactive_slam) is an open-source 3D LiDAR-based mapping framework. We add one functionality to output the mapping result in the format compatible with *interactive_slam*, by this, you can easily edit your mapping result and get one more accurate LiDAR map. 
At the same time, if you have one camera alongside the Livox, we also offer one dummy moving objects removal [function](SC-PGO/utils/moving_object_removal/README.md) with aid of [mmdetection](https://github.com/open-mmlab/mmdetection) and easy LiDAR points projection. | Represented in interactive_slam | Moving Object Removal | | --------------------------------------------- | ----------------------------------------------------- | | ![interact_slam](images/interactive_slam.png) | ![image-20211223215338497](images/object_removal.png) | ## 2. Prerequisites ### 2.1 Dependencies for baseline Our framework is developed based on the open-source framework [**LIO-Livox**](https://github.com/Livox-SDK/LIO-Livox) for the odometry part, [**ScanContext**](https://github.com/irapkaist/scancontext) for the loop closure part and [**interactive_slam**](https://github.com/SMRT-AIST/interactive_slam) for the map-editing part (if needed), so technically, please refer to these repos for detailed information. ### 2.2 General Dependencies - [Ubuntu](http://ubuntu.com) (tested on 16.04 and 18.04) - [ROS](http://wiki.ros.org/ROS/Installation) (tested with Kinetic and Melodic) - [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page) - [Ceres Solver](http://ceres-solver.org/installation.html) - [PCL](http://www.pointclouds.org/downloads/linux.html) - [livox_ros_driver](https://github.com/Livox-SDK/livox_ros_driver) - libsuitesparse ### 2.3 Docker For one quick and convenient test, we are offering you a Docker Image with DockerHub. Please check the DockerHub [Link](https://hub.docker.com/r/siyuanhuang95/livox_slam) for more information. - Pull the docker image. ```shell docker pull siyuanhuang95/livox_slam:release ``` - Execute the docker run command. You can use -v to bind the docker to your local machine as you wish.
```shell docker run --gpus all -it -p 3316:22 --device=/dev/dri --group-add video --volume=/tmp/.X11-unix:/tmp/.X11-unix --env="DISPLAY=$DISPLAY" siyuanhuang95/livox_slam:release /bin/bash ``` - To allow the RVIZ in Docker to run in the host PC. In the local machine: ```shell xhost + ``` ## 3. Build - Clone the repository and catkin_make ```shell cd ~/catkin_ws/src git clone https://github.com/PJLab-ADG/Livox-Mapping.git cd .. catkin_make ``` - Remember to source the livox_ros_driver before build (follow [livox_ros_driver](https://github.com/hku-mars/FAST_LIO#13-livox_ros_driver)) ## 4. Run ### 4.1 Dataset Requirements Before running the mapping functionality, please make sure the sensor data are published to the correct rostopic. - Livox LiDAR: /livox/lidar - IMU: /livox/imu - GPS: /localization/navstate_info ### 4.2 Run Mapping Function - In one terminal, launch the **mapping thread** ```shell # If you don't have the GPS data cd ~/catkin_ws source devel/setup.bash roslaunch livox_mapping livox_mapping.launch save_path:="PATH_TO_SAVE_SLAM_POSE_RESULT" ``` ```shell # If you have the GPS data cd ~/catkin_ws source devel/setup.bash roslaunch livox_mapping livox_mapping.launch useRTK:="true" save_path:="PATH_TO_SAVE_SLAM_POSE_RESULT" ``` - In another terminal, launch the **odometry thread** ```shell cd ~/catkin_ws source devel/setup.bash roslaunch livox_odometry livox_odometry.launch save_path:="PATH_TO_SAVE_ODOM_RESULT_in_txt" ``` - In another terminal, play the rosbag ```shell rosbag play YOUR_ROSBAG.bag ``` ## 4.3 Multiple Bags Merge If you have more rosbags collected in the same area, you can restart the odometry thread at the end of the first odometry thread run and play the next package. - In the terminal for **odometry thread**, use **Ctrl+C** to shut down the **old odometry thread**.
- In the terminal for **odometry thread**, use the command below to start one new odometry thread ```shell roslaunch livox_odometry livox_odometry.launch save_path:="PATH_TO_SAVE_ODOM_RESULT_in_txt" ``` - Play one new rosbag. ## 5. Datasets For ease of use, we are providing several test rosbags collected in one industrial park located in Shanghai. Please be aware that all codes and datasets included in this repository are for academic research purposes only. Other usages are **NOT** encouraged, and it is at your own risk. If you have any concerns including privacy, please contact us by sending an e-mail to huangsiyuan@pjlab.org.cn The dataset can be downloaded through the Baidu Netdisk with: ```shell Link:https://pan.baidu.com/s/17ElBOWiFVr68975FtXY8ZA Password:pjop ``` ## 6. Acknowledgments Thanks to the authors of **[LIO-Livox](https://github.com/Livox-SDK/LIO-Livox),** [**ScanContext**](https://github.com/irapkaist/scancontext),[**Fast-LIO-SLAM**](https://github.com/gisbi-kim/FAST_LIO_SLAM) and [**interactive_slam**](https://github.com/SMRT-AIST/interactive_slam). ## Contact - If you have any questions, please contact us: - Send email to shibotian@pjlab.org.cn [Preferred] or huangsiyuan@pjlab.org.cn. - Report issues on GitHub [Preferred]. - For other cooperation possibilities, please contact shibotian@pjlab.org.cn [Preferred].
================================================ FILE: SC-PGO/.gitignore ================================================ PCD/ ================================================ FILE: SC-PGO/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.8.3) project(livox_mapping) set(CMAKE_BUILD_TYPE "Release") # set(CMAKE_CXX_FLAGS "-std=c++11") set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -g") find_package(catkin REQUIRED COMPONENTS geometry_msgs nav_msgs sensor_msgs roscpp rospy rosbag std_msgs ad_localization_msgs image_transport cv_bridge tf tf_conversions ) #find_package(Eigen3 REQUIRED) find_package(PCL REQUIRED) find_package(OpenCV REQUIRED) find_package(Ceres REQUIRED) find_package(Boost REQUIRED COMPONENTS timer) find_package(OpenMP REQUIRED) find_package(GTSAM REQUIRED QUIET) include_directories( include ${catkin_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS} ${CERES_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${GTSAM_INCLUDE_DIR} ) catkin_package( CATKIN_DEPENDS geometry_msgs nav_msgs roscpp rospy std_msgs ad_localization_msgs DEPENDS EIGEN3 PCL INCLUDE_DIRS include ) add_executable(livox_mapping src/laserPosegraphOptimization.cpp include/scancontext/Scancontext.cpp ) target_compile_options(livox_mapping PRIVATE ${OpenMP_CXX_FLAGS} ) target_link_libraries(livox_mapping ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${CERES_LIBRARIES} ${OpenMP_CXX_FLAGS} gtsam ) ================================================ FILE: SC-PGO/README.md ================================================ # SC-A-LOAM ## What is SC-A-LOAM? - A real-time LiDAR SLAM package that integrates A-LOAM and ScanContext. - **A-LOAM** for odometry (i.e., consecutive motion estimation) - **ScanContext** for coarse global localization that can deal with big drifts (i.e., place recognition as kidnapped robot problem without initial pose) - and iSAM2 of GTSAM is used for pose-graph optimization. - This package aims to show ScanContext's handy applicability. 
- The only things a user should do is just to include `Scancontext.h`, call `makeAndSaveScancontextAndKeys` and `detectLoopClosureID`. ## Features 1. A strong place recognition and loop closing - We integrated ScanContext as a loop detector into A-LOAM, and ISAM2-based pose-graph optimization is followed. (see https://youtu.be/okML_zNadhY?t=313 to enjoy the drift-closing moment) 2. A modular implementation - The only difference from A-LOAM is the addition of the `laserPosegraphOptimization.cpp` file. In the new file, we subscribe the point cloud topic and odometry topic (as a result of A-LOAM, published from `laserMapping.cpp`). That is, our implementation is generic to any front-end odometry methods. Thus, our pose-graph optimization module (i.e., `laserPosegraphOptimization.cpp`) can easily be integrated with any odometry algorithms such as non-LOAM family or even other sensors (e.g., visual odometry). -

3. (optional) Altitude stabilization using consumer-level GPS - To make a result more trustworthy, we support GPS (consumer-level price, such as U-Blox EVK-7P)-based altitude stabilization. The LOAM family of methods is known to be susceptible to z-errors outdoors. We used the robust loss for only the altitude term. For the details, see the variable `robustGPSNoise` in the `laserPosegraphOptimization.cpp` file. ## Prerequisites (dependencies) - We mainly depend on ROS, Ceres (for A-LOAM), and GTSAM (for pose-graph optimization). - For details on installing the prerequisites, please follow the A-LOAM and LIO-SAM repositories. - The below examples are done under ROS melodic (ubuntu 18) and GTSAM version 4.x. ## How to use? - First, install the abovementioned dependencies, and follow the lines below. ``` mkdir -p ~/catkin_scaloam_ws/src cd ~/catkin_scaloam_ws/src git clone https://github.com/gisbi-kim/SC-A-LOAM.git cd ../ catkin_make source ~/catkin_scaloam_ws/devel/setup.bash roslaunch aloam_velodyne aloam_mulran.launch # for MulRan dataset setting ``` ## Example Results ### Riverside 01, MulRan dataset - The MulRan dataset provides lidar scans (Ouster OS1-64, horizontally mounted, 10Hz) and consumer level gps (U-Blox EVK-7P, 4Hz) data. - About how to use (publish) the data: see here https://github.com/irapkaist/file_player_mulran - example videos on Riverside 01 sequence. 1. with consumer level GPS-based altitude stabilization: https://youtu.be/FwAVX5TVm04 2. without the z stabilization: https://youtu.be/okML_zNadhY - example result:

### KITTI 05 - For KITTI (HDL-64 sensor), run using the command ``` roslaunch aloam_velodyne aloam_velodyne_HDL_64.launch # for KITTI dataset setting ``` - To publish KITTI scans, you can use mini-kitti publisher, a simple python script: https://github.com/gisbi-kim/mini-kitti-publisher - example video (no GPS used here): https://youtu.be/hk3Xx8SKkv4 - example result:

### Indoor - ScanContext also works at indoor environments (use smaller sc_max_radius value). - example video: https://youtu.be/Uv6_BRmxJho - example result:

### For Livox LiDAR - Scan Context also works for Livox LiDAR data - In this example, Scan Context is integrated with FAST-LIO (https://github.com/hku-mars/FAST_LIO). - Note: No additional integration effort is required. A user just run seperately FAST-LIO node and SC-A-LOAM's posegraphoptimization.cpp node! - example video (tutoial and results): https://youtu.be/Fw9S6D6HozA - example result:

### For Navtech Radar - Scan Context also works for Navtech Radar data! - For the details, please see - https://github.com/gisbi-kim/navtech-radar-slam - used the pose-graph optimization node of this repository (SC-A-LOAM) - [example video](https://www.youtube.com/watch?v=avtIQ8fesgU&t=128s) ## Utilities ### Data saver and Map construction - Similar to the [SC-LIO-SAM's saver utility](https://github.com/gisbi-kim/SC-LIO-SAM#applications), we support pose and scan saver per keyframes. Using these saved data, the map (within ROI) can be constructed offline. See the `utils/python/makeMergedMap.py` and [this tutorial](https://youtu.be/jmR3DH_A4Co). - Below is the example results of MulRan dataset KAIST 03's merged map, visualized using CloudCompare ([download the map data here](https://www.dropbox.com/sh/96jrpx3x6hh316j/AACb07kGbocnQWMIpksmU6MQa?dl=0)).

- A user also can remove dynamic points using these saved keyframe poses and scans. See [this tutorial](https://www.youtube.com/watch?v=UiYYrPMcIRU) and our [Removert project](https://github.com/irapkaist/removert). ## Acknowledgements - Thanks to LOAM, A-LOAM, and LIO-SAM code authors. The major codes in this repository are borrowed from their efforts. ## Maintainer - please contact me through `paulgkim@kaist.ac.kr` ## TODO - Delayed RS loop closings - SLAM with multi-session localization - More examples on other datasets (KITTI, complex urban dataset, etc.) ================================================ FILE: SC-PGO/include/livox_mapping/common.h ================================================ // This is an advanced implementation of the algorithm described in the following paper: // J. Zhang and S. Singh. LOAM: Lidar Odometry and Mapping in Real-time. // Robotics: Science and Systems Conference (RSS). Berkeley, CA, July 2014. // Modifier: Tong Qin qintonguav@gmail.com // Shaozu Cao saozu.cao@connect.ust.hk // Copyright 2013, Ji Zhang, Carnegie Mellon University // Further contributions copyright (c) 2016, Southwest Research Institute // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. 
/* NOTE(review): dump of SC-PGO/include/livox_mapping/common.h and tic_toc.h, collapsed onto
   long physical lines; the "#include" targets were stripped by the same "<...>" filter
   (common.h presumably included <cmath> and <pcl/point_types.h>; tic_toc.h <ctime>, <cstdlib>
   and <chrono> — TODO confirm upstream). Visible: the tail of a BSD license header; common.h
   declares PointType = pcl::PointXYZI, rad2deg/deg2rad helpers and a Pose6D x/y/z/roll/pitch/yaw
   struct; tic_toc.h declares TicToc (tic()/toc() wall-clock stopwatch returning milliseconds
   via std::chrono::system_clock) and TicTocV2 (same stopwatch, but toc() prints the elapsed
   time with a task label when constructed with _disp=true). */
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #pragma once #include #include typedef pcl::PointXYZI PointType; inline double rad2deg(double radians) { return radians * 180.0 / M_PI; } inline double deg2rad(double degrees) { return degrees * M_PI / 180.0; } struct Pose6D { double x; double y; double z; double roll; double pitch; double yaw; }; ================================================ FILE: SC-PGO/include/livox_mapping/tic_toc.h ================================================ // Author: Tong Qin qintonguav@gmail.com // Shaozu Cao saozu.cao@connect.ust.hk #pragma once #include #include #include class TicToc { public: TicToc() { tic(); } void tic() { start = std::chrono::system_clock::now(); } double toc() { end = std::chrono::system_clock::now(); std::chrono::duration elapsed_seconds = end - start; return elapsed_seconds.count() * 1000; } private: std::chrono::time_point start, end; }; class TicTocV2 { public: TicTocV2() { tic(); } TicTocV2( bool _disp ) { disp_ = _disp; tic(); } void tic() { start = std::chrono::system_clock::now(); } void toc( std::string _about_task ) { end = std::chrono::system_clock::now(); std::chrono::duration elapsed_seconds = end - start; double elapsed_ms = elapsed_seconds.count() * 1000; if( disp_ )
{ std::cout.precision(3); // 10 for sec, 3 for ms std::cout << _about_task << ": " << elapsed_ms << " msec." << std::endl; } } private: std::chrono::time_point start, end; bool disp_ = false; }; ================================================ FILE: SC-PGO/include/scancontext/KDTreeVectorOfVectorsAdaptor.h ================================================ /*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2011-16 Jose Luis Blanco (joseluisblancoc@gmail.com). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* NOTE(review): dump of SC-PGO/include/scancontext/KDTreeVectorOfVectorsAdaptor.h plus the
   beginning of Scancontext.cpp, collapsed onto long physical lines; the "<...>" filter
   stripped all template parameter lists (the adaptor's template header, its typedefs, and
   Scancontext's std::vector element types), so the text does not compile as-is — restore
   from upstream (the adaptor is the stock nanoflann example file) before editing.
   Visible here: (a) KDTreeVectorOfVectorsAdaptor — wraps a vector-of-vectors as a nanoflann
   KDTreeSingleIndexAdaptor without copying the storage: the ctor checks dimensionality,
   builds the index, query() runs a KNN search, and the kdtree_get_* members implement the
   nanoflann dataset interface; (b) Scancontext.cpp free helpers — coreImportTest,
   rad2deg/deg2rad, xy2theta (quadrant-aware atan in degrees; NOTE(review): no return on the
   impossible fall-through path and bitwise '&' used for logical AND — matches upstream, but
   worth confirming), circshift (column-wise circular shift), eig2stdvec; (c) SCManager
   methods distDirectSC (mean cosine distance over non-empty column pairs) and
   fastAlignUsingVkey (brute-force best column shift); distanceBtnScanContext is cut off at
   the end of this chunk. */
*************************************************************************/ #pragma once #include "scancontext/nanoflann.hpp" #include // ===== This example shows how to use nanoflann with these types of containers: ======= //typedef std::vector > my_vector_of_vectors_t; //typedef std::vector my_vector_of_vectors_t; // This requires #include // ===================================================================================== /** A simple vector-of-vectors adaptor for nanoflann, without duplicating the storage. * The i'th vector represents a point in the state space. * * \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations. * \tparam num_t The type of the point coordinates (typically, double or float). * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. * \tparam IndexType The type for indices in the KD-tree index (typically, size_t of int) */ template struct KDTreeVectorOfVectorsAdaptor { typedef KDTreeVectorOfVectorsAdaptor self_t; typedef typename Distance::template traits::distance_t metric_t; typedef nanoflann::KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType> index_t; index_t* index; //! The kd-tree index for the user to call its methods as usual with any other FLANN index.
/// Constructor: takes a const ref to the vector of vectors object with the data points KDTreeVectorOfVectorsAdaptor(const size_t /* dimensionality */, const VectorOfVectorsType &mat, const int leaf_max_size = 10) : m_data(mat) { assert(mat.size() != 0 && mat[0].size() != 0); const size_t dims = mat[0].size(); if (DIM>0 && static_cast(dims) != DIM) throw std::runtime_error("Data set dimensionality does not match the 'DIM' template argument"); index = new index_t( static_cast(dims), *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) ); index->buildIndex(); } ~KDTreeVectorOfVectorsAdaptor() { delete index; } const VectorOfVectorsType &m_data; /** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]). * Note that this is a short-cut method for index->findNeighbors(). * The user can also call index->... methods as desired. * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. */ inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const { nanoflann::KNNResultSet resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); } /** @name Interface expected by KDTreeSingleIndexAdaptor * @{ */ const self_t & derived() const { return *this; } self_t & derived() { return *this; } // Must return the number of data points inline size_t kdtree_get_point_count() const { return m_data.size(); } // Returns the dim'th component of the idx'th point in the class: inline num_t kdtree_get_pt(const size_t idx, const size_t dim) const { return m_data[idx][dim]; } // Optional bounding-box computation: return false to default to a standard bbox computation loop. // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
// Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) template bool kdtree_get_bbox(BBOX & /*bb*/) const { return false; } /** @} */ }; // end of KDTreeVectorOfVectorsAdaptor ================================================ FILE: SC-PGO/include/scancontext/Scancontext.cpp ================================================ #include "scancontext/Scancontext.h" // namespace SC2 // { void coreImportTest (void) { cout << "scancontext lib is successfully imported." << endl; } // coreImportTest float rad2deg(float radians) { return radians * 180.0 / M_PI; } float deg2rad(float degrees) { return degrees * M_PI / 180.0; } float xy2theta( const float & _x, const float & _y ) { if ( (_x >= 0) & (_y >= 0)) return (180/M_PI) * atan(_y / _x); if ( (_x < 0) & (_y >= 0)) return 180 - ( (180/M_PI) * atan(_y / (-_x)) ); if ( (_x < 0) & (_y < 0)) return 180 + ( (180/M_PI) * atan(_y / _x) ); if ( (_x >= 0) & (_y < 0)) return 360 - ( (180/M_PI) * atan((-_y) / _x) ); } // xy2theta MatrixXd circshift( MatrixXd &_mat, int _num_shift ) { // shift columns to right direction assert(_num_shift >= 0); if( _num_shift == 0 ) { MatrixXd shifted_mat( _mat ); return shifted_mat; // Early return } MatrixXd shifted_mat = MatrixXd::Zero( _mat.rows(), _mat.cols() ); for ( int col_idx = 0; col_idx < _mat.cols(); col_idx++ ) { int new_location = (col_idx + _num_shift) % _mat.cols(); shifted_mat.col(new_location) = _mat.col(col_idx); } return shifted_mat; } // circshift std::vector eig2stdvec( MatrixXd _eigmat ) { std::vector vec( _eigmat.data(), _eigmat.data() + _eigmat.size() ); return vec; } // eig2stdvec double SCManager::distDirectSC ( MatrixXd &_sc1, MatrixXd &_sc2 ) { int num_eff_cols = 0; // i.e., to exclude all-nonzero sector double sum_sector_similarity = 0; for ( int col_idx = 0; col_idx < _sc1.cols(); col_idx++ ) { VectorXd col_sc1 = _sc1.col(col_idx); VectorXd col_sc2 = _sc2.col(col_idx); if( (col_sc1.norm() == 0) | (col_sc2.norm() == 0) ) continue; //
don't count this sector pair. double sector_similarity = col_sc1.dot(col_sc2) / (col_sc1.norm() * col_sc2.norm()); sum_sector_similarity = sum_sector_similarity + sector_similarity; num_eff_cols = num_eff_cols + 1; } double sc_sim = sum_sector_similarity / num_eff_cols; return 1.0 - sc_sim; } // distDirectSC int SCManager::fastAlignUsingVkey( MatrixXd & _vkey1, MatrixXd & _vkey2) { int argmin_vkey_shift = 0; double min_veky_diff_norm = 10000000; for ( int shift_idx = 0; shift_idx < _vkey1.cols(); shift_idx++ ) { MatrixXd vkey2_shifted = circshift(_vkey2, shift_idx); MatrixXd vkey_diff = _vkey1 - vkey2_shifted; double cur_diff_norm = vkey_diff.norm(); if( cur_diff_norm < min_veky_diff_norm ) { argmin_vkey_shift = shift_idx; min_veky_diff_norm = cur_diff_norm; } } return argmin_vkey_shift; } // fastAlignUsingVkey std::pair SCManager::distanceBtnScanContext( MatrixXd &_sc1, MatrixXd &_sc2 ) { // 1. fast align using variant key (not in original IROS18) MatrixXd vkey_sc1 = makeSectorkeyFromScancontext( _sc1 ); MatrixXd vkey_sc2 = makeSectorkeyFromScancontext( _sc2 ); int argmin_vkey_shift = fastAlignUsingVkey( vkey_sc1, vkey_sc2 ); const int SEARCH_RADIUS = round( 0.5 * SEARCH_RATIO * _sc1.cols() ); // a half of search range std::vector shift_idx_search_space { argmin_vkey_shift }; for ( int ii = 1; ii < SEARCH_RADIUS + 1; ii++ ) { shift_idx_search_space.push_back( (argmin_vkey_shift + ii + _sc1.cols()) % _sc1.cols() ); shift_idx_search_space.push_back( (argmin_vkey_shift - ii + _sc1.cols()) % _sc1.cols() ); } std::sort(shift_idx_search_space.begin(), shift_idx_search_space.end()); // 2.
fast columnwise diff int argmin_shift = 0; double min_sc_dist = 10000000; for ( int num_shift: shift_idx_search_space ) { MatrixXd sc2_shifted = circshift(_sc2, num_shift); double cur_sc_dist = distDirectSC( _sc1, sc2_shifted ); if( cur_sc_dist < min_sc_dist ) { argmin_shift = num_shift; min_sc_dist = cur_sc_dist; } } return make_pair(min_sc_dist, argmin_shift); } // distanceBtnScanContext MatrixXd SCManager::makeScancontext( pcl::PointCloud & _scan_down ) { TicTocV2 t_making_desc; int num_pts_scan_down = _scan_down.points.size(); // main const int NO_POINT = -1000; MatrixXd desc = NO_POINT * MatrixXd::Ones(PC_NUM_RING, PC_NUM_SECTOR); SCPointType pt; float azim_angle, azim_range; // wihtin 2d plane int ring_idx, sctor_idx; for (int pt_idx = 0; pt_idx < num_pts_scan_down; pt_idx++) { pt.x = _scan_down.points[pt_idx].x; pt.y = _scan_down.points[pt_idx].y; pt.z = _scan_down.points[pt_idx].z + LIDAR_HEIGHT; // naive adding is ok (all points should be > 0). // xyz to ring, sector azim_range = sqrt(pt.x * pt.x + pt.y * pt.y); azim_angle = xy2theta(pt.x, pt.y); // if range is out of roi, pass if( azim_range > PC_MAX_RADIUS ) continue; ring_idx = std::max( std::min( PC_NUM_RING, int(ceil( (azim_range / PC_MAX_RADIUS) * PC_NUM_RING )) ), 1 ); sctor_idx = std::max( std::min( PC_NUM_SECTOR, int(ceil( (azim_angle / 360.0) * PC_NUM_SECTOR )) ), 1 ); // taking maximum z if ( desc(ring_idx-1, sctor_idx-1) < pt.z ) // -1 means cpp starts from 0 desc(ring_idx-1, sctor_idx-1) = pt.z; // update for taking maximum value at that bin } // reset no points to zero (for cosine dist later) for ( int row_idx = 0; row_idx < desc.rows(); row_idx++ ) for ( int col_idx = 0; col_idx < desc.cols(); col_idx++ ) if( desc(row_idx, col_idx) == NO_POINT ) desc(row_idx, col_idx) = 0; t_making_desc.toc("PolarContext making"); return desc; } // SCManager::makeScancontext MatrixXd SCManager::makeRingkeyFromScancontext( Eigen::MatrixXd &_desc ) { /* * summary: rowwise mean vector */ Eigen::MatrixXd 
invariant_key(_desc.rows(), 1); for ( int row_idx = 0; row_idx < _desc.rows(); row_idx++ ) { Eigen::MatrixXd curr_row = _desc.row(row_idx); invariant_key(row_idx, 0) = curr_row.mean(); } return invariant_key; } // SCManager::makeRingkeyFromScancontext MatrixXd SCManager::makeSectorkeyFromScancontext( Eigen::MatrixXd &_desc ) { /* * summary: columnwise mean vector */ Eigen::MatrixXd variant_key(1, _desc.cols()); for ( int col_idx = 0; col_idx < _desc.cols(); col_idx++ ) { Eigen::MatrixXd curr_col = _desc.col(col_idx); variant_key(0, col_idx) = curr_col.mean(); } return variant_key; } // SCManager::makeSectorkeyFromScancontext const Eigen::MatrixXd& SCManager::getConstRefRecentSCD(void) { return polarcontexts_.back(); } void SCManager::saveScancontextAndKeys( Eigen::MatrixXd _scd ) { Eigen::MatrixXd ringkey = makeRingkeyFromScancontext( _scd ); Eigen::MatrixXd sectorkey = makeSectorkeyFromScancontext( _scd ); std::vector polarcontext_invkey_vec = eig2stdvec( ringkey ); polarcontexts_.push_back( _scd ); polarcontext_invkeys_.push_back( ringkey ); polarcontext_vkeys_.push_back( sectorkey ); polarcontext_invkeys_mat_.push_back( polarcontext_invkey_vec ); } // SCManager::makeAndSaveScancontextAndKeys void SCManager::makeAndSaveScancontextAndKeys( pcl::PointCloud & _scan_down ) { Eigen::MatrixXd sc = makeScancontext(_scan_down); // v1 Eigen::MatrixXd ringkey = makeRingkeyFromScancontext( sc ); Eigen::MatrixXd sectorkey = makeSectorkeyFromScancontext( sc ); std::vector polarcontext_invkey_vec = eig2stdvec( ringkey ); polarcontexts_.push_back( sc ); polarcontext_invkeys_.push_back( ringkey ); polarcontext_vkeys_.push_back( sectorkey ); polarcontext_invkeys_mat_.push_back( polarcontext_invkey_vec ); } // SCManager::makeAndSaveScancontextAndKeys void SCManager::setSCdistThres(double _new_thres) { SC_DIST_THRES = _new_thres; } // SCManager::setThres void SCManager::setMaximumRadius(double _max_r) { PC_MAX_RADIUS = _max_r; } // SCManager::setMaximumRadius std::pair 
SCManager::detectLoopClosureIDBetweenSession (std::vector& _curr_key, Eigen::MatrixXd& _curr_desc) { int loop_id { -1 }; // init with -1, -1 means no loop (== LeGO-LOAM's variable "closestHistoryFrameID") auto& curr_key = _curr_key; auto& curr_desc = _curr_desc; // current observation (query) // step 0: if first, construct the tree in batch if( ! is_tree_batch_made ) // run only once { polarcontext_invkeys_to_search_.clear(); polarcontext_invkeys_to_search_.assign( polarcontext_invkeys_mat_.begin(), polarcontext_invkeys_mat_.end() ) ; polarcontext_tree_batch_.reset(); polarcontext_tree_batch_ = std::make_unique(PC_NUM_RING /* dim */, polarcontext_invkeys_to_search_, 10 /* max leaf */ ); is_tree_batch_made = true; // for running this block only once } double min_dist = 10000000; // init with somthing large int nn_align = 0; int nn_idx = 0; // step 1: knn search std::vector candidate_indexes( NUM_CANDIDATES_FROM_TREE ); std::vector out_dists_sqr( NUM_CANDIDATES_FROM_TREE ); nanoflann::KNNResultSet knnsearch_result( NUM_CANDIDATES_FROM_TREE ); knnsearch_result.init( &candidate_indexes[0], &out_dists_sqr[0] ); polarcontext_tree_batch_->index->findNeighbors( knnsearch_result, &curr_key[0] /* query */, nanoflann::SearchParams(10) ); // error here // step 2: pairwise distance (find optimal columnwise best-fit using cosine distance) TicTocV2 t_calc_dist; for ( int candidate_iter_idx = 0; candidate_iter_idx < NUM_CANDIDATES_FROM_TREE; candidate_iter_idx++ ) { MatrixXd polarcontext_candidate = polarcontexts_[ candidate_indexes[candidate_iter_idx] ]; std::pair sc_dist_result = distanceBtnScanContext( curr_desc, polarcontext_candidate ); double candidate_dist = sc_dist_result.first; int candidate_align = sc_dist_result.second; if( candidate_dist < min_dist ) { min_dist = candidate_dist; nn_align = candidate_align; nn_idx = candidate_indexes[candidate_iter_idx]; } } t_calc_dist.toc("Distance calc"); // step 3: similarity threshold if( min_dist < SC_DIST_THRES ) loop_id = 
nn_idx; // To do: return also nn_align (i.e., yaw diff) float yaw_diff_rad = deg2rad(nn_align * PC_UNIT_SECTORANGLE); std::pair result {loop_id, yaw_diff_rad}; return result; } // SCManager::detectLoopClosureIDBetweenSession std::pair SCManager::detectLoopClosureID ( void ) { int loop_id { -1 }; // init with -1, -1 means no loop (== LeGO-LOAM's variable "closestHistoryFrameID") auto curr_key = polarcontext_invkeys_mat_.back(); // current observation (query) auto curr_desc = polarcontexts_.back(); // current observation (query) /* * step 1: candidates from ringkey tree_ */ if( (int)polarcontext_invkeys_mat_.size() < NUM_EXCLUDE_RECENT + 1) { std::pair result {loop_id, 0.0}; return result; // Early return } // tree_ reconstruction (not mandatory to make everytime) if( tree_making_period_conter % TREE_MAKING_PERIOD_ == 0) // to save computation cost { TicTocV2 t_tree_construction; polarcontext_invkeys_to_search_.clear(); polarcontext_invkeys_to_search_.assign( polarcontext_invkeys_mat_.begin(), polarcontext_invkeys_mat_.end() - NUM_EXCLUDE_RECENT ) ; polarcontext_tree_.reset(); polarcontext_tree_ = std::make_unique(PC_NUM_RING /* dim */, polarcontext_invkeys_to_search_, 10 /* max leaf */ ); // tree_ptr_->index->buildIndex(); // inernally called in the constructor of InvKeyTree (for detail, refer the nanoflann and KDtreeVectorOfVectorsAdaptor) t_tree_construction.toc("Tree construction"); } tree_making_period_conter = tree_making_period_conter + 1; double min_dist = 10000000; // init with somthing large int nn_align = 0; int nn_idx = 0; // knn search std::vector candidate_indexes( NUM_CANDIDATES_FROM_TREE ); std::vector out_dists_sqr( NUM_CANDIDATES_FROM_TREE ); TicTocV2 t_tree_search; nanoflann::KNNResultSet knnsearch_result( NUM_CANDIDATES_FROM_TREE ); knnsearch_result.init( &candidate_indexes[0], &out_dists_sqr[0] ); polarcontext_tree_->index->findNeighbors( knnsearch_result, &curr_key[0] /* query */, nanoflann::SearchParams(10) ); t_tree_search.toc("Tree search"); 
/* * step 2: pairwise distance (find optimal columnwise best-fit using cosine distance) */ TicTocV2 t_calc_dist; for ( int candidate_iter_idx = 0; candidate_iter_idx < NUM_CANDIDATES_FROM_TREE; candidate_iter_idx++ ) { MatrixXd polarcontext_candidate = polarcontexts_[ candidate_indexes[candidate_iter_idx] ]; std::pair sc_dist_result = distanceBtnScanContext( curr_desc, polarcontext_candidate ); double candidate_dist = sc_dist_result.first; int candidate_align = sc_dist_result.second; if( candidate_dist < min_dist ) { min_dist = candidate_dist; nn_align = candidate_align; nn_idx = candidate_indexes[candidate_iter_idx]; } } t_calc_dist.toc("Distance calc"); /* * loop threshold check */ if( min_dist < SC_DIST_THRES ) { loop_id = nn_idx; // std::cout.precision(3); cout << "[Loop found] Nearest distance: " << min_dist << " btn " << polarcontexts_.size()-1 << " and " << nn_idx << "." << endl; // cout << "[Loop found] yaw diff: " << nn_align * PC_UNIT_SECTORANGLE << " deg." << endl; } else { std::cout.precision(3); cout << "[Not loop] Nearest distance: " << min_dist << " btn " << polarcontexts_.size()-1 << " and " << nn_idx << "." << endl; // cout << "[Not loop] yaw diff: " << nn_align * PC_UNIT_SECTORANGLE << " deg." 
<< endl; } // To do: return also nn_align (i.e., yaw diff) float yaw_diff_rad = deg2rad(nn_align * PC_UNIT_SECTORANGLE); std::pair result {loop_id, yaw_diff_rad}; return result; } // SCManager::detectLoopClosureID // } // namespace SC2 ================================================ FILE: SC-PGO/include/scancontext/Scancontext.h ================================================ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "scancontext/nanoflann.hpp" #include "scancontext/KDTreeVectorOfVectorsAdaptor.h" #include "livox_mapping/tic_toc.h" using namespace Eigen; using namespace nanoflann; using std::cout; using std::endl; using std::make_pair; using std::atan2; using std::cos; using std::sin; using SCPointType = pcl::PointXYZI; // using xyz only. but a user can exchange the original bin encoding function (i.e., max hegiht) to max intensity (for detail, refer 20 ICRA Intensity Scan Context) using KeyMat = std::vector >; using InvKeyTree = KDTreeVectorOfVectorsAdaptor< KeyMat, float >; // namespace SC2 // { void coreImportTest ( void ); // sc param-independent helper functions float xy2theta( const float & _x, const float & _y ); MatrixXd circshift( MatrixXd &_mat, int _num_shift ); std::vector eig2stdvec( MatrixXd _eigmat ); class SCManager { public: SCManager( ) = default; // reserving data space (of std::vector) could be considered. but the descriptor is lightweight so don't care. 
Eigen::MatrixXd makeScancontext( pcl::PointCloud & _scan_down ); Eigen::MatrixXd makeRingkeyFromScancontext( Eigen::MatrixXd &_desc ); Eigen::MatrixXd makeSectorkeyFromScancontext( Eigen::MatrixXd &_desc ); int fastAlignUsingVkey ( MatrixXd & _vkey1, MatrixXd & _vkey2 ); double distDirectSC ( MatrixXd &_sc1, MatrixXd &_sc2 ); // "d" (eq 5) in the original paper (IROS 18) std::pair distanceBtnScanContext ( MatrixXd &_sc1, MatrixXd &_sc2 ); // "D" (eq 6) in the original paper (IROS 18) // User-side API void makeAndSaveScancontextAndKeys( pcl::PointCloud & _scan_down ); std::pair detectLoopClosureID( void ); // int: nearest node index, float: relative yaw // for ltslam // User-side API for multi-session void saveScancontextAndKeys( Eigen::MatrixXd _scd ); std::pair detectLoopClosureIDBetweenSession ( std::vector& curr_key, Eigen::MatrixXd& curr_desc); const Eigen::MatrixXd& getConstRefRecentSCD(void); public: // hyper parameters () const double LIDAR_HEIGHT = 2.0; // lidar height : add this for simply directly using lidar scan in the lidar local coord (not robot base coord) / if you use robot-coord-transformed lidar scans, just set this as 0. const int PC_NUM_RING = 20; // 20 in the original paper (IROS 18) const int PC_NUM_SECTOR = 60; // 60 in the original paper (IROS 18) double PC_MAX_RADIUS = 80.0; // 80 meter max in the original paper (IROS 18) const double PC_UNIT_SECTORANGLE = 360.0 / double(PC_NUM_SECTOR); const double PC_UNIT_RINGGAP = PC_MAX_RADIUS / double(PC_NUM_RING); // tree const int NUM_EXCLUDE_RECENT = 30; // simply just keyframe gap (related with loopClosureFrequency in yaml), but node position distance-based exclusion is ok. const int NUM_CANDIDATES_FROM_TREE = 3; // 10 is enough. (refer the IROS 18 paper) // loop thres const double SEARCH_RATIO = 0.1; // for fast comparison, no Brute-force, but search 10 % is okay. // not was in the original conf paper, but improved ver. 
// const double SC_DIST_THRES = 0.13; // empirically 0.1-0.2 is fine (rare false-alarms) for 20x60 polar context (but for 0.15 <, DCS or ICP fit score check (e.g., in LeGO-LOAM) should be required for robustness) double SC_DIST_THRES = 0.2; // 0.4-0.6 is good choice for using with robust kernel (e.g., Cauchy, DCS) + icp fitness threshold / if not, recommend 0.1-0.15 // const double SC_DIST_THRES = 0.7; // 0.4-0.6 is good choice for using with robust kernel (e.g., Cauchy, DCS) + icp fitness threshold / if not, recommend 0.1-0.15 // config const int TREE_MAKING_PERIOD_ = 30; // i.e., remaking tree frequency, to avoid non-mandatory every remaking, to save time cost / in the LeGO-LOAM integration, it is synchronized with the loop detection callback (which is 1Hz) so it means the tree is updated evrey 10 sec. But you can use the smaller value because it is enough fast ~ 5-50ms wrt N. int tree_making_period_conter = 0; // setter void setSCdistThres(double _new_thres); void setMaximumRadius(double _max_r); // data std::vector polarcontexts_timestamp_; // optional. std::vector polarcontexts_; std::vector polarcontext_invkeys_; std::vector polarcontext_vkeys_; KeyMat polarcontext_invkeys_mat_; KeyMat polarcontext_invkeys_to_search_; std::unique_ptr polarcontext_tree_; bool is_tree_batch_made = false; std::unique_ptr polarcontext_tree_batch_; }; // SCManager // } // namespace SC2 ================================================ FILE: SC-PGO/include/scancontext/nanoflann.hpp ================================================ /*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * Copyright 2011-2016 Jose Luis Blanco (joseluisblancoc@gmail.com). * All rights reserved. 
* * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *************************************************************************/ /** \mainpage nanoflann C++ API documentation * nanoflann is a C++ header-only library for building KD-Trees, mostly * optimized for 2D or 3D point clouds. * * nanoflann does not require compiling or installing, just an * #include in your code. 
* * See: * - C++ API organized by modules * - Online README * - Doxygen * documentation */ #ifndef NANOFLANN_HPP_ #define NANOFLANN_HPP_ #include #include #include #include // for abs() #include // for fwrite() #include // for abs() #include #include // std::reference_wrapper #include #include /** Library version: 0xMmP (M=Major,m=minor,P=patch) */ #define NANOFLANN_VERSION 0x132 // Avoid conflicting declaration of min/max macros in windows headers #if !defined(NOMINMAX) && \ (defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64)) #define NOMINMAX #ifdef max #undef max #undef min #endif #endif namespace nanoflann { /** @addtogroup nanoflann_grp nanoflann C++ library for ANN * @{ */ /** the PI constant (required to avoid MSVC missing symbols) */ template T pi_const() { return static_cast(3.14159265358979323846); } /** * Traits if object is resizable and assignable (typically has a resize | assign * method) */ template struct has_resize : std::false_type {}; template struct has_resize().resize(1), 0)> : std::true_type {}; template struct has_assign : std::false_type {}; template struct has_assign().assign(1, 0), 0)> : std::true_type {}; /** * Free function to resize a resizable object */ template inline typename std::enable_if::value, void>::type resize(Container &c, const size_t nElements) { c.resize(nElements); } /** * Free function that has no effects on non resizable containers (e.g. 
* std::array) It raises an exception if the expected size does not match */ template inline typename std::enable_if::value, void>::type resize(Container &c, const size_t nElements) { if (nElements != c.size()) throw std::logic_error("Try to change the size of a std::array."); } /** * Free function to assign to a container */ template inline typename std::enable_if::value, void>::type assign(Container &c, const size_t nElements, const T &value) { c.assign(nElements, value); } /** * Free function to assign to a std::array */ template inline typename std::enable_if::value, void>::type assign(Container &c, const size_t nElements, const T &value) { for (size_t i = 0; i < nElements; i++) c[i] = value; } /** @addtogroup result_sets_grp Result set classes * @{ */ template class KNNResultSet { public: typedef _DistanceType DistanceType; typedef _IndexType IndexType; typedef _CountType CountType; private: IndexType *indices; DistanceType *dists; CountType capacity; CountType count; public: inline KNNResultSet(CountType capacity_) : indices(0), dists(0), capacity(capacity_), count(0) {} inline void init(IndexType *indices_, DistanceType *dists_) { indices = indices_; dists = dists_; count = 0; if (capacity) dists[capacity - 1] = (std::numeric_limits::max)(); } inline CountType size() const { return count; } inline bool full() const { return count == capacity; } /** * Called during search to add an element matching the criteria. * @return true if the search should be continued, false if the results are * sufficient */ inline bool addPoint(DistanceType dist, IndexType index) { CountType i; for (i = count; i > 0; --i) { #ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same // distance, the one with the lowest-index will be // returned first. 
if ((dists[i - 1] > dist) || ((dist == dists[i - 1]) && (indices[i - 1] > index))) { #else if (dists[i - 1] > dist) { #endif if (i < capacity) { dists[i] = dists[i - 1]; indices[i] = indices[i - 1]; } } else break; } if (i < capacity) { dists[i] = dist; indices[i] = index; } if (count < capacity) count++; // tell caller that the search shall continue return true; } inline DistanceType worstDist() const { return dists[capacity - 1]; } }; /** operator "<" for std::sort() */ struct IndexDist_Sorter { /** PairType will be typically: std::pair */ template inline bool operator()(const PairType &p1, const PairType &p2) const { return p1.second < p2.second; } }; /** * A result-set class used when performing a radius based search. */ template class RadiusResultSet { public: typedef _DistanceType DistanceType; typedef _IndexType IndexType; public: const DistanceType radius; std::vector> &m_indices_dists; inline RadiusResultSet( DistanceType radius_, std::vector> &indices_dists) : radius(radius_), m_indices_dists(indices_dists) { init(); } inline void init() { clear(); } inline void clear() { m_indices_dists.clear(); } inline size_t size() const { return m_indices_dists.size(); } inline bool full() const { return true; } /** * Called during search to add an element matching the criteria. 
* @return true if the search should be continued, false if the results are * sufficient */ inline bool addPoint(DistanceType dist, IndexType index) { if (dist < radius) m_indices_dists.push_back(std::make_pair(index, dist)); return true; } inline DistanceType worstDist() const { return radius; } /** * Find the worst result (furtherest neighbor) without copying or sorting * Pre-conditions: size() > 0 */ std::pair worst_item() const { if (m_indices_dists.empty()) throw std::runtime_error("Cannot invoke RadiusResultSet::worst_item() on " "an empty list of results."); typedef typename std::vector>::const_iterator DistIt; DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end(), IndexDist_Sorter()); return *it; } }; /** @} */ /** @addtogroup loadsave_grp Load/save auxiliary functions * @{ */ template void save_value(FILE *stream, const T &value, size_t count = 1) { fwrite(&value, sizeof(value), count, stream); } template void save_value(FILE *stream, const std::vector &value) { size_t size = value.size(); fwrite(&size, sizeof(size_t), 1, stream); fwrite(&value[0], sizeof(T), size, stream); } template void load_value(FILE *stream, T &value, size_t count = 1) { size_t read_cnt = fread(&value, sizeof(value), count, stream); if (read_cnt != count) { throw std::runtime_error("Cannot read from file"); } } template void load_value(FILE *stream, std::vector &value) { size_t size; size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); if (read_cnt != 1) { throw std::runtime_error("Cannot read from file"); } value.resize(size); read_cnt = fread(&value[0], sizeof(T), size, stream); if (read_cnt != size) { throw std::runtime_error("Cannot read from file"); } } /** @} */ /** @addtogroup metric_grp Metric (distance) classes * @{ */ struct Metric {}; /** Manhattan distance functor (generic version, optimized for * high-dimensionality data sets). Corresponding distance traits: * nanoflann::metric_L1 \tparam T Type of the elements (e.g. 
double, float, * uint8_t) \tparam _DistanceType Type of distance variables (must be signed) * (e.g. float, double, int64_t) */ template struct L1_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) {} inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const { DistanceType result = DistanceType(); const T *last = a + size; const T *lastgroup = last - 3; size_t d = 0; /* Process 4 items with each loop for efficiency. */ while (a < lastgroup) { const DistanceType diff0 = std::abs(a[0] - data_source.kdtree_get_pt(b_idx, d++)); const DistanceType diff1 = std::abs(a[1] - data_source.kdtree_get_pt(b_idx, d++)); const DistanceType diff2 = std::abs(a[2] - data_source.kdtree_get_pt(b_idx, d++)); const DistanceType diff3 = std::abs(a[3] - data_source.kdtree_get_pt(b_idx, d++)); result += diff0 + diff1 + diff2 + diff3; a += 4; if ((worst_dist > 0) && (result > worst_dist)) { return result; } } /* Process last 0-3 components. Not needed for standard vector lengths. */ while (a < last) { result += std::abs(*a++ - data_source.kdtree_get_pt(b_idx, d++)); } return result; } template inline DistanceType accum_dist(const U a, const V b, const size_t) const { return std::abs(a - b); } }; /** Squared Euclidean distance functor (generic version, optimized for * high-dimensionality data sets). Corresponding distance traits: * nanoflann::metric_L2 \tparam T Type of the elements (e.g. double, float, * uint8_t) \tparam _DistanceType Type of distance variables (must be signed) * (e.g. 
float, double, int64_t) */ template struct L2_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) {} inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const { DistanceType result = DistanceType(); const T *last = a + size; const T *lastgroup = last - 3; size_t d = 0; /* Process 4 items with each loop for efficiency. */ while (a < lastgroup) { const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx, d++); const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx, d++); const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx, d++); const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx, d++); result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; a += 4; if ((worst_dist > 0) && (result > worst_dist)) { return result; } } /* Process last 0-3 components. Not needed for standard vector lengths. */ while (a < last) { const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx, d++); result += diff0 * diff0; } return result; } template inline DistanceType accum_dist(const U a, const V b, const size_t) const { return (a - b) * (a - b); } }; /** Squared Euclidean (L2) distance functor (suitable for low-dimensionality * datasets, like 2D or 3D point clouds) Corresponding distance traits: * nanoflann::metric_L2_Simple \tparam T Type of the elements (e.g. double, * float, uint8_t) \tparam _DistanceType Type of distance variables (must be * signed) (e.g. 
float, double, int64_t) */ template struct L2_Simple_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; L2_Simple_Adaptor(const DataSource &_data_source) : data_source(_data_source) {} inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size) const { DistanceType result = DistanceType(); for (size_t i = 0; i < size; ++i) { const DistanceType diff = a[i] - data_source.kdtree_get_pt(b_idx, i); result += diff * diff; } return result; } template inline DistanceType accum_dist(const U a, const V b, const size_t) const { return (a - b) * (a - b); } }; /** SO2 distance functor * Corresponding distance traits: nanoflann::metric_SO2 * \tparam T Type of the elements (e.g. double, float) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. * float, double) orientation is constrained to be in [-pi, pi] */ template struct SO2_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; const DataSource &data_source; SO2_Adaptor(const DataSource &_data_source) : data_source(_data_source) {} inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size) const { return accum_dist(a[size - 1], data_source.kdtree_get_pt(b_idx, size - 1), size - 1); } /** Note: this assumes that input angles are already in the range [-pi,pi] */ template inline DistanceType accum_dist(const U a, const V b, const size_t) const { DistanceType result = DistanceType(); DistanceType PI = pi_const(); result = b - a; if (result > PI) result -= 2 * PI; else if (result < -PI) result += 2 * PI; return result; } }; /** SO3 distance functor (Uses L2_Simple) * Corresponding distance traits: nanoflann::metric_SO3 * \tparam T Type of the elements (e.g. double, float) * \tparam _DistanceType Type of distance variables (must be signed) (e.g. 
* float, double) */ template struct SO3_Adaptor { typedef T ElementType; typedef _DistanceType DistanceType; L2_Simple_Adaptor distance_L2_Simple; SO3_Adaptor(const DataSource &_data_source) : distance_L2_Simple(_data_source) {} inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size) const { return distance_L2_Simple.evalMetric(a, b_idx, size); } template inline DistanceType accum_dist(const U a, const V b, const size_t idx) const { return distance_L2_Simple.accum_dist(a, b, idx); } }; /** Metaprogramming helper traits class for the L1 (Manhattan) metric */ struct metric_L1 : public Metric { template struct traits { typedef L1_Adaptor distance_t; }; }; /** Metaprogramming helper traits class for the L2 (Euclidean) metric */ struct metric_L2 : public Metric { template struct traits { typedef L2_Adaptor distance_t; }; }; /** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */ struct metric_L2_Simple : public Metric { template struct traits { typedef L2_Simple_Adaptor distance_t; }; }; /** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */ struct metric_SO2 : public Metric { template struct traits { typedef SO2_Adaptor distance_t; }; }; /** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */ struct metric_SO3 : public Metric { template struct traits { typedef SO3_Adaptor distance_t; }; }; /** @} */ /** @addtogroup param_grp Parameter structs * @{ */ /** Parameters (see README.md) */ struct KDTreeSingleIndexAdaptorParams { KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10) : leaf_max_size(_leaf_max_size) {} size_t leaf_max_size; }; /** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */ struct SearchParams { /** Note: The first argument (checks_IGNORED_) is ignored, but kept for * compatibility with the FLANN interface */ SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true) : checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {} int checks; 
//!< Ignored parameter (Kept for compatibility with the FLANN //!< interface). float eps; //!< search for eps-approximate neighbours (default: 0) bool sorted; //!< only for radius search, require neighbours sorted by //!< distance (default: true) }; /** @} */ /** @addtogroup memalloc_grp Memory allocation * @{ */ /** * Allocates (using C's malloc) a generic type T. * * Params: * count = number of instances to allocate. * Returns: pointer (of type T*) to memory buffer */ template inline T *allocate(size_t count = 1) { T *mem = static_cast(::malloc(sizeof(T) * count)); return mem; } /** * Pooled storage allocator * * The following routines allow for the efficient allocation of storage in * small chunks from a specified pool. Rather than allowing each structure * to be freed individually, an entire pool of storage is freed at once. * This method has two advantages over just using malloc() and free(). First, * it is far more efficient for allocating small objects, as there is * no overhead for remembering all the information needed to free each * object or consolidating fragmented memory. Second, the decision about * how long to keep an object is made at the time of allocation, and there * is no need to track down all the objects to free them. * */ const size_t WORDSIZE = 16; const size_t BLOCKSIZE = 8192; class PooledAllocator { /* We maintain memory alignment to word boundaries by requiring that all allocations be in multiples of the machine wordsize. */ /* Size of machine word in bytes. Must be power of 2. */ /* Minimum number of bytes requested at a time from the system. Must be * multiple of WORDSIZE. */ size_t remaining; /* Number of bytes left in current block of storage. */ void *base; /* Pointer to base of current block of storage. */ void *loc; /* Current location in block to next allocate memory. */ void internal_init() { remaining = 0; base = NULL; usedMemory = 0; wastedMemory = 0; } public: size_t usedMemory; size_t wastedMemory; /** Default constructor. 
Initializes a new pool. */ PooledAllocator() { internal_init(); } /** * Destructor. Frees all the memory allocated in this pool. */ ~PooledAllocator() { free_all(); } /** Frees all allocated memory chunks */ void free_all() { while (base != NULL) { void *prev = *(static_cast(base)); /* Get pointer to prev block. */ ::free(base); base = prev; } internal_init(); } /** * Returns a pointer to a piece of new memory of the given size in bytes * allocated from the pool. */ void *malloc(const size_t req_size) { /* Round size up to a multiple of wordsize. The following expression only works for WORDSIZE that is a power of 2, by masking last bits of incremented size to zero. */ const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); /* Check whether a new block must be allocated. Note that the first word of a block is reserved for a pointer to the previous block. */ if (size > remaining) { wastedMemory += remaining; /* Allocate new storage. */ const size_t blocksize = (size + sizeof(void *) + (WORDSIZE - 1) > BLOCKSIZE) ? size + sizeof(void *) + (WORDSIZE - 1) : BLOCKSIZE; // use the standard C malloc to allocate memory void *m = ::malloc(blocksize); if (!m) { fprintf(stderr, "Failed to allocate memory.\n"); return NULL; } /* Fill first word of new block with pointer to previous block. */ static_cast(m)[0] = base; base = m; size_t shift = 0; // int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & // (WORDSIZE-1))) & (WORDSIZE-1); remaining = blocksize - sizeof(void *) - shift; loc = (static_cast(m) + sizeof(void *) + shift); } void *rloc = loc; loc = static_cast(loc) + size; remaining -= size; usedMemory += size; return rloc; } /** * Allocates (using this pool) a generic type T. * * Params: * count = number of instances to allocate. 
* Returns: pointer (of type T*) to memory buffer */ template T *allocate(const size_t count = 1) { T *mem = static_cast(this->malloc(sizeof(T) * count)); return mem; } }; /** @} */ /** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff * @{ */ /** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors * when DIM=-1. Fixed size version for a generic DIM: */ template struct array_or_vector_selector { typedef std::array container_t; }; /** Dynamic size version */ template struct array_or_vector_selector<-1, T> { typedef std::vector container_t; }; /** @} */ /** kd-tree base-class * * Contains the member functions common to the classes KDTreeSingleIndexAdaptor * and KDTreeSingleIndexDynamicAdaptor_. * * \tparam Derived The name of the class which inherits this class. * \tparam DatasetAdaptor The user-provided adaptor (see comments above). * \tparam Distance The distance metric to use, these are all classes derived * from nanoflann::Metric \tparam DIM Dimensionality of data points (e.g. 3 for * 3D points) \tparam IndexType Will be typically size_t or int */ template class KDTreeBaseClass { public: /** Frees the previously-built index. Automatically called within * buildIndex(). */ void freeIndex(Derived &obj) { obj.pool.free_all(); obj.root_node = NULL; obj.m_size_at_index_build = 0; } typedef typename Distance::ElementType ElementType; typedef typename Distance::DistanceType DistanceType; /*--------------------- Internal Data Structures --------------------------*/ struct Node { /** Union used because a node can be either a LEAF node or a non-leaf node, * so both data fields are never used simultaneously */ union { struct leaf { IndexType left, right; //!< Indices of points in leaf node } lr; struct nonleaf { int divfeat; //!< Dimension used for subdivision. DistanceType divlow, divhigh; //!< The values used for subdivision. 
} sub; } node_type; Node *child1, *child2; //!< Child nodes (both=NULL mean its a leaf node) }; typedef Node *NodePtr; struct Interval { ElementType low, high; }; /** * Array of indices to vectors in the dataset. */ std::vector vind; NodePtr root_node; size_t m_leaf_max_size; size_t m_size; //!< Number of current points in the dataset size_t m_size_at_index_build; //!< Number of points in the dataset when the //!< index was built int dim; //!< Dimensionality of each data point /** Define "BoundingBox" as a fixed-size or variable-size container depending * on "DIM" */ typedef typename array_or_vector_selector::container_t BoundingBox; /** Define "distance_vector_t" as a fixed-size or variable-size container * depending on "DIM" */ typedef typename array_or_vector_selector::container_t distance_vector_t; /** The KD-tree used to find neighbours */ BoundingBox root_bbox; /** * Pooled memory allocator. * * Using a pooled memory allocator is more efficient * than allocating memory directly when there is a large * number small of memory allocations. */ PooledAllocator pool; /** Returns number of points in dataset */ size_t size(const Derived &obj) const { return obj.m_size; } /** Returns the length of each point in the dataset */ size_t veclen(const Derived &obj) { return static_cast(DIM > 0 ? 
DIM : obj.dim); } /// Helper accessor to the dataset points: inline ElementType dataset_get(const Derived &obj, size_t idx, int component) const { return obj.dataset.kdtree_get_pt(idx, component); } /** * Computes the inde memory usage * Returns: memory used by the index */ size_t usedMemory(Derived &obj) { return obj.pool.usedMemory + obj.pool.wastedMemory + obj.dataset.kdtree_get_point_count() * sizeof(IndexType); // pool memory and vind array memory } void computeMinMax(const Derived &obj, IndexType *ind, IndexType count, int element, ElementType &min_elem, ElementType &max_elem) { min_elem = dataset_get(obj, ind[0], element); max_elem = dataset_get(obj, ind[0], element); for (IndexType i = 1; i < count; ++i) { ElementType val = dataset_get(obj, ind[i], element); if (val < min_elem) min_elem = val; if (val > max_elem) max_elem = val; } } /** * Create a tree node that subdivides the list of vecs from vind[first] * to vind[last]. The routine is called recursively on each sublist. * * @param left index of the first vector * @param right index of the last vector */ NodePtr divideTree(Derived &obj, const IndexType left, const IndexType right, BoundingBox &bbox) { NodePtr node = obj.pool.template allocate(); // allocate memory /* If too few exemplars remain, then make this a leaf node. */ if ((right - left) <= static_cast(obj.m_leaf_max_size)) { node->child1 = node->child2 = NULL; /* Mark as leaf node. */ node->node_type.lr.left = left; node->node_type.lr.right = right; // compute bounding-box of leaf points for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) { bbox[i].low = dataset_get(obj, obj.vind[left], i); bbox[i].high = dataset_get(obj, obj.vind[left], i); } for (IndexType k = left + 1; k < right; ++k) { for (int i = 0; i < (DIM > 0 ? 
DIM : obj.dim); ++i) { if (bbox[i].low > dataset_get(obj, obj.vind[k], i)) bbox[i].low = dataset_get(obj, obj.vind[k], i); if (bbox[i].high < dataset_get(obj, obj.vind[k], i)) bbox[i].high = dataset_get(obj, obj.vind[k], i); } } } else { IndexType idx; int cutfeat; DistanceType cutval; middleSplit_(obj, &obj.vind[0] + left, right - left, idx, cutfeat, cutval, bbox); node->node_type.sub.divfeat = cutfeat; BoundingBox left_bbox(bbox); left_bbox[cutfeat].high = cutval; node->child1 = divideTree(obj, left, left + idx, left_bbox); BoundingBox right_bbox(bbox); right_bbox[cutfeat].low = cutval; node->child2 = divideTree(obj, left + idx, right, right_bbox); node->node_type.sub.divlow = left_bbox[cutfeat].high; node->node_type.sub.divhigh = right_bbox[cutfeat].low; for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) { bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low); bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high); } } return node; } void middleSplit_(Derived &obj, IndexType *ind, IndexType count, IndexType &index, int &cutfeat, DistanceType &cutval, const BoundingBox &bbox) { const DistanceType EPS = static_cast(0.00001); ElementType max_span = bbox[0].high - bbox[0].low; for (int i = 1; i < (DIM > 0 ? DIM : obj.dim); ++i) { ElementType span = bbox[i].high - bbox[i].low; if (span > max_span) { max_span = span; } } ElementType max_spread = -1; cutfeat = 0; for (int i = 0; i < (DIM > 0 ? 
DIM : obj.dim); ++i) { ElementType span = bbox[i].high - bbox[i].low; if (span > (1 - EPS) * max_span) { ElementType min_elem, max_elem; computeMinMax(obj, ind, count, i, min_elem, max_elem); ElementType spread = max_elem - min_elem; ; if (spread > max_spread) { cutfeat = i; max_spread = spread; } } } // split in the middle DistanceType split_val = (bbox[cutfeat].low + bbox[cutfeat].high) / 2; ElementType min_elem, max_elem; computeMinMax(obj, ind, count, cutfeat, min_elem, max_elem); if (split_val < min_elem) cutval = min_elem; else if (split_val > max_elem) cutval = max_elem; else cutval = split_val; IndexType lim1, lim2; planeSplit(obj, ind, count, cutfeat, cutval, lim1, lim2); if (lim1 > count / 2) index = lim1; else if (lim2 < count / 2) index = lim2; else index = count / 2; } /** * Subdivide the list of points by a plane perpendicular on axe corresponding * to the 'cutfeat' dimension at 'cutval' position. * * On return: * dataset[ind[0..lim1-1]][cutfeat]cutval */ void planeSplit(Derived &obj, IndexType *ind, const IndexType count, int cutfeat, DistanceType &cutval, IndexType &lim1, IndexType &lim2) { /* Move vector indices for left subtree to front of list. */ IndexType left = 0; IndexType right = count - 1; for (;;) { while (left <= right && dataset_get(obj, ind[left], cutfeat) < cutval) ++left; while (right && left <= right && dataset_get(obj, ind[right], cutfeat) >= cutval) --right; if (left > right || !right) break; // "!right" was added to support unsigned Index types std::swap(ind[left], ind[right]); ++left; --right; } /* If either list is empty, it means that all remaining features * are identical. Split in the middle to maintain a balanced tree. 
*/ lim1 = left; right = count - 1; for (;;) { while (left <= right && dataset_get(obj, ind[left], cutfeat) <= cutval) ++left; while (right && left <= right && dataset_get(obj, ind[right], cutfeat) > cutval) --right; if (left > right || !right) break; // "!right" was added to support unsigned Index types std::swap(ind[left], ind[right]); ++left; --right; } lim2 = left; } DistanceType computeInitialDistances(const Derived &obj, const ElementType *vec, distance_vector_t &dists) const { assert(vec); DistanceType distsq = DistanceType(); for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) { if (vec[i] < obj.root_bbox[i].low) { dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].low, i); distsq += dists[i]; } if (vec[i] > obj.root_bbox[i].high) { dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].high, i); distsq += dists[i]; } } return distsq; } void save_tree(Derived &obj, FILE *stream, NodePtr tree) { save_value(stream, *tree); if (tree->child1 != NULL) { save_tree(obj, stream, tree->child1); } if (tree->child2 != NULL) { save_tree(obj, stream, tree->child2); } } void load_tree(Derived &obj, FILE *stream, NodePtr &tree) { tree = obj.pool.template allocate(); load_value(stream, *tree); if (tree->child1 != NULL) { load_tree(obj, stream, tree->child1); } if (tree->child2 != NULL) { load_tree(obj, stream, tree->child2); } } /** Stores the index in a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so when * loading the index object it must be constructed associated to the same * source of data points used while building it. See the example: * examples/saveload_example.cpp \sa loadIndex */ void saveIndex_(Derived &obj, FILE *stream) { save_value(stream, obj.m_size); save_value(stream, obj.dim); save_value(stream, obj.root_bbox); save_value(stream, obj.m_leaf_max_size); save_value(stream, obj.vind); save_tree(obj, stream, obj.root_node); } /** Loads a previous index from a binary file. 
* IMPORTANT NOTE: The set of data points is NOT stored in the file, so the * index object must be constructed associated to the same source of data * points used while building the index. See the example: * examples/saveload_example.cpp \sa loadIndex */ void loadIndex_(Derived &obj, FILE *stream) { load_value(stream, obj.m_size); load_value(stream, obj.dim); load_value(stream, obj.root_bbox); load_value(stream, obj.m_leaf_max_size); load_value(stream, obj.vind); load_tree(obj, stream, obj.root_node); } }; /** @addtogroup kdtrees_grp KD-tree classes and adaptors * @{ */ /** kd-tree static index * * Contains the k-d trees and other information for indexing a set of points * for nearest-neighbor matching. * * The class "DatasetAdaptor" must provide the following interface (can be * non-virtual, inlined methods): * * \code * // Must return the number of data poins * inline size_t kdtree_get_point_count() const { ... } * * * // Must return the dim'th component of the idx'th point in the class: * inline T kdtree_get_pt(const size_t idx, const size_t dim) const { ... } * * // Optional bounding-box computation: return false to default to a standard * bbox computation loop. * // Return true if the BBOX was already computed by the class and returned * in "bb" so it can be avoided to redo it again. * // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 * for point clouds) template bool kdtree_get_bbox(BBOX &bb) const * { * bb[0].low = ...; bb[0].high = ...; // 0th dimension limits * bb[1].low = ...; bb[1].high = ...; // 1st dimension limits * ... * return true; * } * * \endcode * * \tparam DatasetAdaptor The user-provided adaptor (see comments above). * \tparam Distance The distance metric to use: nanoflann::metric_L1, * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \tparam DIM * Dimensionality of data points (e.g. 
3 for 3D points) \tparam IndexType Will * be typically size_t or int */ template class KDTreeSingleIndexAdaptor : public KDTreeBaseClass< KDTreeSingleIndexAdaptor, Distance, DatasetAdaptor, DIM, IndexType> { public: /** Deleted copy constructor*/ KDTreeSingleIndexAdaptor( const KDTreeSingleIndexAdaptor &) = delete; /** * The dataset used by this index */ const DatasetAdaptor &dataset; //!< The source of our data const KDTreeSingleIndexAdaptorParams index_params; Distance distance; typedef typename nanoflann::KDTreeBaseClass< nanoflann::KDTreeSingleIndexAdaptor, Distance, DatasetAdaptor, DIM, IndexType> BaseClassRef; typedef typename BaseClassRef::ElementType ElementType; typedef typename BaseClassRef::DistanceType DistanceType; typedef typename BaseClassRef::Node Node; typedef Node *NodePtr; typedef typename BaseClassRef::Interval Interval; /** Define "BoundingBox" as a fixed-size or variable-size container depending * on "DIM" */ typedef typename BaseClassRef::BoundingBox BoundingBox; /** Define "distance_vector_t" as a fixed-size or variable-size container * depending on "DIM" */ typedef typename BaseClassRef::distance_vector_t distance_vector_t; /** * KDTree constructor * * Refer to docs in README.md or online in * https://github.com/jlblancoc/nanoflann * * The KD-Tree point dimension (the length of each point in the datase, e.g. 3 * for 3D points) is determined by means of: * - The \a DIM template parameter if >0 (highest priority) * - Otherwise, the \a dimensionality parameter of this constructor. 
* * @param inputData Dataset with the input features * @param params Basically, the maximum leaf node size */ KDTreeSingleIndexAdaptor(const int dimensionality, const DatasetAdaptor &inputData, const KDTreeSingleIndexAdaptorParams ¶ms = KDTreeSingleIndexAdaptorParams()) : dataset(inputData), index_params(params), distance(inputData) { BaseClassRef::root_node = NULL; BaseClassRef::m_size = dataset.kdtree_get_point_count(); BaseClassRef::m_size_at_index_build = BaseClassRef::m_size; BaseClassRef::dim = dimensionality; if (DIM > 0) BaseClassRef::dim = DIM; BaseClassRef::m_leaf_max_size = params.leaf_max_size; // Create a permutable array of indices to the input vectors. init_vind(); } /** * Builds the index */ void buildIndex() { BaseClassRef::m_size = dataset.kdtree_get_point_count(); BaseClassRef::m_size_at_index_build = BaseClassRef::m_size; init_vind(); this->freeIndex(*this); BaseClassRef::m_size_at_index_build = BaseClassRef::m_size; if (BaseClassRef::m_size == 0) return; computeBoundingBox(BaseClassRef::root_bbox); BaseClassRef::root_node = this->divideTree(*this, 0, BaseClassRef::m_size, BaseClassRef::root_bbox); // construct the tree } /** \name Query methods * @{ */ /** * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored * inside the result object. * * Params: * result = the result object in which the indices of the * nearest-neighbors are stored vec = the vector for which to search the * nearest neighbors * * \tparam RESULTSET Should be any ResultSet * \return True if the requested neighbors could be found. 
* \sa knnSearch, radiusSearch */ template bool findNeighbors(RESULTSET &result, const ElementType *vec, const SearchParams &searchParams) const { assert(vec); if (this->size(*this) == 0) return false; if (!BaseClassRef::root_node) throw std::runtime_error( "[nanoflann] findNeighbors() called before building the index."); float epsError = 1 + searchParams.eps; distance_vector_t dists; // fixed or variable-sized container (depending on DIM) auto zero = static_cast(0); assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim), zero); // Fill it with zeros. DistanceType distsq = this->computeInitialDistances(*this, vec, dists); searchLevel(result, vec, BaseClassRef::root_node, distsq, dists, epsError); // "count_leaf" parameter removed since was neither // used nor returned to the user. return result.full(); } /** * Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1]. * Their indices are stored inside the result object. \sa radiusSearch, * findNeighbors \note nChecks_IGNORED is ignored but kept for compatibility * with the original FLANN interface. \return Number `N` of valid points in * the result set. Only the first `N` entries in `out_indices` and * `out_distances_sq` will be valid. Return may be less than `num_closest` * only if the number of elements in the tree is less than `num_closest`. */ size_t knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const { nanoflann::KNNResultSet resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); this->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); return resultSet.size(); } /** * Find all the neighbors to \a query_point[0:dim-1] within a maximum radius. * The output is given as a vector of pairs, of which the first element is a * point index and the second the corresponding distance. Previous contents of * \a IndicesDists are cleared. 
* * If searchParams.sorted==true, the output list is sorted by ascending * distances. * * For a better performance, it is advisable to do a .reserve() on the vector * if you have any wild guess about the number of expected matches. * * \sa knnSearch, findNeighbors, radiusSearchCustomCallback * \return The number of points within the given radius (i.e. indices.size() * or dists.size() ) */ size_t radiusSearch(const ElementType *query_point, const DistanceType &radius, std::vector> &IndicesDists, const SearchParams &searchParams) const { RadiusResultSet resultSet(radius, IndicesDists); const size_t nFound = radiusSearchCustomCallback(query_point, resultSet, searchParams); if (searchParams.sorted) std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter()); return nFound; } /** * Just like radiusSearch() but with a custom callback class for each point * found in the radius of the query. See the source of RadiusResultSet<> as a * start point for your own classes. \sa radiusSearch */ template size_t radiusSearchCustomCallback( const ElementType *query_point, SEARCH_CALLBACK &resultSet, const SearchParams &searchParams = SearchParams()) const { this->findNeighbors(resultSet, query_point, searchParams); return resultSet.size(); } /** @} */ public: /** Make sure the auxiliary list \a vind has the same size than the current * dataset, and re-generate if size has changed. */ void init_vind() { // Create a permutable array of indices to the input vectors. BaseClassRef::m_size = dataset.kdtree_get_point_count(); if (BaseClassRef::vind.size() != BaseClassRef::m_size) BaseClassRef::vind.resize(BaseClassRef::m_size); for (size_t i = 0; i < BaseClassRef::m_size; i++) BaseClassRef::vind[i] = i; } void computeBoundingBox(BoundingBox &bbox) { resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim)); if (dataset.kdtree_get_bbox(bbox)) { // Done! 
It was implemented in derived class } else { const size_t N = dataset.kdtree_get_point_count(); if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but " "no data points found."); for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) { bbox[i].low = bbox[i].high = this->dataset_get(*this, 0, i); } for (size_t k = 1; k < N; ++k) { for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) { if (this->dataset_get(*this, k, i) < bbox[i].low) bbox[i].low = this->dataset_get(*this, k, i); if (this->dataset_get(*this, k, i) > bbox[i].high) bbox[i].high = this->dataset_get(*this, k, i); } } } } /** * Performs an exact search in the tree starting from a node. * \tparam RESULTSET Should be any ResultSet * \return true if the search should be continued, false if the results are * sufficient */ template bool searchLevel(RESULTSET &result_set, const ElementType *vec, const NodePtr node, DistanceType mindistsq, distance_vector_t &dists, const float epsError) const { /* If this is a leaf node, then do check and return. */ if ((node->child1 == NULL) && (node->child2 == NULL)) { // count_leaf += (node->lr.right-node->lr.left); // Removed since was // neither used nor returned to the user. DistanceType worst_dist = result_set.worstDist(); for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right; ++i) { const IndexType index = BaseClassRef::vind[i]; // reorder... : i; DistanceType dist = distance.evalMetric( vec, index, (DIM > 0 ? DIM : BaseClassRef::dim)); if (dist < worst_dist) { if (!result_set.addPoint(dist, BaseClassRef::vind[i])) { // the resultset doesn't want to receive any more points, we're done // searching! return false; } } } return true; } /* Which child branch should be taken first? 
*/ int idx = node->node_type.sub.divfeat; ElementType val = vec[idx]; DistanceType diff1 = val - node->node_type.sub.divlow; DistanceType diff2 = val - node->node_type.sub.divhigh; NodePtr bestChild; NodePtr otherChild; DistanceType cut_dist; if ((diff1 + diff2) < 0) { bestChild = node->child1; otherChild = node->child2; cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx); } else { bestChild = node->child2; otherChild = node->child1; cut_dist = distance.accum_dist(val, node->node_type.sub.divlow, idx); } /* Call recursively to search next level down. */ if (!searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError)) { // the resultset doesn't want to receive any more points, we're done // searching! return false; } DistanceType dst = dists[idx]; mindistsq = mindistsq + cut_dist - dst; dists[idx] = cut_dist; if (mindistsq * epsError <= result_set.worstDist()) { if (!searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError)) { // the resultset doesn't want to receive any more points, we're done // searching! return false; } } dists[idx] = dst; return true; } public: /** Stores the index in a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so when * loading the index object it must be constructed associated to the same * source of data points used while building it. See the example: * examples/saveload_example.cpp \sa loadIndex */ void saveIndex(FILE *stream) { this->saveIndex_(*this, stream); } /** Loads a previous index from a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so the * index object must be constructed associated to the same source of data * points used while building the index. 
See the example: * examples/saveload_example.cpp \sa loadIndex */ void loadIndex(FILE *stream) { this->loadIndex_(*this, stream); } }; // class KDTree /** kd-tree dynamic index * * Contains the k-d trees and other information for indexing a set of points * for nearest-neighbor matching. * * The class "DatasetAdaptor" must provide the following interface (can be * non-virtual, inlined methods): * * \code * // Must return the number of data poins * inline size_t kdtree_get_point_count() const { ... } * * // Must return the dim'th component of the idx'th point in the class: * inline T kdtree_get_pt(const size_t idx, const size_t dim) const { ... } * * // Optional bounding-box computation: return false to default to a standard * bbox computation loop. * // Return true if the BBOX was already computed by the class and returned * in "bb" so it can be avoided to redo it again. * // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 * for point clouds) template bool kdtree_get_bbox(BBOX &bb) const * { * bb[0].low = ...; bb[0].high = ...; // 0th dimension limits * bb[1].low = ...; bb[1].high = ...; // 1st dimension limits * ... * return true; * } * * \endcode * * \tparam DatasetAdaptor The user-provided adaptor (see comments above). * \tparam Distance The distance metric to use: nanoflann::metric_L1, * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \tparam DIM * Dimensionality of data points (e.g. 
3 for 3D points) \tparam IndexType Will * be typically size_t or int */ template class KDTreeSingleIndexDynamicAdaptor_ : public KDTreeBaseClass, Distance, DatasetAdaptor, DIM, IndexType> { public: /** * The dataset used by this index */ const DatasetAdaptor &dataset; //!< The source of our data KDTreeSingleIndexAdaptorParams index_params; std::vector &treeIndex; Distance distance; typedef typename nanoflann::KDTreeBaseClass< nanoflann::KDTreeSingleIndexDynamicAdaptor_, Distance, DatasetAdaptor, DIM, IndexType> BaseClassRef; typedef typename BaseClassRef::ElementType ElementType; typedef typename BaseClassRef::DistanceType DistanceType; typedef typename BaseClassRef::Node Node; typedef Node *NodePtr; typedef typename BaseClassRef::Interval Interval; /** Define "BoundingBox" as a fixed-size or variable-size container depending * on "DIM" */ typedef typename BaseClassRef::BoundingBox BoundingBox; /** Define "distance_vector_t" as a fixed-size or variable-size container * depending on "DIM" */ typedef typename BaseClassRef::distance_vector_t distance_vector_t; /** * KDTree constructor * * Refer to docs in README.md or online in * https://github.com/jlblancoc/nanoflann * * The KD-Tree point dimension (the length of each point in the datase, e.g. 3 * for 3D points) is determined by means of: * - The \a DIM template parameter if >0 (highest priority) * - Otherwise, the \a dimensionality parameter of this constructor. 
* * @param inputData Dataset with the input features * @param params Basically, the maximum leaf node size */ KDTreeSingleIndexDynamicAdaptor_( const int dimensionality, const DatasetAdaptor &inputData, std::vector &treeIndex_, const KDTreeSingleIndexAdaptorParams ¶ms = KDTreeSingleIndexAdaptorParams()) : dataset(inputData), index_params(params), treeIndex(treeIndex_), distance(inputData) { BaseClassRef::root_node = NULL; BaseClassRef::m_size = 0; BaseClassRef::m_size_at_index_build = 0; BaseClassRef::dim = dimensionality; if (DIM > 0) BaseClassRef::dim = DIM; BaseClassRef::m_leaf_max_size = params.leaf_max_size; } /** Assignment operator definiton */ KDTreeSingleIndexDynamicAdaptor_ operator=(const KDTreeSingleIndexDynamicAdaptor_ &rhs) { KDTreeSingleIndexDynamicAdaptor_ tmp(rhs); std::swap(BaseClassRef::vind, tmp.BaseClassRef::vind); std::swap(BaseClassRef::m_leaf_max_size, tmp.BaseClassRef::m_leaf_max_size); std::swap(index_params, tmp.index_params); std::swap(treeIndex, tmp.treeIndex); std::swap(BaseClassRef::m_size, tmp.BaseClassRef::m_size); std::swap(BaseClassRef::m_size_at_index_build, tmp.BaseClassRef::m_size_at_index_build); std::swap(BaseClassRef::root_node, tmp.BaseClassRef::root_node); std::swap(BaseClassRef::root_bbox, tmp.BaseClassRef::root_bbox); std::swap(BaseClassRef::pool, tmp.BaseClassRef::pool); return *this; } /** * Builds the index */ void buildIndex() { BaseClassRef::m_size = BaseClassRef::vind.size(); this->freeIndex(*this); BaseClassRef::m_size_at_index_build = BaseClassRef::m_size; if (BaseClassRef::m_size == 0) return; computeBoundingBox(BaseClassRef::root_bbox); BaseClassRef::root_node = this->divideTree(*this, 0, BaseClassRef::m_size, BaseClassRef::root_bbox); // construct the tree } /** \name Query methods * @{ */ /** * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored * inside the result object. 
* * Params: * result = the result object in which the indices of the * nearest-neighbors are stored vec = the vector for which to search the * nearest neighbors * * \tparam RESULTSET Should be any ResultSet * \return True if the requested neighbors could be found. * \sa knnSearch, radiusSearch */ template bool findNeighbors(RESULTSET &result, const ElementType *vec, const SearchParams &searchParams) const { assert(vec); if (this->size(*this) == 0) return false; if (!BaseClassRef::root_node) return false; float epsError = 1 + searchParams.eps; // fixed or variable-sized container (depending on DIM) distance_vector_t dists; // Fill it with zeros. assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim), static_cast(0)); DistanceType distsq = this->computeInitialDistances(*this, vec, dists); searchLevel(result, vec, BaseClassRef::root_node, distsq, dists, epsError); // "count_leaf" parameter removed since was neither // used nor returned to the user. return result.full(); } /** * Find the "num_closest" nearest neighbors to the \a query_point[0:dim-1]. * Their indices are stored inside the result object. \sa radiusSearch, * findNeighbors \note nChecks_IGNORED is ignored but kept for compatibility * with the original FLANN interface. \return Number `N` of valid points in * the result set. Only the first `N` entries in `out_indices` and * `out_distances_sq` will be valid. Return may be less than `num_closest` * only if the number of elements in the tree is less than `num_closest`. */ size_t knnSearch(const ElementType *query_point, const size_t num_closest, IndexType *out_indices, DistanceType *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const { nanoflann::KNNResultSet resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); this->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); return resultSet.size(); } /** * Find all the neighbors to \a query_point[0:dim-1] within a maximum radius. 
* The output is given as a vector of pairs, of which the first element is a * point index and the second the corresponding distance. Previous contents of * \a IndicesDists are cleared. * * If searchParams.sorted==true, the output list is sorted by ascending * distances. * * For a better performance, it is advisable to do a .reserve() on the vector * if you have any wild guess about the number of expected matches. * * \sa knnSearch, findNeighbors, radiusSearchCustomCallback * \return The number of points within the given radius (i.e. indices.size() * or dists.size() ) */ size_t radiusSearch(const ElementType *query_point, const DistanceType &radius, std::vector> &IndicesDists, const SearchParams &searchParams) const { RadiusResultSet resultSet(radius, IndicesDists); const size_t nFound = radiusSearchCustomCallback(query_point, resultSet, searchParams); if (searchParams.sorted) std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter()); return nFound; } /** * Just like radiusSearch() but with a custom callback class for each point * found in the radius of the query. See the source of RadiusResultSet<> as a * start point for your own classes. \sa radiusSearch */ template size_t radiusSearchCustomCallback( const ElementType *query_point, SEARCH_CALLBACK &resultSet, const SearchParams &searchParams = SearchParams()) const { this->findNeighbors(resultSet, query_point, searchParams); return resultSet.size(); } /** @} */ public: void computeBoundingBox(BoundingBox &bbox) { resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim)); if (dataset.kdtree_get_bbox(bbox)) { // Done! It was implemented in derived class } else { const size_t N = BaseClassRef::m_size; if (!N) throw std::runtime_error("[nanoflann] computeBoundingBox() called but " "no data points found."); for (int i = 0; i < (DIM > 0 ? 
DIM : BaseClassRef::dim); ++i) { bbox[i].low = bbox[i].high = this->dataset_get(*this, BaseClassRef::vind[0], i); } for (size_t k = 1; k < N; ++k) { for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) { if (this->dataset_get(*this, BaseClassRef::vind[k], i) < bbox[i].low) bbox[i].low = this->dataset_get(*this, BaseClassRef::vind[k], i); if (this->dataset_get(*this, BaseClassRef::vind[k], i) > bbox[i].high) bbox[i].high = this->dataset_get(*this, BaseClassRef::vind[k], i); } } } } /** * Performs an exact search in the tree starting from a node. * \tparam RESULTSET Should be any ResultSet */ template void searchLevel(RESULTSET &result_set, const ElementType *vec, const NodePtr node, DistanceType mindistsq, distance_vector_t &dists, const float epsError) const { /* If this is a leaf node, then do check and return. */ if ((node->child1 == NULL) && (node->child2 == NULL)) { // count_leaf += (node->lr.right-node->lr.left); // Removed since was // neither used nor returned to the user. DistanceType worst_dist = result_set.worstDist(); for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right; ++i) { const IndexType index = BaseClassRef::vind[i]; // reorder... : i; if (treeIndex[index] == -1) continue; DistanceType dist = distance.evalMetric( vec, index, (DIM > 0 ? DIM : BaseClassRef::dim)); if (dist < worst_dist) { if (!result_set.addPoint( static_cast(dist), static_cast( BaseClassRef::vind[i]))) { // the resultset doesn't want to receive any more points, we're done // searching! return; // false; } } } return; } /* Which child branch should be taken first? 
*/ int idx = node->node_type.sub.divfeat; ElementType val = vec[idx]; DistanceType diff1 = val - node->node_type.sub.divlow; DistanceType diff2 = val - node->node_type.sub.divhigh; NodePtr bestChild; NodePtr otherChild; DistanceType cut_dist; if ((diff1 + diff2) < 0) { bestChild = node->child1; otherChild = node->child2; cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx); } else { bestChild = node->child2; otherChild = node->child1; cut_dist = distance.accum_dist(val, node->node_type.sub.divlow, idx); } /* Call recursively to search next level down. */ searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); DistanceType dst = dists[idx]; mindistsq = mindistsq + cut_dist - dst; dists[idx] = cut_dist; if (mindistsq * epsError <= result_set.worstDist()) { searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); } dists[idx] = dst; } public: /** Stores the index in a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so when * loading the index object it must be constructed associated to the same * source of data points used while building it. See the example: * examples/saveload_example.cpp \sa loadIndex */ void saveIndex(FILE *stream) { this->saveIndex_(*this, stream); } /** Loads a previous index from a binary file. * IMPORTANT NOTE: The set of data points is NOT stored in the file, so the * index object must be constructed associated to the same source of data * points used while building the index. See the example: * examples/saveload_example.cpp \sa loadIndex */ void loadIndex(FILE *stream) { this->loadIndex_(*this, stream); } }; /** kd-tree dynaimic index * * class to create multiple static index and merge their results to behave as * single dynamic index as proposed in Logarithmic Approach. * * Example of usage: * examples/dynamic_pointcloud_example.cpp * * \tparam DatasetAdaptor The user-provided adaptor (see comments above). 
* \tparam Distance The distance metric to use: nanoflann::metric_L1, * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \tparam DIM * Dimensionality of data points (e.g. 3 for 3D points) \tparam IndexType Will * be typically size_t or int */ template class KDTreeSingleIndexDynamicAdaptor { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::DistanceType DistanceType; protected: size_t m_leaf_max_size; size_t treeCount; size_t pointCount; /** * The dataset used by this index */ const DatasetAdaptor &dataset; //!< The source of our data std::vector treeIndex; //!< treeIndex[idx] is the index of tree in which //!< point at idx is stored. treeIndex[idx]=-1 //!< means that point has been removed. KDTreeSingleIndexAdaptorParams index_params; int dim; //!< Dimensionality of each data point typedef KDTreeSingleIndexDynamicAdaptor_ index_container_t; std::vector index; public: /** Get a const ref to the internal list of indices; the number of indices is * adapted dynamically as the dataset grows in size. */ const std::vector &getAllIndices() const { return index; } private: /** finds position of least significant unset bit */ int First0Bit(IndexType num) { int pos = 0; while (num & 1) { num = num >> 1; pos++; } return pos; } /** Creates multiple empty trees to handle dynamic support */ void init() { typedef KDTreeSingleIndexDynamicAdaptor_ my_kd_tree_t; std::vector index_( treeCount, my_kd_tree_t(dim /*dim*/, dataset, treeIndex, index_params)); index = index_; } public: Distance distance; /** * KDTree constructor * * Refer to docs in README.md or online in * https://github.com/jlblancoc/nanoflann * * The KD-Tree point dimension (the length of each point in the datase, e.g. 3 * for 3D points) is determined by means of: * - The \a DIM template parameter if >0 (highest priority) * - Otherwise, the \a dimensionality parameter of this constructor. 
* * @param inputData Dataset with the input features * @param params Basically, the maximum leaf node size */ KDTreeSingleIndexDynamicAdaptor(const int dimensionality, const DatasetAdaptor &inputData, const KDTreeSingleIndexAdaptorParams ¶ms = KDTreeSingleIndexAdaptorParams(), const size_t maximumPointCount = 1000000000U) : dataset(inputData), index_params(params), distance(inputData) { treeCount = static_cast(std::log2(maximumPointCount)); pointCount = 0U; dim = dimensionality; treeIndex.clear(); if (DIM > 0) dim = DIM; m_leaf_max_size = params.leaf_max_size; init(); const size_t num_initial_points = dataset.kdtree_get_point_count(); if (num_initial_points > 0) { addPoints(0, num_initial_points - 1); } } /** Deleted copy constructor*/ KDTreeSingleIndexDynamicAdaptor( const KDTreeSingleIndexDynamicAdaptor &) = delete; /** Add points to the set, Inserts all points from [start, end] */ void addPoints(IndexType start, IndexType end) { size_t count = end - start + 1; treeIndex.resize(treeIndex.size() + count); for (IndexType idx = start; idx <= end; idx++) { int pos = First0Bit(pointCount); index[pos].vind.clear(); treeIndex[pointCount] = pos; for (int i = 0; i < pos; i++) { for (int j = 0; j < static_cast(index[i].vind.size()); j++) { index[pos].vind.push_back(index[i].vind[j]); if (treeIndex[index[i].vind[j]] != -1) treeIndex[index[i].vind[j]] = pos; } index[i].vind.clear(); index[i].freeIndex(index[i]); } index[pos].vind.push_back(idx); index[pos].buildIndex(); pointCount++; } } /** Remove a point from the set (Lazy Deletion) */ void removePoint(size_t idx) { if (idx >= pointCount) return; treeIndex[idx] = -1; } /** * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored * inside the result object. 
* * Params: * result = the result object in which the indices of the * nearest-neighbors are stored vec = the vector for which to search the * nearest neighbors * * \tparam RESULTSET Should be any ResultSet * \return True if the requested neighbors could be found. * \sa knnSearch, radiusSearch */ template bool findNeighbors(RESULTSET &result, const ElementType *vec, const SearchParams &searchParams) const { for (size_t i = 0; i < treeCount; i++) { index[i].findNeighbors(result, &vec[0], searchParams); } return result.full(); } }; /** An L2-metric KD-tree adaptor for working with data directly stored in an * Eigen Matrix, without duplicating the data storage. Each row in the matrix * represents a point in the state space. * * Example of usage: * \code * Eigen::Matrix mat; * // Fill out "mat"... * * typedef KDTreeEigenMatrixAdaptor< Eigen::Matrix > * my_kd_tree_t; const int max_leaf = 10; my_kd_tree_t mat_index(mat, max_leaf * ); mat_index.index->buildIndex(); mat_index.index->... \endcode * * \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality * for the points in the data set, allowing more compiler optimizations. \tparam * Distance The distance metric to use: nanoflann::metric_L1, * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. */ template struct KDTreeEigenMatrixAdaptor { typedef KDTreeEigenMatrixAdaptor self_t; typedef typename MatrixType::Scalar num_t; typedef typename MatrixType::Index IndexType; typedef typename Distance::template traits::distance_t metric_t; typedef KDTreeSingleIndexAdaptor index_t; index_t *index; //! The kd-tree index for the user to call its methods as //! usual with any other FLANN index. 
/// Constructor: takes a const ref to the matrix object with the data points KDTreeEigenMatrixAdaptor(const size_t dimensionality, const std::reference_wrapper &mat, const int leaf_max_size = 10) : m_data_matrix(mat) { const auto dims = mat.get().cols(); if (size_t(dims) != dimensionality) throw std::runtime_error( "Error: 'dimensionality' must match column count in data matrix"); if (DIM > 0 && int(dims) != DIM) throw std::runtime_error( "Data set dimensionality does not match the 'DIM' template argument"); index = new index_t(static_cast(dims), *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size)); index->buildIndex(); } public: /** Deleted copy constructor */ KDTreeEigenMatrixAdaptor(const self_t &) = delete; ~KDTreeEigenMatrixAdaptor() { delete index; } const std::reference_wrapper m_data_matrix; /** Query for the \a num_closest closest points to a given point (entered as * query_point[0:dim-1]). Note that this is a short-cut method for * index->findNeighbors(). The user can also call index->... methods as * desired. \note nChecks_IGNORED is ignored but kept for compatibility with * the original FLANN interface. 
*/ inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int /* nChecks_IGNORED */ = 10) const { nanoflann::KNNResultSet resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); } /** @name Interface expected by KDTreeSingleIndexAdaptor * @{ */ const self_t &derived() const { return *this; } self_t &derived() { return *this; } // Must return the number of data points inline size_t kdtree_get_point_count() const { return m_data_matrix.get().rows(); } // Returns the dim'th component of the idx'th point in the class: inline num_t kdtree_get_pt(const IndexType idx, size_t dim) const { return m_data_matrix.get().coeff(idx, IndexType(dim)); } // Optional bounding-box computation: return false to default to a standard // bbox computation loop. // Return true if the BBOX was already computed by the class and returned in // "bb" so it can be avoided to redo it again. Look at bb.size() to find out // the expected dimensionality (e.g. 2 or 3 for point clouds) template bool kdtree_get_bbox(BBOX & /*bb*/) const { return false; } /** @} */ }; // end of KDTreeEigenMatrixAdaptor /** @} */ /** @} */ // end of grouping } // namespace nanoflann #endif /* NANOFLANN_HPP_ */ ================================================ FILE: SC-PGO/launch/livox_mapping.launch ================================================ [0.997573, -0.024254, 0.065274, 0.322019, 0.023477, 0.999644, 0.012663, 0.043158, -0.066402, -0.011080, 0.997733, 0.456606, 0.0, 0.0, 0.0, 1.0] ================================================ FILE: SC-PGO/package.xml ================================================ livox_mapping 0.0.0 This is an advanced implentation of LOAM. LOAM is described in the following paper: J. Zhang and S. Singh. LOAM: Lidar Odometry and Mapping in Real-time. Robotics: Science and Systems Conference (RSS). Berkeley, CA, July 2014. 
qintong BSD Ji Zhang catkin geometry_msgs nav_msgs roscpp rospy std_msgs rosbag sensor_msgs tf tf_conversions image_transport ad_localization_msgs cv_bridge geometry_msgs nav_msgs sensor_msgs roscpp rospy std_msgs rosbag tf tf_conversions image_transport ad_localization_msgs ================================================ FILE: SC-PGO/rviz_cfg/livox_mapping.rviz ================================================ Panels: - Class: rviz/Displays Help Height: 85 Name: Displays Property Tree Widget: Expanded: - /Global Options1 - /Status1 - /odometry1/odomPath1 - /pgoOdom1/Shape1 - /pgoMap1 - /loop scan1 - /loop submap1 Splitter Ratio: 0.440559446811676 Tree Height: 768 - Class: rviz/Selection Name: Selection - Class: rviz/Tool Properties Expanded: - /2D Pose Estimate1 - /2D Nav Goal1 - /Publish Point1 Name: Tool Properties Splitter Ratio: 0.5886790156364441 - Class: rviz/Views Expanded: - /Current View1 Name: Views Splitter Ratio: 0.524163544178009 - Class: rviz/Time Experimental: false Name: Time SyncMode: 0 SyncSource: pgoMap - Class: rviz/Views Expanded: - /Current View1 Name: pgo Views Splitter Ratio: 0.5 - Class: rviz/Views Expanded: - /Current View1 Name: Views Splitter Ratio: 0.5 Preferences: PromptSaveOnExit: true Toolbars: toolButtonStyle: 2 Visualization Manager: Class: "" Displays: - Alpha: 0.5 Cell Size: 1 Class: rviz/Grid Color: 160; 160; 164 Enabled: false Line Style: Line Width: 0.029999999329447746 Value: Lines Name: Grid Normal Cell Count: 0 Offset: X: 0 Y: 0 Z: 0 Plane: XY Plane Cell Count: 160 Reference Frame: Value: false - Class: rviz/Axes Enabled: true Length: 1 Name: Axes Radius: 0.10000000149011612 Reference Frame: camera_init Value: true - Class: rviz/Group Displays: - Alpha: 1 Buffer Length: 1 Class: rviz/Path Color: 255; 170; 0 Enabled: false Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 Line Style: Lines Line Width: 0.029999999329447746 Name: gtPathlc Offset: X: 0 Y: 0 Z: 0 Pose Color: 255; 
85; 255 Pose Style: None Radius: 0.029999999329447746 Shaft Diameter: 0.10000000149011612 Shaft Length: 0.10000000149011612 Topic: /path_gt Unreliable: false Value: false - Alpha: 1 Buffer Length: 1 Class: rviz/Path Color: 255; 170; 0 Enabled: true Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 Line Style: Lines Line Width: 0.029999999329447746 Name: gtPathLidar Offset: X: 0 Y: 0 Z: 0 Pose Color: 255; 85; 255 Pose Style: None Radius: 0.029999999329447746 Shaft Diameter: 0.10000000149011612 Shaft Length: 0.10000000149011612 Topic: /path_gt_lidar Unreliable: false Value: true Enabled: false Name: GT - Class: rviz/Group Displays: - Alpha: 1 Buffer Length: 1 Class: rviz/Path Color: 204; 0; 0 Enabled: true Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 Line Style: Billboards Line Width: 0.20000000298023224 Name: odomPath Offset: X: 0 Y: 0 Z: 0 Pose Color: 255; 85; 255 Pose Style: None Radius: 0.029999999329447746 Shaft Diameter: 0.10000000149011612 Shaft Length: 0.10000000149011612 Topic: /laser_odom_path Unreliable: false Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 255; 255 Color Transformer: FlatColor Decay Time: 0 Enabled: false Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 63.10047912597656 Min Color: 0; 0; 0 Min Intensity: -0.0067862942814826965 Name: PointCloud2 Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 3 Size (m): 0.05000000074505806 Style: Flat Squares Topic: /velodyne_cloud_2 Unreliable: false Use Fixed Frame: true Use rainbow: true Value: false Enabled: true Name: odometry - Class: rviz/Group Displays: - Alpha: 1 Buffer Length: 1 Class: rviz/Path Color: 4; 69; 246 Enabled: true Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 
Line Style: Billboards Line Width: 0.30000001192092896 Name: mappingPath Offset: X: 0 Y: 0 Z: 0 Pose Color: 255; 85; 255 Pose Style: None Radius: 0.029999999329447746 Shaft Diameter: 0.10000000149011612 Shaft Length: 0.10000000149011612 Topic: /aft_mapped_path Unreliable: false Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 255; 255 Color Transformer: Intensity Decay Time: 0 Enabled: false Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 50.14332962036133 Min Color: 0; 0; 0 Min Intensity: -0.04392780363559723 Name: allMapCloud Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 4 Size (m): 0.20000000298023224 Style: Points Topic: /laser_cloud_map Unreliable: false Use Fixed Frame: true Use rainbow: false Value: false - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 255; 255 Color Transformer: FlatColor Decay Time: 0 Enabled: false Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 15 Min Color: 0; 0; 0 Min Intensity: 0 Name: surround Position Transformer: XYZ Queue Size: 1 Selectable: true Size (Pixels): 1 Size (m): 0.05000000074505806 Style: Squares Topic: /laser_cloud_surround Unreliable: false Use Fixed Frame: true Use rainbow: true Value: false - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 255; 255 Color Transformer: Intensity Decay Time: 0 Enabled: false Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 50.141441345214844 Min Color: 0; 0; 0 Min Intensity: -0.024373823776841164 Name: currPoints Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 1.5 Size (m): 0.10000000149011612 
Style: Squares Topic: /velodyne_cloud_registered Unreliable: false Use Fixed Frame: true Use rainbow: true Value: false - Angle Tolerance: 0.10000000149011612 Class: rviz/Odometry Covariance: Orientation: Alpha: 0.5 Color: 255; 255; 127 Color Style: Unique Frame: Local Offset: 1 Scale: 1 Value: true Position: Alpha: 0.30000001192092896 Color: 204; 51; 204 Scale: 1 Value: true Value: true Enabled: false Keep: 1 Name: Odometry Position Tolerance: 0.10000000149011612 Shape: Alpha: 1 Axes Length: 1.5 Axes Radius: 0.5 Color: 255; 25; 0 Head Length: 0.10000000149011612 Head Radius: 2 Shaft Length: 0.10000000149011612 Shaft Radius: 20 Value: Axes Topic: /aft_mapped_to_init Unreliable: false Value: false - Class: rviz/TF Enabled: false Frame Timeout: 15 Frames: All Enabled: true Marker Scale: 1 Name: TF Show Arrows: true Show Axes: true Show Names: true Tree: {} Update Interval: 0 Value: false Enabled: true Name: mapping - Alpha: 1 Buffer Length: 1 Class: rviz/Path Color: 25; 255; 0 Enabled: true Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 Line Style: Billboards Line Width: 0.4000000059604645 Name: pgoPath Offset: X: 0 Y: 0 Z: 0 Pose Color: 255; 85; 255 Pose Style: Axes Radius: 0.10000000149011612 Shaft Diameter: 0.10000000149011612 Shaft Length: 0.10000000149011612 Topic: /aft_pgo_path Unreliable: false Value: true - Angle Tolerance: 0.10000000149011612 Class: rviz/Odometry Covariance: Orientation: Alpha: 0.5 Color: 255; 255; 127 Color Style: Unique Frame: Local Offset: 1 Scale: 1 Value: true Position: Alpha: 0.30000001192092896 Color: 204; 51; 204 Scale: 1 Value: true Value: true Enabled: true Keep: 1 Name: pgoOdom Position Tolerance: 0.10000000149011612 Shape: Alpha: 1 Axes Length: 2 Axes Radius: 0.699999988079071 Color: 255; 25; 0 Head Length: 0.30000001192092896 Head Radius: 0.10000000149011612 Shaft Length: 1 Shaft Radius: 0.05000000074505806 Value: Axes Topic: /aft_pgo_odom Unreliable: false Value: true - Alpha: 
0.10000000149011612 Autocompute Intensity Bounds: false Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 255; 255 Color Transformer: Intensity Decay Time: 0 Enabled: true Invert Rainbow: true Max Color: 255; 255; 255 Max Intensity: 30 Min Color: 0; 0; 0 Min Intensity: 0 Name: pgoMap Position Transformer: XYZ Queue Size: 1 Selectable: true Size (Pixels): 2 Size (m): 0.4000000059604645 Style: Points Topic: /aft_pgo_map Unreliable: false Use Fixed Frame: true Use rainbow: true Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 10 Min Value: -10 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 0; 255; 0 Color Transformer: FlatColor Decay Time: 0 Enabled: false Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 20.06364631652832 Min Color: 0; 0; 0 Min Intensity: 3.003571033477783 Name: loop scan Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 6 Size (m): 0.5 Style: Points Topic: /loop_scan_local Unreliable: false Use Fixed Frame: true Use rainbow: true Value: false - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: Max Value: 32.84956741333008 Min Value: -16.756437301635742 Value: true Axis: Z Channel Name: intensity Class: rviz/PointCloud2 Color: 255; 31; 11 Color Transformer: FlatColor Decay Time: 0 Enabled: false Invert Rainbow: false Max Color: 255; 255; 127 Max Intensity: 172 Min Color: 0; 0; 0 Min Intensity: 0 Name: loop submap Position Transformer: XYZ Queue Size: 10 Selectable: true Size (Pixels): 5 Size (m): 0.20000000298023224 Style: Points Topic: /loop_submap_local Unreliable: false Use Fixed Frame: true Use rainbow: true Value: false Enabled: true Global Options: Background Color: 255; 255; 255 Default Light: true Fixed Frame: camera_init Frame Rate: 30 Name: root Tools: - Class: rviz/Interact Hide Inactive Objects: true - Class: rviz/MoveCamera - 
Class: rviz/Select - Class: rviz/FocusCamera - Class: rviz/Measure - Class: rviz/SetInitialPose Theta std deviation: 0.2617993950843811 Topic: /initialpose X std deviation: 0.5 Y std deviation: 0.5 - Class: rviz/SetGoal Topic: /move_base_simple/goal - Class: rviz/PublishPoint Single click: true Topic: /clicked_point Value: true Views: Current: Class: rviz/Orbit Distance: 206.20489501953125 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: X: 45.543460845947266 Y: -22.18519401550293 Z: 0.49449437856674194 Focal Shape Fixed Size: true Focal Shape Size: 0.05000000074505806 Invert Z Axis: false Name: Current View Near Clip Distance: 0.009999999776482582 Pitch: 1.0561977624893188 Target Frame: camera_init Value: Orbit (rviz) Yaw: 2.9488861560821533 Saved: ~ Window Geometry: Displays: collapsed: false Height: 1016 Hide Left Dock: false Hide Right Dock: false QMainWindow State: 000000ff00000000fd0000000400000000000001af00000396fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006b00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000004400000396000000eb00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006500000002ef000000c30000000000000000fb0000001200700067006f00200056006900650077007300000002db000000d7000000b900ffffff000000010000010f00000396fc0200000003fb0000001e0054006f006f006c002000500072006f0070006500720074006900650073010
0000041000000780000000000000000fc0000004400000396000000da01000020fa000000000100000002fb0000000a005600690065007700730100000000ffffffff0000010200fffffffb0000000a0056006900650077007301000006710000010f0000010200fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007530000003efc0100000002fb0000000800540069006d0065000000000000000753000004f900fffffffb0000000800540069006d00650100000000000004500000000000000000000004870000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: collapsed: false Tool Properties: collapsed: false Views: collapsed: false Width: 1875 X: 685 Y: 104 pgo Views: collapsed: false ================================================ FILE: SC-PGO/src/laserPosegraphOptimization.cpp ================================================ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "csignal" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "livox_mapping/common.h" #include "livox_mapping/tic_toc.h" #include "scancontext/Scancontext.h" using namespace gtsam; using std::cout; using std::endl; double keyframeMeterGap; double keyframeDegGap, keyframeRadGap; double translationAccumulated = 1000000.0; // large value means must add the first given frame. double rotaionAccumulated = 1000000.0; // large value means must add the first given frame. 
bool isNowKeyFrame = false; Pose6D odom_pose_prev {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // init Pose6D odom_pose_curr {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // init pose is zero Pose6D currRTK {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; tf::Transform odom_to_enu; // init rtk pose in enu tf::Transform T_livox2gnss; std::queue odometryBuf; std::queue fullResBuf; std::queue gpsBuf; std::deque rtkBuf; std::queue > scLoopICPBuf; std::mutex mBuf; std::mutex mKF; std::mutex rtkBufLock; std::mutex rtkKFLock; double timeLaserOdometry = 0.0; double timeLaser = 0.0; pcl::PointCloud::Ptr laserCloudFullRes(new pcl::PointCloud()); pcl::PointCloud::Ptr laserCloudMapAfterPGO(new pcl::PointCloud()); std::vector::Ptr> keyframeLaserClouds; std::vector keyframePoses; std::vector keyframePosesUpdated; std::unordered_map keyframePosesRTK; std::vector keyframeTimes; std::vector keyframeTimesOri; int recentIdxUpdated = 0; gtsam::NonlinearFactorGraph gtSAMgraph; bool gtSAMgraphMade = false; gtsam::Values initialEstimate; gtsam::ISAM2 *isam; gtsam::Values isamCurrentEstimate; noiseModel::Diagonal::shared_ptr priorNoise; noiseModel::Diagonal::shared_ptr odomNoise; noiseModel::Diagonal::shared_ptr RTKNoise; noiseModel::Base::shared_ptr robustLoopNoise; noiseModel::Base::shared_ptr robustGPSNoise; pcl::VoxelGrid downSizeFilterScancontext; SCManager scManager; double scDistThres, scMaximumRadius; pcl::VoxelGrid downSizeFilterICP; std::mutex mtxICP; std::mutex mtxPosegraph; std::mutex mtxRecentPose; pcl::PointCloud::Ptr laserCloudMapPGO(new pcl::PointCloud()); pcl::VoxelGrid downSizeFilterMapPGO; bool laserCloudMapPGORedraw = true; bool useRTK = false; sensor_msgs::NavSatFix::ConstPtr currGPS; bool hasGPSforThisKF = false; bool gpsOffsetInitialized = false; bool hasRTKforThisKF = false; bool rtkOffsetInitialized = false; double gpsAltitudeInitOffset = 0.0; double recentOptimizedX = 0.0; double recentOptimizedY = 0.0; ros::Publisher pubMapAftPGO, pubOdomAftPGO, pubPathAftPGO, pubOdomResultForInteractiveSlam, 
pubMapResultForInteractiveSlam; ros::Publisher pubLoopScanLocal, pubLoopSubmapLocal; ros::Publisher pubOdomRepubVerifier; // save TUM format path std::string save_path; std::string save_pcd_path; std::shared_ptr odo; std::string padZeros(int val, int num_digits = 6) { std::ostringstream out; out << std::internal << std::setfill('0') << std::setw(num_digits) << val; return out.str(); } gtsam::Pose3 Pose6DtoGTSAMPose3(const Pose6D& p) { return gtsam::Pose3( gtsam::Rot3::RzRyRx(p.roll, p.pitch, p.yaw), gtsam::Point3(p.x, p.y, p.z) ); } // Pose6DtoGTSAMPose3 tf::Transform Pose6DtoTransform(const Pose6D& p) { tf::Vector3 t(p.x, p.y, p.z); tf::Quaternion q; q.setRPY(p.roll, p.pitch, p.yaw); tf::Transform tran(q, t); return tf::Transform(q, t); } // Pose6DtoTransform Pose6D TransformtoPose6D(const tf::Transform& trans) { double roll, pitch, yaw; tf::Matrix3x3(trans.getRotation()).getRPY(roll, pitch, yaw); tf::Vector3 t=trans.getOrigin(); return Pose6D{t[0], t[1], t[2], roll, pitch, yaw}; } // TransformtoPose6D Pose6D PoseInterpolation(const Pose6D& p_before, const Pose6D& p_after, double step_length) { tf::Quaternion q_before, q_after; q_before.setRPY(p_before.roll, p_before.pitch, p_before.yaw); q_after.setRPY(p_after.roll, p_after.pitch, p_after.yaw); tf::Vector3 t_before(p_before.x, p_before.y, p_before.z); tf::Vector3 t_after(p_after.x, p_after.y, p_after.z); tf::Quaternion q_curr = q_before.slerp(q_after, step_length); tf::Vector3 t_curr = t_before.lerp(t_after, step_length); return TransformtoPose6D(tf::Transform(q_curr, t_curr)); } // PoseInterpolation void laserOdometryHandler(const nav_msgs::Odometry::ConstPtr &_laserOdometry) { static double last_odom_time = 0; mBuf.lock(); if (!useRTK && keyframeTimes.size() && abs(_laserOdometry->header.stamp.toSec() - last_odom_time) > 1) { std::cout << std::endl << std::endl << std::endl << std::endl; std::cout << "the time gap is " << _laserOdometry->header.stamp.toSec() - keyframeTimes.back() << std::endl; std::cout << 
"receive the next rosbag" << std::endl; std::cout << std::endl << std::endl << std::endl << std::endl; gtSAMgraphMade = false; } odometryBuf.push(_laserOdometry); last_odom_time = _laserOdometry->header.stamp.toSec(); mBuf.unlock(); } // laserOdometryHandler void laserCloudFullResHandler(const sensor_msgs::PointCloud2ConstPtr &_laserCloudFullRes) { mBuf.lock(); fullResBuf.push(_laserCloudFullRes); mBuf.unlock(); } // laserCloudFullResHandler void rtkHandler(const ad_localization_msgs::NavStateInfo::ConstPtr &_rtk) { static double last_rtk_time = 0; rtkBufLock.lock(); if (rtkBuf.size() && abs(_rtk->header.stamp.toSec() - last_rtk_time) > 0.2) { std::cout << std::endl << std::endl << std::endl << std::endl; std::cout << "the time gap is " << _rtk->header.stamp.toSec() - last_rtk_time << std::endl; std::cout << "receive the next rosbag" << std::endl; std::cout << std::endl << std::endl << std::endl << std::endl; rtkBuf.clear(); rtkOffsetInitialized = false; gtSAMgraphMade = false; } rtkBuf.push_back(_rtk); last_rtk_time = _rtk->header.stamp.toSec(); rtkBufLock.unlock(); } // gpsHandler void initNoises( void ) { gtsam::Vector priorNoiseVector6(6); priorNoiseVector6 << 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12; priorNoise = noiseModel::Diagonal::Variances(priorNoiseVector6); gtsam::Vector odomNoiseVector6(6); // odomNoiseVector6 << 1e-4, 1e-4, 1e-4, 1e-4, 1e-4, 1e-4; odomNoiseVector6 << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4; odomNoise = noiseModel::Diagonal::Variances(odomNoiseVector6); gtsam::Vector RTKNoiseVector6(6); RTKNoiseVector6 << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4; RTKNoise = noiseModel::Diagonal::Variances(RTKNoiseVector6); double loopNoiseScore = 0.001; // constant is ok... 
gtsam::Vector robustNoiseVector6(6); // gtsam::Pose3 factor has 6 elements (6D) robustNoiseVector6 << loopNoiseScore, loopNoiseScore, loopNoiseScore, loopNoiseScore, loopNoiseScore, loopNoiseScore; robustLoopNoise = gtsam::noiseModel::Robust::Create( gtsam::noiseModel::mEstimator::Cauchy::Create(1), // optional: replacing Cauchy by DCS or GemanMcClure is okay but Cauchy is empirically good. gtsam::noiseModel::Diagonal::Variances(robustNoiseVector6) ); double bigNoiseTolerentToXY = 1000000000.0; // 1e9 double gpsAltitudeNoiseScore = 250.0; // if height is misaligned after loop clsosing, use this value bigger gtsam::Vector robustNoiseVector3(3); // gps factor has 3 elements (xyz) robustNoiseVector3 << bigNoiseTolerentToXY, bigNoiseTolerentToXY, gpsAltitudeNoiseScore; // means only caring altitude here. (because LOAM-like-methods tends to be asymptotically flyging) robustGPSNoise = gtsam::noiseModel::Robust::Create( gtsam::noiseModel::mEstimator::Cauchy::Create(1), // optional: replacing Cauchy by DCS or GemanMcClure is okay but Cauchy is empirically good. 
gtsam::noiseModel::Diagonal::Variances(robustNoiseVector3) ); } // initNoises Pose6D getOdom(nav_msgs::Odometry::ConstPtr _odom) { auto tx = _odom->pose.pose.position.x; auto ty = _odom->pose.pose.position.y; auto tz = _odom->pose.pose.position.z; double roll, pitch, yaw; geometry_msgs::Quaternion quat = _odom->pose.pose.orientation; tf::Matrix3x3(tf::Quaternion(quat.x, quat.y, quat.z, quat.w)).getRPY(roll, pitch, yaw); return Pose6D{tx, ty, tz, roll, pitch, yaw}; } // getOdom Pose6D diffTransformation(const Pose6D& _p1, const Pose6D& _p2) { Eigen::Affine3f SE3_p1 = pcl::getTransformation(_p1.x, _p1.y, _p1.z, _p1.roll, _p1.pitch, _p1.yaw); Eigen::Affine3f SE3_p2 = pcl::getTransformation(_p2.x, _p2.y, _p2.z, _p2.roll, _p2.pitch, _p2.yaw); Eigen::Matrix4f SE3_delta0 = SE3_p1.matrix().inverse() * SE3_p2.matrix(); Eigen::Affine3f SE3_delta; SE3_delta.matrix() = SE3_delta0; float dx, dy, dz, droll, dpitch, dyaw; pcl::getTranslationAndEulerAngles (SE3_delta, dx, dy, dz, droll, dpitch, dyaw); // std::cout << "delta : " << dx << ", " << dy << ", " << dz << ", " << droll << ", " << dpitch << ", " << dyaw << std::endl; return Pose6D{double(abs(dx)), double(abs(dy)), double(abs(dz)), double(abs(droll)), double(abs(dpitch)), double(abs(dyaw))}; } // SE3Diff Pose6D convertOdomToENU(const Pose6D &pose_curr) { tf::Transform T_curr = Pose6DtoTransform(pose_curr); tf::Transform T_curr_enu; T_curr_enu.mult(odom_to_enu, T_curr); return TransformtoPose6D(T_curr_enu); } // getOdom pcl::PointCloud::Ptr local2global(const pcl::PointCloud::Ptr &cloudIn, const Pose6D& tf) { pcl::PointCloud::Ptr cloudOut(new pcl::PointCloud()); int cloudSize = cloudIn->size(); cloudOut->resize(cloudSize); Eigen::Affine3f transCur = pcl::getTransformation(tf.x, tf.y, tf.z, tf.roll, tf.pitch, tf.yaw); int numberOfCores = 16; #pragma omp parallel for num_threads(numberOfCores) for (int i = 0; i < cloudSize; ++i) { const auto &pointFrom = cloudIn->points[i]; cloudOut->points[i].x = transCur(0,0) * pointFrom.x + 
transCur(0,1) * pointFrom.y + transCur(0,2) * pointFrom.z + transCur(0,3); cloudOut->points[i].y = transCur(1,0) * pointFrom.x + transCur(1,1) * pointFrom.y + transCur(1,2) * pointFrom.z + transCur(1,3); cloudOut->points[i].z = transCur(2,0) * pointFrom.x + transCur(2,1) * pointFrom.y + transCur(2,2) * pointFrom.z + transCur(2,3); cloudOut->points[i].intensity = pointFrom.intensity; } return cloudOut; } void pubMap(void) { int SKIP_FRAMES = 2; // sparse map visulalization to save computations int counter = 0; mKF.lock(); laserCloudMapPGO->clear(); // for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) { for (int node_idx=0; node_idx < recentIdxUpdated; node_idx++) { if(counter % SKIP_FRAMES == 0) { *laserCloudMapPGO += *local2global(keyframeLaserClouds[node_idx], keyframePosesUpdated[node_idx]); } counter++; } mKF.unlock(); downSizeFilterMapPGO.setInputCloud(laserCloudMapPGO); downSizeFilterMapPGO.filter(*laserCloudMapPGO); sensor_msgs::PointCloud2 laserCloudMapPGOMsg; pcl::toROSMsg(*laserCloudMapPGO, laserCloudMapPGOMsg); laserCloudMapPGOMsg.header.frame_id = "/camera_init"; pubMapAftPGO.publish(laserCloudMapPGOMsg); } void pubPath( void ) { // pub odom and path nav_msgs::Odometry odomAftPGO; nav_msgs::Path pathAftPGO; pathAftPGO.header.frame_id = "/camera_init"; mKF.lock(); // for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()) - 1; node_idx++) // -1 is just delayed visualization (because sometimes mutexed while adding(push_back) a new one) for (int node_idx=0; node_idx < recentIdxUpdated; node_idx++) // -1 is just delayed visualization (because sometimes mutexed while adding(push_back) a new one) { const Pose6D& pose_est = keyframePosesUpdated.at(node_idx); // upodated poses // const gtsam::Pose3& pose_est = isamCurrentEstimate.at(node_idx); nav_msgs::Odometry odomAftPGOthis; odomAftPGOthis.header.frame_id = "/camera_init"; odomAftPGOthis.child_frame_id = "/aft_pgo"; odomAftPGOthis.header.stamp = 
ros::Time().fromSec(keyframeTimes.at(node_idx)); odomAftPGOthis.pose.pose.position.x = pose_est.x; odomAftPGOthis.pose.pose.position.y = pose_est.y; odomAftPGOthis.pose.pose.position.z = pose_est.z; odomAftPGOthis.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(pose_est.roll, pose_est.pitch, pose_est.yaw); odomAftPGO = odomAftPGOthis; geometry_msgs::PoseStamped poseStampAftPGO; poseStampAftPGO.header = odomAftPGOthis.header; poseStampAftPGO.pose = odomAftPGOthis.pose.pose; pathAftPGO.header.stamp = odomAftPGOthis.header.stamp; pathAftPGO.header.frame_id = "/camera_init"; pathAftPGO.poses.push_back(poseStampAftPGO); } mKF.unlock(); pubOdomAftPGO.publish(odomAftPGO); // last pose pubPathAftPGO.publish(pathAftPGO); // poses static tf::TransformBroadcaster br; tf::Transform transform; tf::Quaternion q; transform.setOrigin(tf::Vector3(odomAftPGO.pose.pose.position.x, odomAftPGO.pose.pose.position.y, odomAftPGO.pose.pose.position.z)); q.setW(odomAftPGO.pose.pose.orientation.w); q.setX(odomAftPGO.pose.pose.orientation.x); q.setY(odomAftPGO.pose.pose.orientation.y); q.setZ(odomAftPGO.pose.pose.orientation.z); transform.setRotation(q); br.sendTransform(tf::StampedTransform(transform, odomAftPGO.header.stamp, "/camera_init", "/aft_pgo")); } // pubPath void updatePoses(void) { mKF.lock(); for (int node_idx=0; node_idx < int(isamCurrentEstimate.size()); node_idx++) { Pose6D& p =keyframePosesUpdated[node_idx]; p.x = isamCurrentEstimate.at(node_idx).translation().x(); p.y = isamCurrentEstimate.at(node_idx).translation().y(); p.z = isamCurrentEstimate.at(node_idx).translation().z(); p.roll = isamCurrentEstimate.at(node_idx).rotation().roll(); p.pitch = isamCurrentEstimate.at(node_idx).rotation().pitch(); p.yaw = isamCurrentEstimate.at(node_idx).rotation().yaw(); } mKF.unlock(); mtxRecentPose.lock(); const gtsam::Pose3& lastOptimizedPose = isamCurrentEstimate.at(int(isamCurrentEstimate.size())-1); recentOptimizedX = lastOptimizedPose.translation().x(); 
recentOptimizedY = lastOptimizedPose.translation().y(); recentIdxUpdated = int(keyframePosesUpdated.size()) - 1; mtxRecentPose.unlock(); } // updatePoses void runISAM2opt(void) { // called when a variable added isam->update(gtSAMgraph, initialEstimate); isam->update(); gtSAMgraph.resize(0); initialEstimate.clear(); isamCurrentEstimate = isam->calculateEstimate(); updatePoses(); } pcl::PointCloud::Ptr transformPointCloud(pcl::PointCloud::Ptr cloudIn, gtsam::Pose3 transformIn) { pcl::PointCloud::Ptr cloudOut(new pcl::PointCloud()); PointType *pointFrom; int cloudSize = cloudIn->size(); cloudOut->resize(cloudSize); Eigen::Affine3f transCur = pcl::getTransformation( transformIn.translation().x(), transformIn.translation().y(), transformIn.translation().z(), transformIn.rotation().roll(), transformIn.rotation().pitch(), transformIn.rotation().yaw() ); int numberOfCores = 8; // TODO move to yaml #pragma omp parallel for num_threads(numberOfCores) for (int i = 0; i < cloudSize; ++i) { pointFrom = &cloudIn->points[i]; cloudOut->points[i].x = transCur(0,0) * pointFrom->x + transCur(0,1) * pointFrom->y + transCur(0,2) * pointFrom->z + transCur(0,3); cloudOut->points[i].y = transCur(1,0) * pointFrom->x + transCur(1,1) * pointFrom->y + transCur(1,2) * pointFrom->z + transCur(1,3); cloudOut->points[i].z = transCur(2,0) * pointFrom->x + transCur(2,1) * pointFrom->y + transCur(2,2) * pointFrom->z + transCur(2,3); cloudOut->points[i].intensity = pointFrom->intensity; } return cloudOut; } // transformPointCloud void loopFindNearKeyframesCloud( pcl::PointCloud::Ptr& nearKeyframes, pcl::PointCloud::Ptr& nearKeyframesViz, const int& key, const int& submap_size) { // extract and stacking near keyframes (in global coord) nearKeyframes->clear(); mKF.lock(); tf::Transform T_key = Pose6DtoTransform(keyframePosesUpdated[key]); for (int i = -submap_size; i <= submap_size; ++i) { int keyNear = key + i; if (keyNear < 0 || keyNear >= int(keyframeLaserClouds.size()) ) continue; tf::Transform 
T_keyNear = Pose6DtoTransform(keyframePosesUpdated[keyNear]); Pose6D pose_relative = TransformtoPose6D(T_key.inverseTimes(T_keyNear)); *nearKeyframes += *local2global(keyframeLaserClouds[keyNear], pose_relative); *nearKeyframesViz += *local2global(keyframeLaserClouds[keyNear], keyframePosesUpdated[keyNear]); } mKF.unlock(); if (nearKeyframes->empty()) return; // downsample near keyframes pcl::PointCloud::Ptr cloud_temp(new pcl::PointCloud()); downSizeFilterICP.setInputCloud(nearKeyframes); downSizeFilterICP.filter(*cloud_temp); *nearKeyframes = *cloud_temp; downSizeFilterICP.setInputCloud(nearKeyframesViz); downSizeFilterICP.filter(*cloud_temp); *nearKeyframesViz = *cloud_temp; } // loopFindNearKeyframesCloud std::optional doICPVirtualRelative( int _loop_kf_idx, int _curr_kf_idx ) { // Check that the loopGap is correct Pose6D pose_loop; Pose6D pose_curr; if (useRTK) { rtkKFLock.lock(); auto it_loop = keyframePosesRTK.find(_loop_kf_idx); auto it_curr = keyframePosesRTK.find(_curr_kf_idx); if (it_loop != keyframePosesRTK.end() && it_curr != keyframePosesRTK.end()) { pose_loop = it_loop->second; pose_curr = it_curr->second; rtkKFLock.unlock(); } else { rtkKFLock.unlock(); std::cout << "this loop don't have rtk init pose, so reject" << std::endl; return std::nullopt; } } else { pose_loop = keyframePosesUpdated[_loop_kf_idx]; pose_curr = keyframePosesUpdated[_curr_kf_idx]; } double loopGapThreshold = 5.0; double dx = pose_loop.x - pose_curr.x, dy = pose_loop.y - pose_curr.y; double loopGap = std::sqrt(dx * dx + dy * dy); if (loopGap > loopGapThreshold) { std::cout << "loopGap is too big " << std::endl; std::cout << "loopGap = " << loopGap << std::endl; std::cout << "pose_curr = " << pose_curr.x << " " << pose_curr.y << " " << pose_curr.z << std::endl; std::cout << "pose_loop = " << pose_loop.x << " " << pose_loop.y << " " << pose_loop.z << std::endl; return std::nullopt; } // parse pointclouds int historyKeyframeSearchNum = 25; // enough. ex. 
[-25, 25] covers submap length of 50x1 = 50m if every kf gap is 1m pcl::PointCloud::Ptr currKeyframeCloud(new pcl::PointCloud()); pcl::PointCloud::Ptr currKeyframeCloudViz(new pcl::PointCloud()); pcl::PointCloud::Ptr icpKeyframeCloudViz(new pcl::PointCloud()); pcl::PointCloud::Ptr targetKeyframeCloud(new pcl::PointCloud()); pcl::PointCloud::Ptr targetKeyframeCloudViz(new pcl::PointCloud()); loopFindNearKeyframesCloud(currKeyframeCloud, currKeyframeCloudViz, _curr_kf_idx, 0); // use same root of loop kf idx loopFindNearKeyframesCloud(targetKeyframeCloud, targetKeyframeCloudViz, _loop_kf_idx, historyKeyframeSearchNum); // ICP Settings pcl::IterativeClosestPoint icp; icp.setMaxCorrespondenceDistance(150); // giseop , use a value can cover 2*historyKeyframeSearchNum range in meter icp.setMaximumIterations(100); icp.setTransformationEpsilon(1e-6); icp.setEuclideanFitnessEpsilon(1e-6); icp.setRANSACIterations(0); // cal the the initial position transform Eigen::Isometry3d T_init = Eigen::Isometry3d::Identity(); tf::Transform T_relative; tf::Transform T_loop = Pose6DtoTransform(pose_loop); tf::Transform T_curr = Pose6DtoTransform(pose_curr); T_relative = T_loop.inverseTimes(T_curr); tf::transformTFToEigen (T_relative, T_init); // Align pointclouds icp.setInputSource(currKeyframeCloud); icp.setInputTarget(targetKeyframeCloud); pcl::PointCloud::Ptr unused_result(new pcl::PointCloud()); icp.align(*unused_result, T_init.matrix().cast()); float loopFitnessScoreThreshold = 0.3; // user parameter but fixed low value is safe. if (icp.hasConverged() == false || icp.getFitnessScore() > loopFitnessScoreThreshold) { std::cout << "[SC loop] ICP fitness test failed (" << icp.getFitnessScore() << " > " << loopFitnessScoreThreshold << "). Reject this SC loop." << std::endl; return std::nullopt; } else { std::cout << "[SC loop] ICP fitness test passed (" << icp.getFitnessScore() << " < " << loopFitnessScoreThreshold << "). Add this SC loop." 
<< std::endl; } // icp verification sensor_msgs::PointCloud2 targetKeyframeCloudMsg; pcl::toROSMsg(*targetKeyframeCloudViz, targetKeyframeCloudMsg); targetKeyframeCloudMsg.header.frame_id = "/camera_init"; pubLoopSubmapLocal.publish(targetKeyframeCloudMsg); *icpKeyframeCloudViz += *local2global(unused_result, pose_loop); sensor_msgs::PointCloud2 icpKeyframeCloudMsg; pcl::toROSMsg(*icpKeyframeCloudViz, icpKeyframeCloudMsg); icpKeyframeCloudMsg.header.frame_id = "/camera_init"; pubLoopScanLocal.publish(icpKeyframeCloudMsg); // Get pose transformation float x, y, z, roll, pitch, yaw; Eigen::Affine3f correctionLidarFrame; correctionLidarFrame = icp.getFinalTransformation(); pcl::getTranslationAndEulerAngles (correctionLidarFrame, x, y, z, roll, pitch, yaw); gtsam::Pose3 poseTo = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z)); gtsam::Pose3 poseFrom = Pose3(Rot3::RzRyRx(0.0, 0.0, 0.0), Point3(0.0, 0.0, 0.0)); return poseFrom.between(poseTo); } // doICPVirtualRelative void process_pg() { while(1) { while ( !odometryBuf.empty() && !fullResBuf.empty() ) { // // pop and check keyframe is or not // mBuf.lock(); while (!odometryBuf.empty() && odometryBuf.front()->header.stamp.toSec() < fullResBuf.front()->header.stamp.toSec()) odometryBuf.pop(); if (odometryBuf.empty()) { mBuf.unlock(); break; } // Time equal check timeLaserOdometry = odometryBuf.front()->header.stamp.toSec(); timeLaser = fullResBuf.front()->header.stamp.toSec(); if (abs(timeLaserOdometry - timeLaser) > 0.01) { std::cout << "timeLaserOdometry - timeLaser = " << timeLaserOdometry - timeLaser << std::endl; return; } // TODO laserCloudFullRes->clear(); pcl::PointCloud::Ptr thisKeyFrame(new pcl::PointCloud()); pcl::fromROSMsg(*fullResBuf.front(), *thisKeyFrame); fullResBuf.pop(); Pose6D pose_curr = getOdom(odometryBuf.front()); ros::Time time_stamp = odometryBuf.front()->header.stamp; odometryBuf.pop(); mBuf.unlock(); // check the rtk data if (useRTK) { rtkBufLock.lock(); if (rtkBuf.empty() || 
rtkBuf.back()->header.stamp.toSec() < timeLaserOdometry || rtkBuf.front()->header.stamp.toSec() > timeLaserOdometry) { std::cout << "rtk data is abnormal" << std::endl; std::cout << "rtk's front time is " << rtkBuf.front()->header.stamp << std::endl; std::cout << "odo's front time is " << time_stamp << std::endl; std::cout << "rtk's back time is " << rtkBuf.back()->header.stamp << std::endl; std::cout << "rtk's size is " << rtkBuf.size() << std::endl; rtkBufLock.unlock(); return; } // find the first and nearest rtk ad_localization_msgs::NavStateInfo::ConstPtr preRTK; while (!rtkBuf.empty() && rtkBuf.front()->header.stamp.toSec() < timeLaserOdometry) { preRTK = rtkBuf.front(); rtkBuf.pop_front(); } rtkBufLock.unlock(); double frame_time_interval = rtkBuf.front()->header.stamp.toSec() - preRTK->header.stamp.toSec(); if (preRTK->header.stamp.toSec() < timeLaserOdometry && rtkBuf.front()->header.stamp.toSec() > timeLaserOdometry && abs(frame_time_interval) < 0.1) { Pose6D rtkPoseBefore {preRTK->position_enu.x, preRTK->position_enu.y, preRTK->position_enu.z, preRTK->roll, preRTK->pitch, preRTK->yaw}; Pose6D rtkPoseAfter {rtkBuf.front()->position_enu.x, rtkBuf.front()->position_enu.y, rtkBuf.front()->position_enu.z, rtkBuf.front()->roll, rtkBuf.front()->pitch, rtkBuf.front()->yaw}; double step_length = (timeLaserOdometry - preRTK->header.stamp.toSec()) / frame_time_interval; currRTK = PoseInterpolation(rtkPoseBefore, rtkPoseAfter, step_length); currRTK = TransformtoPose6D(Pose6DtoTransform(currRTK) * T_livox2gnss); hasRTKforThisKF = true; } else { hasRTKforThisKF = false; std::cout << "rtk data's gap is too long" << std::endl; return; } } // // Early reject by counting local delta movement (for equi-spereated kf drop) // odom_pose_prev = odom_pose_curr; odom_pose_curr = pose_curr; Pose6D dtf = diffTransformation(odom_pose_prev, odom_pose_curr); // dtf means delta_transform double delta_translation = sqrt(dtf.x*dtf.x + dtf.y*dtf.y + dtf.z*dtf.z); // note: absolute value. 
translationAccumulated += delta_translation; rotaionAccumulated += (dtf.roll + dtf.pitch + dtf.yaw); // sum just naive approach. if( translationAccumulated > keyframeMeterGap || rotaionAccumulated > keyframeRadGap ) { isNowKeyFrame = true; translationAccumulated = 0.0; // reset rotaionAccumulated = 0.0; // reset } else { isNowKeyFrame = false; } if( ! isNowKeyFrame ) continue; if (useRTK && !rtkOffsetInitialized) { odom_to_enu = Pose6DtoTransform(currRTK) * Pose6DtoTransform(pose_curr).inverse(); Pose6D odom_to_enu_pose = TransformtoPose6D(odom_to_enu); odom_to_enu_pose.roll = 0; odom_to_enu_pose.pitch = 0; odom_to_enu = Pose6DtoTransform(odom_to_enu_pose); std::cout << "enu pose is " << currRTK.x << " " << currRTK.y << " " << currRTK.z << " " << currRTK.roll << " " << currRTK.pitch << " " << currRTK.yaw << " " << std::endl; rtkOffsetInitialized = true; } // convert the current pose to enu if (useRTK) { if (rtkOffsetInitialized) { pose_curr = convertOdomToENU(pose_curr); } else { std::cout << "rtk's Initialize is abnormal" << time_stamp << std::endl; break; } } // // Save data and Add consecutive node // pcl::PointCloud::Ptr thisKeyFrameDS(new pcl::PointCloud()); downSizeFilterScancontext.setInputCloud(thisKeyFrame); downSizeFilterScancontext.filter(*thisKeyFrameDS); mKF.lock(); keyframeLaserClouds.push_back(thisKeyFrameDS); keyframePoses.push_back(pose_curr); keyframePosesUpdated.push_back(pose_curr); // init keyframeTimes.push_back(timeLaserOdometry); keyframeTimesOri.push_back(time_stamp); scManager.makeAndSaveScancontextAndKeys(*thisKeyFrameDS); laserCloudMapPGORedraw = true; mKF.unlock(); rtkKFLock.lock(); if (hasRTKforThisKF) { keyframePosesRTK[keyframePoses.size() - 1] = currRTK; } rtkKFLock.unlock(); const int prev_node_idx = keyframePoses.size() - 2; const int curr_node_idx = keyframePoses.size() - 1; // becuase cpp starts with 0 (actually this index could be any number, but for simple implementation, we follow sequential indexing) if( ! 
gtSAMgraphMade /* prior node */) { int init_node_idx = 0; if (curr_node_idx > 1) { gtsam::Vector priorNoiseVector6(6); // set a big priorNoise priorNoiseVector6 << 1e6, 1e6, 1e6, 1e6, 1e6, 1e6; priorNoise = noiseModel::Diagonal::Variances(priorNoiseVector6); init_node_idx = curr_node_idx; } gtsam::Pose3 poseOrigin = Pose6DtoGTSAMPose3(keyframePoses.at(init_node_idx)); mtxPosegraph.lock(); { // prior factor gtSAMgraph.add(gtsam::PriorFactor(init_node_idx, poseOrigin, priorNoise)); initialEstimate.insert(init_node_idx, poseOrigin); // runISAM2opt(); } mtxPosegraph.unlock(); gtSAMgraphMade = true; cout << "posegraph prior node " << init_node_idx << " added" << endl; } else /* consecutive node (and odom factor) after the prior added */ { // == keyframePoses.size() > 1 gtsam::Pose3 poseFrom = Pose6DtoGTSAMPose3(keyframePoses.at(prev_node_idx)); gtsam::Pose3 poseTo = Pose6DtoGTSAMPose3(keyframePoses.at(curr_node_idx)); mtxPosegraph.lock(); { // odom factor gtSAMgraph.add(gtsam::BetweenFactor(prev_node_idx, curr_node_idx, poseFrom.between(poseTo), odomNoise)); // // rtk factor // if(hasRTKforThisKF) { // gtsam::Pose3 poseRTK = Pose6DtoGTSAMPose3(currRTK); // gtSAMgraph.add(gtsam::PriorFactor(curr_node_idx, poseRTK, RTKNoise)); // cout << "RTK factor added at node " << curr_node_idx << endl; // } initialEstimate.insert(curr_node_idx, poseTo); // runISAM2opt(); } mtxPosegraph.unlock(); if(curr_node_idx % 100 == 0) cout << "posegraph odom node " << curr_node_idx << " added." << endl; } // if want to print the current graph, use gtSAMgraph.print("\nFactor Graph:\n"); } // ps. 
// scan context detector is running in another thread (in constant Hz, e.g., 1 Hz) // pub path and point cloud in another thread // wait (must required for running the while loop) std::chrono::milliseconds dura(2); std::this_thread::sleep_for(dura); } } // process_pg void performSCLoopClosure(void) { if( int(keyframePoses.size()) < scManager.NUM_EXCLUDE_RECENT) // do not try too early return; auto detectResult = scManager.detectLoopClosureID(); // first: nn index, second: yaw diff int SCclosestHistoryFrameID = detectResult.first; if( SCclosestHistoryFrameID != -1 ) { const int prev_node_idx = SCclosestHistoryFrameID; const int curr_node_idx = keyframePoses.size() - 1; // because cpp starts 0 and ends n-1 cout << "Loop detected! - between " << prev_node_idx << " and " << curr_node_idx << "" << endl; mBuf.lock(); scLoopICPBuf.push(std::pair(prev_node_idx, curr_node_idx)); // addding actual 6D constraints in the other thread, icp_calculation. mBuf.unlock(); } } // performSCLoopClosure void process_lcd(void) { float loopClosureFrequency = 1.0; // can change ros::Rate rate(loopClosureFrequency); while (ros::ok()) { rate.sleep(); performSCLoopClosure(); // performRSLoopClosure(); // TODO } } // process_lcd void process_icp(void) { while(1) { while ( !scLoopICPBuf.empty() ) { if( scLoopICPBuf.size() > 30 ) { ROS_WARN("Too many loop clousre candidates to be ICPed is waiting ... 
Do process_lcd less frequently (adjust loopClosureFrequency)"); } mBuf.lock(); std::pair loop_idx_pair = scLoopICPBuf.front(); scLoopICPBuf.pop(); mBuf.unlock(); const int prev_node_idx = loop_idx_pair.first; const int curr_node_idx = loop_idx_pair.second; auto relative_pose_optional = doICPVirtualRelative(prev_node_idx, curr_node_idx); if(relative_pose_optional) { gtsam::Pose3 relative_pose = relative_pose_optional.value(); mtxPosegraph.lock(); gtSAMgraph.add(gtsam::BetweenFactor(prev_node_idx, curr_node_idx, relative_pose, robustLoopNoise)); // runISAM2opt(); mtxPosegraph.unlock(); } } // wait (must required for running the while loop) std::chrono::milliseconds dura(2); std::this_thread::sleep_for(dura); } } // process_icp void process_viz_path(void) { float hz = 10.0; ros::Rate rate(hz); while (ros::ok()) { rate.sleep(); if(recentIdxUpdated > 1) { pubPath(); } } } void process_isam(void) { float hz = 1; ros::Rate rate(hz); while (ros::ok()) { rate.sleep(); if( gtSAMgraphMade ) { mtxPosegraph.lock(); runISAM2opt(); cout << "running isam2 optimization ..." 
<< endl; mtxPosegraph.unlock(); } } } void process_viz_map(void) { float vizmapFrequency = 0.5; // 0.1 means run onces every 10s ros::Rate rate(vizmapFrequency); while (ros::ok()) { rate.sleep(); // if(recentIdxUpdated > 1) { pubMap(); // } } } // pointcloud_viz void process_interactive_slam_result() { ros::Rate rate(50); mKF.lock(); for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) { // pub Map for interactive slam sensor_msgs::PointCloud2 laserCloudMapPGOMsg; pcl::toROSMsg(*keyframeLaserClouds[node_idx], laserCloudMapPGOMsg); laserCloudMapPGOMsg.header.frame_id = "/camera_init"; laserCloudMapPGOMsg.header.stamp = keyframeTimesOri.at(node_idx); pubMapResultForInteractiveSlam.publish(laserCloudMapPGOMsg); // pub Odom for interactive slam const Pose6D& pose_est = keyframePosesUpdated.at(node_idx); // upodated poses nav_msgs::Odometry odomAftPGOthis; odomAftPGOthis.header.frame_id = "/camera_init"; odomAftPGOthis.child_frame_id = "/aft_pgo"; odomAftPGOthis.header.stamp = laserCloudMapPGOMsg.header.stamp; odomAftPGOthis.pose.pose.position.x = pose_est.x; odomAftPGOthis.pose.pose.position.y = pose_est.y; odomAftPGOthis.pose.pose.position.z = pose_est.z; odomAftPGOthis.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(pose_est.roll, pose_est.pitch, pose_est.yaw); pubOdomResultForInteractiveSlam.publish(odomAftPGOthis); // last pose rate.sleep(); } mKF.unlock(); } // pointcloud_viz void save_odo() { std::cout << "save_path = " << save_path << std::endl; mKF.lock(); for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) { const Pose6D& pose_est = keyframePosesUpdated.at(node_idx); // upodated poses const Pose6D& pose_rtk = TransformtoPose6D(Pose6DtoTransform(pose_est) * T_livox2gnss.inverse()); std::stringstream ss_x, ss_y, ss_z; ss_x.precision(7); ss_x.setf(std::ios::fixed); ss_y.precision(7); ss_y.setf(std::ios::fixed); ss_z.precision(7); ss_z.setf(std::ios::fixed); ss_x << pose_rtk.x; ss_y << pose_rtk.y; 
ss_z << pose_rtk.z; *odo //<< std::setw(20) // << node_idx << " " << keyframeTimesOri.at(node_idx) << " " << ss_x.str() << " " << ss_y.str() << " " << ss_z.str() << " " << pose_est.roll << " " << pose_est.pitch << " " << pose_est.yaw << " " // << 0 << " " << 0 << " " << 0 << " " << 1 << std::endl; } mKF.unlock(); } // pointcloud_viz void save_map() { std::string all_points_dir(save_pcd_path + "scans_lc.pcd"); std::cout << "all_points_dir = " << all_points_dir << std::endl; mKF.lock(); laserCloudMapPGO->clear(); for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) { *laserCloudMapPGO += *local2global(keyframeLaserClouds[node_idx], keyframePosesUpdated[node_idx]); } pcl::io::savePCDFile(all_points_dir, *laserCloudMapPGO); mKF.unlock(); } // pointcloud_viz void MySigintHandler(int sig) { // Before exit the main thread, the odom result and the map data will be saved and sent to interactive_slam ROS_INFO("shutting down!"); save_odo(); save_map(); process_interactive_slam_result(); ros::shutdown(); } int main(int argc, char **argv) { ros::init(argc, argv, "laserPGO"); ros::NodeHandle nh; nh.param("save_path", save_path, "/"); std::cout << "save_pcd_path = " << save_path << std::endl; nh.param("save_pcd_path", save_pcd_path, "/"); std::cout << "save_pcd_path = " << save_pcd_path << std::endl; odo.reset(new std::ofstream(save_path)); nh.param("keyframe_meter_gap", keyframeMeterGap, 2.0); // pose assignment every k m move nh.param("keyframe_deg_gap", keyframeDegGap, 10.0); // pose assignment every k deg rot keyframeRadGap = deg2rad(keyframeDegGap); std:: cout << "keyframeMeterGap = " << keyframeMeterGap << std:: endl; std:: cout << "keyframeDegGap = " << keyframeDegGap << std:: endl; std:: cout << "keyframeRadGap = " << keyframeRadGap << std:: endl; nh.param("sc_dist_thres", scDistThres, 0.2); nh.param("sc_max_radius", scMaximumRadius, 80.0); // 80 is recommended for outdoor, and lower (ex, 20, 40) values are recommended for indoor 
nh.param("useRTK", useRTK, false); std:: cout << "useRTK = " << useRTK << std:: endl; std::vector vecTlivox2gnss; ros::param::get("Extrinsic_T_livox2gnss", vecTlivox2gnss); ISAM2Params parameters; parameters.relinearizeThreshold = 0.01; parameters.relinearizeSkip = 1; isam = new ISAM2(parameters); initNoises(); scManager.setSCdistThres(scDistThres); scManager.setMaximumRadius(scMaximumRadius); float filter_size = 0.4; downSizeFilterScancontext.setLeafSize(filter_size, filter_size, filter_size); downSizeFilterICP.setLeafSize(filter_size, filter_size, filter_size); double mapVizFilterSize; nh.param("mapviz_filter_size", mapVizFilterSize, 0.4); // pose assignment every k frames downSizeFilterMapPGO.setLeafSize(mapVizFilterSize, mapVizFilterSize, mapVizFilterSize); ros::Subscriber subLaserCloudFullRes = nh.subscribe("/velodyne_cloud_registered_local", 100, laserCloudFullResHandler); ros::Subscriber subLaserOdometry = nh.subscribe("/aft_mapped_to_init", 100, laserOdometryHandler); ros::Subscriber subRTK = nh.subscribe("/localization/navstate_info", 100, rtkHandler); pubOdomAftPGO = nh.advertise("/aft_pgo_odom", 100); pubOdomRepubVerifier = nh.advertise("/repub_odom", 100); pubPathAftPGO = nh.advertise("/aft_pgo_path", 100); pubMapAftPGO = nh.advertise("/aft_pgo_map", 100); pubLoopScanLocal = nh.advertise("/loop_scan_local", 100); pubLoopSubmapLocal = nh.advertise("/loop_submap_local", 100); // save interactive_slam_pose pubOdomResultForInteractiveSlam = nh.advertise("/interactive_slam_odom", 1000); pubMapResultForInteractiveSlam = nh.advertise("/interactive_slam_map", 1000); signal(SIGINT, MySigintHandler); Eigen::Isometry3d T_livox2gnss_temp; T_livox2gnss_temp.matrix() << vecTlivox2gnss[0], vecTlivox2gnss[1], vecTlivox2gnss[2], vecTlivox2gnss[3], vecTlivox2gnss[4], vecTlivox2gnss[5], vecTlivox2gnss[6], vecTlivox2gnss[7], vecTlivox2gnss[8], vecTlivox2gnss[9], vecTlivox2gnss[10], vecTlivox2gnss[11], vecTlivox2gnss[12], vecTlivox2gnss[13], vecTlivox2gnss[14], 
vecTlivox2gnss[15]; tf::transformEigenToTF(T_livox2gnss_temp, T_livox2gnss); std::thread posegraph_slam {process_pg}; // pose graph construction std::thread lc_detection {process_lcd}; // loop closure detection std::thread icp_calculation {process_icp}; // loop constraint calculation via icp std::thread isam_update {process_isam}; // if you want to call less isam2 run (for saving redundant computations and no real-time visulization is required), uncommment this and comment all the above runisam2opt when node is added. std::thread viz_map {process_viz_map}; // visualization - map (low frequency because it is heavy) std::thread viz_path {process_viz_path}; // visualization - path (high frequency) ros::spin(); return 0; } ================================================ FILE: SC-PGO/utils/moving_object_removal/README.md ================================================ # Moving Object Removal Please refer to [mmdection](https://github.com/open-mmlab/mmdetection) for the installation process. We just use the most dummy projection function to remove the LiDAR points that fall into the bounding box with label of movable objects. 
| Detection with mmDetection | Removal with Projection |
save_log: if log_file_name is None: curr_time = time.ctime(time.time()) else: curr_time = log_file_name logger.add(f"/data/test/{curr_time}.log", level=level) def set_calib_parameters(): extrinsic = np.array([[0.0265714, -0.99964, 0.00379047, 0.0606143], [-0.00218415, -0.00384985, -0.99999, 0.288651], [0.999644, 0.0265626, -0.00228567, 0.258449], [0, 0, 0, 1]]) intrinsic = np.array([[1023.37, 0, 922.516], [0, 994.267, 556.527], [0, 0, 1]]) distortion = np.array([[-0.396302, 0.1187, -0.00558465, 0.00159696]]) return extrinsic, intrinsic, distortion def init_model(config=None, check_point=None): from mmdet.apis import (init_detector) if config is None: config = '../configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py' if check_point is None: check_point = '../checkpoints/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth' model = init_detector(config, check_point, device='cuda:0') return model # align the pc_list with image_list def align_file_lists(img_list, pc_list): # image files are full, need to select images according to the odom files pc_timestamps = [float(f.split("/")[-1].split(".pcd")[0]) for f in pc_list] img_timestamps = [float(s.split("/")[-1].split(".png")[0]) for s in img_list] selected_img_list = [] img_idx = 0 for i in range(len(pc_list)): cur_img_time = img_timestamps[img_idx] cur_pc_time = pc_timestamps[i] while np.abs(cur_img_time - cur_pc_time) > 0.1: if img_idx > len(img_timestamps) - 1: break img_idx += 1 cur_img_time = img_timestamps[img_idx] selected_img_list.append(img_list[img_idx]) if len(selected_img_list) != len(pc_list): logger.error("The file size is not same!") return selected_img_list, pc_list def files_saving_path(base_path=None): if base_path is None: base_path = "../demo/inference_result" proj_file_path = os.path.join(base_path, "proj") if not os.path.exists(proj_file_path): os.makedirs(proj_file_path) detection_img_path = os.path.join(base_path, "detection") if not os.path.exists(detection_img_path): 
os.makedirs(detection_img_path) npy_path = os.path.join(base_path, "npy") if not os.path.exists(npy_path): os.makedirs(npy_path) pcd_path = os.path.join(base_path, "pcd") if not os.path.exists(pcd_path): os.makedirs(pcd_path) return proj_file_path, detection_img_path, npy_path, pcd_path def fetch_dataset(data_base, livox_pc_path=None, img_path=None): if livox_pc_path is None: livox_pc_path = os.path.join(data_base, "pcd") if img_path is None: img_path = os.path.join(data_base, "img") if not os.path.exists(livox_pc_path): raise RuntimeError(f"PCD path {livox_pc_path} does not exist") if not os.path.exists(img_path): raise RuntimeError(f"Image path {img_path} does not exist") image_list, pc_list = [], [] image_list = [os.path.join(img_path, f) for f in os.listdir(img_path) if ".png" in f] image_list.sort(key=lambda s: float(s.split("/")[-1].split(".png")[0])) odom_flag = True if odom_flag: pc_list = [os.path.join(livox_pc_path, f) for f in os.listdir(livox_pc_path) if ".pcd" in f] pc_list.sort(key=lambda s: float( float(s.split("/")[-1].split(".pcd")[0].split("_")[0]))) image_list, pc_list = align_file_lists(image_list, pc_list) else: pc_list = [os.path.join(livox_pc_path, f) for f in os.listdir(livox_pc_path) if ".npy" in f] pc_list.sort(key=lambda s: float( float(s.split("/")[-1].split(".npy")[0]) )) return pc_list, image_list def img_undistort(img_ori, intrinsic_param, distort_param): intrinsic_param = intrinsic_param[:, :3] iden = np.identity(3) w, h = img_ori.shape[1], img_ori.shape[0] mapx, mapy = cv2.initUndistortRectifyMap(intrinsic_param, distort_param, iden, intrinsic_param, (w, h), cv2.CV_32FC1) img_dst = cv2.remap(img_ori, mapx, mapy, cv2.INTER_LINEAR) return img_dst def convert_rot_trans(extrinsic): rot_matrix = extrinsic[0:3, 0:3] rot_vec, _ = cv2.Rodrigues(rot_matrix) trans_vec = extrinsic[0:3, 3] return rot_vec, trans_vec def project_pts_to_img(pts, image, extrinsic, intrinsic, cam_dist, polygons): img_width = image.shape[1] img_height = 
image.shape[0] if len(pts) < 10: logger.debug("too few points to project to the image") return if pts.shape[1] < 4: pts_ = np.ones((len(pts), 4)) pts_[:, 0:3] = pts else: pts_ = pts ration = 0.2 min_w = -ration * img_width max_w = (1 + ration) * img_width min_h = - ration * img_height max_h = (1 + ration) * img_height pt_img_ = [] pts_filtered = [] for pt in pts_: pt_geo = np.copy(pt) pt_geo[3] = 1.0 if np.any(np.isnan(pt)): continue pt_z = extrinsic[2, 0] * pt_geo[0] + extrinsic[2, 1] * pt_geo[1] + extrinsic[2, 2] * pt_geo[2] + extrinsic[ 2, 3] if pt_z < 1.0: continue pt_cam = np.dot(extrinsic, pt_geo) pt_img = np.dot(intrinsic, pt_cam[0:3] / pt_cam[2]) if pt_img[0] < min_w or pt_img[0] > max_w: continue if pt_img[1] < min_h or pt_img[1] > max_h: continue pt_img_.append(pt_geo[0:3]) pts_filtered.append(pt) if len(pt_img_) < 1: logger.warning("no point correspondence found between lidar-image") return pt_img_, pts_filtered rvec, tvec = convert_rot_trans(extrinsic) dis_cam = np.asarray(cam_dist) proj_pts, _ = cv2.projectPoints(np.asarray(pt_img_), rvec, tvec, np.asarray(intrinsic), dis_cam) proj_pts = proj_pts.reshape(-1, 2) inds = np.logical_and(0 < proj_pts[:, 0], proj_pts[:, 0] < img_width) proj_pts = proj_pts[inds, :] pts_filtered = np.asarray(pts_filtered)[inds, :] inds = np.logical_and(0 < proj_pts[:, 1], proj_pts[:, 1] < img_height) proj_pts = proj_pts[inds, :] pts_filtered = pts_filtered[inds, :] flags = np.ones(len(proj_pts), dtype=bool) pts_inside = [] pts_lidar_outside = [] pts_image_outside = [] for poly in polygons: shape_poly = shapePolygon(poly.get_xy()) for idx, (pt_lidar, pt_img) in enumerate(zip(pts_filtered, proj_pts)): if not flags[idx]: continue if shape_poly.contains(shapePoint(pt_img[0], pt_img[1])): pts_inside.append(pt_img) flags[idx] = False pts_image_outside = proj_pts[flags] pts_lidar_outside = pts_filtered[flags] pt_img = np.asarray(pts_image_outside) pt_lidar = np.asarray(pts_lidar_outside) return pt_img, pt_lidar def img_process(img, 
model, intrinsic_param, distort_param, visual_flag=False): img = img_undistort(img, intrinsic_param, distort_param) inference_result = inference_detector(model, img) return img, inference_result def projection_visualization(img, pts, save_pth=None): ax = plt.subplot(111) plt.title("Projection") plt.imshow(img) img_width = img.shape[1] img_height = img.shape[0] counter = 0 counter_pt = 0 if pts is not None and len(pts) > 0: for pt in pts: if pt[0] < 0 or pt[0] > img_width: continue if pt[1] < 0 or pt[1] > img_height: continue counter += 1 if len(pts) > 5000: skip_rate = 20 else: skip_rate = 10 if counter % skip_rate == 0: ax.plot(pt[0], pt[1], "x", c='b') counter_pt += 1 else: continue if save_pth is None: logger.trace(" number of lidar pts found in the image: %d" % counter) plt.pause(0.1) plt.show() plt.clf() else: plt.savefig(save_pth, dpi=200) plt.clf() def enlarge_bbox(bbox_init, img_size, enlarge_step=10): bbox_init[0] = max(bbox_init[0] - enlarge_step, 0) bbox_init[1] = max(bbox_init[1] - enlarge_step, 0) bbox_init[2] = min(bbox_init[2] + enlarge_step, img_size[0]) bbox_init[3] = min(bbox_init[3] + enlarge_step, img_size[1]) return bbox_init def extract_inference_result(model, img, result, score_thr=0.6): if hasattr(model, 'module'): model = model.module if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) if score_thr > 0: assert bboxes.shape[1] == 5 scores = bboxes[:, -1] inds = scores > score_thr bboxes = bboxes[inds, :] labels = labels[inds] polygons = [] labels_text = [] w, h = img.shape[1], img.shape[0] for i, (bbox, label) in enumerate(zip(bboxes, labels)): label_text = CLASSES[label] if label_text not in MOVABLE_CLASSES: continue bbox_int = bbox.astype(np.int32) 
bbox_int = enlarge_bbox(bbox_int, [w, h], enlarge_step=20) poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]], [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]] np_poly = np.array(poly).reshape((4, 2)) polygons.append(Polygon(np_poly)) labels_text.append(label_text) return polygons, labels_text def inference_and_remove(pc_list, img_list, inference_model, save_path=None): extrinsic, intrinsic, distortion = set_calib_parameters() proj_folder, det_folder, npy_folder, pcd_folder = files_saving_path(save_path) for pc_, img_ in zip(pc_list, img_list): logger.trace("The processed pc: %s " % pc_) logger.trace("The processed img: %s " % img_) img = cv2.imread(img_) if ".npy" in pc_: pc = np.load(pc_) else: pc = pcl.load_XYZI(pc_).to_array() img_undis, inference_result = img_process(img, inference_model, intrinsic, distortion) polygons, labels_text = extract_inference_result(inference_model, img, inference_result) pt_img, pt_lidar = project_pts_to_img(pc, img, extrinsic, intrinsic, distortion, polygons) img_name = img_[img_.rfind("/") + 1:] pc_name = pc_[pc_.rfind("/") + 1: pc_.rfind(".")] + ".npy" proj_file_pth = os.path.join(proj_folder, img_name) save_img = os.path.join(det_folder, img_name) pcd_path = os.path.join(npy_folder, pc_name) inference_model.show_result(img, inference_result, score_thr=0.6, out_file=save_img) logger.trace("Save trimmed NPY to: %s " % pcd_path) np.save(pcd_path, pt_lidar) projection_visualization(img, pt_img, proj_file_pth) def euler_to_rotMat(yaw, pitch, roll): Rz_yaw = np.array([ [np.cos(yaw), -np.sin(yaw), 0], [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]]) Ry_pitch = np.array([ [np.cos(pitch), 0, np.sin(pitch)], [0, 1, 0], [-np.sin(pitch), 0, np.cos(pitch)]]) Rx_roll = np.array([ [1, 0, 0], [0, np.cos(roll), -np.sin(roll)], [0, np.sin(roll), np.cos(roll)]]) # R = RzRyRx rotMat = np.dot(Rz_yaw, np.dot(Ry_pitch, Rx_roll)) return rotMat def main(inference=False): if inference: inference_model = init_model() data_base = "DEMO" 
pc_path = os.path.join(data_base, "pcd_correction") img_path = os.path.join(data_base, "img") save_result_path = os.path.join(data_base, "result") if inference: pc_list, img_list = fetch_dataset(data_base, livox_pc_path=pc_path, img_path=img_path) inference_and_remove(pc_list, img_list, inference_model, save_result_path) if __name__ == "__main__": set_log_level() main() ================================================ FILE: SC-PGO/utils/python/makeMergedMap.py ================================================ import os import sys import time import copy from io import StringIO import pypcd # for the install, use this command: python3.x (use your python ver) -m pip install --user git+https://github.com/DanielPollithy/pypcd.git from pypcd import pypcd import numpy as np from numpy import linalg as LA import open3d as o3d from pypcdMyUtils import * jet_table = np.load('jet_table.npy') bone_table = np.load('bone_table.npy') color_table = jet_table color_table_len = color_table.shape[0] ########################## # User only consider this block ########################## data_dir = "/home/user/Documents/catkin2021/catkin_fastlio2/data/" # should end with / scan_idx_range_to_stack = [0, 200] # if you want a whole map, use [0, len(scan_files)] node_skip = 1 num_points_in_a_scan = 150000 # for reservation (save faster) // e.g., use 150000 for 128 ray lidars, 100000 for 64 ray lidars, 30000 for 16 ray lidars, if error occured, use the larger value. 
is_live_vis = False # recommend to use false is_o3d_vis = True intensity_color_max = 200 is_near_removal = True thres_near_removal = 2 # meter (to remove platform-myself structure ghost points) ########################## # scan_dir = data_dir + "Scans" scan_files = os.listdir(scan_dir) scan_files.sort() poses = [] f = open(data_dir+"optimized_poses.txt", 'r') while True: line = f.readline() if not line: break pose_SE3 = np.asarray([float(i) for i in line.split()]) pose_SE3 = np.vstack( (np.reshape(pose_SE3, (3, 4)), np.asarray([0,0,0,1])) ) poses.append(pose_SE3) f.close() # assert (scan_idx_range_to_stack[1] > scan_idx_range_to_stack[0]) print("Merging scans from", scan_idx_range_to_stack[0], "to", scan_idx_range_to_stack[1]) # if(is_live_vis): vis = o3d.visualization.Visualizer() vis.create_window('Map', visible = True) nodes_count = 0 pcd_combined_for_vis = o3d.geometry.PointCloud() pcd_combined_for_save = None # The scans from 000000.pcd should be prepared if it is not used (because below code indexing is designed in a naive way) # manually reserve memory for fast write num_all_points_expected = int(num_points_in_a_scan * np.round((scan_idx_range_to_stack[1] - scan_idx_range_to_stack[0])/node_skip)) np_xyz_all = np.empty([num_all_points_expected, 3]) np_intensity_all = np.empty([num_all_points_expected, 1]) curr_count = 0 for node_idx in range(len(scan_files)): if(node_idx < scan_idx_range_to_stack[0] or node_idx >= scan_idx_range_to_stack[1]): continue nodes_count = nodes_count + 1 if( nodes_count % node_skip is not 0): if(node_idx is not scan_idx_range_to_stack[0]): # to ensure the vis init continue print("read keyframe scan idx", node_idx) scan_pose = poses[node_idx] scan_path = os.path.join(scan_dir, scan_files[node_idx]) scan_pcd = o3d.io.read_point_cloud(scan_path) scan_xyz_local = copy.deepcopy(np.asarray(scan_pcd.points)) scan_pypcd_with_intensity = pypcd.PointCloud.from_path(scan_path) scan_intensity = scan_pypcd_with_intensity.pc_data['intensity'] 
scan_intensity_colors_idx = np.round( (color_table_len-1) * np.minimum( 1, np.maximum(0, scan_intensity / intensity_color_max) ) ) scan_intensity_colors = color_table[scan_intensity_colors_idx.astype(int)] scan_pcd_global = scan_pcd.transform(scan_pose) # global coord, note that this is not deepcopy scan_pcd_global.colors = o3d.utility.Vector3dVector(scan_intensity_colors) scan_xyz = np.asarray(scan_pcd_global.points) scan_intensity = np.expand_dims(scan_intensity, axis=1) scan_ranges = LA.norm(scan_xyz_local, axis=1) if(is_near_removal): eff_idxes = np.where (scan_ranges > thres_near_removal) scan_xyz = scan_xyz[eff_idxes[0], :] scan_intensity = scan_intensity[eff_idxes[0], :] scan_pcd_global = scan_pcd_global.select_by_index(eff_idxes[0]) if(is_o3d_vis): pcd_combined_for_vis += scan_pcd_global # open3d pointcloud class append is fast if is_live_vis: if(node_idx is scan_idx_range_to_stack[0]): # to ensure the vis init vis.add_geometry(pcd_combined_for_vis) vis.update_geometry(pcd_combined_for_vis) vis.poll_events() vis.update_renderer() # save np_xyz_all[curr_count:curr_count + scan_xyz.shape[0], :] = scan_xyz np_intensity_all[curr_count:curr_count + scan_xyz.shape[0], :] = scan_intensity curr_count = curr_count + scan_xyz.shape[0] print(curr_count) # if(is_o3d_vis): print("draw the merged map.") o3d.visualization.draw_geometries([pcd_combined_for_vis]) # save ply having intensity np_xyz_all = np_xyz_all[0:curr_count, :] np_intensity_all = np_intensity_all[0:curr_count, :] np_xyzi_all = np.hstack( (np_xyz_all, np_intensity_all) ) xyzi = make_xyzi_point_cloud(np_xyzi_all) map_name = data_dir + "map_" + str(scan_idx_range_to_stack[0]) + "_to_" + str(scan_idx_range_to_stack[1]) + "_with_intensity.pcd" xyzi.save_pcd(map_name, compression='binary_compressed') print("intensity map is save (path:", map_name, ")") # save rgb colored points # map_name = data_dir + "map_" + str(scan_idx_range_to_stack[0]) + "_to_" + str(scan_idx_range_to_stack[1]) + ".pcd" # 
o3d.io.write_point_cloud(map_name, pcd_combined_for_vis) # print("the map is save (path:", map_name, ")") ================================================ FILE: SC-PGO/utils/python/pypcdMyUtils.py ================================================ import numpy as np import pypcd # for the install, use this command: python3.x (use your python ver) -m pip install --user git+https://github.com/DanielPollithy/pypcd.git from pypcd import pypcd def make_xyzi_point_cloud(xyzl, label_type='f'): """ Make XYZL point cloud from numpy array. TODO i labels? """ md = {'version': .7, 'fields': ['x', 'y', 'z', 'intensity'], 'count': [1, 1, 1, 1], 'width': len(xyzl), 'height': 1, 'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], 'points': len(xyzl), 'data': 'ASCII'} if label_type.lower() == 'f': md['size'] = [4, 4, 4, 4] md['type'] = ['F', 'F', 'F', 'F'] elif label_type.lower() == 'u': md['size'] = [4, 4, 4, 1] md['type'] = ['F', 'F', 'F', 'U'] else: raise ValueError('label type must be F or U') # TODO use .view() xyzl = xyzl.astype(np.float32) dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32), ('intensity', np.float32)]) pc_data = np.rec.fromarrays([xyzl[:, 0], xyzl[:, 1], xyzl[:, 2], xyzl[:, 3]], dtype=dt) pc = pypcd.PointCloud(md, pc_data) return pc ================================================ FILE: ad_localization_msgs/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.0.2) project(ad_localization_msgs) ## Compile as C++11, supported in ROS Kinetic and newer # add_compile_options(-std=c++11) find_package(catkin REQUIRED COMPONENTS roscpp rospy message_generation std_msgs geometry_msgs) ## Generate messages in the 'msg' folder add_message_files( FILES NavStateInfo.msg ) ## Generate added messages and services with any dependencies listed here generate_messages( DEPENDENCIES std_msgs # Or other packages containing msgs geometry_msgs ) catkin_package( CATKIN_DEPENDS message_runtime ) 
================================================ FILE: ad_localization_msgs/msg/NavStateInfo.msg ================================================ # Header include seq, timestamp(last node pub time), and frame_id(sensor model) std_msgs/Header header # The time of pose measurement, in nano second. uint64 measurement_time_ns # Report localization status # when INVALID or CONVERGING, localization result is not reliable and should not be used. string status_info # For debug string debug_info # Position of the Vehicle Reference Point(VRP) in the Map reference frame(East/North/Up). # The VRP is the car center(Forward/Left/Up). # x for East, y for North, z for Up Height, in meters. geometry_msgs/Point position_enu # Position uncertainty(standard deviation in meters) geometry_msgs/Point position_enu_std_dev # Position of the Vehicle Reference Point(VRP) in the WGS84 reference ellipsoid coordinate system. # longitude in degrees, ranging from -180 to 180. # latitude in degrees, ranging from -90 to 90. # ellipsoid height in meters. float64 longitude float64 latitude float64 altitude # Attitude in euler angle form to describe the orientation of a VRP frame with respect to a Map reference frame(ENU). # The roll, in (-pi/2, pi/2), corresponds to a rotation around the Vehicle Forward-axis. # The pitch, in [-pi, pi), corresponds to a rotation around the Vehicle Left-axis. # The yaw, in [-pi, pi), corresponds to a rotation around the Vehicle Up-axis. # The yaw is the angle of the Vehicle head w.r.t the ENU reference frame, East = 0, North = pi/2, anti-clockwise in radius. # The direction of rotation follows the right-hand rule. 
float64 roll float64 pitch float64 yaw # Attitude uncertainty(standard deviation in radians) geometry_msgs/Point attitude_std_dev # Linear velocity of the VRP in the Vehicle reference frame # x for Forward, y for Left, z for Up, in meters per second geometry_msgs/Vector3 linear_velocity # Linear velocity of the VRP in the ENU reference frame # x for East, y for North, z for Up, in meters per second geometry_msgs/Vector3 linear_velocity_global # Linear acceleration of the VRP in the Vehicle reference frame # x for Forward, y for Left, z for Up, in meters per power second geometry_msgs/Vector3 linear_acceleration # Linear acceleration of the VRP in the ENU reference frame # x for East, y for North, z for Up, in meters per power second geometry_msgs/Vector3 linear_acceleration_global # Angular velocity of the VRP in the Vehicle reference frame # x across Forward axes, y across Left axes, # z across Up axes, in radians per second geometry_msgs/Point angular_velocity ================================================ FILE: ad_localization_msgs/package.xml ================================================ ad_localization_msgs 0.0.0 The ad_localization_msgs package root TODO catkin message_generation geometry_msgs roscpp rospy std_msgs message_runtime geometry_msgs roscpp rospy std_msgs