[
  {
    "path": ".gitignore",
    "content": ".vscode/\n__pycache__/"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2021, PJLab-ADG\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n   contributors may be used to endorse or promote products derived from\n   this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "LIO-Livox/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.8.3)\nproject(livox_odometry)\n\nSET(CMAKE_BUILD_TYPE \"Release\")\nset(CMAKE_CXX_FLAGS \"-std=c++11\")\n\n#SET(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++0x\")\nSET(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)\n\nfind_package(catkin REQUIRED COMPONENTS\n  \t     message_generation\n  \t     geometry_msgs\n  \t     nav_msgs\n  \t     sensor_msgs\n  \t     roscpp\n  \t     rospy\n  \t     std_msgs\n  \t     tf\n  \t     tf_conversions\n  \t     rosbag\n  \t     livox_ros_driver\n  \t     eigen_conversions\n  \t     pcl_conversions\n  \t     pcl_ros\n  \t     message_filters\n  \t     std_srvs)\n\nfind_package(Eigen3 REQUIRED)\nfind_package(PCL REQUIRED)\nfind_package(Ceres REQUIRED)\nfind_package(OpenCV REQUIRED)\nfind_package(SuiteSparse REQUIRED)\n\ninclude_directories(include\n        \t    ${catkin_INCLUDE_DIRS}\n\t\t    ${EIGEN3_INCLUDE_DIR}\n\t\t    ${PCL_INCLUDE_DIRS}\n        \t    ${CERES_INCLUDE_DIRS}\n\t\t    ${OpenCV_INCLUDE_DIRS}\n        \t    ${SUITESPARSE_INCLUDE_DIRS})\n\n##################\n## ROS messages ##\n##################\n\ngenerate_messages(DEPENDENCIES std_msgs)\n\ncatkin_package(CATKIN_DEPENDS geometry_msgs nav_msgs roscpp rospy std_msgs message_runtime \n  \t       pcl_conversions pcl_ros message_filters std_srvs tf_conversions tf \n\t       eigen_conversions DEPENDS PCL OpenCV INCLUDE_DIRS include)\n\nadd_executable (ScanRegistration \n\t\tsrc/lio/ScanRegistration.cpp\n\t\tsrc/lio/LidarFeatureExtractor.cpp\n        \tsrc/segment/segment.cpp\n        \tsrc/segment/pointsCorrect.cpp)\ntarget_link_libraries(ScanRegistration ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES})\n\nadd_executable (PoseEstimation \n\t\tsrc/lio/PoseEstimation.cpp\n\t\tsrc/lio/Estimator.cpp\n\t\tsrc/lio/IMUIntegrator.cpp\n\t\tsrc/lio/ceresfunc.cpp \n\t\tsrc/lio/Map_Manager.cpp)\ntarget_link_libraries(PoseEstimation ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${CERES_LIBRARIES})\n\n\n\n"
  },
  {
    "path": "LIO-Livox/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2021, LIVOX\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "LIO-Livox/README.md",
    "content": "# LIO-Livox (A Robust LiDAR-Inertial Odometry for Livox LiDAR)\nThis repository implements a robust LiDAR-inertial odometry system for Livox LiDAR. \nThe system uses only a single Livox LiDAR with a built-in IMU. It has a robust initialization module, \nwhich is independent of the sensor motion. **It can be initialized with the static state, dynamic state, and the mixture of static and dynamic state.** \nThe system achieves super robust performance. **It can pass through a 4km-tunnel and run on the highway with a very high speed (about 80km/h) using a single Livox Horizon.**\nMoreover, **it is also robust to dynamic objects**, such as cars, bicycles, and pedestrians. It obtains high precision of localization even in traffic jams.\n**The mapping result is precise even when most of the FOV is occluded by vehicles.**\nVideos of the demonstration of the system can be found on Youtube and Bilibili. *NOTE: Images are only used for demonstration, not used in the system.*\n\n**Developer**: [Livox](www.livoxtech.com)\n\n<div align=\"center\">\n<img src=\"./doc/tunnel.gif\" width=\"850px\">\n<img src=\"./doc/urban_dynamic.gif\" width=\"850px\">\n<img src=\"./doc/urban_complex.gif\" width=\"850px\">\n</div>\n\n## System architecture\n\n<div align=\"center\">\n<img src=\"./doc/system.png\" width=\"1000px\">\n</div>\n\nThe system consists of two ROS nodes: ScanRegistration and PoseEstimation.\n*  The class \"LidarFeatureExtractor\" of the node \"ScanRegistration\" extracts corner features, surface features, and irregular features from the raw point cloud.\n*  In the node \"PoseEstimation\", the main thread aims to estimate sensor poses, while another thread in the class \"Estimator\" uses the class \"MapManager\" to build and manage feature maps.\n\nThe system is mainly designed for car platforms in the large scale outdoor environment.\nUsers can easily run the system with a Livox Horizon LiDAR.\n\nThe system starts with the node \"ScanRegistration\", where 
feature points are extracted. Before the feature extraction, dynamic objects are removed from the raw point cloud, since in urban scenes there are usually many dynamic objects, which \naffect system robustness and precision. For the dynamic objects filter, we use a fast point cloud segmentation method. The Euclidean clustering is applied to group points into some clusters. The raw point cloud is divided into ground points, background points, and foreground points. \nForeground points are considered as dynamic objects, which are excluded from the feature extraction process. Due to the dynamic objects filter, the system obtains high robustness in dynamic scenes.\n\nIn open scenarios, usually few features can be extracted, leading to degeneracy on certain degrees of freedom. To tackle this problem, we developed a feature extraction process to make the distribution of feature points wide and uniform. \nA uniform and wide distribution provides more constraints on all 6 degrees of freedom, which is helpful for eliminating degeneracy. Besides, some irregular points also provide information in feature-less\nscenes. Therefore, we also extract irregular features as a class for the point cloud registration.\nFeature points are classified into three types, corner features, surface features, and irregular features, according to their\nlocal geometry properties. We first extract points with large curvature and isolated points on each scan line as corner points. Then principal components analysis (PCA) is performed to classify surface features and irregular features, as shown in the following figure. \nFor points with different distances, thresholds are set to different values, in order to make the distribution of points in space as uniform as possible. 
\n\n<div align=\"center\">\n<img src=\"./doc/feature extraction.png\" height=\"400px\">\n</div>\n\nIn the node \"PoseEstimation\", the motion distortion of the point cloud is compensated using IMU preintegration or constant velocity model. Then the IMU initialization module is performed. If the Initialization\nis successfully finished, the system will switch to the LIO mode. Otherwise, it runs in LO mode and initializes IMU states. In the LO mode, we use a frame-to-model point cloud registration to estimate the sensor pose.\nInspired by ORB-SLAM3, a maximum a posteriori (MAP) estimation method is adopted to jointly initialize IMU biases, velocities, and the gravity direction.\nThis method doesn't need a careful initialization process. **The system can be initialized with an arbitrary motion.** This method takes into account sensor uncertainty, which obtains the optimum in the sense of maximum posterior probability.\nIt achieves efficient, robust, and accurate performance.\nAfter the initialization, a tightly coupled sliding window based sensor fusion module is performed to estimate IMU poses, biases, and velocities within the sliding window.\nSimultaneously, an extra thread builds and maintains the global map in parallel. 
\n\n\n## Prerequisites\n*  [Ubuntu](http://ubuntu.com) (tested on 16.04 and 18.04)\n*  [ROS](http://wiki.ros.org/ROS/Installation) (tested with Kinetic and Melodic)\n*  [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page)\n*  [Ceres Solver](http://ceres-solver.org/installation.html)\n*  [PCL](http://www.pointclouds.org/downloads/linux.html)\n*  [livox_ros_driver](https://github.com/Livox-SDK/livox_ros_driver)\n*  Suitesparse\n   ```\n   sudo apt-get install libsuitesparse-dev\n   ```\n\n## Compilation\n```\ncd ~/catkin_ws/src\ngit clone https://github.com/Livox-SDK/LIO-Livox\ncd ..\ncatkin_make\n```\n\n## Run with bag files:\n### Run the launch file:\n```\ncd ~/catkin_ws\nsource devel/setup.bash\nroslaunch lio_livox horizon.launch\n```\n\n#### Play your bag files:\n```\nrosbag play YOUR_ROSBAG.bag\n```\n\n## Run with your device:\n### Run your LiDAR with livox_ros_driver\n```\ncd ~/catkin_ws\nsource devel/setup.bash\nroslaunch livox_ros_driver livox_lidar_msg.launch\n```\n\n### Run the launch file:\n```\ncd ~/catkin_ws\nsource devel/setup.bash\nroslaunch lio_livox horizon.launch\n```\n\n## Notes:\nThe current version of the system is only adapted for Livox Horizon. In theory, it should be able to run directly with a Livox Avia, but we haven't done enough tests.\nBesides, the system doesn't provide an interface of Livox mid series. If you want to use mid-40 or mid-70, you can try [livox_mapping](https://github.com/Livox-SDK/livox_mapping).\n\nThe topic of point cloud messages is /livox/lidar and its type is livox_ros_driver/CustomMsg. 
\\\nThe topic of IMU messages is /livox/imu and its type is sensor_msgs/Imu.\n\nThere are some parameters in launch files:\n*  IMU_Mode: choose IMU information fusion strategy, there are 3 modes:\n    -  0 - without using IMU information, pure LiDAR odometry, motion distortion is removed using a constant velocity model \n    -  1 - using IMU preintegration to remove motion distortion \n    -  2 - tightly coupling IMU and LiDAR information\n*  Extrinsic_Tlb: extrinsic parameter between LiDAR and IMU, which uses SE3 form. If you want to use an external IMU, you need to calibrate your own sensor suite\nand change this parameter to your extrinsic parameter.\n\nThere are also some parameters in the config file:\n*  Use_seg: choose the segmentation mode for dynamic objects filtering, there are 2 modes:\n    -  0 - without using the segmentation method, you can choose this mode if there are few dynamic objects in your data\n    -  1 - using the segmentation method to remove dynamic objects\n\n## Acknowledgements\nThanks for the following work:\n* [LOAM](https://github.com/cuitaixiang/LOAM_NOTED) (LOAM: Lidar Odometry and Mapping in Real-time)\n* [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono) (VINS-Mono: A Robust and Versatile Monocular Visual-Inertial State Estimator)\n* [LIO-mapping](https://github.com/hyye/lio-mapping) (Tightly Coupled 3D Lidar Inertial Odometry and Mapping)\n* [ORB-SLAM3](https://github.com/UZ-SLAMLab/ORB_SLAM3) (ORB-SLAM3: An Accurate Open-Source Library for Visual, Visual-Inertial and Multi-Map SLAM)\n* [LiLi-OM](https://github.com/KIT-ISAS/lili-om) (Towards High-Performance Solid-State-LiDAR-Inertial Odometry and Mapping)\n* [MSCKF_VIO](https://github.com/KumarRobotics/msckf_vio) (Robust Stereo Visual Inertial Odometry for Fast Autonomous Flight)\n* [horizon_highway_slam](https://github.com/Livox-SDK/horizon_highway_slam)\n* [livox_mapping](https://github.com/Livox-SDK/livox_mapping)\n* 
[livox_horizon_loam](https://github.com/Livox-SDK/livox_horizon_loam)\n\n## Support\nYou can get support from Livox with the following methods:\n*  Send email to [cs@livoxtech.com](cs@livoxtech.com) with a clear description of your problem and your setup\n*  Report issues on github\n"
  },
  {
    "path": "LIO-Livox/cmake/FindEigen3.cmake",
    "content": "# - Try to find Eigen3 lib\n#\n# This module supports requiring a minimum version, e.g. you can do\n#   find_package(Eigen3 3.1.2)\n# to require version 3.1.2 or newer of Eigen3.\n#\n# Once done this will define\n#\n#  EIGEN3_FOUND - system has eigen lib with correct version\n#  EIGEN3_INCLUDE_DIR - the eigen include directory\n#  EIGEN3_VERSION - eigen version\n\n# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\n# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\n# Copyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n# Redistribution and use is allowed according to the terms of the 2-clause BSD license.\n\nif(NOT Eigen3_FIND_VERSION)\n  if(NOT Eigen3_FIND_VERSION_MAJOR)\n    set(Eigen3_FIND_VERSION_MAJOR 2)\n  endif(NOT Eigen3_FIND_VERSION_MAJOR)\n  if(NOT Eigen3_FIND_VERSION_MINOR)\n    set(Eigen3_FIND_VERSION_MINOR 91)\n  endif(NOT Eigen3_FIND_VERSION_MINOR)\n  if(NOT Eigen3_FIND_VERSION_PATCH)\n    set(Eigen3_FIND_VERSION_PATCH 0)\n  endif(NOT Eigen3_FIND_VERSION_PATCH)\n\n  set(Eigen3_FIND_VERSION \"${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}\")\nendif(NOT Eigen3_FIND_VERSION)\n\nmacro(_eigen3_check_version)\n  file(READ \"${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h\" _eigen3_version_header)\n\n  string(REGEX MATCH \"define[ \\t]+EIGEN_WORLD_VERSION[ \\t]+([0-9]+)\" _eigen3_world_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_WORLD_VERSION \"${CMAKE_MATCH_1}\")\n  string(REGEX MATCH \"define[ \\t]+EIGEN_MAJOR_VERSION[ \\t]+([0-9]+)\" _eigen3_major_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_MAJOR_VERSION \"${CMAKE_MATCH_1}\")\n  string(REGEX MATCH \"define[ \\t]+EIGEN_MINOR_VERSION[ \\t]+([0-9]+)\" _eigen3_minor_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_MINOR_VERSION \"${CMAKE_MATCH_1}\")\n\n  set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})\n  if(${EIGEN3_VERSION} VERSION_LESS 
${Eigen3_FIND_VERSION})\n    set(EIGEN3_VERSION_OK FALSE)\n  else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})\n    set(EIGEN3_VERSION_OK TRUE)\n  endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})\n\n  if(NOT EIGEN3_VERSION_OK)\n\n    message(STATUS \"Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, \"\n                   \"but at least version ${Eigen3_FIND_VERSION} is required\")\n  endif(NOT EIGEN3_VERSION_OK)\nendmacro(_eigen3_check_version)\n\nif (EIGEN3_INCLUDE_DIR)\n\n  # in cache already\n  _eigen3_check_version()\n  set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})\n\nelse (EIGEN3_INCLUDE_DIR)\n\n  # specific additional paths for some OS\n  if (WIN32)\n    set(EIGEN_ADDITIONAL_SEARCH_PATHS ${EIGEN_ADDITIONAL_SEARCH_PATHS} \"C:/Program Files/Eigen/include\" \"C:/Program Files (x86)/Eigen/include\")\n  endif(WIN32)\n\n  find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library\n      PATHS\n      ${CMAKE_INSTALL_PREFIX}/include\n      ${EIGEN_ADDITIONAL_SEARCH_PATHS}\n      ${KDE4_INCLUDE_DIR}\n      PATH_SUFFIXES eigen3 eigen\n    )\n\n  if(EIGEN3_INCLUDE_DIR)\n    _eigen3_check_version()\n  endif(EIGEN3_INCLUDE_DIR)\n\n  include(FindPackageHandleStandardArgs)\n  find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK)\n\n  mark_as_advanced(EIGEN3_INCLUDE_DIR)\n\nendif(EIGEN3_INCLUDE_DIR)\n\n"
  },
  {
    "path": "LIO-Livox/cmake/FindSuiteSparse.cmake",
    "content": "# Ceres Solver - A fast non-linear least squares minimizer\n# Copyright 2015 Google Inc. All rights reserved.\n# http://ceres-solver.org/\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n#   this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n#   this list of conditions and the following disclaimer in the documentation\n#   and/or other materials provided with the distribution.\n# * Neither the name of Google Inc. nor the names of its contributors may be\n#   used to endorse or promote products derived from this software without\n#   specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Author: alexs.mac@gmail.com (Alex Stewart)\n#\n\n# FindSuiteSparse.cmake - Find SuiteSparse libraries & dependencies.\n#\n# This module defines the following variables:\n#\n# SUITESPARSE_FOUND: TRUE iff SuiteSparse and all dependencies have been found.\n# SUITESPARSE_INCLUDE_DIRS: Include directories for all SuiteSparse components.\n# SUITESPARSE_LIBRARIES: Libraries for all SuiteSparse component libraries and\n#                        dependencies.\n# SUITESPARSE_VERSION: Extracted from UFconfig.h (<= v3) or\n#                      SuiteSparse_config.h (>= v4).\n# SUITESPARSE_MAIN_VERSION: Equal to 4 if SUITESPARSE_VERSION = 4.2.1\n# SUITESPARSE_SUB_VERSION: Equal to 2 if SUITESPARSE_VERSION = 4.2.1\n# SUITESPARSE_SUBSUB_VERSION: Equal to 1 if SUITESPARSE_VERSION = 4.2.1\n#\n# SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION: TRUE iff running\n#     on Ubuntu, SUITESPARSE_VERSION is 3.4.0 and found SuiteSparse is a system\n#     install, in which case found version of SuiteSparse cannot be used to link\n#     a shared library due to a bug (static linking is unaffected).\n#\n# The following variables control the behaviour of this module:\n#\n# SUITESPARSE_INCLUDE_DIR_HINTS: List of additional directories in which to\n#                                search for SuiteSparse includes,\n#                                e.g: /timbuktu/include.\n# SUITESPARSE_LIBRARY_DIR_HINTS: List of additional directories in which to\n#         
                       search for SuiteSparse libraries,\n#                                e.g: /timbuktu/lib.\n#\n# The following variables define the presence / includes & libraries for the\n# SuiteSparse components searched for, the SUITESPARSE_XX variables are the\n# union of the variables for all components.\n#\n# == Symmetric Approximate Minimum Degree (AMD)\n# AMD_FOUND\n# AMD_INCLUDE_DIR\n# AMD_LIBRARY\n#\n# == Constrained Approximate Minimum Degree (CAMD)\n# CAMD_FOUND\n# CAMD_INCLUDE_DIR\n# CAMD_LIBRARY\n#\n# == Column Approximate Minimum Degree (COLAMD)\n# COLAMD_FOUND\n# COLAMD_INCLUDE_DIR\n# COLAMD_LIBRARY\n#\n# Constrained Column Approximate Minimum Degree (CCOLAMD)\n# CCOLAMD_FOUND\n# CCOLAMD_INCLUDE_DIR\n# CCOLAMD_LIBRARY\n#\n# == Sparse Supernodal Cholesky Factorization and Update/Downdate (CHOLMOD)\n# CHOLMOD_FOUND\n# CHOLMOD_INCLUDE_DIR\n# CHOLMOD_LIBRARY\n#\n# == Multifrontal Sparse QR (SuiteSparseQR)\n# SUITESPARSEQR_FOUND\n# SUITESPARSEQR_INCLUDE_DIR\n# SUITESPARSEQR_LIBRARY\n#\n# == Common configuration for all but CSparse (SuiteSparse version >= 4).\n# SUITESPARSE_CONFIG_FOUND\n# SUITESPARSE_CONFIG_INCLUDE_DIR\n# SUITESPARSE_CONFIG_LIBRARY\n#\n# == Common configuration for all but CSparse (SuiteSparse version < 4).\n# UFCONFIG_FOUND\n# UFCONFIG_INCLUDE_DIR\n#\n# Optional SuiteSparse Dependencies:\n#\n# == Serial Graph Partitioning and Fill-reducing Matrix Ordering (METIS)\n# METIS_FOUND\n# METIS_LIBRARY\n#\n# == Intel Thread Building Blocks (TBB)\n# TBB_FOUND\n# TBB_LIBRARY\n# TBB_MALLOC_FOUND\n# TBB_MALLOC_LIBRARY\n\n# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when\n# FindSuiteSparse was invoked.\nmacro(SUITESPARSE_RESET_FIND_LIBRARY_PREFIX)\n  if (MSVC)\n    set(CMAKE_FIND_LIBRARY_PREFIXES \"${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}\")\n  endif (MSVC)\nendmacro(SUITESPARSE_RESET_FIND_LIBRARY_PREFIX)\n\n# Called if we failed to find SuiteSparse or any of it's required dependencies,\n# unsets all public (designed to be used 
externally) variables and reports\n# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.\nmacro(SUITESPARSE_REPORT_NOT_FOUND REASON_MSG)\n  unset(SUITESPARSE_FOUND)\n  unset(SUITESPARSE_INCLUDE_DIRS)\n  unset(SUITESPARSE_LIBRARIES)\n  unset(SUITESPARSE_VERSION)\n  unset(SUITESPARSE_MAIN_VERSION)\n  unset(SUITESPARSE_SUB_VERSION)\n  unset(SUITESPARSE_SUBSUB_VERSION)\n  # Do NOT unset SUITESPARSE_FOUND_REQUIRED_VARS here, as it is used by\n  # FindPackageHandleStandardArgs() to generate the automatic error message on\n  # failure which highlights which components are missing.\n\n  suitesparse_reset_find_library_prefix()\n\n  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()\n  # use the camelcase library name, not uppercase.\n  if (SuiteSparse_FIND_QUIETLY)\n    message(STATUS \"Failed to find SuiteSparse - \" ${REASON_MSG} ${ARGN})\n  elseif (SuiteSparse_FIND_REQUIRED)\n    message(FATAL_ERROR \"Failed to find SuiteSparse - \" ${REASON_MSG} ${ARGN})\n  else()\n    # Neither QUIETLY nor REQUIRED, use no priority which emits a message\n    # but continues configuration and allows generation.\n    message(\"-- Failed to find SuiteSparse - \" ${REASON_MSG} ${ARGN})\n  endif (SuiteSparse_FIND_QUIETLY)\n\n  # Do not call return(), s/t we keep processing if not called with REQUIRED\n  # and report all missing components, rather than bailing after failing to find\n  # the first.\nendmacro(SUITESPARSE_REPORT_NOT_FOUND)\n\n# Protect against any alternative find_package scripts for this library having\n# been called previously (in a client project) which set SUITESPARSE_FOUND, but\n# not the other variables we require / set here which could cause the search\n# logic here to fail.\nunset(SUITESPARSE_FOUND)\n\n# Handle possible presence of lib prefix for libraries on MSVC, see\n# also SUITESPARSE_RESET_FIND_LIBRARY_PREFIX().\nif (MSVC)\n  # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES\n  # s/t we can set 
it back before returning.\n  set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES \"${CMAKE_FIND_LIBRARY_PREFIXES}\")\n  # The empty string in this list is important, it represents the case when\n  # the libraries have no prefix (shared libraries / DLLs).\n  set(CMAKE_FIND_LIBRARY_PREFIXES \"lib\" \"\" \"${CMAKE_FIND_LIBRARY_PREFIXES}\")\nendif (MSVC)\n\n# Specify search directories for include files and libraries (this is the union\n# of the search directories for all OSs).  Search user-specified hint\n# directories first if supplied, and search user-installed locations first\n# so that we prefer user installs to system installs where both exist.\nlist(APPEND SUITESPARSE_CHECK_INCLUDE_DIRS\n  /opt/local/include\n  /opt/local/include/ufsparse # Mac OS X\n  /usr/local/homebrew/include # Mac OS X\n  /usr/local/include\n  /usr/include)\nlist(APPEND SUITESPARSE_CHECK_LIBRARY_DIRS\n  /opt/local/lib\n  /opt/local/lib/ufsparse # Mac OS X\n  /usr/local/homebrew/lib # Mac OS X\n  /usr/local/lib\n  /usr/lib)\n# Additional suffixes to try appending to each search path.\nlist(APPEND SUITESPARSE_CHECK_PATH_SUFFIXES\n  suitesparse) # Windows/Ubuntu\n\n# Wrappers to find_path/library that pass the SuiteSparse search hints/paths.\n#\n# suitesparse_find_component(<component> [FILES name1 [name2 ...]]\n#                                        [LIBRARIES name1 [name2 ...]]\n#                                        [REQUIRED])\nmacro(suitesparse_find_component COMPONENT)\n  include(CMakeParseArguments)\n  set(OPTIONS REQUIRED)\n  set(MULTI_VALUE_ARGS FILES LIBRARIES)\n  cmake_parse_arguments(SUITESPARSE_FIND_${COMPONENT}\n    \"${OPTIONS}\" \"\" \"${MULTI_VALUE_ARGS}\" ${ARGN})\n\n  if (SUITESPARSE_FIND_${COMPONENT}_REQUIRED)\n    list(APPEND SUITESPARSE_FOUND_REQUIRED_VARS ${COMPONENT}_FOUND)\n  endif()\n\n  set(${COMPONENT}_FOUND TRUE)\n  if (SUITESPARSE_FIND_${COMPONENT}_FILES)\n    find_path(${COMPONENT}_INCLUDE_DIR\n      NAMES ${SUITESPARSE_FIND_${COMPONENT}_FILES}\n      HINTS 
${SUITESPARSE_INCLUDE_DIR_HINTS}\n      PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS}\n      PATH_SUFFIXES ${SUITESPARSE_CHECK_PATH_SUFFIXES})\n    if (${COMPONENT}_INCLUDE_DIR)\n      message(STATUS \"Found ${COMPONENT} headers in: \"\n        \"${${COMPONENT}_INCLUDE_DIR}\")\n      mark_as_advanced(${COMPONENT}_INCLUDE_DIR)\n    else()\n      # Specified headers not found.\n      set(${COMPONENT}_FOUND FALSE)\n      if (SUITESPARSE_FIND_${COMPONENT}_REQUIRED)\n        suitesparse_report_not_found(\n          \"Did not find ${COMPONENT} header (required SuiteSparse component).\")\n      else()\n        message(STATUS \"Did not find ${COMPONENT} header (optional \"\n          \"SuiteSparse component).\")\n        # Hide optional vars from CMake GUI even if not found.\n        mark_as_advanced(${COMPONENT}_INCLUDE_DIR)\n      endif()\n    endif()\n  endif()\n\n  if (SUITESPARSE_FIND_${COMPONENT}_LIBRARIES)\n    find_library(${COMPONENT}_LIBRARY\n      NAMES ${SUITESPARSE_FIND_${COMPONENT}_LIBRARIES}\n      HINTS ${SUITESPARSE_LIBRARY_DIR_HINTS}\n      PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS}\n      PATH_SUFFIXES ${SUITESPARSE_CHECK_PATH_SUFFIXES})\n    if (${COMPONENT}_LIBRARY)\n      message(STATUS \"Found ${COMPONENT} library: ${${COMPONENT}_LIBRARY}\")\n      mark_as_advanced(${COMPONENT}_LIBRARY)\n    else ()\n      # Specified libraries not found.\n      set(${COMPONENT}_FOUND FALSE)\n      if (SUITESPARSE_FIND_${COMPONENT}_REQUIRED)\n        suitesparse_report_not_found(\n          \"Did not find ${COMPONENT} library (required SuiteSparse component).\")\n      else()\n        message(STATUS \"Did not find ${COMPONENT} library (optional SuiteSparse \"\n          \"dependency)\")\n        # Hide optional vars from CMake GUI even if not found.\n        mark_as_advanced(${COMPONENT}_LIBRARY)\n      endif()\n    endif()\n  endif()\nendmacro()\n\n# Given the number of components of SuiteSparse, and to ensure that the\n# automatic failure message generated by 
FindPackageHandleStandardArgs()\n# when not all required components are found is helpful, we maintain a list\n# of all variables that must be defined for SuiteSparse to be considered found.\nunset(SUITESPARSE_FOUND_REQUIRED_VARS)\n\n# BLAS.\nfind_package(BLAS QUIET)\nif (NOT BLAS_FOUND)\n  suitesparse_report_not_found(\n    \"Did not find BLAS library (required for SuiteSparse).\")\nendif (NOT BLAS_FOUND)\nlist(APPEND SUITESPARSE_FOUND_REQUIRED_VARS BLAS_FOUND)\n\n# LAPACK.\nfind_package(LAPACK QUIET)\nif (NOT LAPACK_FOUND)\n  suitesparse_report_not_found(\n    \"Did not find LAPACK library (required for SuiteSparse).\")\nendif (NOT LAPACK_FOUND)\nlist(APPEND SUITESPARSE_FOUND_REQUIRED_VARS LAPACK_FOUND)\n\nsuitesparse_find_component(AMD REQUIRED FILES amd.h LIBRARIES amd)\nsuitesparse_find_component(CAMD REQUIRED FILES camd.h LIBRARIES camd)\nsuitesparse_find_component(COLAMD REQUIRED FILES colamd.h LIBRARIES colamd)\nsuitesparse_find_component(CCOLAMD REQUIRED FILES ccolamd.h LIBRARIES ccolamd)\nsuitesparse_find_component(CHOLMOD REQUIRED FILES cholmod.h LIBRARIES cholmod)\nsuitesparse_find_component(\n  SUITESPARSEQR REQUIRED FILES SuiteSparseQR.hpp LIBRARIES spqr)\nif (SUITESPARSEQR_FOUND)\n  # SuiteSparseQR may be compiled with Intel Threading Building Blocks,\n  # we assume that if TBB is installed, SuiteSparseQR was compiled with\n  # support for it, this will do no harm if it wasn't.\n  find_package(TBB QUIET)\n  if (TBB_FOUND)\n    message(STATUS \"Found Intel Thread Building Blocks (TBB) library \"\n      \"(${TBB_VERSION}) assuming SuiteSparseQR was compiled \"\n      \"with TBB.\")\n    # Add the TBB libraries to the SuiteSparseQR libraries (the only\n    # libraries to optionally depend on TBB).\n    list(APPEND SUITESPARSEQR_LIBRARY ${TBB_LIBRARIES})\n  else()\n    message(STATUS \"Did not find Intel TBB library, assuming SuiteSparseQR was \"\n      \"not compiled with TBB.\")\n  endif()\nendif(SUITESPARSEQR_FOUND)\n\n# UFconfig / 
SuiteSparse_config.\n#\n# If SuiteSparse version is >= 4 then SuiteSparse_config is required.\n# For SuiteSparse 3, UFconfig.h is required.\nsuitesparse_find_component(\n  SUITESPARSE_CONFIG FILES SuiteSparse_config.h LIBRARIES suitesparseconfig)\n\nif (SUITESPARSE_CONFIG_FOUND)\n  # SuiteSparse_config (SuiteSparse version >= 4) requires librt library for\n  # timing by default when compiled on Linux or Unix, but not on OSX (which\n  # does not have librt).\n  if (CMAKE_SYSTEM_NAME MATCHES \"Linux\" OR UNIX AND NOT APPLE)\n    suitesparse_find_component(LIBRT LIBRARIES rt)\n    if (LIBRT_FOUND)\n      message(STATUS \"Adding librt: ${LIBRT_LIBRARY} to \"\n        \"SuiteSparse_config libraries (required on Linux & Unix [not OSX] if \"\n        \"SuiteSparse is compiled with timing).\")\n      list(APPEND SUITESPARSE_CONFIG_LIBRARY ${LIBRT_LIBRARY})\n    else()\n      message(STATUS \"Could not find librt, but found SuiteSparse_config, \"\n        \"assuming that SuiteSparse was compiled without timing.\")\n    endif ()\n  endif (CMAKE_SYSTEM_NAME MATCHES \"Linux\" OR UNIX AND NOT APPLE)\nelse()\n  # Failed to find SuiteSparse_config (>= v4 installs), instead look for\n  # UFconfig header which should be present in < v4 installs.\n  suitesparse_find_component(UFCONFIG FILES UFconfig.h)\nendif ()\n\nif (NOT SUITESPARSE_CONFIG_FOUND AND\n    NOT UFCONFIG_FOUND)\n  suitesparse_report_not_found(\n    \"Failed to find either: SuiteSparse_config header & library (should be \"\n    \"present in all SuiteSparse >= v4 installs), or UFconfig header (should \"\n    \"be present in all SuiteSparse < v4 installs).\")\nendif()\n\n# Extract the SuiteSparse version from the appropriate header (UFconfig.h for\n# <= v3, SuiteSparse_config.h for >= v4).\nlist(APPEND SUITESPARSE_FOUND_REQUIRED_VARS SUITESPARSE_VERSION)\n\nif (UFCONFIG_FOUND)\n  # SuiteSparse version <= 3.\n  set(SUITESPARSE_VERSION_FILE ${UFCONFIG_INCLUDE_DIR}/UFconfig.h)\n  if (NOT EXISTS 
${SUITESPARSE_VERSION_FILE})\n    suitesparse_report_not_found(\n      \"Could not find file: ${SUITESPARSE_VERSION_FILE} containing version \"\n      \"information for <= v3 SuiteSparse installs, but UFconfig was found \"\n      \"(only present in <= v3 installs).\")\n  else (NOT EXISTS ${SUITESPARSE_VERSION_FILE})\n    file(READ ${SUITESPARSE_VERSION_FILE} UFCONFIG_CONTENTS)\n\n    string(REGEX MATCH \"#define SUITESPARSE_MAIN_VERSION [0-9]+\"\n      SUITESPARSE_MAIN_VERSION \"${UFCONFIG_CONTENTS}\")\n    string(REGEX REPLACE \"#define SUITESPARSE_MAIN_VERSION ([0-9]+)\" \"\\\\1\"\n      SUITESPARSE_MAIN_VERSION \"${SUITESPARSE_MAIN_VERSION}\")\n\n    string(REGEX MATCH \"#define SUITESPARSE_SUB_VERSION [0-9]+\"\n      SUITESPARSE_SUB_VERSION \"${UFCONFIG_CONTENTS}\")\n    string(REGEX REPLACE \"#define SUITESPARSE_SUB_VERSION ([0-9]+)\" \"\\\\1\"\n      SUITESPARSE_SUB_VERSION \"${SUITESPARSE_SUB_VERSION}\")\n\n    string(REGEX MATCH \"#define SUITESPARSE_SUBSUB_VERSION [0-9]+\"\n      SUITESPARSE_SUBSUB_VERSION \"${UFCONFIG_CONTENTS}\")\n    string(REGEX REPLACE \"#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)\" \"\\\\1\"\n      SUITESPARSE_SUBSUB_VERSION \"${SUITESPARSE_SUBSUB_VERSION}\")\n\n    # This is on a single line s/t CMake does not interpret it as a list of\n    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.\n    set(SUITESPARSE_VERSION\n      \"${SUITESPARSE_MAIN_VERSION}.${SUITESPARSE_SUB_VERSION}.${SUITESPARSE_SUBSUB_VERSION}\")\n  endif (NOT EXISTS ${SUITESPARSE_VERSION_FILE})\nendif (UFCONFIG_FOUND)\n\nif (SUITESPARSE_CONFIG_FOUND)\n  # SuiteSparse version >= 4.\n  set(SUITESPARSE_VERSION_FILE\n    ${SUITESPARSE_CONFIG_INCLUDE_DIR}/SuiteSparse_config.h)\n  if (NOT EXISTS ${SUITESPARSE_VERSION_FILE})\n    suitesparse_report_not_found(\n      \"Could not find file: ${SUITESPARSE_VERSION_FILE} containing version \"\n      \"information for >= v4 SuiteSparse installs, but SuiteSparse_config was \"\n      \"found (only 
present in >= v4 installs).\")\n  else (NOT EXISTS ${SUITESPARSE_VERSION_FILE})\n    file(READ ${SUITESPARSE_VERSION_FILE} SUITESPARSE_CONFIG_CONTENTS)\n\n    string(REGEX MATCH \"#define SUITESPARSE_MAIN_VERSION [0-9]+\"\n      SUITESPARSE_MAIN_VERSION \"${SUITESPARSE_CONFIG_CONTENTS}\")\n    string(REGEX REPLACE \"#define SUITESPARSE_MAIN_VERSION ([0-9]+)\" \"\\\\1\"\n      SUITESPARSE_MAIN_VERSION \"${SUITESPARSE_MAIN_VERSION}\")\n\n    string(REGEX MATCH \"#define SUITESPARSE_SUB_VERSION [0-9]+\"\n      SUITESPARSE_SUB_VERSION \"${SUITESPARSE_CONFIG_CONTENTS}\")\n    string(REGEX REPLACE \"#define SUITESPARSE_SUB_VERSION ([0-9]+)\" \"\\\\1\"\n      SUITESPARSE_SUB_VERSION \"${SUITESPARSE_SUB_VERSION}\")\n\n    string(REGEX MATCH \"#define SUITESPARSE_SUBSUB_VERSION [0-9]+\"\n      SUITESPARSE_SUBSUB_VERSION \"${SUITESPARSE_CONFIG_CONTENTS}\")\n    string(REGEX REPLACE \"#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)\" \"\\\\1\"\n      SUITESPARSE_SUBSUB_VERSION \"${SUITESPARSE_SUBSUB_VERSION}\")\n\n    # This is on a single line s/t CMake does not interpret it as a list of\n    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.\n    set(SUITESPARSE_VERSION\n      \"${SUITESPARSE_MAIN_VERSION}.${SUITESPARSE_SUB_VERSION}.${SUITESPARSE_SUBSUB_VERSION}\")\n  endif (NOT EXISTS ${SUITESPARSE_VERSION_FILE})\nendif (SUITESPARSE_CONFIG_FOUND)\n\n# METIS (Optional dependency).\nsuitesparse_find_component(METIS LIBRARIES metis)\n\n# Only mark SuiteSparse as found if all required components and dependencies\n# have been found.\nset(SUITESPARSE_FOUND TRUE)\nforeach(REQUIRED_VAR ${SUITESPARSE_FOUND_REQUIRED_VARS})\n  if (NOT ${REQUIRED_VAR})\n    set(SUITESPARSE_FOUND FALSE)\n  endif (NOT ${REQUIRED_VAR})\nendforeach(REQUIRED_VAR ${SUITESPARSE_FOUND_REQUIRED_VARS})\n\nif (SUITESPARSE_FOUND)\n  list(APPEND SUITESPARSE_INCLUDE_DIRS\n    ${AMD_INCLUDE_DIR}\n    ${CAMD_INCLUDE_DIR}\n    ${COLAMD_INCLUDE_DIR}\n    ${CCOLAMD_INCLUDE_DIR}\n    
${CHOLMOD_INCLUDE_DIR}\n    ${SUITESPARSEQR_INCLUDE_DIR})\n  # Handle config separately, as otherwise at least one of them will be set\n  # to NOTFOUND which would cause any check on SUITESPARSE_INCLUDE_DIRS to fail.\n  if (SUITESPARSE_CONFIG_FOUND)\n    list(APPEND SUITESPARSE_INCLUDE_DIRS\n      ${SUITESPARSE_CONFIG_INCLUDE_DIR})\n  endif (SUITESPARSE_CONFIG_FOUND)\n  if (UFCONFIG_FOUND)\n    list(APPEND SUITESPARSE_INCLUDE_DIRS\n      ${UFCONFIG_INCLUDE_DIR})\n  endif (UFCONFIG_FOUND)\n  # As SuiteSparse includes are often all in the same directory, remove any\n  # repetitions.\n  list(REMOVE_DUPLICATES SUITESPARSE_INCLUDE_DIRS)\n\n  # Important: The ordering of these libraries is *NOT* arbitrary, as these\n  # could potentially be static libraries their link ordering is important.\n  list(APPEND SUITESPARSE_LIBRARIES\n    ${SUITESPARSEQR_LIBRARY}\n    ${CHOLMOD_LIBRARY}\n    ${CCOLAMD_LIBRARY}\n    ${CAMD_LIBRARY}\n    ${COLAMD_LIBRARY}\n    ${AMD_LIBRARY}\n    ${LAPACK_LIBRARIES}\n    ${BLAS_LIBRARIES})\n  if (SUITESPARSE_CONFIG_FOUND)\n    list(APPEND SUITESPARSE_LIBRARIES\n      ${SUITESPARSE_CONFIG_LIBRARY})\n  endif (SUITESPARSE_CONFIG_FOUND)\n  if (METIS_FOUND)\n    list(APPEND SUITESPARSE_LIBRARIES\n      ${METIS_LIBRARY})\n  endif (METIS_FOUND)\nendif()\n\n# Determine if we are running on Ubuntu with the package install of SuiteSparse\n# which is broken and does not support linking a shared library.\nset(SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION FALSE)\nif (CMAKE_SYSTEM_NAME MATCHES \"Linux\" AND\n    SUITESPARSE_VERSION VERSION_EQUAL 3.4.0)\n  find_program(LSB_RELEASE_EXECUTABLE lsb_release)\n  if (LSB_RELEASE_EXECUTABLE)\n    # Any even moderately recent Ubuntu release (likely to be affected by\n    # this bug) should have lsb_release, if it isn't present we are likely\n    # on a different Linux distribution (should be fine).\n    execute_process(COMMAND ${LSB_RELEASE_EXECUTABLE} -si\n      OUTPUT_VARIABLE LSB_DISTRIBUTOR_ID\n      
OUTPUT_STRIP_TRAILING_WHITESPACE)\n\n    if (LSB_DISTRIBUTOR_ID MATCHES \"Ubuntu\" AND\n        SUITESPARSE_LIBRARIES MATCHES \"/usr/lib/libamd\")\n      # We are on Ubuntu, and the SuiteSparse version matches the broken\n      # system install version and is a system install.\n      set(SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION TRUE)\n      message(STATUS \"Found system install of SuiteSparse \"\n        \"${SUITESPARSE_VERSION} running on Ubuntu, which has a known bug \"\n        \"preventing linking of shared libraries (static linking unaffected).\")\n    endif (LSB_DISTRIBUTOR_ID MATCHES \"Ubuntu\" AND\n      SUITESPARSE_LIBRARIES MATCHES \"/usr/lib/libamd\")\n  endif (LSB_RELEASE_EXECUTABLE)\nendif (CMAKE_SYSTEM_NAME MATCHES \"Linux\" AND\n  SUITESPARSE_VERSION VERSION_EQUAL 3.4.0)\n\nsuitesparse_reset_find_library_prefix()\n\n# Handle REQUIRED and QUIET arguments to FIND_PACKAGE\ninclude(FindPackageHandleStandardArgs)\nif (SUITESPARSE_FOUND)\n  find_package_handle_standard_args(SuiteSparse\n    REQUIRED_VARS ${SUITESPARSE_FOUND_REQUIRED_VARS}\n    VERSION_VAR SUITESPARSE_VERSION\n    FAIL_MESSAGE \"Failed to find some/all required components of SuiteSparse.\")\nelse (SUITESPARSE_FOUND)\n  # Do not pass VERSION_VAR to FindPackageHandleStandardArgs() if we failed to\n  # find SuiteSparse to avoid a confusing autogenerated failure message\n  # that states 'not found (missing: FOO) (found version: x.y.z)'.\n  find_package_handle_standard_args(SuiteSparse\n    REQUIRED_VARS ${SUITESPARSE_FOUND_REQUIRED_VARS}\n    FAIL_MESSAGE \"Failed to find some/all required components of SuiteSparse.\")\nendif (SUITESPARSE_FOUND)\n"
  },
  {
    "path": "LIO-Livox/config/horizon_config.yaml",
    "content": "%YAML:1.0\n\n\n# switches\nLidar_Type: 0    # 0-horizon\nUsed_Line: 6    # lines used for lio, set to 1~6\nFeature_Mode: 0    # 0(false) or 1(true)\nNumCurvSize: 2\nDistanceFaraway: 100 # [m]  <DistanceFaraway near / >DistanceFaraway far\nNumFlat: 3 # nums of one part's flat feature\nPartNum: 150 # nums of one scan's parts\nFlatThreshold: 0.02 # cloud curvature threshold of flat feature\nBreakCornerDis: 1 # break distance of break points\nLidarNearestDis: 1.0 # if(depth < LidarNearestDis) do not use this point \nKdTreeCornerOutlierDis: 0.2 # corner filter threshold\nUse_seg: 1 # use segment algorithm\nmap_skip_frame: 2\n"
  },
  {
    "path": "LIO-Livox/include/Estimator/Estimator.h",
    "content": "#ifndef LIO_LIVOX_ESTIMATOR_H\n#define LIO_LIVOX_ESTIMATOR_H\n\n#include <ros/ros.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <sensor_msgs/PointCloud2.h>\n#include <sensor_msgs/NavSatFix.h>\n#include <visualization_msgs/Marker.h>\n#include <visualization_msgs/MarkerArray.h>\n#include <nav_msgs/Odometry.h>\n#include <nav_msgs/Path.h>\n#include <tf/tf.h>\n#include <tf/transform_broadcaster.h>\n#include <Eigen/Core>\n#include <sensor_msgs/Imu.h>\n#include <queue>\n#include <iterator>\n#include <future>\n#include \"MapManager/Map_Manager.h\"\n#include \"utils/ceresfunc.h\"\n#include \"utils/pcl_utils.hpp\"\n#include \"IMUIntegrator/IMUIntegrator.h\"\n#include <chrono>\n\nclass Estimator{\n\ttypedef pcl::PointXYZINormal PointType;\npublic:\n\t/** \\brief slide window size */\n\tstatic const int SLIDEWINDOWSIZE = 10;\n\n\t/** \\brief lidar frame struct */\n\tstruct LidarFrame{\n\t\tpcl::PointCloud<PointType>::Ptr laserCloud;\n\t\tIMUIntegrator imuIntegrator;\n\t\tEigen::Vector3d P;\n\t\tEigen::Vector3d V;\n\t\tEigen::Quaterniond Q;\n\t\tEigen::Vector3d bg;\n\t\tEigen::Vector3d ba;\n    Eigen::VectorXf ground_plane_coeff;\n\t\tdouble timeStamp;\n\t\tLidarFrame(){\n\t\t\tP.setZero();\n\t\t\tV.setZero();\n\t\t\tQ.setIdentity();\n\t\t\tbg.setZero();\n\t\t\tba.setZero();\n\t\t\ttimeStamp = 0;\n\t\t}\n\t};\n\n\t/** \\brief point to line feature */\n\tstruct FeatureLine{\n\t\tEigen::Vector3d pointOri;\n\t\tEigen::Vector3d lineP1;\n\t\tEigen::Vector3d lineP2;\n\t\tdouble error;\n\t\tbool valid;\n\t\tFeatureLine(Eigen::Vector3d  po, Eigen::Vector3d  p1, Eigen::Vector3d  p2)\n\t\t\t\t\t\t:pointOri(std::move(po)), lineP1(std::move(p1)), lineP2(std::move(p2)){\n\t\t\tvalid = false;\n\t\t\terror = 0;\n\t\t}\n\t\tdouble ComputeError(const Eigen::Matrix4d& pose){\n\t\t\tEigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1);\n\t\t\tdouble l12 = std::sqrt((lineP1(0) - lineP2(0))*(lineP1(0) - lineP2(0)) + (lineP1(1) - 
lineP2(1))*\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(lineP1(1) - lineP2(1)) + (lineP1(2) - lineP2(2))*(lineP1(2) - lineP2(2)));\n\t\t\tdouble a012 = std::sqrt(\n\t\t\t\t\t\t\t((P_to_Map(0) - lineP1(0)) * (P_to_Map(1) - lineP2(1)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(1) - lineP1(1)))\n\t\t\t\t\t\t\t* ((P_to_Map(0) - lineP1(0)) * (P_to_Map(1) - lineP2(1)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(1) - lineP1(1)))\n\t\t\t\t\t\t\t+ ((P_to_Map(0) - lineP1(0)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(2) - lineP1(2)))\n\t\t\t\t\t\t\t\t* ((P_to_Map(0) - lineP1(0)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(0) - lineP2(0)) * (P_to_Map(2) - lineP1(2)))\n\t\t\t\t\t\t\t+ ((P_to_Map(1) - lineP1(1)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(1) - lineP2(1)) * (P_to_Map(2) - lineP1(2)))\n\t\t\t\t\t\t\t\t* ((P_to_Map(1) - lineP1(1)) * (P_to_Map(2) - lineP2(2)) - (P_to_Map(1) - lineP2(1)) * (P_to_Map(2) - lineP1(2))));\n\t\t\terror = a012 / l12;\n\t\t}\n\t};\n\n\t/** \\brief point to plan feature */\n\tstruct FeaturePlan{\n\t\tEigen::Vector3d pointOri;\n\t\tdouble pa;\n\t\tdouble pb;\n\t\tdouble pc;\n\t\tdouble pd;\n\t\tdouble error;\n\t\tbool valid;\n\t\tFeaturePlan(const Eigen::Vector3d& po, const double& pa_, const double& pb_, const double& pc_, const double& pd_)\n\t\t\t\t\t\t:pointOri(po), pa(pa_), pb(pb_), pc(pc_), pd(pd_){\n\t\t\tvalid = false;\n\t\t\terror = 0;\n\t\t}\n\t\tdouble ComputeError(const Eigen::Matrix4d& pose){\n\t\t\tEigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1);\n\t\t\terror = pa * P_to_Map(0) + pb * P_to_Map(1) + pc * P_to_Map(2) + pd;\n\t\t}\n\t};\n\n\t/** \\brief point to plan feature */\n\tstruct FeaturePlanVec{\n\t\tEigen::Vector3d pointOri;\n\t\tEigen::Vector3d pointProj;\n\t\tEigen::Matrix3d sqrt_info;\n\t\tdouble error;\n\t\tbool valid;\n\t\tFeaturePlanVec(const Eigen::Vector3d& po, const Eigen::Vector3d& p_proj, Eigen::Matrix3d sqrt_info_)\n\t\t\t\t\t\t:pointOri(po), 
pointProj(p_proj), sqrt_info(sqrt_info_) {\n\t\t\tvalid = false;\n\t\t\terror = 0;\n\t\t}\n\t\tdouble ComputeError(const Eigen::Matrix4d& pose){\n\t\t\tEigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1);\n\t\t\terror = (P_to_Map - pointProj).norm();\n\t\t}\n\t};\n\n\t/** \\brief non feature */\n\tstruct FeatureNon{\n\t\tEigen::Vector3d pointOri;\n\t\tdouble pa;\n\t\tdouble pb;\n\t\tdouble pc;\n\t\tdouble pd;\n\t\tdouble error;\n\t\tbool valid;\n\t\tFeatureNon(const Eigen::Vector3d& po, const double& pa_, const double& pb_, const double& pc_, const double& pd_)\n\t\t\t\t\t\t:pointOri(po), pa(pa_), pb(pb_), pc(pc_), pd(pd_){\n\t\t\tvalid = false;\n\t\t\terror = 0;\n\t\t}\n\t\tdouble ComputeError(const Eigen::Matrix4d& pose){\n\t\t\tEigen::Vector3d P_to_Map = pose.topLeftCorner(3,3) * pointOri + pose.topRightCorner(3,1);\n\t\t\terror = pa * P_to_Map(0) + pb * P_to_Map(1) + pc * P_to_Map(2) + pd;\n\t\t}\n\t};\n\npublic:\n\t/** \\brief constructor of Estimator\n\t*/\n\tEstimator(const float& filter_corner, const float& filter_surf);\n\n\t~Estimator();\n\n\t\t/** \\brief Open a independent thread to increment MAP cloud\n\t\t*/\n\t[[noreturn]] void threadMapIncrement();\n\n\t/** \\brief construct sharp feature Ceres Costfunctions\n\t* \\param[in] edges: store costfunctions\n\t* \\param[in] m4d: lidar pose, represented by matrix 4X4\n\t*/\n\tvoid processPointToLine(std::vector<ceres::CostFunction *>& edges,\n\t\t\t\t\t\t\tstd::vector<FeatureLine>& vLineFeatures,\n\t\t\t\t\t\t\tconst pcl::PointCloud<PointType>::Ptr& laserCloudCorner,\n\t\t\t\t\t\t\tconst pcl::PointCloud<PointType>::Ptr& laserCloudCornerMap,\n\t\t\t\t\t\t\tconst pcl::KdTreeFLANN<PointType>::Ptr& kdtree,\n\t\t\t\t\t\t\tconst Eigen::Matrix4d& exTlb,\n\t\t\t\t\t\t\tconst Eigen::Matrix4d& m4d);\n\n\t/** \\brief construct Plan feature Ceres Costfunctions\n\t* \\param[in] edges: store costfunctions\n\t* \\param[in] m4d: lidar pose, represented by matrix 4X4\n\t*/\n\tvoid 
processPointToPlan(std::vector<ceres::CostFunction *>& edges,\n\t\t\t\t\t\t\tstd::vector<FeaturePlan>& vPlanFeatures,\n\t\t\t\t\t\t\tconst pcl::PointCloud<PointType>::Ptr& laserCloudSurf,\n\t\t\t\t\t\t\tconst pcl::PointCloud<PointType>::Ptr& laserCloudSurfMap,\n\t\t\t\t\t\t\tconst pcl::KdTreeFLANN<PointType>::Ptr& kdtree,\n\t\t\t\t\t\t\tconst Eigen::Matrix4d& exTlb,\n\t\t\t\t\t\t\tconst Eigen::Matrix4d& m4d);\n\n\tvoid processPointToPlanVec(std::vector<ceres::CostFunction *>& edges,\n\t\t\t\t\t\t\t   std::vector<FeaturePlanVec>& vPlanFeatures,\n\t\t\t\t\t\t\t   const pcl::PointCloud<PointType>::Ptr& laserCloudSurf,\n\t\t\t\t\t\t\t   const pcl::PointCloud<PointType>::Ptr& laserCloudSurfMap,\n\t\t\t\t\t\t\t   const pcl::KdTreeFLANN<PointType>::Ptr& kdtree,\n\t\t\t\t\t\t\t   const Eigen::Matrix4d& exTlb,\n\t\t\t\t\t\t\t   const Eigen::Matrix4d& m4d);\n\t\t\t\t\n\tvoid processNonFeatureICP(std::vector<ceres::CostFunction *>& edges,\n\t\t\t\t\t\t\t  std::vector<FeatureNon>& vNonFeatures,\n\t\t\t\t\t\t\t  const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeature,\n\t\t\t\t\t\t\t  const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureLocal,\n\t\t\t\t\t\t\t  const pcl::KdTreeFLANN<PointType>::Ptr& kdtreeLocal,\n\t\t\t\t\t\t\t  const Eigen::Matrix4d& exTlb,\n\t\t\t\t\t\t\t  const Eigen::Matrix4d& m4d);\n\n\t/** \\brief Transform Lidar Pose in slidewindow to double array\n\t\t* \\param[in] lidarFrameList: Lidar Poses in slidewindow\n\t\t*/\n\tvoid vector2double(const std::list<LidarFrame>& lidarFrameList);\n\n\t/** \\brief Transform double array to Lidar Pose in slidewindow\n\t\t* \\param[in] lidarFrameList: Lidar Poses in slidewindow\n\t\t*/\n\tvoid double2vector(std::list<LidarFrame>& lidarFrameList);\n\n\t/** \\brief estimate lidar pose by matching current lidar cloud with map cloud and tightly coupled IMU message\n\t\t* \\param[in] lidarFrameList: multi-frames of lidar cloud and lidar pose\n\t\t* \\param[in] exTlb: extrinsic matrix between lidar and IMU\n\t\t* 
\\param[in] gravity: gravity vector\n\t\t*/\n\tvoid EstimateLidarPose(std::list<LidarFrame>& lidarFrameList,\n\t\t\t\t\t\t   const Eigen::Matrix4d& exTlb,\n\t\t\t\t\t\t   const Eigen::Vector3d& gravity,\n\t\t\t\t\t\t   nav_msgs::Odometry& debugInfo);\n\n\tvoid Estimate(std::list<LidarFrame>& lidarFrameList,\n\t\t\t\t  const Eigen::Matrix4d& exTlb,\n\t\t\t\t  const Eigen::Vector3d& gravity);\n\n\tpcl::PointCloud<PointType>::Ptr get_corner_map(){\n\t\treturn map_manager->get_corner_map();\n\t}\n\tpcl::PointCloud<PointType>::Ptr get_surf_map(){\n\t\treturn map_manager->get_surf_map();\n\t}\n\tpcl::PointCloud<PointType>::Ptr get_nonfeature_map(){\n\t\treturn map_manager->get_nonfeature_map();\n\t}\n\tpcl::PointCloud<PointType>::Ptr get_init_ground_cloud(){\n\t\treturn initGroundCloud;\n\t}\n\n\tvoid MapIncrementLocal(const pcl::PointCloud<PointType>::Ptr& laserCloudCornerStack,\n\t\t\t\t\t\t   const pcl::PointCloud<PointType>::Ptr& laserCloudSurfStack,\n\t\t\t\t\t\t   const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureStack,\n\t\t\t\t\t\t   const Eigen::Matrix4d& transformTobeMapped);\n\nprivate:\n\t/** \\brief store map points */\n\tMAP_MANAGER* map_manager;\n\n\tint init_ground_count;\n  Eigen::VectorXf init_ground_plane_coeff;\n\tdouble para_PR[SLIDEWINDOWSIZE][6];\n\tdouble para_VBias[SLIDEWINDOWSIZE][9];\n\tMarginalizationInfo *last_marginalization_info = nullptr;\n\tstd::vector<double *> last_marginalization_parameter_blocks;\n\tstd::vector<pcl::PointCloud<PointType>::Ptr> laserCloudCornerLast;\n\tstd::vector<pcl::PointCloud<PointType>::Ptr> laserCloudSurfLast;\n\tstd::vector<pcl::PointCloud<PointType>::Ptr> laserCloudNonFeatureLast;\n\n\tpcl::PointCloud<PointType>::Ptr laserCloudCornerFromLocal;\n\tpcl::PointCloud<PointType>::Ptr laserCloudSurfFromLocal;\n\tpcl::PointCloud<PointType>::Ptr laserCloudNonFeatureFromLocal;\n\tpcl::PointCloud<PointType>::Ptr laserCloudCornerForMap;\n\tpcl::PointCloud<PointType>::Ptr 
laserCloudSurfForMap;\n\tpcl::PointCloud<PointType>::Ptr laserCloudNonFeatureForMap;\n\tpcl::PointCloud<PointType>::Ptr initGroundCloud;\n\tEigen::Matrix4d transformForMap;\n\tstd::vector<pcl::PointCloud<PointType>::Ptr> laserCloudCornerStack;\n\tstd::vector<pcl::PointCloud<PointType>::Ptr> laserCloudSurfStack;\n\tstd::vector<pcl::PointCloud<PointType>::Ptr> laserCloudNonFeatureStack;\n\tpcl::KdTreeFLANN<PointType>::Ptr kdtreeCornerFromLocal;\n\tpcl::KdTreeFLANN<PointType>::Ptr kdtreeSurfFromLocal;\n\tpcl::KdTreeFLANN<PointType>::Ptr kdtreeNonFeatureFromLocal;\n\tpcl::VoxelGrid<PointType> downSizeFilterCorner;\n\tpcl::VoxelGrid<PointType> downSizeFilterSurf;\n\tpcl::VoxelGrid<PointType> downSizeFilterNonFeature;\n\tstd::mutex mtx_Map;\n\tstd::thread threadMap;\n\n\tpcl::KdTreeFLANN<PointType> CornerKdMap[10000];\n\tpcl::KdTreeFLANN<PointType> SurfKdMap[10000];\n\tpcl::KdTreeFLANN<PointType> NonFeatureKdMap[10000];\n\n\tpcl::PointCloud<PointType> GlobalSurfMap[10000];\n\tpcl::PointCloud<PointType> GlobalCornerMap[10000];\n\tpcl::PointCloud<PointType> GlobalNonFeatureMap[10000];\n\n\tint laserCenWidth_last = 10;\n\tint laserCenHeight_last = 5;\n\tint laserCenDepth_last = 10;\n\n\tstatic const int localMapWindowSize = 50;\n\tint localMapID = 0;\n\tpcl::PointCloud<PointType>::Ptr localCornerMap[localMapWindowSize];\n\tpcl::PointCloud<PointType>::Ptr localSurfMap[localMapWindowSize];\n\tpcl::PointCloud<PointType>::Ptr localNonFeatureMap[localMapWindowSize];\n\n\tint map_update_ID = 0;\n\n\tint map_skip_frame = 2; //every map_skip_frame frame update map\n\tdouble plan_weight_tan = 0.0;\n\tdouble thres_dist = 1.0;\n};\n\n#endif //LIO_LIVOX_ESTIMATOR_H\n"
  },
  {
    "path": "LIO-Livox/include/IMUIntegrator/IMUIntegrator.h",
    "content": "#ifndef LIO_LIVOX_IMUINTEGRATOR_H\n#define LIO_LIVOX_IMUINTEGRATOR_H\n#include <sensor_msgs/Imu.h>\n#include <queue>\n#include <mutex>\n#include <Eigen/Core>\n#include <utility>\n#include <ros/ros.h>\n#include \"sophus/so3.hpp\"\n\nclass IMUIntegrator{\npublic:\n    IMUIntegrator();\n\n    /** \\brief constructor of IMUIntegrator\n     * \\param[in] vIMU: IMU messages need to be integrated\n     */\n    IMUIntegrator(std::vector<sensor_msgs::ImuConstPtr> vIMU);\n\n    void Reset();\n\n    /** \\brief get delta quaternion after IMU integration\n     */\n    const Eigen::Quaterniond & GetDeltaQ() const;\n\n    /** \\brief get delta displacement after IMU integration\n     */\n    const Eigen::Vector3d & GetDeltaP() const;\n\n    /** \\brief get delta velocity after IMU integration\n     */\n    const Eigen::Vector3d & GetDeltaV() const;\n\n    /** \\brief get time span after IMU integration\n      */\n    const double & GetDeltaTime() const;\n\n\t\t/** \\brief get linearized bias gyr\n\t\t\t*/\n\t\tconst Eigen::Vector3d & GetBiasGyr() const;\n\n\t\t/** \\brief get linearized bias acc\n\t   */\n\t\tconst Eigen::Vector3d& GetBiasAcc() const;\n\n    /** \\brief get covariance matrix after IMU integration\n     */\n    const Eigen::Matrix<double, 15, 15>& GetCovariance();\n\n    /** \\brief get jacobian matrix after IMU integration\n      */\n    const Eigen::Matrix<double, 15, 15> & GetJacobian() const;\n\n    /** \\brief get average acceleration of IMU messages for initialization\n     */\n    Eigen::Vector3d GetAverageAcc();\n\n    /** \\brief push IMU message to the IMU buffer vimuMsg\n     * \\param[in] imu: the IMU message need to be pushed\n     */\n    void PushIMUMsg(const sensor_msgs::ImuConstPtr& imu);\n    void PushIMUMsg(const std::vector<sensor_msgs::ImuConstPtr>& vimu);\n    const std::vector<sensor_msgs::ImuConstPtr> & GetIMUMsg() const;\n\n    /** \\brief only integrate gyro information of each IMU message stored in vimuMsg\n     * 
\\param[in] lastTime: the left time boundary of vimuMsg\n     */\n    void GyroIntegration(double lastTime);\n\n    /** \\brief pre-integration of IMU messages stored in vimuMsg\n     */\n    void PreIntegration(double lastTime, const Eigen::Vector3d& bg, const Eigen::Vector3d& ba);\n\n    /** \\brief normal integration of IMU messages stored in vimuMsg\n     */\n    void Integration(){}\n\npublic:\n    const double acc_n = 0.08;\n    const double gyr_n = 0.004;\n    const double acc_w = 2.0e-4;\n    const double gyr_w = 2.0e-5;\n\t\tconstexpr static const double lidar_m = 6e-3;\n    constexpr static const double gnorm = 9.805;\n\n    enum JacobianOrder\n    {\n        O_P = 0,\n        O_R = 3,\n        O_V = 6,\n        O_BG = 9,\n        O_BA = 12\n    };\nprivate:\n    std::vector<sensor_msgs::ImuConstPtr> vimuMsg;\n    Eigen::Quaterniond dq;\n    Eigen::Vector3d dp;\n    Eigen::Vector3d dv;\n\t\tEigen::Vector3d linearized_bg;\n\t\tEigen::Vector3d linearized_ba;\n    Eigen::Matrix<double, 15, 15> covariance;\n    Eigen::Matrix<double, 15, 15> jacobian;\n    Eigen::Matrix<double, 12, 12> noise;\n    double dtime;\n};\n\n#endif //LIO_LIVOX_IMUINTEGRATOR_H\n"
  },
  {
    "path": "LIO-Livox/include/LidarFeatureExtractor/LidarFeatureExtractor.h",
    "content": "#ifndef LIO_LIVOX_LIDARFEATUREEXTRACTOR_H\n#define LIO_LIVOX_LIDARFEATUREEXTRACTOR_H\n#include <ros/ros.h>\n#include <livox_ros_driver/CustomMsg.h>\n#include <sensor_msgs/PointCloud2.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <future>\n#include \"opencv2/core.hpp\"\n#include \"segment/segment.hpp\"\nclass LidarFeatureExtractor{\n    typedef pcl::PointXYZINormal PointType;\npublic:\n    /** \\brief constructor of LidarFeatureExtractor\n      * \\param[in] n_scans: lines used to extract lidar features\n      */\n    LidarFeatureExtractor(int n_scans,int NumCurvSize,float DistanceFaraway,int NumFlat,int PartNum,float FlatThreshold,\n                          float BreakCornerDis,float LidarNearestDis,float KdTreeCornerOutlierDis);\n\n    /** \\brief transform float to int\n      */\n    static uint32_t _float_as_int(float f){\n      union{uint32_t i; float f;} conv{};\n      conv.f = f;\n      return conv.i;\n    }\n\n    /** \\brief transform int to float\n      */\n    static float _int_as_float(uint32_t i){\n      union{float f; uint32_t i;} conv{};\n      conv.i = i;\n      return conv.f;\n    }\n\n    /** \\brief Determine whether the point_list is flat\n      * \\param[in] point_list: points need to be judged\n      * \\param[in] plane_threshold\n      */\n    bool plane_judge(const std::vector<PointType>& point_list,const int plane_threshold);\n\n    /** \\brief Detect lidar feature points\n      * \\param[in] cloud: original lidar cloud need to be detected\n      * \\param[in] pointsLessSharp: less sharp index of cloud\n      * \\param[in] pointsLessFlat: less flat index of cloud\n      */\n    void detectFeaturePoint(pcl::PointCloud<PointType>::Ptr& cloud,\n                            std::vector<int>& pointsLessSharp,\n                            std::vector<int>& pointsLessFlat);\n\n    void 
detectFeaturePoint2(pcl::PointCloud<PointType>::Ptr& cloud,\n                             pcl::PointCloud<PointType>::Ptr& pointsLessFlat,\n                             pcl::PointCloud<PointType>::Ptr& pointsNonFeature);\n\n    void detectFeaturePoint3(pcl::PointCloud<PointType>::Ptr& cloud,\n                             std::vector<int>& pointsLessSharp);\n                \n    void FeatureExtract_with_segment(const livox_ros_driver::CustomMsgConstPtr &msg,\n                                     pcl::PointCloud<PointType>::Ptr& laserCloud,\n                                     pcl::PointCloud<PointType>::Ptr& laserConerFeature,\n                                     pcl::PointCloud<PointType>::Ptr& laserSurfFeature,\n                                     pcl::PointCloud<PointType>::Ptr& laserNonFeature,\n                                     sensor_msgs::PointCloud2 &msg2,\n                                     int Used_Line = 1);\n    /** \\brief Detect lidar feature points of CustomMsg\n      * \\param[in] msg: original CustomMsg need to be detected\n      * \\param[in] laserCloud: transform CustomMsg to pcl point cloud format\n      * \\param[in] laserConerFeature: less Coner features extracted from laserCloud\n      * \\param[in] laserSurfFeature: less Surf features extracted from laserCloud\n      */\n    void FeatureExtract(const livox_ros_driver::CustomMsgConstPtr &msg,\n                        pcl::PointCloud<PointType>::Ptr& laserCloud,\n                        pcl::PointCloud<PointType>::Ptr& laserConerFeature,\n                        pcl::PointCloud<PointType>::Ptr& laserSurfFeature,\n                        int Used_Line = 1);\n\nprivate:\n    /** \\brief lines used to extract lidar features */\n    const int N_SCANS;\n\n    /** \\brief store original points of each line */\n    std::vector<pcl::PointCloud<PointType>::Ptr> vlines;\n\n    /** \\brief store corner feature index of each line */\n    std::vector<std::vector<int>> vcorner;\n\n    /** \\brief 
store surf feature index of each line */\n    std::vector<std::vector<int>> vsurf;\n\n    int thNumCurvSize;\n\n    float thDistanceFaraway;\n\n    int thNumFlat;\n    \n    int thPartNum;\n\n    float thFlatThreshold;\n\n    float thBreakCornerDis;\n\n    float thLidarNearestDis;  \n};\n\n#endif //LIO_LIVOX_LIDARFEATUREEXTRACTOR_H\n"
  },
  {
    "path": "LIO-Livox/include/MapManager/Map_Manager.h",
    "content": "#ifndef LIO_LIVOX_MAP_MANAGER_H\n#define LIO_LIVOX_MAP_MANAGER_H\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <pcl/filters/voxel_grid.h>\n#include <future>\nclass MAP_MANAGER{\n    typedef pcl::PointXYZINormal PointType;\npublic:\n\n    std::mutex mtx_MapManager;\n    /** \\brief constructor of MAP_MANAGER */\n    MAP_MANAGER(const float& filter_corner, const float& filter_surf);\n\n    static size_t ToIndex(int i, int j, int k);\n\n    /** \\brief transform float to int\n  */\n    static uint32_t _float_as_int(float f){\n      union{uint32_t i; float f;} conv{};\n      conv.f = f;\n      return conv.i;\n    }\n\n    /** \\brief transform int to float\n      */\n    static float _int_as_float(uint32_t i){\n      union{float f; uint32_t i;} conv{};\n      conv.i = i;\n      return conv.f;\n    }\n\n    /** \\brief transform point pi to the MAP coordinate\n     * \\param[in] pi: point to be transformed\n     * \\param[in] po: point after transfomation\n     * \\param[in] _transformTobeMapped: transform matrix between pi and po\n     */\n    static void pointAssociateToMap(PointType const * const pi,\n                                    PointType * const po,\n                                    const Eigen::Matrix4d& _transformTobeMapped);\n\n    void featureAssociateToMap(const pcl::PointCloud<PointType>::Ptr& laserCloudCorner,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudSurf,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeature,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudCornerToMap,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudSurfToMap,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureToMap,\n                               const Eigen::Matrix4d& transformTobeMapped);\n    /** \\brief add new lidar 
points to the map\n     * \\param[in] laserCloudCornerStack: coner feature points that need to be added to map\n     * \\param[in] laserCloudSurfStack: surf feature points that need to be added to map\n     * \\param[in] transformTobeMapped: transform matrix of the lidar pose\n     */\n    void MapIncrement(const pcl::PointCloud<PointType>::Ptr& laserCloudCornerStack,\n                      const pcl::PointCloud<PointType>::Ptr& laserCloudSurfStack,\n                      const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureStack,\n                      const Eigen::Matrix4d& transformTobeMapped);\n\n    /** \\brief retrieve map points according to the lidar pose\n     * \\param[in] laserCloudCornerFromMap: store coner feature points retrieved from map\n     * \\param[in] laserCloudSurfFromMap: tore surf feature points retrieved from map\n     * \\param[in] transformTobeMapped: transform matrix of the lidar pose\n     */\n    void MapMove(const Eigen::Matrix4d& transformTobeMapped);\n\n\n    size_t FindUsedCornerMap(const PointType *p,int a,int b,int c);\n\n    size_t FindUsedSurfMap(const PointType *p,int a,int b,int c);\n\n    size_t FindUsedNonFeatureMap(const PointType *p,int a,int b,int c);\n\n    pcl::KdTreeFLANN<PointType> getCornerKdMap(int i){\n      return CornerKdMap_last[i];\n    }\n    pcl::KdTreeFLANN<PointType> getSurfKdMap(int i){\n      return SurfKdMap_last[i];\n    }\n    pcl::KdTreeFLANN<PointType> getNonFeatureKdMap(int i){\n      return NonFeatureKdMap_last[i];\n    }\n\t\tpcl::PointCloud<PointType>::Ptr get_corner_map(){\n\t\t\treturn laserCloudCornerFromMap;\n\t\t}\n\t\tpcl::PointCloud<PointType>::Ptr get_surf_map(){\n\t\t\treturn laserCloudSurfFromMap;\n\t\t}\n    pcl::PointCloud<PointType>::Ptr get_nonfeature_map(){\n\t\t\treturn laserCloudNonFeatureFromMap;\n\t\t}\n    int get_map_current_pos(){\n      return currentUpdatePos;\n    }\n    int get_laserCloudCenWidth_last(){\n      return laserCloudCenWidth_last;\n    }\n    int 
get_laserCloudCenHeight_last(){\n      return laserCloudCenHeight_last;\n    }\n    int get_laserCloudCenDepth_last(){\n      return laserCloudCenDepth_last;\n    }\n    pcl::PointCloud<PointType> laserCloudSurf_for_match[4851];\n    pcl::PointCloud<PointType> laserCloudCorner_for_match[4851];\n    pcl::PointCloud<PointType> laserCloudNonFeature_for_match[4851];\n\nprivate:\n    int laserCloudCenWidth = 10;\n    int laserCloudCenHeight = 5;\n    int laserCloudCenDepth = 10;\n\n    int laserCloudCenWidth_last = 10;\n    int laserCloudCenHeight_last = 5;\n    int laserCloudCenDepth_last = 10;\n\n    static const int laserCloudWidth = 21;\n    static const int laserCloudHeight = 11;\n    static const int laserCloudDepth = 21;\n    static const int laserCloudNum = laserCloudWidth * laserCloudHeight * laserCloudDepth;//4851\n    pcl::PointCloud<PointType>::Ptr laserCloudCornerArray[laserCloudNum];\n    pcl::PointCloud<PointType>::Ptr laserCloudSurfArray[laserCloudNum];\n    pcl::PointCloud<PointType>::Ptr laserCloudNonFeatureArray[laserCloudNum];\n    pcl::PointCloud<PointType>::Ptr laserCloudCornerArrayStack[laserCloudNum];\n    pcl::PointCloud<PointType>::Ptr laserCloudSurfArrayStack[laserCloudNum];\n    pcl::PointCloud<PointType>::Ptr laserCloudNonFeatureArrayStack[laserCloudNum];\n\n    pcl::VoxelGrid<PointType> downSizeFilterCorner;\n    pcl::VoxelGrid<PointType> downSizeFilterSurf;\n    pcl::VoxelGrid<PointType> downSizeFilterNonFeature;\n\n    pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMap;\n    pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMap;\n    pcl::PointCloud<PointType>::Ptr laserCloudNonFeatureFromMap;\n\n    pcl::KdTreeFLANN<PointType>::Ptr laserCloudCornerKdMap[laserCloudNum];\n    pcl::KdTreeFLANN<PointType>::Ptr laserCloudSurfKdMap[laserCloudNum];\n    pcl::KdTreeFLANN<PointType>::Ptr laserCloudNonFeatureKdMap[laserCloudNum];\n\n    pcl::KdTreeFLANN<PointType> CornerKdMap_copy[laserCloudNum];\n    pcl::KdTreeFLANN<PointType> 
SurfKdMap_copy[laserCloudNum];\n    pcl::KdTreeFLANN<PointType> NonFeatureKdMap_copy[laserCloudNum];\n\n    pcl::KdTreeFLANN<PointType> CornerKdMap_last[laserCloudNum];\n    pcl::KdTreeFLANN<PointType> SurfKdMap_last[laserCloudNum];\n    pcl::KdTreeFLANN<PointType> NonFeatureKdMap_last[laserCloudNum];\n\n    static const int localMapWindowSize = 60;\n    pcl::PointCloud<PointType>::Ptr localCornerMap[localMapWindowSize];\n    pcl::PointCloud<PointType>::Ptr localSurfMap[localMapWindowSize];\n    pcl::PointCloud<PointType>::Ptr localNonFeatureMap[localMapWindowSize];\n\n    int localMapID = 0;\n\n    int currentUpdatePos = 0;\n    int estimatorPos = 0;\n};\n\n#endif //LIO_LIVOX_MAP_MANAGER_H\n"
  },
  {
    "path": "LIO-Livox/include/segment/pointsCorrect.hpp",
    "content": "#ifndef _COMMON_HPP\n#define _CONNON_HPP\n\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <Eigen/Core>\n#include <pcl/common/transforms.h>\n#include <pcl/features/normal_3d.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/filters/passthrough.h>\n#include <pcl/filters/statistical_outlier_removal.h>\n#include <pcl/common/common.h>\n#include <Eigen/Dense>\n\n#include <vector>\n\nusing namespace std;\n\ntypedef struct\n{\n    Eigen::Matrix3f eigenVectorsPCA;\n    Eigen::Vector3f eigenValuesPCA;\n    std::vector<int> neibors;\n} SNeiborPCA_cor;\n\nint GetNeiborPCA_cor(SNeiborPCA_cor &npca, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> kdtree, pcl::PointXYZ searchPoint, float fSearchRadius);\nint FilterGndForPos_cor(float* outPoints,float*inPoints,int inNum);\nint CalGndPos_cor(float *gnd, float *fPoints,int pointNum,float fSearchRadius);\nint GetRTMatrix_cor(float *RTM, float *v0, float *v1);\nint CorrectPoints_cor(float *fPoints,int pointNum,float *gndPos);\nint GetGndPos(float *pos, float *fPoints,int pointNum);\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/segment/segment.hpp",
    "content": "#ifndef _SEGMENT_HPP\n#define _SEGMENT_HPP\n\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <Eigen/Core>\n#include <pcl/common/transforms.h>\n#include <pcl/features/normal_3d.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/filters/passthrough.h>\n#include <pcl/filters/statistical_outlier_removal.h>\n#include <pcl/common/common.h>\n#include <Eigen/Dense>\n\n#include <vector>\n\n#include \"pointsCorrect.hpp\"\n\nusing namespace std;\n\n#define SELF_CALI_FRAMES 20\n\n#define GND_IMG_NX 150\n#define GND_IMG_NY 400\n#define GND_IMG_DX 0.2\n#define GND_IMG_DY 0.2\n#define GND_IMG_OFFX 40\n#define GND_IMG_OFFY 40\n\n#define GND_IMG_NX1 24\n#define GND_IMG_NY1 20\n#define GND_IMG_DX1 4\n#define GND_IMG_DY1 4\n#define GND_IMG_OFFX1 40\n#define GND_IMG_OFFY1 40\n\n#define DN_SAMPLE_IMG_NX 600 //(GND_IMG_NX)\n#define DN_SAMPLE_IMG_NY 200 //(GND_IMG_NY)\n#define DN_SAMPLE_IMG_NZ 100\n#define DN_SAMPLE_IMG_DX 0.4 //(GND_IMG_DX)\n#define DN_SAMPLE_IMG_DY 0.4 //(GND_IMG_DY)\n#define DN_SAMPLE_IMG_DZ 0.2\n#define DN_SAMPLE_IMG_OFFX 40 //(GND_IMG_OFFX)\n#define DN_SAMPLE_IMG_OFFY 40 //(GND_IMG_OFFY)\n#define DN_SAMPLE_IMG_OFFZ 2.5//2.5\n\n#define FREE_ANG_NUM 360\n#define FREE_PI (3.14159265)\n#define FREE_DELTA_ANG (FREE_PI*2/FREE_ANG_NUM)\n\ntypedef struct\n{\n    Eigen::Matrix3f eigenVectorsPCA;\n    Eigen::Vector3f eigenValuesPCA;\n    std::vector<int> neibors;\n} SNeiborPCA;\n\ntypedef struct\n{\n    // basic\n    int pnum;\n    float xmin;\n    float xmax;\n    float ymin;\n    float ymax;\n    float zmin;\n    float zmax;\n    float zmean;\n\n    // pca\n    float d0[3];\n    float d1[3];\n\n    float center[3];\n\n    float obb[8];\n\n    int cls;//类别\n\n} SClusterFeature;\n\nint FilterGndForPos(float* outPoints,float*inPoints,int inNum);\nint CalNomarls(float *nvects, float *fPoints,int pointNum,float fSearchRadius);\n\nint CalGndPos(float *gnd, float *fPoints,int pointNum,float 
fSearchRadius);\n\nint GetNeiborPCA(SNeiborPCA &npca, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud,\n                    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree, pcl::PointXYZ searchPoint, float fSearchRadius);\n\n\nint CorrectPoints(float *fPoints,int pointNum,float *gndPos);\n\n// 地面上物体分割\nint AbvGndSeg(int *pLabel, float *fPoints, int pointNum);\nint SegBG(int *pLabel, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius);\n\nSClusterFeature FindACluster(int *pLabel, int seedId, int labelId, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius, float thrHeight);\nint SegObjects(int *pLabel, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius);\n\nint CalFreeRegion(float *pFreeDis, float *fPoints,int *pLabel,int pointNum);\nint FreeSeg(float *fPoints,int *pLabel,int pointNum);\n\nint CompleteObjects(int *pLabel, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius);\nint ExpandObjects(int *pLabel, float* fPoints, int pointNum, float fSearchRadius);\nint ExpandBG(int *pLabel, float* fPoints, int pointNum, float fSearchRadius);\n\n// 地面分割\nint GndSeg(int* pLabel,float *fPoints,int pointNum,float fSearchRadius);\n\n\nclass PCSeg\n{\n    public:\n    PCSeg();\n    // functions\n    int DoSeg(int *pLabel, float* fPoints1, int pointNum);\n    int GetMainVectors(float*fPoints, int* pLabel, int pointNum);\n    int EncodeFeatures(float *pFeas);\n    int DoBoundaryDetection(float* fPoints1,int *pLabel1,int pointNum);\n    int DoTrafficLineDet(float *fPoints1,int *pLabel1,int pointNum);\n\n    int CorrectPoints(float *fPoints,int pointNum,float *gndPos);\n\n    float *PrePoints;\n    int numPrePoints = 0;\n\n    float gnd_pos[100*6];\n    int gnd_vet_len = 0;\n\n    int laneType=0;\n    float lanePosition[2] = {0};\n\n    // vars\n    unsigned char *pVImg;\n    float 
gndPos[6];\n    int posFlag;\n\n    // cluster features\n    float pVectors[256*3];\n    float pCenters[256*3];//重心\n    int pnum[256];\n\n    int objClass[256];\n    float zs[256];\n    float pOBBs[256*8];\n\n    float CBBox[256*7];//（a,x,y,z,l,w,h）\n\n    int clusterNum;\n    \n    float *corPoints;\n    int corNum;\n\n    ~PCSeg();\n\n};\n\nSClusterFeature CalBBox(float *fPoints,int pointNum);\nSClusterFeature CalOBB(float *fPoints,int pointNum);\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/average.hpp",
    "content": "/// @file\n/// Calculation of biinvariant means.\n\n#ifndef SOPHUS_AVERAGE_HPP\n#define SOPHUS_AVERAGE_HPP\n\n#include \"common.hpp\"\n#include \"rxso2.hpp\"\n#include \"rxso3.hpp\"\n#include \"se2.hpp\"\n#include \"se3.hpp\"\n#include \"sim2.hpp\"\n#include \"sim3.hpp\"\n#include \"so2.hpp\"\n#include \"so3.hpp\"\n\nnamespace Sophus {\n\n/// Calculates mean iteratively.\n///\n/// Returns ``nullopt`` if it does not converge.\n///\ntemplate <class SequenceContainer>\noptional<typename SequenceContainer::value_type> iterativeMean(\n    SequenceContainer const& foo_Ts_bar, int max_num_iterations) {\n  size_t N = foo_Ts_bar.size();\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n\n  using Group = typename SequenceContainer::value_type;\n  using Scalar = typename Group::Scalar;\n  using Tangent = typename Group::Tangent;\n\n  // This implements the algorithm in the beginning of Sec. 4.2 in\n  // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf.\n  Group foo_T_average = foo_Ts_bar.front();\n  Scalar w = Scalar(1. 
/ N);\n  for (int i = 0; i < max_num_iterations; ++i) {\n    Tangent average;\n    setToZero<Tangent>(average);\n    for (Group const& foo_T_bar : foo_Ts_bar) {\n      average += w * (foo_T_average.inverse() * foo_T_bar).log();\n    }\n    Group foo_T_newaverage = foo_T_average * Group::exp(average);\n    if (squaredNorm<Tangent>(\n            (foo_T_newaverage.inverse() * foo_T_average).log()) <\n        Constants<Scalar>::epsilon()) {\n      return foo_T_newaverage;\n    }\n\n    foo_T_average = foo_T_newaverage;\n  }\n  // LCOV_EXCL_START\n  return nullopt;\n  // LCOV_EXCL_STOP\n}\n\n#ifdef DOXYGEN_SHOULD_SKIP_THIS\n/// Mean implementation for any Lie group.\ntemplate <class SequenceContainer, class Scalar>\noptional<typename SequenceContainer::value_type> average(\n    SequenceContainer const& foo_Ts_bar);\n#else\n\n// Mean implementation for SO(2).\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SO2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  // This implements rotational part of Proposition 12 from Sec. 6.2 of\n  // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf.\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  SO2<Scalar> foo_T_average = foo_Ts_bar.front();\n  Scalar w = Scalar(1. 
/ N);\n\n  Scalar average(0);\n  for (SO2<Scalar> const& foo_T_bar : foo_Ts_bar) {\n    average += w * (foo_T_average.inverse() * foo_T_bar).log();\n  }\n  return foo_T_average * SO2<Scalar>::exp(average);\n}\n\n// Mean implementation for RxSO(2).\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, RxSO2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  RxSO2<Scalar> foo_T_average = foo_Ts_bar.front();\n  Scalar w = Scalar(1. / N);\n\n  Vector2<Scalar> average(Scalar(0), Scalar(0));\n  for (RxSO2<Scalar> const& foo_T_bar : foo_Ts_bar) {\n    average += w * (foo_T_average.inverse() * foo_T_bar).log();\n  }\n  return foo_T_average * RxSO2<Scalar>::exp(average);\n}\n\nnamespace details {\ntemplate <class T>\nvoid getQuaternion(T const&);\n\ntemplate <class Scalar>\nEigen::Quaternion<Scalar> getUnitQuaternion(SO3<Scalar> const& R) {\n  return R.unit_quaternion();\n}\n\ntemplate <class Scalar>\nEigen::Quaternion<Scalar> getUnitQuaternion(RxSO3<Scalar> const& sR) {\n  return sR.so3().unit_quaternion();\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nEigen::Quaternion<Scalar> averageUnitQuaternion(\n    SequenceContainer const& foo_Ts_bar) {\n  // This:  http://stackoverflow.com/a/27410865/1221742\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  Eigen::Matrix<Scalar, 4, Eigen::Dynamic> Q(4, N);\n  int i = 0;\n  Scalar w = Scalar(1. 
/ N);\n  for (auto const& foo_T_bar : foo_Ts_bar) {\n    Q.col(i) = w * details::getUnitQuaternion(foo_T_bar).coeffs();\n    ++i;\n  }\n\n  Eigen::Matrix<Scalar, 4, 4> QQt = Q * Q.transpose();\n  // TODO: Figure out why we can't use SelfAdjointEigenSolver here.\n  Eigen::EigenSolver<Eigen::Matrix<Scalar, 4, 4> > es(QQt);\n\n  std::complex<Scalar> max_eigenvalue = es.eigenvalues()[0];\n  Eigen::Matrix<std::complex<Scalar>, 4, 1> max_eigenvector =\n      es.eigenvectors().col(0);\n\n  for (int i = 1; i < 4; i++) {\n    if (std::norm(es.eigenvalues()[i]) > std::norm(max_eigenvalue)) {\n      max_eigenvalue = es.eigenvalues()[i];\n      max_eigenvector = es.eigenvectors().col(i);\n    }\n  }\n  Eigen::Quaternion<Scalar> quat;\n  quat.coeffs() <<                //\n      max_eigenvector[0].real(),  //\n      max_eigenvector[1].real(),  //\n      max_eigenvector[2].real(),  //\n      max_eigenvector[3].real();\n  return quat;\n}\n}  // namespace details\n\n// Mean implementation for SO(3).\n//\n// TODO: Detect degenerated cases and return nullopt.\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SO3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  return SO3<Scalar>(details::averageUnitQuaternion(foo_Ts_bar));\n}\n\n// Mean implementation for R x SO(3).\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, RxSO3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  Scalar scale_sum = Scalar(0);\n  using std::exp;\n  using std::log;\n  for 
(RxSO3<Scalar> const& foo_T_bar : foo_Ts_bar) {\n    scale_sum += log(foo_T_bar.scale());\n  }\n  return RxSO3<Scalar>(exp(scale_sum / Scalar(N)),\n                       SO3<Scalar>(details::averageUnitQuaternion(foo_Ts_bar)));\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SE2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  // TODO: Implement Proposition 12 from Sec. 6.2 of\n  // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf.\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, Sim2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SE3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, Sim3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\n#endif  // 
DOXYGEN_SHOULD_SKIP_THIS\n\n}  // namespace Sophus\n\n#endif  // SOPHUS_AVERAGE_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/common.hpp",
    "content": "/// @file\n/// Common functionality.\n\n#ifndef SOPHUS_COMMON_HPP\n#define SOPHUS_COMMON_HPP\n\n#include <cmath>\n#include <cstdio>\n#include <cstdlib>\n#include <random>\n#include <type_traits>\n\n#include <Eigen/Core>\n\n#if !defined(SOPHUS_DISABLE_ENSURES)\n#include \"formatstring.hpp\"\n#endif //!defined(SOPHUS_DISABLE_ENSURES)\n\n// following boost's assert.hpp\n#undef SOPHUS_ENSURE\n\n// ENSURES are similar to ASSERTS, but they are always checked for (including in\n// release builds). At the moment there are no ASSERTS in Sophus which should\n// only be used for checks which are performance critical.\n\n#ifdef __GNUC__\n#define SOPHUS_FUNCTION __PRETTY_FUNCTION__\n#elif (_MSC_VER >= 1310)\n#define SOPHUS_FUNCTION __FUNCTION__\n#else\n#define SOPHUS_FUNCTION \"unknown\"\n#endif\n\n// Make sure this compiles with older versions of Eigen which do not have\n// EIGEN_DEVICE_FUNC defined.\n#ifndef EIGEN_DEVICE_FUNC\n#define EIGEN_DEVICE_FUNC\n#endif\n\n// NVCC on windows has issues with defaulting the Map specialization constructors, so\n// special case that specific platform case.\n#if defined(_MSC_VER) && defined(__CUDACC__)\n#define SOPHUS_WINDOW_NVCC_FALLBACK\n#endif\n\n// Make sure this compiles with older versions of Eigen which do not have\n// EIGEN_DEFAULT_COPY_CONSTRUCTOR defined\n#ifndef EIGEN_DEFAULT_COPY_CONSTRUCTOR\n#if EIGEN_HAS_CXX11 && !defined(SOPHUS_WINDOW_NVCC_FALLBACK)\n#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) EIGEN_DEVICE_FUNC CLASS(const CLASS&) = default;\n#else\n#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS)\n#endif\n#ifndef EIGEN_INHERIT_ASSIGNMENT_OPERATORS\n#error \"eigen must have EIGEN_INHERIT_ASSIGNMENT_OPERATORS\"\n#endif\n#define SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Derived) \\\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \\\n    EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived)\n#endif\n\n#ifndef SOPHUS_INHERIT_ASSIGNMENT_OPERATORS\n#ifndef EIGEN_INHERIT_ASSIGNMENT_OPERATORS\n#error \"eigen must have 
EIGEN_INHERIT_ASSIGNMENT_OPERATORS\"\n#endif\n#define SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived)\n#endif\n\n#define SOPHUS_FUNC EIGEN_DEVICE_FUNC\n\n#if defined(SOPHUS_DISABLE_ENSURES)\n\n#define SOPHUS_ENSURE(expr, ...) ((void)0)\n\n#elif defined(SOPHUS_ENABLE_ENSURE_HANDLER)\n\nnamespace Sophus {\nvoid ensureFailed(char const* function, char const* file, int line,\n                  char const* description);\n}\n\n#define SOPHUS_ENSURE(expr, ...)                     \\\n  ((expr) ? ((void)0)                                \\\n          : ::Sophus::ensureFailed(                  \\\n                SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                Sophus::details::FormatString(__VA_ARGS__).c_str()))\n#else\n// LCOV_EXCL_START\n\nnamespace Sophus {\ntemplate <class... Args>\nSOPHUS_FUNC void defaultEnsure(char const* function, char const* file, int line,\n                               char const* description, Args&&... args) {\n  std::printf(\"Sophus ensure failed in function '%s', file '%s', line %d.\\n\",\n              function, file, line);\n#ifdef __CUDACC__\n  std::printf(\"%s\", description);\n#else\n  std::cout << details::FormatString(description, std::forward<Args>(args)...)\n            << std::endl;\n  std::abort();\n#endif\n}\n}  // namespace Sophus\n\n// LCOV_EXCL_STOP\n#define SOPHUS_ENSURE(expr, ...)                                       \\\n  ((expr) ? 
((void)0)                                                  \\\n          : Sophus::defaultEnsure(SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  ##__VA_ARGS__))\n#endif\n\nnamespace Sophus {\n\ntemplate <class Scalar>\nstruct Constants {\n  SOPHUS_FUNC static Scalar epsilon() { return Scalar(1e-10); }\n\n  SOPHUS_FUNC static Scalar epsilonSqrt() {\n    using std::sqrt;\n    return sqrt(epsilon());\n  }\n\n  SOPHUS_FUNC static Scalar pi() {\n    return Scalar(3.141592653589793238462643383279502884);\n  }\n};\n\ntemplate <>\nstruct Constants<float> {\n  SOPHUS_FUNC static float constexpr epsilon() {\n    return static_cast<float>(1e-5);\n  }\n\n  SOPHUS_FUNC static float epsilonSqrt() { return std::sqrt(epsilon()); }\n\n  SOPHUS_FUNC static float constexpr pi() {\n    return 3.141592653589793238462643383279502884f;\n  }\n};\n\n/// Nullopt type of lightweight optional class.\nstruct nullopt_t {\n  explicit constexpr nullopt_t() {}\n};\n\nconstexpr nullopt_t nullopt{};\n\n/// Lightweight optional implementation which requires ``T`` to have a\n/// default constructor.\n///\n/// TODO: Replace with std::optional once Sophus moves to c++17.\n///\ntemplate <class T>\nclass optional {\n public:\n  optional() : is_valid_(false) {}\n\n  optional(nullopt_t) : is_valid_(false) {}\n\n  optional(T const& type) : type_(type), is_valid_(true) {}\n\n  explicit operator bool() const { return is_valid_; }\n\n  T const* operator->() const {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return &type_;\n  }\n\n  T* operator->() {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return &type_;\n  }\n\n  T const& operator*() const {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return type_;\n  }\n\n  T& operator*() {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return type_;\n  }\n\n private:\n  T type_;\n  bool is_valid_;\n};\n\ntemplate <bool B, class T = void>\nusing enable_if_t = typename std::enable_if<B, 
T>::type;\n\ntemplate <class G>\nstruct IsUniformRandomBitGenerator {\n  static const bool value = std::is_unsigned<typename G::result_type>::value &&\n                            std::is_unsigned<decltype(G::min())>::value &&\n                            std::is_unsigned<decltype(G::max())>::value;\n};\n}  // namespace Sophus\n\n#endif  // SOPHUS_COMMON_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/example_ensure_handler.cpp",
    "content": "#include \"common.hpp\"\n\n#include <cstdio>\n#include <cstdlib>\n\nnamespace Sophus {\nvoid ensureFailed(char const* function, char const* file, int line,\n                  char const* description) {\n  std::printf(\"Sophus ensure failed in function '%s', file '%s', line %d.\\n\",\n              file, function, line);\n  std::printf(\"Description: %s\\n\", description);\n  std::abort();\n}\n}  // namespace Sophus\n"
  },
  {
    "path": "LIO-Livox/include/sophus/formatstring.hpp",
    "content": "/// @file\n/// FormatString functionality\n\n#ifndef SOPHUS_FORMATSTRING_HPP\n#define SOPHUS_FORMATSTRING_HPP\n\n#include <iostream>\n\nnamespace Sophus {\nnamespace details {\n\n// Following: http://stackoverflow.com/a/22759544\ntemplate <class T>\nclass IsStreamable {\n private:\n  template <class TT>\n  static auto test(int)\n      -> decltype(std::declval<std::stringstream&>() << std::declval<TT>(),\n                  std::true_type());\n\n  template <class>\n  static auto test(...) -> std::false_type;\n\n public:\n  static bool const value = decltype(test<T>(0))::value;\n};\n\ntemplate <class T>\nclass ArgToStream {\n public:\n  static void impl(std::stringstream& stream, T&& arg) {\n    stream << std::forward<T>(arg);\n  }\n};\n\ninline void FormatStream(std::stringstream& stream, char const* text) {\n  stream << text;\n  return;\n}\n\n// Following: http://en.cppreference.com/w/cpp/language/parameter_pack\ntemplate <class T, typename... Args>\nvoid FormatStream(std::stringstream& stream, char const* text, T&& arg,\n                  Args&&... args) {\n  static_assert(IsStreamable<T>::value,\n                \"One of the args has no ostream overload!\");\n  for (; *text != '\\0'; ++text) {\n    if (*text == '%') {\n      ArgToStream<T&&>::impl(stream, std::forward<T>(arg));\n      FormatStream(stream, text + 1, std::forward<Args>(args)...);\n      return;\n    }\n    stream << *text;\n  }\n  stream << \"\\nFormat-Warning: There are \" << sizeof...(Args) + 1\n         << \" args unused.\";\n  return;\n}\n\ntemplate <class... Args>\nstd::string FormatString(char const* text, Args&&... args) {\n  std::stringstream stream;\n  FormatStream(stream, text, std::forward<Args>(args)...);\n  return stream.str();\n}\n\ninline std::string FormatString() { return std::string(); }\n\n}  // namespace details\n}  // namespace Sophus\n\n#endif //SOPHUS_FORMATSTRING_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/geometry.hpp",
    "content": "/// @file\n/// Transformations between poses and hyperplanes.\n\n#ifndef GEOMETRY_HPP\n#define GEOMETRY_HPP\n\n#include \"se2.hpp\"\n#include \"se3.hpp\"\n#include \"so2.hpp\"\n#include \"so3.hpp\"\n#include \"types.hpp\"\n\nnamespace Sophus {\n\n/// Takes in a rotation ``R_foo_plane`` and returns the corresponding line\n/// normal along the y-axis (in reference frame ``foo``).\n///\ntemplate <class T>\nVector2<T> normalFromSO2(SO2<T> const& R_foo_line) {\n  return R_foo_line.matrix().col(1);\n}\n\n/// Takes in line normal in reference frame foo and constructs a corresponding\n/// rotation matrix ``R_foo_line``.\n///\n/// Precondition: ``normal_foo`` must not be close to zero.\n///\ntemplate <class T>\nSO2<T> SO2FromNormal(Vector2<T> normal_foo) {\n  SOPHUS_ENSURE(normal_foo.squaredNorm() > Constants<T>::epsilon(), \"%\",\n                normal_foo.transpose());\n  normal_foo.normalize();\n  return SO2<T>(normal_foo.y(), -normal_foo.x());\n}\n\n/// Takes in a rotation ``R_foo_plane`` and returns the corresponding plane\n/// normal along the z-axis\n/// (in reference frame ``foo``).\n///\ntemplate <class T>\nVector3<T> normalFromSO3(SO3<T> const& R_foo_plane) {\n  return R_foo_plane.matrix().col(2);\n}\n\n/// Takes in plane normal in reference frame foo and constructs a corresponding\n/// rotation matrix ``R_foo_plane``.\n///\n/// Note: The ``plane`` frame is defined as such that the normal points along\n///       the positive z-axis. One can specify hints for the x-axis and y-axis\n///       of the ``plane`` frame.\n///\n/// Preconditions:\n/// - ``normal_foo``, ``xDirHint_foo``, ``yDirHint_foo`` must not be close to\n///   zero.\n/// - ``xDirHint_foo`` and ``yDirHint_foo`` must be approx. 
perpendicular.\n///\ntemplate <class T>\nMatrix3<T> rotationFromNormal(Vector3<T> const& normal_foo,\n                              Vector3<T> xDirHint_foo = Vector3<T>(T(1), T(0),\n                                                                   T(0)),\n                              Vector3<T> yDirHint_foo = Vector3<T>(T(0), T(1),\n                                                                   T(0))) {\n  SOPHUS_ENSURE(xDirHint_foo.dot(yDirHint_foo) < Constants<T>::epsilon(),\n                \"xDirHint (%) and yDirHint (%) must be perpendicular.\",\n                xDirHint_foo.transpose(), yDirHint_foo.transpose());\n  using std::abs;\n  using std::sqrt;\n  T const xDirHint_foo_sqr_length = xDirHint_foo.squaredNorm();\n  T const yDirHint_foo_sqr_length = yDirHint_foo.squaredNorm();\n  T const normal_foo_sqr_length = normal_foo.squaredNorm();\n  SOPHUS_ENSURE(xDirHint_foo_sqr_length > Constants<T>::epsilon(), \"%\",\n                xDirHint_foo.transpose());\n  SOPHUS_ENSURE(yDirHint_foo_sqr_length > Constants<T>::epsilon(), \"%\",\n                yDirHint_foo.transpose());\n  SOPHUS_ENSURE(normal_foo_sqr_length > Constants<T>::epsilon(), \"%\",\n                normal_foo.transpose());\n\n  Matrix3<T> basis_foo;\n  basis_foo.col(2) = normal_foo;\n\n  if (abs(xDirHint_foo_sqr_length - T(1)) > Constants<T>::epsilon()) {\n    xDirHint_foo.normalize();\n  }\n  if (abs(yDirHint_foo_sqr_length - T(1)) > Constants<T>::epsilon()) {\n    yDirHint_foo.normalize();\n  }\n  if (abs(normal_foo_sqr_length - T(1)) > Constants<T>::epsilon()) {\n    basis_foo.col(2).normalize();\n  }\n\n  T abs_x_dot_z = abs(basis_foo.col(2).dot(xDirHint_foo));\n  T abs_y_dot_z = abs(basis_foo.col(2).dot(yDirHint_foo));\n  if (abs_x_dot_z < abs_y_dot_z) {\n    // basis_foo.z and xDirHint are far from parallel.\n    basis_foo.col(1) = basis_foo.col(2).cross(xDirHint_foo).normalized();\n    basis_foo.col(0) = basis_foo.col(1).cross(basis_foo.col(2));\n  } else {\n    // basis_foo.z and 
yDirHint are far from parallel.\n    basis_foo.col(0) = yDirHint_foo.cross(basis_foo.col(2)).normalized();\n    basis_foo.col(1) = basis_foo.col(2).cross(basis_foo.col(0));\n  }\n  T det = basis_foo.determinant();\n  // sanity check\n  SOPHUS_ENSURE(abs(det - T(1)) < Constants<T>::epsilon(),\n                \"Determinant of basis is not 1, but %. Basis is \\n%\\n\", det,\n                basis_foo);\n  return basis_foo;\n}\n\n/// Takes in plane normal in reference frame foo and constructs a corresponding\n/// rotation matrix ``R_foo_plane``.\n///\n/// See ``rotationFromNormal`` for details.\n///\ntemplate <class T>\nSO3<T> SO3FromNormal(Vector3<T> const& normal_foo) {\n  return SO3<T>(rotationFromNormal(normal_foo));\n}\n\n/// Returns a line (wrt. to frame ``foo``), given a pose of the ``line`` in\n/// reference frame ``foo``.\n///\n/// Note: The plane is defined by X-axis of the ``line`` frame.\n///\ntemplate <class T>\nLine2<T> lineFromSE2(SE2<T> const& T_foo_line) {\n  return Line2<T>(normalFromSO2(T_foo_line.so2()), T_foo_line.translation());\n}\n\n/// Returns the pose ``T_foo_line``, given a line in reference frame ``foo``.\n///\n/// Note: The line is defined by X-axis of the frame ``line``.\n///\ntemplate <class T>\nSE2<T> SE2FromLine(Line2<T> const& line_foo) {\n  T const d = line_foo.offset();\n  Vector2<T> const n = line_foo.normal();\n  SO2<T> const R_foo_plane = SO2FromNormal(n);\n  return SE2<T>(R_foo_plane, -d * n);\n}\n\n/// Returns a plane (wrt. 
to frame ``foo``), given a pose of the ``plane`` in\n/// reference frame ``foo``.\n///\n/// Note: The plane is defined by XY-plane of the frame ``plane``.\n///\ntemplate <class T>\nPlane3<T> planeFromSE3(SE3<T> const& T_foo_plane) {\n  return Plane3<T>(normalFromSO3(T_foo_plane.so3()), T_foo_plane.translation());\n}\n\n/// Returns the pose ``T_foo_plane``, given a plane in reference frame ``foo``.\n///\n/// Note: The plane is defined by XY-plane of the frame ``plane``.\n///\ntemplate <class T>\nSE3<T> SE3FromPlane(Plane3<T> const& plane_foo) {\n  T const d = plane_foo.offset();\n  Vector3<T> const n = plane_foo.normal();\n  SO3<T> const R_foo_plane = SO3FromNormal(n);\n  return SE3<T>(R_foo_plane, -d * n);\n}\n\n/// Takes in a hyperplane and returns unique representation by ensuring that the\n/// ``offset`` is not negative.\n///\ntemplate <class T, int N>\nEigen::Hyperplane<T, N> makeHyperplaneUnique(\n    Eigen::Hyperplane<T, N> const& plane) {\n  if (plane.offset() >= 0) {\n    return plane;\n  }\n\n  return Eigen::Hyperplane<T, N>(-plane.normal(), -plane.offset());\n}\n\n}  // namespace Sophus\n\n#endif  // GEOMETRY_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/interpolate.hpp",
    "content": "/// @file\n/// Interpolation for Lie groups.\n\n#ifndef SOPHUS_INTERPOLATE_HPP\n#define SOPHUS_INTERPOLATE_HPP\n\n#include <Eigen/Eigenvalues>\n\n#include \"interpolate_details.hpp\"\n\nnamespace Sophus {\n\n/// This function interpolates between two Lie group elements ``foo_T_bar``\n/// and ``foo_T_baz`` with an interpolation factor of ``alpha`` in [0, 1].\n///\n/// It returns a pose ``foo_T_quiz`` with ``quiz`` being a frame between ``bar``\n/// and ``baz``. If ``alpha=0`` it returns ``foo_T_bar``. If it is 1, it returns\n/// ``foo_T_bar``.\n///\n/// (Since interpolation on Lie groups is inverse-invariant, we can equivalently\n/// think of the input arguments as being ``bar_T_foo``, ``baz_T_foo`` and the\n/// return value being ``quiz_T_foo``.)\n///\n/// Precondition: ``p`` must be in [0, 1].\n///\ntemplate <class G, class Scalar2 = typename G::Scalar>\nenable_if_t<interp_details::Traits<G>::supported, G> interpolate(\n    G const& foo_T_bar, G const& foo_T_baz, Scalar2 p = Scalar2(0.5f)) {\n  using Scalar = typename G::Scalar;\n  Scalar inter_p(p);\n  SOPHUS_ENSURE(inter_p >= Scalar(0) && inter_p <= Scalar(1),\n                \"p (%) must in [0, 1].\");\n  return foo_T_bar * G::exp(inter_p * (foo_T_bar.inverse() * foo_T_baz).log());\n}\n\n}  // namespace Sophus\n\n#endif  // SOPHUS_INTERPOLATE_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/interpolate_details.hpp",
    "content": "#ifndef SOPHUS_INTERPOLATE_DETAILS_HPP\n#define SOPHUS_INTERPOLATE_DETAILS_HPP\n\n#include \"rxso2.hpp\"\n#include \"rxso3.hpp\"\n#include \"se2.hpp\"\n#include \"se3.hpp\"\n#include \"sim2.hpp\"\n#include \"sim3.hpp\"\n#include \"so2.hpp\"\n#include \"so3.hpp\"\n\nnamespace Sophus {\nnamespace interp_details {\n\ntemplate <class Group>\nstruct Traits;\n\ntemplate <class Scalar>\nstruct Traits<SO2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SO2<Scalar> const& foo_T_bar) {\n    using std::abs;\n    Scalar angle = foo_T_bar.log();\n    return abs(abs(angle) - Constants<Scalar>::pi()) <\n           Constants<Scalar>::epsilon();\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<RxSO2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(RxSO2<Scalar> const& foo_T_bar) {\n    return Traits<SO2<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so2());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SO3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SO3<Scalar> const& foo_T_bar) {\n    using std::abs;\n    Scalar angle = foo_T_bar.logAndTheta().theta;\n    return abs(abs(angle) - Constants<Scalar>::pi()) <\n           Constants<Scalar>::epsilon();\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<RxSO3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(RxSO3<Scalar> const& foo_T_bar) {\n    return Traits<SO3<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so3());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SE2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SE2<Scalar> const& foo_T_bar) {\n    return Traits<SO2<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so2());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SE3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool 
hasShortestPathAmbiguity(SE3<Scalar> const& foo_T_bar) {\n    return Traits<SO3<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so3());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<Sim2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(Sim2<Scalar> const& foo_T_bar) {\n    return Traits<SO2<Scalar>>::hasShortestPathAmbiguity(\n        foo_T_bar.rxso2().so2());\n    ;\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<Sim3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(Sim3<Scalar> const& foo_T_bar) {\n    return Traits<SO3<Scalar>>::hasShortestPathAmbiguity(\n        foo_T_bar.rxso3().so3());\n    ;\n  }\n};\n\n}  // namespace interp_details\n}  // namespace Sophus\n\n#endif  // SOPHUS_INTERPOLATE_DETAILS_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/num_diff.hpp",
    "content": "/// @file\n/// Numerical differentiation using finite differences\n\n#ifndef SOPHUS_NUM_DIFF_HPP\n#define SOPHUS_NUM_DIFF_HPP\n\n#include <functional>\n#include <type_traits>\n#include <utility>\n\n#include \"types.hpp\"\n\nnamespace Sophus {\n\nnamespace details {\ntemplate <class Scalar>\nclass Curve {\n public:\n  template <class Fn>\n  static auto num_diff(Fn curve, Scalar t, Scalar h) -> decltype(curve(t)) {\n    using ReturnType = decltype(curve(t));\n    static_assert(std::is_floating_point<Scalar>::value,\n                  \"Scalar must be a floating point type.\");\n    static_assert(IsFloatingPoint<ReturnType>::value,\n                  \"ReturnType must be either a floating point scalar, \"\n                  \"vector or matrix.\");\n\n    return (curve(t + h) - curve(t - h)) / (Scalar(2) * h);\n  }\n};\n\ntemplate <class Scalar, int N, int M>\nclass VectorField {\n public:\n  static Eigen::Matrix<Scalar, N, M> num_diff(\n      std::function<Sophus::Vector<Scalar, N>(Sophus::Vector<Scalar, M>)>\n          vector_field,\n      Sophus::Vector<Scalar, M> const& a, Scalar eps) {\n    static_assert(std::is_floating_point<Scalar>::value,\n                  \"Scalar must be a floating point type.\");\n    Eigen::Matrix<Scalar, N, M> J;\n    Sophus::Vector<Scalar, M> h;\n    h.setZero();\n    for (int i = 0; i < M; ++i) {\n      h[i] = eps;\n      J.col(i) =\n          (vector_field(a + h) - vector_field(a - h)) / (Scalar(2) * eps);\n      h[i] = Scalar(0);\n    }\n\n    return J;\n  }\n};\n\ntemplate <class Scalar, int N>\nclass VectorField<Scalar, N, 1> {\n public:\n  static Eigen::Matrix<Scalar, N, 1> num_diff(\n      std::function<Sophus::Vector<Scalar, N>(Scalar)> vector_field,\n      Scalar const& a, Scalar eps) {\n    return details::Curve<Scalar>::num_diff(std::move(vector_field), a, eps);\n  }\n};\n}  // namespace details\n\n/// Calculates the derivative of a curve at a point ``t``.\n///\n/// Here, a curve is a function from a Scalar to 
a Euclidean space. Thus, it\n/// returns either a Scalar, a vector or a matrix.\n///\ntemplate <class Scalar, class Fn>\nauto curveNumDiff(Fn curve, Scalar t,\n                  Scalar h = Constants<Scalar>::epsilonSqrt())\n    -> decltype(details::Curve<Scalar>::num_diff(std::move(curve), t, h)) {\n  return details::Curve<Scalar>::num_diff(std::move(curve), t, h);\n}\n\n/// Calculates the derivative of a vector field at a point ``a``.\n///\n/// Here, a vector field is a function from a vector space to another vector\n/// space.\n///\ntemplate <class Scalar, int N, int M, class ScalarOrVector, class Fn>\nEigen::Matrix<Scalar, N, M> vectorFieldNumDiff(\n    Fn vector_field, ScalarOrVector const& a,\n    Scalar eps = Constants<Scalar>::epsilonSqrt()) {\n  return details::VectorField<Scalar, N, M>::num_diff(std::move(vector_field),\n                                                      a, eps);\n}\n\n}  // namespace Sophus\n\n#endif  // SOPHUS_NUM_DIFF_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/rotation_matrix.hpp",
    "content": "/// @file\n/// Rotation matrix helper functions.\n\n#ifndef SOPHUS_ROTATION_MATRIX_HPP\n#define SOPHUS_ROTATION_MATRIX_HPP\n\n#include <Eigen/Dense>\n#include <Eigen/SVD>\n\n#include \"types.hpp\"\n\nnamespace Sophus {\n\n/// Takes in arbitrary square matrix and returns true if it is\n/// orthogonal.\ntemplate <class D>\nSOPHUS_FUNC bool isOrthogonal(Eigen::MatrixBase<D> const& R) {\n  using Scalar = typename D::Scalar;\n  static int const N = D::RowsAtCompileTime;\n  static int const M = D::ColsAtCompileTime;\n\n  static_assert(N == M, \"must be a square matrix\");\n  static_assert(N >= 2, \"must have compile time dimension >= 2\");\n\n  return (R * R.transpose() - Matrix<Scalar, N, N>::Identity()).norm() <\n         Constants<Scalar>::epsilon();\n}\n\n/// Takes in arbitrary square matrix and returns true if it is\n/// \"scaled-orthogonal\" with positive determinant.\n///\ntemplate <class D>\nSOPHUS_FUNC bool isScaledOrthogonalAndPositive(Eigen::MatrixBase<D> const& sR) {\n  using Scalar = typename D::Scalar;\n  static int const N = D::RowsAtCompileTime;\n  static int const M = D::ColsAtCompileTime;\n  using std::pow;\n  using std::sqrt;\n\n  Scalar det = sR.determinant();\n\n  if (det <= Scalar(0)) {\n    return false;\n  }\n\n  Scalar scale_sqr = pow(det, Scalar(2. 
/ N));\n\n  static_assert(N == M, \"must be a square matrix\");\n  static_assert(N >= 2, \"must have compile time dimension >= 2\");\n\n  return (sR * sR.transpose() - scale_sqr * Matrix<Scalar, N, N>::Identity())\n             .template lpNorm<Eigen::Infinity>() <\n         sqrt(Constants<Scalar>::epsilon());\n}\n\n/// Takes in arbitrary square matrix (2x2 or larger) and returns closest\n/// orthogonal matrix with positive determinant.\ntemplate <class D>\nSOPHUS_FUNC enable_if_t<\n    std::is_floating_point<typename D::Scalar>::value,\n    Matrix<typename D::Scalar, D::RowsAtCompileTime, D::RowsAtCompileTime>>\nmakeRotationMatrix(Eigen::MatrixBase<D> const& R) {\n  using Scalar = typename D::Scalar;\n  static int const N = D::RowsAtCompileTime;\n  static int const M = D::ColsAtCompileTime;\n\n  static_assert(N == M, \"must be a square matrix\");\n  static_assert(N >= 2, \"must have compile time dimension >= 2\");\n\n  Eigen::JacobiSVD<Matrix<Scalar, N, N>> svd(\n      R, Eigen::ComputeFullU | Eigen::ComputeFullV);\n\n  // Determine determinant of orthogonal matrix U*V'.\n  Scalar d = (svd.matrixU() * svd.matrixV().transpose()).determinant();\n  // Starting from the identity matrix D, set the last entry to d (+1 or\n  // -1),  so that det(U*D*V') = 1.\n  Matrix<Scalar, N, N> Diag = Matrix<Scalar, N, N>::Identity();\n  Diag(N - 1, N - 1) = d;\n  return svd.matrixU() * Diag * svd.matrixV().transpose();\n}\n\n}  // namespace Sophus\n\n#endif  // SOPHUS_ROTATION_MATRIX_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/rxso2.hpp",
    "content": "/// @file\n/// Direct product R X SO(2) - rotation and scaling in 2d.\n\n#ifndef SOPHUS_RXSO2_HPP\n#define SOPHUS_RXSO2_HPP\n\n#include \"so2.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass RxSO2;\nusing RxSO2d = RxSO2<double>;\nusing RxSO2f = RxSO2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::RxSO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Sophus::Vector2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO2<Scalar_>, Options_>>\n    : traits<Sophus::RxSO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO2<Scalar_> const, Options_>>\n    : traits<Sophus::RxSO2<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// RxSO2 base type - implements RxSO2 class but is storage agnostic\n///\n/// This class implements the group ``R+ x SO(2)``, the direct product of the\n/// group of positive scalar 2x2 matrices (= isomorph to the positive\n/// real numbers) and the two-dimensional special orthogonal group SO(2).\n/// Geometrically, it is the group of rotation and scaling in two dimensions.\n/// As a matrix groups, R+ x SO(2) consists of matrices of the form ``s * R``\n/// where ``R`` is an orthogonal matrix with ``det(R) = 1`` and ``s > 0``\n/// being a positive real number. 
In particular, it has the following form:\n///\n///     | s * cos(theta)  s * -sin(theta) |\n///     | s * sin(theta)  s *  cos(theta) |\n///\n/// where ``theta`` being the rotation angle. Internally, it is represented by\n/// the first column of the rotation matrix, or in other words by a non-zero\n/// complex number.\n///\n/// R+ x SO(2) is not compact, but a commutative group. First it is not compact\n/// since the scale factor is not bound. Second it is commutative since\n/// ``sR(alpha, s1) * sR(beta, s2) = sR(beta, s2) * sR(alpha, s1)``,  simply\n/// because ``alpha + beta = beta + alpha`` and ``s1 * s2 = s2 * s1`` with\n/// ``alpha`` and ``beta`` being rotation angles and ``s1``, ``s2`` being scale\n/// factors.\n///\n/// This class has the explicit class invariant that the scale ``s`` is not\n/// too close to zero. Strictly speaking, it must hold that:\n///\n///   ``complex().norm() >= Constants::epsilon()``.\n///\n/// In order to obey this condition, group multiplication is implemented with\n/// saturation such that a product always has a scale which is equal or greater\n/// this threshold.\ntemplate <class Derived>\nclass RxSO2Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using ComplexType = typename Eigen::internal::traits<Derived>::ComplexType;\n  using ComplexTemporaryType = Sophus::Vector2<Scalar, Options>;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (one for rotation and one for scaling).\n  static int constexpr DoF = 2;\n  /// Number of internal parameters used (complex number is a tuple).\n  static int constexpr num_parameters = 2;\n  /// Group transformations are 2x2 matrices.\n  static int constexpr N = 2;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using 
Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with RxSO2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using RxSO2Product = RxSO2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  /// For RxSO(2), it simply returns the identity matrix.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const { return Adjoint::Identity(); }\n\n  /// Returns rotation angle.\n  ///\n  SOPHUS_FUNC Scalar angle() const { return SO2<Scalar>(complex()).log(); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC RxSO2<NewScalarType> cast() const {\n    return RxSO2<NewScalarType>(complex().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. RxSO(2) is\n  /// represented by a complex number (two parameters). 
When using direct\n  /// write access, the user needs to take care of that the complex number is\n  /// not set close to zero.\n  ///\n  /// Note: The first parameter represents the real part, while the\n  /// second parameter represent the imaginary part.\n  ///\n  SOPHUS_FUNC Scalar* data() { return complex_nonconst().data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const { return complex().data(); }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC RxSO2<Scalar> inverse() const {\n    Scalar squared_scale = complex().squaredNorm();\n    return RxSO2<Scalar>(complex().x() / squared_scale,\n                         -complex().y() / squared_scale);\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (scaled rotation matrices) to elements of the tangent\n  /// space (rotation-vector plus logarithm of scale factor).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of RxSO2.\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    using std::log;\n    Tangent theta_sigma;\n    theta_sigma[1] = log(scale());\n    theta_sigma[0] = SO2<Scalar>(complex()).log();\n    return theta_sigma;\n  }\n\n  /// Returns 2x2 matrix representation of the instance.\n  ///\n  /// For RxSO2, the matrix representation is an scaled orthogonal matrix ``sR``\n  /// with ``det(R)=s^2``, thus a scaled rotation matrix ``R``  with scale\n  /// ``s``.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation sR;\n    // clang-format off\n    sR << complex()[0], -complex()[1],\n          complex()[1],  complex()[0];\n    // clang-format on\n    return sR;\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC RxSO2Base& operator=(RxSO2Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template 
<class OtherDerived>\n  SOPHUS_FUNC RxSO2Base<Derived>& operator=(\n      RxSO2Base<OtherDerived> const& other) {\n    complex_nonconst() = other.complex();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation and scale\n  /// multiplication.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC RxSO2Product<OtherDerived> operator*(\n      RxSO2Base<OtherDerived> const& other) const {\n    using ResultT = ReturnScalar<OtherDerived>;\n\n    Scalar lhs_real = complex().x();\n    Scalar lhs_imag = complex().y();\n    typename OtherDerived::Scalar const& rhs_real = other.complex().x();\n    typename OtherDerived::Scalar const& rhs_imag = other.complex().y();\n    /// complex multiplication\n    typename RxSO2Product<OtherDerived>::ComplexType result_complex(\n        lhs_real * rhs_real - lhs_imag * rhs_imag,\n        lhs_real * rhs_imag + lhs_imag * rhs_real);\n\n    const ResultT squared_scale = result_complex.squaredNorm();\n\n    if (squared_scale <\n        Constants<ResultT>::epsilon() * Constants<ResultT>::epsilon()) {\n      /// Saturation to ensure class invariant.\n      result_complex.normalize();\n      result_complex *= Constants<ResultT>::epsilon();\n    }\n    return RxSO2Product<OtherDerived>(result_complex);\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates a 2 dimensional point ``p`` by the SO2 element\n  /// ``bar_R_foo`` (= rotation matrix) and scales it by the scale factor ``s``:\n  ///\n  ///   ``p_bar = s * (bar_R_foo * p_foo)``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return matrix() * p;\n  }\n\n  /// Group action on 
homogeneous 2-points. See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rsp = *this * p.template head<2>();\n    return HomogeneousPointProduct<HPointDerived>(rsp(0), rsp(1), p(2));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates a parameterized line ``l(t) = o + t * d`` by the SO2\n  /// element and scales it by the scale factor\n  ///\n  /// Origin ``o`` is rotated and scaled\n  /// Direction ``d`` is rotated (preserving it's norm)\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), (*this) * l.direction() / scale());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC RxSO2Base<Derived>& operator*=(\n      RxSO2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns internal parameters of RxSO(2).\n  ///\n  /// It returns (c[0], c[1]), with c being the  complex number.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return complex();\n  }\n\n  /// Sets non-zero complex\n  ///\n  /// Precondition: ``z`` must not be close to zero.\n  SOPHUS_FUNC void setComplex(Vector2<Scalar> const& z) {\n    SOPHUS_ENSURE(z.squaredNorm() > Constants<Scalar>::epsilon() *\n                                        
Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    static_cast<Derived*>(this)->complex_nonconst() = z;\n  }\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC ComplexType const& complex() const {\n    return static_cast<Derived const*>(this)->complex();\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Transformation rotationMatrix() const {\n    ComplexTemporaryType norm_quad = complex();\n    norm_quad.normalize();\n    return SO2<Scalar>(norm_quad).matrix();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC\n  Scalar scale() const { return complex().norm(); }\n\n  /// Setter of rotation angle, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setAngle(Scalar const& theta) { setSO2(SO2<Scalar>(theta)); }\n\n  /// Setter of complex using rotation matrix ``R``, leaves scale as is.\n  ///\n  /// Precondition: ``R`` must be orthogonal with determinant of one.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Transformation const& R) {\n    setSO2(SO2<Scalar>(R));\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  SOPHUS_FUNC void setScale(Scalar const& scale) {\n    using std::sqrt;\n    complex_nonconst().normalize();\n    complex_nonconst() *= scale;\n  }\n\n  /// Setter of complex number using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 2x2 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Transformation const& sR) {\n    SOPHUS_ENSURE(isScaledOrthogonalAndPositive(sR),\n                  \"sR must be scaled orthogonal:\\n %\", sR);\n    complex_nonconst() = sR.col(0);\n  }\n\n  /// Setter of SO(2) rotations, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setSO2(SO2<Scalar> const& so2) {\n    using std::sqrt;\n    Scalar saved_scale = scale();\n    complex_nonconst() = so2.unit_complex();\n    complex_nonconst() *= saved_scale;\n  }\n\n  SOPHUS_FUNC SO2<Scalar> so2() const { return 
SO2<Scalar>(complex()); }\n\n protected:\n  /// Mutator of complex is private to ensure class invariant.\n  ///\n  SOPHUS_FUNC ComplexType& complex_nonconst() {\n    return static_cast<Derived*>(this)->complex_nonconst();\n  }\n};\n\n/// RxSO2 using storage; derived from RxSO2Base.\ntemplate <class Scalar_, int Options>\nclass RxSO2 : public RxSO2Base<RxSO2<Scalar_, Options>> {\n public:\n  using Base = RxSO2Base<RxSO2<Scalar_, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using ComplexMember = Eigen::Matrix<Scalar, 2, 1, Options>;\n\n  /// ``Base`` is friend so complex_nonconst can be accessed from ``Base``.\n  friend class RxSO2Base<RxSO2<Scalar_, Options>>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes complex number to identity rotation and\n  /// scale to 1.\n  ///\n  SOPHUS_FUNC RxSO2() : complex_(Scalar(1), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC RxSO2(RxSO2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO2(RxSO2Base<OtherDerived> const& other)\n      : complex_(other.complex()) {}\n\n  /// Constructor from scaled rotation matrix\n  ///\n  /// Precondition: rotation matrix needs to be scaled orthogonal with\n  /// determinant of ``s^2``.\n  ///\n  SOPHUS_FUNC explicit RxSO2(Transformation const& sR) {\n    this->setScaledRotationMatrix(sR);\n  }\n\n  /// Constructor from scale factor and rotation matrix ``R``.\n  ///\n  /// Precondition: Rotation matrix ``R`` must be orthogonal with determinant\n  ///               of 1 and ``scale`` must not be close to zero.\n  ///\n  SOPHUS_FUNC RxSO2(Scalar const& scale, Transformation const& R)\n      : RxSO2((scale * 
SO2<Scalar>(R).unit_complex()).eval()) {}\n\n  /// Constructor from scale factor and SO2\n  ///\n  /// Precondition: ``scale`` must not be close to zero.\n  ///\n  SOPHUS_FUNC RxSO2(Scalar const& scale, SO2<Scalar> const& so2)\n      : RxSO2((scale * so2.unit_complex()).eval()) {}\n\n  /// Constructor from complex number.\n  ///\n  /// Precondition: complex number must not be close to zero.\n  ///\n  SOPHUS_FUNC explicit RxSO2(Vector2<Scalar> const& z) : complex_(z) {\n    SOPHUS_ENSURE(complex_.squaredNorm() >= Constants<Scalar>::epsilon() *\n                                                Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon: % vs %\",\n                  complex_.squaredNorm(),\n                  Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon());\n  }\n\n  /// Constructor from complex number.\n  ///\n  /// Precondition: complex number must not be close to zero.\n  ///\n  SOPHUS_FUNC explicit RxSO2(Scalar const& real, Scalar const& imag)\n      : RxSO2(Vector2<Scalar>(real, imag)) {}\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC ComplexMember const& complex() const { return complex_; }\n\n  /// Returns derivative of exp(x).matrix() wrt. 
``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation angle\n  /// plus logarithm of scale) and returns the corresponding element of the\n  /// group RxSO2.\n  ///\n  /// To be more specific, this function computes ``expmat(hat(theta))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of RSO2.\n  ///\n  SOPHUS_FUNC static RxSO2<Scalar> exp(Tangent const& a) {\n    using std::exp;\n\n    Scalar const theta = a[0];\n    Scalar const sigma = a[1];\n    Scalar s = exp(sigma);\n    Vector2<Scalar> z = SO2<Scalar>::exp(theta).unit_complex();\n    z *= s;\n    return RxSO2<Scalar>(z);\n  }\n\n  /// Returns the ith infinitesimal generators of ``R+ x SO(2)``.\n  ///\n  /// The infinitesimal generators of RxSO2 are:\n  ///\n  /// ```\n  ///         |  0 -1 |\n  ///   G_0 = |  1  0 |\n  ///\n  ///         |  1  0 |\n  ///   G_1 = |  0  1 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be 0, or 1.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 1, \"i should be 0 or 1.\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 2-vector representation ``a`` (= rotation angle plus\n  /// logarithm of scale) and  returns the corresponding matrix representation\n  /// of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of RxSO2 is defined as\n  ///\n  ///   ``hat(.): R^2 -> R^{2x2},  hat(a) = sum_i a_i * G_i``  (for i=0,1,2)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of RxSO2.\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation A;\n    // clang-format off\n    A << a(1), -a(0),\n        
 a(0),  a(1);\n    // clang-format on\n    return A;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of RxSO(2). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_rxso2 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of RxSO2.\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) {\n    Vector2<Scalar> res;\n    res.setZero();\n    return res;\n  }\n\n  /// Draw uniform sample from RxSO(2) manifold.\n  ///\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2)].\n  ///\n  template <class UniformRandomBitGenerator>\n  static RxSO2 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    using std::exp2;\n    return RxSO2(exp2(uniform(generator)),\n                 SO2<Scalar>::sampleUniform(generator));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 2x2-matrix representation ``Omega`` and maps it to the\n  /// corresponding vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  d -x |\n  ///                |  x  d |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    using std::abs;\n    return Tangent(Omega(1, 0), Omega(0, 0));\n  }\n\n protected:\n  SOPHUS_FUNC ComplexMember& complex_nonconst() { return complex_; }\n\n  ComplexMember complex_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``RxSO2``; derived from  RxSO2Base.\n///\n/// Allows us to wrap RxSO2 objects around POD array (e.g. 
external z style\n/// complex).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO2<Scalar_>, Options>\n    : public Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_>, Options>> {\n  using Base = Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_>, Options>>;\n\n public:\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so complex_nonconst can be accessed from ``Base``.\n  friend class Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_>, Options>>;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar* coeffs) : complex_(coeffs) {}\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar>, Options> const& complex() const {\n    return complex_;\n  }\n\n protected:\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& complex_nonconst() {\n    return complex_;\n  }\n\n  Map<Sophus::Vector2<Scalar>, Options> complex_;\n};\n\n/// Specialization of Eigen::Map for ``RxSO2 const``; derived from  RxSO2Base.\n///\n/// Allows us to wrap RxSO2 objects around POD array (e.g. 
external z style\n/// complex).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO2<Scalar_> const, Options>\n    : public Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  Map(Scalar const* coeffs) : complex_(coeffs) {}\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar> const, Options> const& complex() const {\n    return complex_;\n  }\n\n protected:\n  Map<Sophus::Vector2<Scalar> const, Options> const complex_;\n};\n}  // namespace Eigen\n\n#endif  /// SOPHUS_RXSO2_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/rxso3.hpp",
    "content": "/// @file\n/// Direct product R X SO(3) - rotation and scaling in 3d.\n\n#ifndef SOPHUS_RXSO3_HPP\n#define SOPHUS_RXSO3_HPP\n\n#include \"so3.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass RxSO3;\nusing RxSO3d = RxSO3<double>;\nusing RxSO3f = RxSO3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::RxSO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Eigen::Quaternion<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO3<Scalar_>, Options_>>\n    : traits<Sophus::RxSO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO3<Scalar_> const, Options_>>\n    : traits<Sophus::RxSO3<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// RxSO3 base type - implements RxSO3 class but is storage agnostic\n///\n/// This class implements the group ``R+ x SO(3)``, the direct product of the\n/// group of positive scalar 3x3 matrices (= isomorph to the positive\n/// real numbers) and the three-dimensional special orthogonal group SO(3).\n/// Geometrically, it is the group of rotation and scaling in three dimensions.\n/// As a matrix groups, RxSO3 consists of matrices of the form ``s * R``\n/// where ``R`` is an orthogonal matrix with ``det(R)=1`` and ``s > 0``\n/// being a positive real number.\n///\n/// Internally, RxSO3 is represented by the group of non-zero quaternions.\n/// In particular, the scale equals the squared(!) 
norm of the quaternion ``q``,\n/// ``s = |q|^2``. This is a most compact representation since the degrees of\n/// freedom (DoF) of RxSO3 (=4) equals the number of internal parameters (=4).\n///\n/// This class has the explicit class invariant that the scale ``s`` is not\n/// too close to zero. Strictly speaking, it must hold that:\n///\n///   ``quaternion().squaredNorm() >= Constants::epsilon()``.\n///\n/// In order to obey this condition, group multiplication is implemented with\n/// saturation such that a product always has a scale which is equal or greater\n/// this threshold.\ntemplate <class Derived>\nclass RxSO3Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using QuaternionType =\n      typename Eigen::internal::traits<Derived>::QuaternionType;\n  using QuaternionTemporaryType = Eigen::Quaternion<Scalar, Options>;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (three for rotation and one for scaling).\n  static int constexpr DoF = 4;\n  /// Number of internal parameters used (quaternion is a 4-tuple).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  struct TangentAndTheta {\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n    Tangent tangent;\n    Scalar theta;\n  };\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with RxSO3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using RxSO3Product = RxSO3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  /// For RxSO(3), it simply returns the rotation matrix corresponding to ``A``.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Adjoint res;\n    res.setIdentity();\n    res.template topLeftCorner<3, 3>() = rotationMatrix();\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC RxSO3<NewScalarType> cast() const {\n    return RxSO3<NewScalarType>(quaternion().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. RxSO(3) is\n  /// represented by an Eigen::Quaternion (four parameters). 
When using direct\n  /// write access, the user needs to take care of that the quaternion is not\n  /// set close to zero.\n  ///\n  /// Note: The first three Scalars represent the imaginary parts, while the\n  /// forth Scalar represent the real part.\n  ///\n  SOPHUS_FUNC Scalar* data() { return quaternion_nonconst().coeffs().data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    return quaternion().coeffs().data();\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC RxSO3<Scalar> inverse() const {\n    return RxSO3<Scalar>(quaternion().inverse());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (scaled rotation matrices) to elements of the tangent\n  /// space (rotation-vector plus logarithm of scale factor).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of RxSO3.\n  ///\n  SOPHUS_FUNC Tangent log() const { return logAndTheta().tangent; }\n\n  /// As above, but also returns ``theta = |omega|``.\n  ///\n  SOPHUS_FUNC TangentAndTheta logAndTheta() const {\n    using std::log;\n\n    Scalar scale = quaternion().squaredNorm();\n    TangentAndTheta result;\n    result.tangent[3] = log(scale);\n    auto omega_and_theta = SO3<Scalar>(quaternion()).logAndTheta();\n    result.tangent.template head<3>() = omega_and_theta.tangent;\n    result.theta = omega_and_theta.theta;\n    return result;\n  }\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// For RxSO3, the matrix representation is an scaled orthogonal matrix ``sR``\n  /// with ``det(R)=s^3``, thus a scaled rotation matrix ``R``  with scale\n  /// ``s``.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation sR;\n\n    Scalar const vx_sq = quaternion().vec().x() * quaternion().vec().x();\n    Scalar const vy_sq 
= quaternion().vec().y() * quaternion().vec().y();\n    Scalar const vz_sq = quaternion().vec().z() * quaternion().vec().z();\n    Scalar const w_sq = quaternion().w() * quaternion().w();\n    Scalar const two_vx = Scalar(2) * quaternion().vec().x();\n    Scalar const two_vy = Scalar(2) * quaternion().vec().y();\n    Scalar const two_vz = Scalar(2) * quaternion().vec().z();\n    Scalar const two_vx_vy = two_vx * quaternion().vec().y();\n    Scalar const two_vx_vz = two_vx * quaternion().vec().z();\n    Scalar const two_vx_w = two_vx * quaternion().w();\n    Scalar const two_vy_vz = two_vy * quaternion().vec().z();\n    Scalar const two_vy_w = two_vy * quaternion().w();\n    Scalar const two_vz_w = two_vz * quaternion().w();\n\n    sR(0, 0) = vx_sq - vy_sq - vz_sq + w_sq;\n    sR(1, 0) = two_vx_vy + two_vz_w;\n    sR(2, 0) = two_vx_vz - two_vy_w;\n\n    sR(0, 1) = two_vx_vy - two_vz_w;\n    sR(1, 1) = -vx_sq + vy_sq - vz_sq + w_sq;\n    sR(2, 1) = two_vx_w + two_vy_vz;\n\n    sR(0, 2) = two_vx_vz + two_vy_w;\n    sR(1, 2) = -two_vx_w + two_vy_vz;\n    sR(2, 2) = -vx_sq - vy_sq + vz_sq + w_sq;\n    return sR;\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC RxSO3Base& operator=(RxSO3Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO3Base<Derived>& operator=(\n      RxSO3Base<OtherDerived> const& other) {\n    quaternion_nonconst() = other.quaternion();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation and scale\n  /// multiplication.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC RxSO3Product<OtherDerived> operator*(\n      RxSO3Base<OtherDerived> const& other) const {\n    using ResultT = ReturnScalar<OtherDerived>;\n    typename RxSO3Product<OtherDerived>::QuaternionType result_quaternion(\n    
    quaternion() * other.quaternion());\n\n    ResultT scale = result_quaternion.squaredNorm();\n    if (scale < Constants<ResultT>::epsilon()) {\n      SOPHUS_ENSURE(scale > ResultT(0), \"Scale must be greater zero.\");\n      /// Saturation to ensure class invariant.\n      result_quaternion.normalize();\n      result_quaternion.coeffs() *= sqrt(Constants<Scalar>::epsilon());\n    }\n    return RxSO3Product<OtherDerived>(result_quaternion);\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates a 3 dimensional point ``p`` by the SO3 element\n  ///  ``bar_R_foo`` (= rotation matrix) and scales it by the scale factor\n  ///  ``s``:\n  ///\n  ///   ``p_bar = s * (bar_R_foo * p_foo)``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    // Follows http:///eigen.tuxfamily.org/bz/show_bug.cgi?id=459\n    Scalar scale = quaternion().squaredNorm();\n    PointProduct<PointDerived> two_vec_cross_p = quaternion().vec().cross(p);\n    two_vec_cross_p += two_vec_cross_p;\n    return scale * p + (quaternion().w() * two_vec_cross_p +\n                        quaternion().vec().cross(two_vec_cross_p));\n  }\n\n  /// Group action on homogeneous 3-points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rsp = *this * p.template head<3>();\n    return HomogeneousPointProduct<HPointDerived>(rsp(0), rsp(1), rsp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO3\n  /// element ans scales it by the scale factor:\n  ///\n  /// Origin ``o`` is rotated and scaled\n  /// Direction ``d`` is rotated (preserving it's norm)\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(),\n                (*this) * l.direction() / quaternion().squaredNorm());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO3's Scalar type.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC RxSO3Base<Derived>& operator*=(\n      RxSO3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns internal parameters of RxSO(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real), with q being the\n  /// quaternion.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return quaternion().coeffs();\n  }\n\n  /// Sets non-zero quaternion\n  ///\n  /// Precondition: ``quat`` must not be close to zero.\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quat) {\n    
SOPHUS_ENSURE(quat.squaredNorm() > Constants<Scalar>::epsilon() *\n                                           Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    static_cast<Derived*>(this)->quaternion_nonconst() = quat;\n  }\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& quaternion() const {\n    return static_cast<Derived const*>(this)->quaternion();\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Transformation rotationMatrix() const {\n    QuaternionTemporaryType norm_quad = quaternion();\n    norm_quad.normalize();\n    return norm_quad.toRotationMatrix();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC\n  Scalar scale() const { return quaternion().squaredNorm(); }\n\n  /// Setter of quaternion using rotation matrix ``R``, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Transformation const& R) {\n    using std::sqrt;\n    Scalar saved_scale = scale();\n    quaternion_nonconst() = R;\n    quaternion_nonconst().coeffs() *= sqrt(saved_scale);\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  /// Note: This function as a significant computational cost, since it has to\n  /// call the square root twice.\n  ///\n  SOPHUS_FUNC\n  void setScale(Scalar const& scale) {\n    using std::sqrt;\n    quaternion_nonconst().normalize();\n    quaternion_nonconst().coeffs() *= sqrt(scale);\n  }\n\n  /// Setter of quaternion using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 3x3 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Transformation const& sR) {\n    Transformation squared_sR = sR * sR.transpose();\n    Scalar squared_scale =\n        Scalar(1. / 3.) 
*\n        (squared_sR(0, 0) + squared_sR(1, 1) + squared_sR(2, 2));\n    SOPHUS_ENSURE(squared_scale >= Constants<Scalar>::epsilon() *\n                                       Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    Scalar scale = sqrt(squared_scale);\n    quaternion_nonconst() = sR / scale;\n    quaternion_nonconst().coeffs() *= sqrt(scale);\n  }\n\n  /// Setter of SO(3) rotations, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setSO3(SO3<Scalar> const& so3) {\n    using std::sqrt;\n    Scalar saved_scale = scale();\n    quaternion_nonconst() = so3.unit_quaternion();\n    quaternion_nonconst().coeffs() *= sqrt(saved_scale);\n  }\n\n  SOPHUS_FUNC SO3<Scalar> so3() const { return SO3<Scalar>(quaternion()); }\n\n protected:\n  /// Mutator of quaternion is private to ensure class invariant.\n  ///\n  SOPHUS_FUNC QuaternionType& quaternion_nonconst() {\n    return static_cast<Derived*>(this)->quaternion_nonconst();\n  }\n};\n\n/// RxSO3 using storage; derived from RxSO3Base.\ntemplate <class Scalar_, int Options>\nclass RxSO3 : public RxSO3Base<RxSO3<Scalar_, Options>> {\n public:\n  using Base = RxSO3Base<RxSO3<Scalar_, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using QuaternionMember = Eigen::Quaternion<Scalar, Options>;\n\n  /// ``Base`` is friend so quaternion_nonconst can be accessed from ``Base``.\n  friend class RxSO3Base<RxSO3<Scalar_, Options>>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes quaternion to identity rotation and scale\n  /// to 1.\n  ///\n  SOPHUS_FUNC RxSO3()\n      : quaternion_(Scalar(1), Scalar(0), Scalar(0), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC RxSO3(RxSO3 const& other) = default;\n\n  
/// Copy-like constructor from OtherDerived\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO3(RxSO3Base<OtherDerived> const& other)\n      : quaternion_(other.quaternion()) {}\n\n  /// Constructor from scaled rotation matrix\n  ///\n  /// Precondition: rotation matrix need to be scaled orthogonal with\n  /// determinant of ``s^3``.\n  ///\n  SOPHUS_FUNC explicit RxSO3(Transformation const& sR) {\n    this->setScaledRotationMatrix(sR);\n  }\n\n  /// Constructor from scale factor and rotation matrix ``R``.\n  ///\n  /// Precondition: Rotation matrix ``R`` must to be orthogonal with determinant\n  ///               of 1 and ``scale`` must not be close to zero.\n  ///\n  SOPHUS_FUNC RxSO3(Scalar const& scale, Transformation const& R)\n      : quaternion_(R) {\n    SOPHUS_ENSURE(scale >= Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    using std::sqrt;\n    quaternion_.coeffs() *= sqrt(scale);\n  }\n\n  /// Constructor from scale factor and SO3\n  ///\n  /// Precondition: ``scale`` must not to be close to zero.\n  ///\n  SOPHUS_FUNC RxSO3(Scalar const& scale, SO3<Scalar> const& so3)\n      : quaternion_(so3.unit_quaternion()) {\n    SOPHUS_ENSURE(scale >= Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    using std::sqrt;\n    quaternion_.coeffs() *= sqrt(scale);\n  }\n\n  /// Constructor from quaternion\n  ///\n  /// Precondition: quaternion must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit RxSO3(Eigen::QuaternionBase<D> const& quat)\n      : quaternion_(quat) {\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type.\");\n    SOPHUS_ENSURE(quaternion_.squaredNorm() >= Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n  }\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC QuaternionMember const& quaternion() 
const { return quaternion_; }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation 3-vector\n  /// plus logarithm of scale) and returns the corresponding element of the\n  /// group RxSO3.\n  ///\n  /// To be more specific, thixs function computes ``expmat(hat(omega))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of RSO3.\n  ///\n  SOPHUS_FUNC static RxSO3<Scalar> exp(Tangent const& a) {\n    Scalar theta;\n    return expAndTheta(a, &theta);\n  }\n\n  /// As above, but also returns ``theta = |omega|`` as out-parameter.\n  ///\n  /// Precondition: ``theta`` must not be ``nullptr``.\n  ///\n  SOPHUS_FUNC static RxSO3<Scalar> expAndTheta(Tangent const& a,\n                                               Scalar* theta) {\n    SOPHUS_ENSURE(theta != nullptr, \"must not be nullptr.\");\n    using std::exp;\n    using std::sqrt;\n\n    Vector3<Scalar> const omega = a.template head<3>();\n    Scalar sigma = a[3];\n    Scalar sqrt_scale = sqrt(exp(sigma));\n    Eigen::Quaternion<Scalar> quat =\n        SO3<Scalar>::expAndTheta(omega, theta).unit_quaternion();\n    quat.coeffs() *= sqrt_scale;\n    return RxSO3<Scalar>(quat);\n  }\n\n  /// Returns the ith infinitesimal generators of ``R+ x SO(3)``.\n  ///\n  /// The infinitesimal generators of RxSO3 are:\n  ///\n  /// ```\n  ///         |  0  0  0 |\n  ///   G_0 = |  0  0 -1 |\n  ///         |  0  1  0 |\n  ///\n  ///         |  0  0  1 |\n  ///   G_1 = |  0  0  0 |\n  ///         | -1  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  1  0  0 |\n  ///   G_2 = |  0  1  0 |\n  ///         |  0  0  1 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be 0, 1, 2 or 3.\n  ///\n  
SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 3, \"i should be in range [0,3].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 4-vector representation ``a`` (= rotation vector plus\n  /// logarithm of scale) and  returns the corresponding matrix representation\n  /// of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of RxSO3 is defined as\n  ///\n  ///   ``hat(.): R^4 -> R^{3x3},  hat(a) = sum_i a_i * G_i``  (for i=0,1,2,3)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of RxSO3.\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation A;\n    // clang-format off\n    A <<  a(3), -a(2),  a(1),\n          a(2),  a(3), -a(0),\n         -a(1),  a(0),  a(3);\n    // clang-format on\n    return A;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of RxSO(3). 
To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_rxso3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of RxSO3.\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector3<Scalar> const omega1 = a.template head<3>();\n    Vector3<Scalar> const omega2 = b.template head<3>();\n    Vector4<Scalar> res;\n    res.template head<3>() = omega1.cross(omega2);\n    res[3] = Scalar(0);\n    return res;\n  }\n\n  /// Draw uniform sample from RxSO(3) manifold.\n  ///\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2].\n  ///\n  template <class UniformRandomBitGenerator>\n  static RxSO3 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    using std::exp2;\n    return RxSO3(exp2(uniform(generator)),\n                 SO3<Scalar>::sampleUniform(generator));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  d -c  b |\n  ///                |  c  d -a |\n  ///                | -b  a  d |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    using std::abs;\n    return Tangent(Omega(2, 1), Omega(0, 2), Omega(1, 0), Omega(0, 0));\n  }\n\n protected:\n  SOPHUS_FUNC QuaternionMember& quaternion_nonconst() { return quaternion_; }\n\n  QuaternionMember quaternion_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``RxSO3``; derived from RxSO3Base\n///\n/// Allows us to wrap RxSO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO3<Scalar_>, Options>\n    : public Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so quaternion_nonconst can be accessed from ``Base``.\n  friend class Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_>, Options>>;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar* coeffs) : quaternion_(coeffs) {}\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC\n  Map<Eigen::Quaternion<Scalar>, Options> const& quaternion() const {\n    return quaternion_;\n  }\n\n protected:\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar>, Options>& quaternion_nonconst() {\n    return quaternion_;\n  }\n\n  Map<Eigen::Quaternion<Scalar>, Options> quaternion_;\n};\n\n/// Specialization of Eigen::Map for ``RxSO3 const``; derived from RxSO3Base.\n///\n/// Allows us to wrap RxSO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO3<Scalar_> const, Options>\n    : public Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  Map(Scalar const* coeffs) : quaternion_(coeffs) {}\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC\n  Map<Eigen::Quaternion<Scalar> const, Options> const& quaternion() const {\n    return quaternion_;\n  }\n\n protected:\n  Map<Eigen::Quaternion<Scalar> const, Options> const quaternion_;\n};\n}  // namespace Eigen\n\n#endif  /// SOPHUS_RXSO3_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/se2.hpp",
    "content": "/// @file\n/// Special Euclidean group SE(2) - rotation and translation in 2d.\n\n#ifndef SOPHUS_SE2_HPP\n#define SOPHUS_SE2_HPP\n\n#include \"so2.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SE2;\nusing SE2d = SE2<double>;\nusing SE2f = SE2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::SE2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector2<Scalar, Options>;\n  using SO2Type = Sophus::SO2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE2<Scalar_>, Options>>\n    : traits<Sophus::SE2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar>, Options>;\n  using SO2Type = Map<Sophus::SO2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE2<Scalar_> const, Options>>\n    : traits<Sophus::SE2<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar> const, Options>;\n  using SO2Type = Map<Sophus::SO2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SE2 base type - implements SE2 class but is storage agnostic.\n///\n/// SE(2) is the group of rotations  and translation in 2d. It is the\n/// semi-direct product of SO(2) and the 2d Euclidean vector space.  
The class\n/// is represented using a composition of SO2Group  for rotation and a 2-vector\n/// for translation.\n///\n/// SE(2) is neither compact, nor a commutative group.\n///\n/// See SO2Group for more details of the rotation representation in 2d.\n///\ntemplate <class Derived>\nclass SE2Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using SO2Type = typename Eigen::internal::traits<Derived>::SO2Type;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (two for translation, three for rotation).\n  static int constexpr DoF = 3;\n  /// Number of internal parameters used (tuple for complex, two for\n  /// translation).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SE2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SE2Product = SE2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Matrix<Scalar, 2, 2> const& R = so2().matrix();\n    Transformation res;\n    res.setIdentity();\n    res.template topLeftCorner<2, 2>() = R;\n    res(0, 2) = translation()[1];\n    res(1, 2) = -translation()[0];\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SE2<NewScalarType> cast() const {\n    return SE2<NewScalarType>(so2().template cast<NewScalarType>(),\n                              translation().template cast<NewScalarType>());\n  }\n\n  /// Returns derivative of  this * exp(x)  wrt x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    Sophus::Vector2<Scalar> const c = unit_complex();\n    Scalar o(0);\n    J(0, 0) = o;\n    J(0, 1) = o;\n    J(0, 2) = -c[1];\n    J(1, 0) = o;\n    J(1, 1) = o;\n    J(1, 2) = c[0];\n    J(2, 0) = c[0];\n    J(2, 1) = -c[1];\n    J(2, 2) = o;\n    J(3, 0) = c[1];\n    J(3, 1) = c[0];\n    J(3, 2) = o;\n    return J;\n  }\n\n  /// Returns 
group inverse.\n  ///\n  SOPHUS_FUNC SE2<Scalar> inverse() const {\n    SO2<Scalar> const invR = so2().inverse();\n    return SE2<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent space (twist).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SE(2).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    using std::abs;\n\n    Tangent upsilon_theta;\n    Scalar theta = so2().log();\n    upsilon_theta[2] = theta;\n    Scalar halftheta = Scalar(0.5) * theta;\n    Scalar halftheta_by_tan_of_halftheta;\n\n    Vector2<Scalar> z = so2().unit_complex();\n    Scalar real_minus_one = z.x() - Scalar(1.);\n    if (abs(real_minus_one) < Constants<Scalar>::epsilon()) {\n      halftheta_by_tan_of_halftheta =\n          Scalar(1.) - Scalar(1. 
/ 12) * theta * theta;\n    } else {\n      halftheta_by_tan_of_halftheta = -(halftheta * z.y()) / (real_minus_one);\n    }\n    Matrix<Scalar, 2, 2> V_inv;\n    V_inv << halftheta_by_tan_of_halftheta, halftheta, -halftheta,\n        halftheta_by_tan_of_halftheta;\n    upsilon_theta.template head<2>() = V_inv * translation();\n    return upsilon_theta;\n  }\n\n  /// Normalize SO2 element\n  ///\n  /// It re-normalizes the SO2 element.\n  ///\n  SOPHUS_FUNC void normalize() { so2().normalize(); }\n\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | R t |\n  ///   | o 1 |\n  ///\n  /// where ``R`` is a 2x2 rotation matrix, ``t`` a translation 2-vector and\n  /// ``o`` a 2-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogenious_matrix;\n    homogenious_matrix.template topLeftCorner<2, 3>() = matrix2x3();\n    homogenious_matrix.row(2) =\n        Matrix<Scalar, 1, 3>(Scalar(0), Scalar(0), Scalar(1));\n    return homogenious_matrix;\n  }\n\n  /// Returns the significant first two rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 2, 3> matrix2x3() const {\n    Matrix<Scalar, 2, 3> matrix;\n    matrix.template topLeftCorner<2, 2>() = rotationMatrix();\n    matrix.col(2) = translation();\n    return matrix;\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC SE2Base& operator=(SE2Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE2Base<Derived>& operator=(SE2Base<OtherDerived> const& other) {\n    so2() = other.so2();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SE2Product<OtherDerived> operator*(\n      SE2Base<OtherDerived> const& other) const {\n    return SE2Product<OtherDerived>(\n        so2() * 
other.so2(), translation() + so2() * other.translation());\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates and translates a two dimensional point ``p`` by the\n  /// SE(2) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_R_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return so2() * p + translation();\n  }\n\n  /// Group action on homogeneous 2-points. See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        so2() * p.template head<2>() + p(2) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), p(2));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the SE(2) element:\n  ///\n  /// Origin ``o`` is rotated and translated using SE(2) action\n  /// Direction ``d`` is rotated using SO(2) action\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), so2() * l.direction());\n  }\n\n  /// In-place group multiplication. 
This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SE2Base<Derived>& operator*=(SE2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns internal parameters of SE(2).\n  ///\n  /// It returns (c[0], c[1], t[0], t[1]),\n  /// with c being the unit complex number, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << so2().params(), translation();\n    return p;\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 2, 2> rotationMatrix() const {\n    return so2().matrix();\n  }\n\n  /// Takes in complex number, and normalizes it.\n  ///\n  /// Precondition: The complex number must not be close to zero.\n  ///\n  SOPHUS_FUNC void setComplex(Sophus::Vector2<Scalar> const& complex) {\n    return so2().setComplex(complex);\n  }\n\n  /// Sets ``so3`` using ``rotation_matrix``.\n  ///\n  /// Precondition: ``R`` must be orthogonal and ``det(R)=1``.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix<Scalar, 2, 2> const& R) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n %\", R);\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: %\",\n                  R.determinant());\n    typename SO2Type::ComplexTemporaryType const complex(\n        Scalar(0.5) * (R(0, 0) + R(1, 1)), Scalar(0.5) * (R(1, 0) - R(0, 1)));\n    so2().setComplex(complex);\n  }\n\n  /// Mutator of SO3 group.\n  ///\n  SOPHUS_FUNC\n  SO2Type& so2() { return static_cast<Derived*>(this)->so2(); }\n\n  /// Accessor of SO3 group.\n  ///\n  SOPHUS_FUNC\n  SO2Type const& so2() const {\n    return static_cast<Derived 
const*>(this)->so2();\n  }\n\n  /// Mutator of translation vector.\n  ///\n  SOPHUS_FUNC\n  TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC\n  TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n\n  /// Accessor of unit complex number.\n  ///\n  SOPHUS_FUNC\n  typename Eigen::internal::traits<Derived>::SO2Type::ComplexT const&\n  unit_complex() const {\n    return so2().unit_complex();\n  }\n};\n\n/// SE2 using default storage; derived from SE2Base.\ntemplate <class Scalar_, int Options>\nclass SE2 : public SE2Base<SE2<Scalar_, Options>> {\n public:\n  using Base = SE2Base<SE2<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using SO2Member = SO2<Scalar, Options>;\n  using TranslationMember = Vector2<Scalar, Options>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes rigid body motion to the identity.\n  ///\n  SOPHUS_FUNC SE2();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SE2(SE2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE2(SE2Base<OtherDerived> const& other)\n      : so2_(other.so2()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from SO3 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC SE2(SO2Base<OtherDerived> const& so2,\n                  
Eigen::MatrixBase<D> const& translation)\n      : so2_(so2), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from rotation matrix and translation vector\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  /// of 1.\n  ///\n  SOPHUS_FUNC\n  SE2(typename SO2<Scalar>::Transformation const& rotation_matrix,\n      Point const& translation)\n      : so2_(rotation_matrix), translation_(translation) {}\n\n  /// Constructor from rotation angle and translation vector.\n  ///\n  SOPHUS_FUNC SE2(Scalar const& theta, Point const& translation)\n      : so2_(theta), translation_(translation) {}\n\n  /// Constructor from complex number and translation vector\n  ///\n  /// Precondition: ``complex`` must not be close to zero.\n  SOPHUS_FUNC SE2(Vector2<Scalar> const& complex, Point const& translation)\n      : so2_(complex), translation_(translation) {}\n\n  /// Constructor from 3x3 matrix\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  /// of 1. The last row must be ``(0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit SE2(Transformation const& T)\n      : so2_(T.template topLeftCorner<2, 2>().eval()),\n        translation_(T.template block<2, 1>(0, 2)) {}\n\n  /// This provides unsafe read/write access to internal data. SO(2) is\n  /// represented by a complex number (two parameters). 
When using direct write\n  /// access, the user needs to take care of that the complex number stays\n  /// normalized.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // so2_ and translation_ are layed out sequentially with no padding\n    return so2_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    /// so2_ and translation_ are layed out sequentially with no padding\n    return so2_.data();\n  }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC SO2Member& so2() { return so2_; }\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC SO2Member const& so2() const { return so2_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& upsilon_theta) {\n    using std::abs;\n    using std::cos;\n    using std::pow;\n    using std::sin;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Sophus::Vector<Scalar, 2> upsilon = upsilon_theta.template head<2>();\n    Scalar theta = upsilon_theta[2];\n\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Scalar const o(0);\n      Scalar const i(1);\n\n      // clang-format off\n      J << o, o, o, o, o, i, i, o, -Scalar(0.5) * upsilon[1], o, i,\n          Scalar(0.5) * upsilon[0];\n      // clang-format on\n      return J;\n    }\n\n    Scalar const c0 = sin(theta);\n    Scalar const c1 = cos(theta);\n    Scalar const c2 = 1.0 / theta;\n    Scalar const c3 = c0 * c2;\n    Scalar const c4 = -c1 + Scalar(1);\n    Scalar const c5 = c2 * c4;\n    Scalar const c6 = c1 * c2;\n    Scalar const c7 = pow(theta, -2);\n    Scalar const c8 = c0 * c7;\n    Scalar const c9 = c4 * c7;\n\n    Scalar const o = Scalar(0);\n    J(0, 0) = o;\n 
   J(0, 1) = o;\n    J(0, 2) = -c0;\n    J(1, 0) = o;\n    J(1, 1) = o;\n    J(1, 2) = c1;\n    J(2, 0) = c3;\n    J(2, 1) = -c5;\n    J(2, 2) =\n        -c3 * upsilon[1] + c6 * upsilon[0] - c8 * upsilon[0] + c9 * upsilon[1];\n    J(3, 0) = c5;\n    J(3, 1) = c3;\n    J(3, 2) =\n        c3 * upsilon[0] + c6 * upsilon[1] - c8 * upsilon[1] - c9 * upsilon[0];\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Scalar const o(0);\n    Scalar const i(1);\n\n    // clang-format off\n    J << o, o, o, o, o, i, i, o, o, o, i, o;\n    // clang-format on\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= twist ``a``) and\n  /// returns the corresponding element of the group SE(2).\n  ///\n  /// The first two components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of SE(2), while the last three components\n  /// of ``a`` represents the rotation vector ``omega``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of SE(2), see below.\n  ///\n  SOPHUS_FUNC static SE2<Scalar> exp(Tangent const& a) {\n    Scalar theta = a[2];\n    SO2<Scalar> so2 = SO2<Scalar>::exp(theta);\n    Scalar sin_theta_by_theta;\n    Scalar one_minus_cos_theta_by_theta;\n    using std::abs;\n\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Scalar theta_sq = theta * theta;\n      sin_theta_by_theta = Scalar(1.) - Scalar(1. / 6.) * theta_sq;\n      one_minus_cos_theta_by_theta =\n          Scalar(0.5) * theta - Scalar(1. / 24.) 
* theta * theta_sq;\n    } else {\n      sin_theta_by_theta = so2.unit_complex().y() / theta;\n      one_minus_cos_theta_by_theta =\n          (Scalar(1.) - so2.unit_complex().x()) / theta;\n    }\n    Vector2<Scalar> trans(\n        sin_theta_by_theta * a[0] - one_minus_cos_theta_by_theta * a[1],\n        one_minus_cos_theta_by_theta * a[0] + sin_theta_by_theta * a[1]);\n    return SE2<Scalar>(so2, trans);\n  }\n\n  /// Returns closest SE3 given arbitrary 4x4 matrix.\n  ///\n  template <class S = Scalar>\n  static SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, SE2>\n  fitToSE2(Matrix3<Scalar> const& T) {\n    return SE2(SO2<Scalar>::fitToSO2(T.template block<2, 2>(0, 0)),\n               T.template block<2, 1>(0, 2));\n  }\n\n  /// Returns the ith infinitesimal generators of SE(2).\n  ///\n  /// The infinitesimal generators of SE(2) are:\n  ///\n  /// ```\n  ///         |  0  0  1 |\n  ///   G_0 = |  0  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0  0  0 |\n  ///   G_1 = |  0  0  1 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in 0, 1 or 2.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 || i <= 2, \"i should be in range [0,2].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 3-vector representation (= twist) and returns the\n  /// corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SE(3) is defined as\n  ///\n  ///   ``hat(.): R^3 -> R^{3x33},  hat(a) = sum_i a_i * G_i``  (for i=0,1,2)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of SE(2).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    
Omega.setZero();\n    Omega.template topLeftCorner<2, 2>() = SO2<Scalar>::hat(a[2]);\n    Omega.col(2).template head<2>() = a.template head<2>();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of SE(2). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_se2 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of SE(2).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector2<Scalar> upsilon1 = a.template head<2>();\n    Vector2<Scalar> upsilon2 = b.template head<2>();\n    Scalar theta1 = a[2];\n    Scalar theta2 = b[2];\n\n    return Tangent(-theta1 * upsilon2[1] + theta2 * upsilon1[1],\n                   theta1 * upsilon2[0] - theta2 * upsilon1[0], Scalar(0));\n  }\n\n  /// Construct pure rotation.\n  ///\n  static SOPHUS_FUNC SE2 rot(Scalar const& x) {\n    return SE2(SO2<Scalar>(x), Sophus::Vector2<Scalar>::Zero());\n  }\n\n  /// Draw uniform sample from SE(2) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  ///\n  template <class UniformRandomBitGenerator>\n  static SE2 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    return SE2(SO2<Scalar>::sampleUniform(generator),\n               Vector2<Scalar>(uniform(generator), uniform(generator)));\n  }\n\n  /// Construct a translation only SE(2) instance.\n  ///\n  template <class T0, class T1>\n  static SOPHUS_FUNC SE2 trans(T0 const& x, T1 const& y) {\n    return SE2(SO2<Scalar>(), Vector2<Scalar>(x, y));\n  }\n\n  static SOPHUS_FUNC SE2 trans(Vector2<Scalar> const& xy) {\n    return SE2(SO2<Scalar>(), xy);\n  }\n\n  /// Construct x-axis translation.\n  ///\n  static SOPHUS_FUNC SE2 transX(Scalar const& x) {\n    return SE2::trans(x, Scalar(0));\n  }\n\n  /// Construct 
y-axis translation.\n  ///\n  static SOPHUS_FUNC SE2 transY(Scalar const& y) {\n    return SE2::trans(Scalar(0), y);\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding 3-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -d  a |\n  ///                |  d  0  b |\n  ///                |  0  0  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    SOPHUS_ENSURE(\n        Omega.row(2).template lpNorm<1>() < Constants<Scalar>::epsilon(),\n        \"Omega: \\n%\", Omega);\n    Tangent upsilon_omega;\n    upsilon_omega.template head<2>() = Omega.col(2).template head<2>();\n    upsilon_omega[2] = SO2<Scalar>::vee(Omega.template topLeftCorner<2, 2>());\n    return upsilon_omega;\n  }\n\n protected:\n  SO2Member so2_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSE2<Scalar, Options>::SE2() : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<SE2>::value,\n                \"Assume standard layout for the use of offsetof check below.\");\n  static_assert(\n      offsetof(SE2, so2_) + sizeof(Scalar) * SO2<Scalar>::num_parameters ==\n          offsetof(SE2, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``SE2``; derived from SE2Base.\n///\n/// Allows us to wrap SE2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE2<Scalar_>, Options>\n    : public 
Sophus::SE2Base<Map<Sophus::SE2<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SE2Base<Map<Sophus::SE2<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  Map(Scalar* coeffs)\n      : so2_(coeffs),\n        translation_(coeffs + Sophus::SO2<Scalar>::num_parameters) {}\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO2<Scalar>, Options>& so2() { return so2_; }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO2<Scalar>, Options> const& so2() const {\n    return so2_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO2<Scalar>, Options> so2_;\n  Map<Sophus::Vector2<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``SE2 const``; derived from SE2Base.\n///\n/// Allows us to wrap SE2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE2<Scalar_> const, Options>\n    : public Sophus::SE2Base<Map<Sophus::SE2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SE2Base<Map<Sophus::SE2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using 
Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs)\n      : so2_(coeffs),\n        translation_(coeffs + Sophus::SO2<Scalar>::num_parameters) {}\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO2<Scalar> const, Options> const& so2() const {\n    return so2_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO2<Scalar> const, Options> const so2_;\n  Map<Sophus::Vector2<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/se3.hpp",
    "content": "/// @file\n/// Special Euclidean group SE(3) - rotation and translation in 3d.\n\n#ifndef SOPHUS_SE3_HPP\n#define SOPHUS_SE3_HPP\n\n#include \"so3.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SE3;\nusing SE3d = SE3<double>;\nusing SE3f = SE3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::SE3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector3<Scalar, Options>;\n  using SO3Type = Sophus::SO3<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE3<Scalar_>, Options>>\n    : traits<Sophus::SE3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar>, Options>;\n  using SO3Type = Map<Sophus::SO3<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE3<Scalar_> const, Options>>\n    : traits<Sophus::SE3<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar> const, Options>;\n  using SO3Type = Map<Sophus::SO3<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SE3 base type - implements SE3 class but is storage agnostic.\n///\n/// SE(3) is the group of rotations  and translation in 3d. It is the\n/// semi-direct product of SO(3) and the 3d Euclidean vector space.  
The class\n/// is represented using a composition of SO3  for rotation and a one 3-vector\n/// for translation.\n///\n/// SE(3) is neither compact, nor a commutative group.\n///\n/// See SO3 for more details of the rotation representation in 3d.\n///\ntemplate <class Derived>\nclass SE3Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using SO3Type = typename Eigen::internal::traits<Derived>::SO3Type;\n  using QuaternionType = typename SO3Type::QuaternionType;\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (three for translation, three for rotation).\n  static int constexpr DoF = 6;\n  /// Number of internal parameters used (4-tuple for quaternion, three for\n  /// translation).\n  static int constexpr num_parameters = 7;\n  /// Group transformations are 4x4 matrices.\n  static int constexpr N = 4;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SE3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SE3Product = SE3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Sophus::Matrix3<Scalar> const R = so3().matrix();\n    Adjoint res;\n    res.block(0, 0, 3, 3) = R;\n    res.block(3, 3, 3, 3) = R;\n    res.block(0, 3, 3, 3) = SO3<Scalar>::hat(translation()) * R;\n    res.block(3, 0, 3, 3) = Matrix3<Scalar>::Zero(3, 3);\n    return res;\n  }\n\n  /// Extract rotation angle about canonical X-axis\n  ///\n  Scalar angleX() const { return so3().angleX(); }\n\n  /// Extract rotation angle about canonical Y-axis\n  ///\n  Scalar angleY() const { return so3().angleY(); }\n\n  /// Extract rotation angle about canonical Z-axis\n  ///\n  Scalar angleZ() const { return so3().angleZ(); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SE3<NewScalarType> cast() const {\n    return SE3<NewScalarType>(so3().template cast<NewScalarType>(),\n                              translation().template cast<NewScalarType>());\n  }\n\n  /// Returns derivative of  this * exp(x)  wrt x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    
Matrix<Scalar, num_parameters, DoF> J;\n    Eigen::Quaternion<Scalar> const q = unit_quaternion();\n    Scalar const c0 = Scalar(0.5) * q.w();\n    Scalar const c1 = Scalar(0.5) * q.z();\n    Scalar const c2 = -c1;\n    Scalar const c3 = Scalar(0.5) * q.y();\n    Scalar const c4 = Scalar(0.5) * q.x();\n    Scalar const c5 = -c4;\n    Scalar const c6 = -c3;\n    Scalar const c7 = q.w() * q.w();\n    Scalar const c8 = q.x() * q.x();\n    Scalar const c9 = q.y() * q.y();\n    Scalar const c10 = -c9;\n    Scalar const c11 = q.z() * q.z();\n    Scalar const c12 = -c11;\n    Scalar const c13 = Scalar(2) * q.w();\n    Scalar const c14 = c13 * q.z();\n    Scalar const c15 = Scalar(2) * q.x();\n    Scalar const c16 = c15 * q.y();\n    Scalar const c17 = c13 * q.y();\n    Scalar const c18 = c15 * q.z();\n    Scalar const c19 = c7 - c8;\n    Scalar const c20 = c13 * q.x();\n    Scalar const c21 = Scalar(2) * q.y() * q.z();\n    J(0, 0) = 0;\n    J(0, 1) = 0;\n    J(0, 2) = 0;\n    J(0, 3) = c0;\n    J(0, 4) = c2;\n    J(0, 5) = c3;\n    J(1, 0) = 0;\n    J(1, 1) = 0;\n    J(1, 2) = 0;\n    J(1, 3) = c1;\n    J(1, 4) = c0;\n    J(1, 5) = c5;\n    J(2, 0) = 0;\n    J(2, 1) = 0;\n    J(2, 2) = 0;\n    J(2, 3) = c6;\n    J(2, 4) = c4;\n    J(2, 5) = c0;\n    J(3, 0) = 0;\n    J(3, 1) = 0;\n    J(3, 2) = 0;\n    J(3, 3) = c5;\n    J(3, 4) = c6;\n    J(3, 5) = c2;\n    J(4, 0) = c10 + c12 + c7 + c8;\n    J(4, 1) = -c14 + c16;\n    J(4, 2) = c17 + c18;\n    J(4, 3) = 0;\n    J(4, 4) = 0;\n    J(4, 5) = 0;\n    J(5, 0) = c14 + c16;\n    J(5, 1) = c12 + c19 + c9;\n    J(5, 2) = -c20 + c21;\n    J(5, 3) = 0;\n    J(5, 4) = 0;\n    J(5, 5) = 0;\n    J(6, 0) = -c17 + c18;\n    J(6, 1) = c20 + c21;\n    J(6, 2) = c10 + c11 + c19;\n    J(6, 3) = 0;\n    J(6, 4) = 0;\n    J(6, 5) = 0;\n    return J;\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SE3<Scalar> inverse() const {\n    SO3<Scalar> invR = so3().inverse();\n    return SE3<Scalar>(invR, invR * (translation() * 
Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent space (twist).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SE(3).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    // For the derivation of the logarithm of SE(3), see\n    // J. Gallier, D. Xu, \"Computing exponentials of skew symmetric matrices\n    // and logarithms of orthogonal matrices\", IJRA 2002.\n    // https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf\n    // (Sec. 6., pp. 8)\n    using std::abs;\n    using std::cos;\n    using std::sin;\n    Tangent upsilon_omega;\n    auto omega_and_theta = so3().logAndTheta();\n    Scalar theta = omega_and_theta.theta;\n    upsilon_omega.template tail<3>() = omega_and_theta.tangent;\n    Matrix3<Scalar> const Omega =\n        SO3<Scalar>::hat(upsilon_omega.template tail<3>());\n\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Matrix3<Scalar> const V_inv = Matrix3<Scalar>::Identity() -\n                                    Scalar(0.5) * Omega +\n                                    Scalar(1. / 12.) 
* (Omega * Omega);\n\n      upsilon_omega.template head<3>() = V_inv * translation();\n    } else {\n      Scalar const half_theta = Scalar(0.5) * theta;\n\n      Matrix3<Scalar> const V_inv =\n          (Matrix3<Scalar>::Identity() - Scalar(0.5) * Omega +\n           (Scalar(1) -\n            theta * cos(half_theta) / (Scalar(2) * sin(half_theta))) /\n               (theta * theta) * (Omega * Omega));\n      upsilon_omega.template head<3>() = V_inv * translation();\n    }\n    return upsilon_omega;\n  }\n\n  /// It re-normalizes the SO3 element.\n  ///\n  /// Note: Because of the class invariant of SO3, there is typically no need to\n  /// call this function directly.\n  ///\n  SOPHUS_FUNC void normalize() { so3().normalize(); }\n\n  /// Returns 4x4 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | R t |\n  ///   | o 1 |\n  ///\n  /// where ``R`` is a 3x3 rotation matrix, ``t`` a translation 3-vector and\n  /// ``o`` a 3-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogenious_matrix;\n    homogenious_matrix.template topLeftCorner<3, 4>() = matrix3x4();\n    homogenious_matrix.row(3) =\n        Matrix<Scalar, 1, 4>(Scalar(0), Scalar(0), Scalar(0), Scalar(1));\n    return homogenious_matrix;\n  }\n\n  /// Returns the significant first three rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 3, 4> matrix3x4() const {\n    Matrix<Scalar, 3, 4> matrix;\n    matrix.template topLeftCorner<3, 3>() = rotationMatrix();\n    matrix.col(3) = translation();\n    return matrix;\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC SE3Base& operator=(SE3Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE3Base<Derived>& operator=(SE3Base<OtherDerived> const& other) {\n    so3() = other.so3();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group 
multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SE3Product<OtherDerived> operator*(\n      SE3Base<OtherDerived> const& other) const {\n    return SE3Product<OtherDerived>(\n        so3() * other.so3(), translation() + so3() * other.translation());\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates and translates a three dimensional point ``p`` by\n  /// the SE(3) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_R_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return so3() * p + translation();\n  }\n\n  /// Group action on homogeneous 3-points. See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        so3() * p.template head<3>() + p(3) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), tp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the SE(3) element:\n  ///\n  /// Origin is transformed using SE(3) action\n  /// Direction is transformed using rotation part\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), so3() * l.direction());\n  }\n\n  /// In-place group multiplication. 
This method is only valid if the return\n  /// type of the multiplication is compatible with this SE3's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SE3Base<Derived>& operator*=(SE3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Matrix3<Scalar> rotationMatrix() const { return so3().matrix(); }\n\n  /// Mutator of SO3 group.\n  ///\n  SOPHUS_FUNC SO3Type& so3() { return static_cast<Derived*>(this)->so3(); }\n\n  /// Accessor of SO3 group.\n  ///\n  SOPHUS_FUNC SO3Type const& so3() const {\n    return static_cast<const Derived*>(this)->so3();\n  }\n\n  /// Takes in quaternion, and normalizes it.\n  ///\n  /// Precondition: The quaternion must not be close to zero.\n  ///\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quat) {\n    so3().setQuaternion(quat);\n  }\n\n  /// Sets ``so3`` using ``rotation_matrix``.\n  ///\n  /// Precondition: ``R`` must be orthogonal and ``det(R)=1``.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix3<Scalar> const& R) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n %\", R);\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: %\",\n                  R.determinant());\n    so3().setQuaternion(Eigen::Quaternion<Scalar>(R));\n  }\n\n  /// Returns internal parameters of SE(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real, t[0], t[1], t[2]),\n  /// with q being the unit quaternion, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << so3().params(), translation();\n    return p;\n  }\n\n  /// Mutator of translation vector.\n  ///\n  SOPHUS_FUNC TranslationType& translation() {\n    return 
static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& unit_quaternion() const {\n    return this->so3().unit_quaternion();\n  }\n};\n\n/// SE3 using default storage; derived from SE3Base.\ntemplate <class Scalar_, int Options>\nclass SE3 : public SE3Base<SE3<Scalar_, Options>> {\n  using Base = SE3Base<SE3<Scalar_, Options>>;\n\n public:\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using SO3Member = SO3<Scalar, Options>;\n  using TranslationMember = Vector3<Scalar, Options>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes rigid body motion to the identity.\n  ///\n  SOPHUS_FUNC SE3();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SE3(SE3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE3(SE3Base<OtherDerived> const& other)\n      : so3_(other.so3()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from SO3 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC SE3(SO3Base<OtherDerived> const& so3,\n                  Eigen::MatrixBase<D> const& translation)\n      : so3_(so3), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must 
be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from rotation matrix and translation vector\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  ///               of 1.\n  ///\n  SOPHUS_FUNC\n  SE3(Matrix3<Scalar> const& rotation_matrix, Point const& translation)\n      : so3_(rotation_matrix), translation_(translation) {}\n\n  /// Constructor from quaternion and translation vector.\n  ///\n  /// Precondition: ``quaternion`` must not be close to zero.\n  ///\n  SOPHUS_FUNC SE3(Eigen::Quaternion<Scalar> const& quaternion,\n                  Point const& translation)\n      : so3_(quaternion), translation_(translation) {}\n\n  /// Constructor from 4x4 matrix\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  ///               of 1. The last row must be ``(0, 0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit SE3(Matrix4<Scalar> const& T)\n      : so3_(T.template topLeftCorner<3, 3>()),\n        translation_(T.template block<3, 1>(0, 3)) {\n    SOPHUS_ENSURE((T.row(3) - Matrix<Scalar, 1, 4>(Scalar(0), Scalar(0),\n                                                   Scalar(0), Scalar(1)))\n                          .squaredNorm() < Constants<Scalar>::epsilon(),\n                  \"Last row is not (0,0,0,1), but (%).\", T.row(3));\n  }\n\n  /// This provides unsafe read/write access to internal data. SO(3) is\n  /// represented by an Eigen::Quaternion (four parameters). 
When using direct\n  /// write access, the user needs to take care of that the quaternion stays\n  /// normalized.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // so3_ and translation_ are laid out sequentially with no padding\n    return so3_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    // so3_ and translation_ are laid out sequentially with no padding\n    return so3_.data();\n  }\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC SO3Member& so3() { return so3_; }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC SO3Member const& so3() const { return so3_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& upsilon_omega) {\n    using std::cos;\n    using std::pow;\n    using std::sin;\n    using std::sqrt;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Sophus::Vector<Scalar, 3> upsilon = upsilon_omega.template head<3>();\n    Sophus::Vector<Scalar, 3> omega = upsilon_omega.template tail<3>();\n\n    Scalar const c0 = omega[0] * omega[0];\n    Scalar const c1 = omega[1] * omega[1];\n    Scalar const c2 = omega[2] * omega[2];\n    Scalar const c3 = c0 + c1 + c2;\n    Scalar const o(0);\n    Scalar const h(0.5);\n    Scalar const i(1);\n\n    if (c3 < Constants<Scalar>::epsilon()) {\n      Scalar const ux = Scalar(0.5) * upsilon[0];\n      Scalar const uy = Scalar(0.5) * upsilon[1];\n      Scalar const uz = Scalar(0.5) * upsilon[2];\n\n      /// clang-format off\n      J << o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o,\n          o, i, o, o, o, uz, -uy, o, i, o, -uz, o, ux, o, o, i, uy, -ux, o;\n      /// clang-format on\n    
  return J;\n    }\n\n    Scalar const c4 = sqrt(c3);\n    Scalar const c5 = Scalar(1.0) / c4;\n    Scalar const c6 = Scalar(0.5) * c4;\n    Scalar const c7 = sin(c6);\n    Scalar const c8 = c5 * c7;\n    Scalar const c9 = pow(c3, -3.0L / 2.0L);\n    Scalar const c10 = c7 * c9;\n    Scalar const c11 = Scalar(1.0) / c3;\n    Scalar const c12 = cos(c6);\n    Scalar const c13 = Scalar(0.5) * c11 * c12;\n    Scalar const c14 = c7 * c9 * omega[0];\n    Scalar const c15 = Scalar(0.5) * c11 * c12 * omega[0];\n    Scalar const c16 = -c14 * omega[1] + c15 * omega[1];\n    Scalar const c17 = -c14 * omega[2] + c15 * omega[2];\n    Scalar const c18 = omega[1] * omega[2];\n    Scalar const c19 = -c10 * c18 + c13 * c18;\n    Scalar const c20 = c5 * omega[0];\n    Scalar const c21 = Scalar(0.5) * c7;\n    Scalar const c22 = c5 * omega[1];\n    Scalar const c23 = c5 * omega[2];\n    Scalar const c24 = -c1;\n    Scalar const c25 = -c2;\n    Scalar const c26 = c24 + c25;\n    Scalar const c27 = sin(c4);\n    Scalar const c28 = -c27 + c4;\n    Scalar const c29 = c28 * c9;\n    Scalar const c30 = cos(c4);\n    Scalar const c31 = -c30 + Scalar(1);\n    Scalar const c32 = c11 * c31;\n    Scalar const c33 = c32 * omega[2];\n    Scalar const c34 = c29 * omega[0];\n    Scalar const c35 = c34 * omega[1];\n    Scalar const c36 = c32 * omega[1];\n    Scalar const c37 = c34 * omega[2];\n    Scalar const c38 = pow(c3, -5.0L / 2.0L);\n    Scalar const c39 = Scalar(3) * c28 * c38 * omega[0];\n    Scalar const c40 = c26 * c9;\n    Scalar const c41 = -c20 * c30 + c20;\n    Scalar const c42 = c27 * c9 * omega[0];\n    Scalar const c43 = c42 * omega[1];\n    Scalar const c44 = pow(c3, -2);\n    Scalar const c45 = Scalar(2) * c31 * c44 * omega[0];\n    Scalar const c46 = c45 * omega[1];\n    Scalar const c47 = c29 * omega[2];\n    Scalar const c48 = c43 - c46 + c47;\n    Scalar const c49 = Scalar(3) * c0 * c28 * c38;\n    Scalar const c50 = c9 * omega[0] * omega[2];\n    Scalar const c51 = c41 * c50 - 
c49 * omega[2];\n    Scalar const c52 = c9 * omega[0] * omega[1];\n    Scalar const c53 = c41 * c52 - c49 * omega[1];\n    Scalar const c54 = c42 * omega[2];\n    Scalar const c55 = c45 * omega[2];\n    Scalar const c56 = c29 * omega[1];\n    Scalar const c57 = -c54 + c55 + c56;\n    Scalar const c58 = Scalar(-2) * c56;\n    Scalar const c59 = Scalar(3) * c28 * c38 * omega[1];\n    Scalar const c60 = -c22 * c30 + c22;\n    Scalar const c61 = -c18 * c39;\n    Scalar const c62 = c32 + c61;\n    Scalar const c63 = c27 * c9;\n    Scalar const c64 = c1 * c63;\n    Scalar const c65 = Scalar(2) * c31 * c44;\n    Scalar const c66 = c1 * c65;\n    Scalar const c67 = c50 * c60;\n    Scalar const c68 = -c1 * c39 + c52 * c60;\n    Scalar const c69 = c18 * c63;\n    Scalar const c70 = c18 * c65;\n    Scalar const c71 = c34 - c69 + c70;\n    Scalar const c72 = Scalar(-2) * c47;\n    Scalar const c73 = Scalar(3) * c28 * c38 * omega[2];\n    Scalar const c74 = -c23 * c30 + c23;\n    Scalar const c75 = -c32 + c61;\n    Scalar const c76 = c2 * c63;\n    Scalar const c77 = c2 * c65;\n    Scalar const c78 = c52 * c74;\n    Scalar const c79 = c34 + c69 - c70;\n    Scalar const c80 = -c2 * c39 + c50 * c74;\n    Scalar const c81 = -c0;\n    Scalar const c82 = c25 + c81;\n    Scalar const c83 = c32 * omega[0];\n    Scalar const c84 = c18 * c29;\n    Scalar const c85 = Scalar(-2) * c34;\n    Scalar const c86 = c82 * c9;\n    Scalar const c87 = c0 * c63;\n    Scalar const c88 = c0 * c65;\n    Scalar const c89 = c9 * omega[1] * omega[2];\n    Scalar const c90 = c41 * c89;\n    Scalar const c91 = c54 - c55 + c56;\n    Scalar const c92 = -c1 * c73 + c60 * c89;\n    Scalar const c93 = -c43 + c46 + c47;\n    Scalar const c94 = -c2 * c59 + c74 * c89;\n    Scalar const c95 = c24 + c81;\n    Scalar const c96 = c9 * c95;\n    J(0, 0) = o;\n    J(0, 1) = o;\n    J(0, 2) = o;\n    J(0, 3) = -c0 * c10 + c0 * c13 + c8;\n    J(0, 4) = c16;\n    J(0, 5) = c17;\n    J(1, 0) = o;\n    J(1, 1) = o;\n    J(1, 
2) = o;\n    J(1, 3) = c16;\n    J(1, 4) = -c1 * c10 + c1 * c13 + c8;\n    J(1, 5) = c19;\n    J(2, 0) = o;\n    J(2, 1) = o;\n    J(2, 2) = o;\n    J(2, 3) = c17;\n    J(2, 4) = c19;\n    J(2, 5) = -c10 * c2 + c13 * c2 + c8;\n    J(3, 0) = o;\n    J(3, 1) = o;\n    J(3, 2) = o;\n    J(3, 3) = -c20 * c21;\n    J(3, 4) = -c21 * c22;\n    J(3, 5) = -c21 * c23;\n    J(4, 0) = c26 * c29 + Scalar(1);\n    J(4, 1) = -c33 + c35;\n    J(4, 2) = c36 + c37;\n    J(4, 3) = upsilon[0] * (-c26 * c39 + c40 * c41) + upsilon[1] * (c53 + c57) +\n              upsilon[2] * (c48 + c51);\n    J(4, 4) = upsilon[0] * (-c26 * c59 + c40 * c60 + c58) +\n              upsilon[1] * (c68 + c71) + upsilon[2] * (c62 + c64 - c66 + c67);\n    J(4, 5) = upsilon[0] * (-c26 * c73 + c40 * c74 + c72) +\n              upsilon[1] * (c75 - c76 + c77 + c78) + upsilon[2] * (c79 + c80);\n    J(5, 0) = c33 + c35;\n    J(5, 1) = c29 * c82 + Scalar(1);\n    J(5, 2) = -c83 + c84;\n    J(5, 3) = upsilon[0] * (c53 + c91) +\n              upsilon[1] * (-c39 * c82 + c41 * c86 + c85) +\n              upsilon[2] * (c75 - c87 + c88 + c90);\n    J(5, 4) = upsilon[0] * (c68 + c79) + upsilon[1] * (-c59 * c82 + c60 * c86) +\n              upsilon[2] * (c92 + c93);\n    J(5, 5) = upsilon[0] * (c62 + c76 - c77 + c78) +\n              upsilon[1] * (c72 - c73 * c82 + c74 * c86) +\n              upsilon[2] * (c57 + c94);\n    J(6, 0) = -c36 + c37;\n    J(6, 1) = c83 + c84;\n    J(6, 2) = c29 * c95 + Scalar(1);\n    J(6, 3) = upsilon[0] * (c51 + c93) + upsilon[1] * (c62 + c87 - c88 + c90) +\n              upsilon[2] * (-c39 * c95 + c41 * c96 + c85);\n    J(6, 4) = upsilon[0] * (-c64 + c66 + c67 + c75) + upsilon[1] * (c48 + c92) +\n              upsilon[2] * (c58 - c59 * c95 + c60 * c96);\n    J(6, 5) = upsilon[0] * (c71 + c80) + upsilon[1] * (c91 + c94) +\n              upsilon[2] * (-c73 * c95 + c74 * c96);\n\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. 
x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Scalar const o(0);\n    Scalar const h(0.5);\n    Scalar const i(1);\n\n    // clang-format off\n    J << o, o, o, h, o, o, o,\n         o, o, o, h, o, o, o,\n         o, o, o, h, o, o, o,\n         o, o, o, i, o, o, o,\n         o, o, o, i, o, o, o,\n         o, o, o, i, o, o, o;\n    // clang-format on\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= twist ``a``) and\n  /// returns the corresponding element of the group SE(3).\n  ///\n  /// The first three components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of SE(3), while the last three components\n  /// of ``a`` represents the rotation vector ``omega``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of SE(3), see below.\n  ///\n  SOPHUS_FUNC static SE3<Scalar> exp(Tangent const& a) {\n    using std::cos;\n    using std::sin;\n    Vector3<Scalar> const omega = a.template tail<3>();\n\n    Scalar theta;\n    SO3<Scalar> const so3 = SO3<Scalar>::expAndTheta(omega, &theta);\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n    Matrix3<Scalar> const Omega_sq = Omega * Omega;\n    Matrix3<Scalar> V;\n\n    if (theta < Constants<Scalar>::epsilon()) {\n      V = so3.matrix();\n      /// Note: That is an accurate expansion!\n    } else {\n      Scalar theta_sq = theta * theta;\n      V = (Matrix3<Scalar>::Identity() +\n           (Scalar(1) - cos(theta)) / (theta_sq)*Omega +\n           (theta - sin(theta)) / (theta_sq * theta) * Omega_sq);\n    }\n    
return SE3<Scalar>(so3, V * a.template head<3>());\n  }\n\n  /// Returns closest SE3 given arbirary 4x4 matrix.\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC static enable_if_t<std::is_floating_point<S>::value, SE3>\n  fitToSE3(Matrix4<Scalar> const& T) {\n    return SE3(SO3<Scalar>::fitToSO3(T.template block<3, 3>(0, 0)),\n               T.template block<3, 1>(0, 3));\n  }\n\n  /// Returns the ith infinitesimal generators of SE(3).\n  ///\n  /// The infinitesimal generators of SE(3) are:\n  ///\n  /// ```\n  ///         |  0  0  0  1 |\n  ///   G_0 = |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_1 = |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_2 = |  0  0  0  0 |\n  ///         |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_3 = |  0  0 -1  0 |\n  ///         |  0  1  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  1  0 |\n  ///   G_4 = |  0  0  0  0 |\n  ///         | -1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0 -1  0  0 |\n  ///   G_5 = |  1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, 5].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 5, \"i should be in range [0,5].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 6-vector representation (= twist) and returns the\n  /// corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SE(3) is defined as\n  ///\n  ///   ``hat(.): R^6 -> R^{4x4},  hat(a) = sum_i a_i * G_i``  (for i=0,...,5)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of SE(3).\n  ///\n  /// The 
corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.setZero();\n    Omega.template topLeftCorner<3, 3>() =\n        SO3<Scalar>::hat(a.template tail<3>());\n    Omega.col(3).template head<3>() = a.template head<3>();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of SE(3). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_se3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of SE(3).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector3<Scalar> const upsilon1 = a.template head<3>();\n    Vector3<Scalar> const upsilon2 = b.template head<3>();\n    Vector3<Scalar> const omega1 = a.template tail<3>();\n    Vector3<Scalar> const omega2 = b.template tail<3>();\n\n    Tangent res;\n    res.template head<3>() = omega1.cross(upsilon2) + upsilon1.cross(omega2);\n    res.template tail<3>() = omega1.cross(omega2);\n\n    return res;\n  }\n\n  /// Construct x-axis rotation.\n  ///\n  static SOPHUS_FUNC SE3 rotX(Scalar const& x) {\n    return SE3(SO3<Scalar>::rotX(x), Sophus::Vector3<Scalar>::Zero());\n  }\n\n  /// Construct y-axis rotation.\n  ///\n  static SOPHUS_FUNC SE3 rotY(Scalar const& y) {\n    return SE3(SO3<Scalar>::rotY(y), Sophus::Vector3<Scalar>::Zero());\n  }\n\n  /// Construct z-axis rotation.\n  ///\n  static SOPHUS_FUNC SE3 rotZ(Scalar const& z) {\n    return SE3(SO3<Scalar>::rotZ(z), Sophus::Vector3<Scalar>::Zero());\n  }\n\n  /// Draw uniform sample from SE(3) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  ///\n  template <class UniformRandomBitGenerator>\n  static SE3 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> 
uniform(Scalar(-1), Scalar(1));\n    return SE3(SO3<Scalar>::sampleUniform(generator),\n               Vector3<Scalar>(uniform(generator), uniform(generator),\n                               uniform(generator)));\n  }\n\n  /// Construct a translation only SE3 instance.\n  ///\n  template <class T0, class T1, class T2>\n  static SOPHUS_FUNC SE3 trans(T0 const& x, T1 const& y, T2 const& z) {\n    return SE3(SO3<Scalar>(), Vector3<Scalar>(x, y, z));\n  }\n\n  static SOPHUS_FUNC SE3 trans(Vector3<Scalar> const& xyz) {\n    return SE3(SO3<Scalar>(), xyz);\n  }\n\n  /// Construct x-axis translation.\n  ///\n  static SOPHUS_FUNC SE3 transX(Scalar const& x) {\n    return SE3::trans(x, Scalar(0), Scalar(0));\n  }\n\n  /// Construct y-axis translation.\n  ///\n  static SOPHUS_FUNC SE3 transY(Scalar const& y) {\n    return SE3::trans(Scalar(0), y, Scalar(0));\n  }\n\n  /// Construct z-axis translation.\n  ///\n  static SOPHUS_FUNC SE3 transZ(Scalar const& z) {\n    return SE3::trans(Scalar(0), Scalar(0), z);\n  }\n\n  /// vee-operator\n  ///\n  /// It takes 4x4-matrix representation ``Omega`` and maps it to the\n  /// corresponding 6-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -f  e  a |\n  ///                |  f  0 -d  b |\n  ///                | -e  d  0  c\n  ///                |  0  0  0  0 | .\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    Tangent upsilon_omega;\n    upsilon_omega.template head<3>() = Omega.col(3).template head<3>();\n    upsilon_omega.template tail<3>() =\n        SO3<Scalar>::vee(Omega.template topLeftCorner<3, 3>());\n    return upsilon_omega;\n  }\n\n protected:\n  SO3Member so3_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSE3<Scalar, Options>::SE3() : translation_(TranslationMember::Zero()) {\n  
static_assert(std::is_standard_layout<SE3>::value,\n                \"Assume standard layout for the use of offsetof check below.\");\n  static_assert(\n      offsetof(SE3, so3_) + sizeof(Scalar) * SO3<Scalar>::num_parameters ==\n          offsetof(SE3, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``SE3``; derived from SE3Base.\n///\n/// Allows us to wrap SE3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE3<Scalar_>, Options>\n    : public Sophus::SE3Base<Map<Sophus::SE3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SE3Base<Map<Sophus::SE3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar* coeffs)\n      : so3_(coeffs),\n        translation_(coeffs + Sophus::SO3<Scalar>::num_parameters) {}\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO3<Scalar>, Options>& so3() { return so3_; }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO3<Scalar>, Options> const& so3() const {\n    return so3_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar, Options>>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar, Options>> const& translation() const 
{\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO3<Scalar>, Options> so3_;\n  Map<Sophus::Vector3<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``SE3 const``; derived from SE3Base.\n///\n/// Allows us to wrap SE3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE3<Scalar_> const, Options>\n    : public Sophus::SE3Base<Map<Sophus::SE3<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SE3Base<Map<Sophus::SE3<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs)\n      : so3_(coeffs),\n        translation_(coeffs + Sophus::SO3<Scalar>::num_parameters) {}\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO3<Scalar> const, Options> const& so3() const {\n    return so3_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO3<Scalar> const, Options> const so3_;\n  Map<Sophus::Vector3<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/sim2.hpp",
    "content": "/// @file\n/// Similarity group Sim(2) - scaling, rotation and translation in 2d.\n\n#ifndef SOPHUS_SIM2_HPP\n#define SOPHUS_SIM2_HPP\n\n#include \"rxso2.hpp\"\n#include \"sim_details.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass Sim2;\nusing Sim2d = Sim2<double>;\nusing Sim2f = Sim2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::Sim2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector2<Scalar, Options>;\n  using RxSO2Type = Sophus::RxSO2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim2<Scalar_>, Options>>\n    : traits<Sophus::Sim2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar>, Options>;\n  using RxSO2Type = Map<Sophus::RxSO2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim2<Scalar_> const, Options>>\n    : traits<Sophus::Sim2<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar> const, Options>;\n  using RxSO2Type = Map<Sophus::RxSO2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// Sim2 base type - implements Sim2 class but is storage agnostic.\n///\n/// Sim(2) is the group of rotations  and translation and scaling in 2d. It is\n/// the semi-direct product of R+xSO(2) and the 2d Euclidean vector space. 
The\n/// class is represented using a composition of RxSO2  for scaling plus\n/// rotation and a 2-vector for translation.\n///\n/// Sim(2) is neither compact, nor a commutative group.\n///\n/// See RxSO2 for more details of the scaling + rotation representation in 2d.\n///\ntemplate <class Derived>\nclass Sim2Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using RxSO2Type = typename Eigen::internal::traits<Derived>::RxSO2Type;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (two for translation, one for rotation and one for scaling).\n  static int constexpr DoF = 4;\n  /// Number of internal parameters used (2-tuple for complex number, two for\n  /// translation).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SIM2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using Sim2Product = Sim2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Adjoint res;\n    res.setZero();\n    res.template block<2, 2>(0, 0) = rxso2().matrix();\n    res(0, 2) = translation()[1];\n    res(1, 2) = -translation()[0];\n    res.template block<2, 1>(0, 3) = -translation();\n\n    res(2, 2) = Scalar(1);\n\n    res(3, 3) = Scalar(1);\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC Sim2<NewScalarType> cast() const {\n    return Sim2<NewScalarType>(rxso2().template cast<NewScalarType>(),\n                               translation().template cast<NewScalarType>());\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC Sim2<Scalar> inverse() const {\n    RxSO2<Scalar> invR = rxso2().inverse();\n    return Sim2<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent space (twist).\n  ///\n  /// To be specific, this function computes 
``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of Sim(2).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    /// The derivation of the closed-form Sim(2) logarithm for is done\n    /// analogously to the closed-form solution of the SE(2) logarithm, see\n    /// J. Gallier, D. Xu, \"Computing exponentials of skew symmetric matrices\n    /// and logarithms of orthogonal matrices\", IJRA 2002.\n    /// https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf\n    /// (Sec. 6., pp. 8)\n    Tangent res;\n    Vector2<Scalar> const theta_sigma = rxso2().log();\n    Scalar const theta = theta_sigma[0];\n    Scalar const sigma = theta_sigma[1];\n    Matrix2<Scalar> const Omega = SO2<Scalar>::hat(theta);\n    Matrix2<Scalar> const W_inv =\n        details::calcWInv<Scalar, 2>(Omega, theta, sigma, scale());\n\n    res.segment(0, 2) = W_inv * translation();\n    res[2] = theta;\n    res[3] = sigma;\n    return res;\n  }\n\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | s*R t |\n  ///   |  o  1 |\n  ///\n  /// where ``R`` is a 2x2 rotation matrix, ``s`` a scale factor, ``t`` a\n  /// translation 2-vector and ``o`` a 2-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogenious_matrix;\n    homogenious_matrix.template topLeftCorner<2, 3>() = matrix2x3();\n    homogenious_matrix.row(2) =\n        Matrix<Scalar, 3, 1>(Scalar(0), Scalar(0), Scalar(1));\n    return homogenious_matrix;\n  }\n\n  /// Returns the significant first two rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 2, 3> matrix2x3() const {\n    Matrix<Scalar, 2, 3> matrix;\n    matrix.template topLeftCorner<2, 2>() = rxso2().matrix();\n    matrix.col(2) = translation();\n    return matrix;\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC Sim2Base& operator=(Sim2Base const& other) = default;\n\n  
/// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim2Base<Derived>& operator=(\n      Sim2Base<OtherDerived> const& other) {\n    rxso2() = other.rxso2();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation plus scaling concatenation.\n  ///\n  /// Note: That scaling is calculated with saturation. See RxSO2 for\n  /// details.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC Sim2Product<OtherDerived> operator*(\n      Sim2Base<OtherDerived> const& other) const {\n    return Sim2Product<OtherDerived>(\n        rxso2() * other.rxso2(), translation() + rxso2() * other.translation());\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates, scales and translates a two dimensional point\n  /// ``p`` by the Sim(2) element ``(bar_sR_foo, t_bar)`` (= similarity\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_sR_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return rxso2() * p + translation();\n  }\n\n  /// Group action on homogeneous 2-points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        rxso2() * p.template head<2>() + p(2) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), p(2));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates, scales and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the Sim(2) element:\n  ///\n  /// Origin ``o`` is rotated, scaled and translated\n  /// Direction ``d`` is rotated\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    Line rotatedLine = rxso2() * l;\n    return Line(rotatedLine.origin() + translation(), rotatedLine.direction());\n  }\n\n  /// Returns internal parameters of Sim(2).\n  ///\n  /// It returns (c[0], c[1], t[0], t[1]),\n  /// with c being the complex number, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << rxso2().params(), translation();\n    return p;\n  }\n\n  /// In-place group multiplication. 
This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC Sim2Base<Derived>& operator*=(\n      Sim2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Setter of non-zero complex number.\n  ///\n  /// Precondition: ``z`` must not be close to zero.\n  ///\n  SOPHUS_FUNC void setComplex(Vector2<Scalar> const& z) {\n    rxso2().setComplex(z);\n  }\n\n  /// Accessor of complex number.\n  ///\n  SOPHUS_FUNC\n  typename Eigen::internal::traits<Derived>::RxSO2Type::ComplexType const&\n  complex() const {\n    return rxso2().complex();\n  }\n\n  /// Returns Rotation matrix\n  ///\n  SOPHUS_FUNC Matrix2<Scalar> rotationMatrix() const {\n    return rxso2().rotationMatrix();\n  }\n\n  /// Mutator of SO2 group.\n  ///\n  SOPHUS_FUNC RxSO2Type& rxso2() {\n    return static_cast<Derived*>(this)->rxso2();\n  }\n\n  /// Accessor of SO2 group.\n  ///\n  SOPHUS_FUNC RxSO2Type const& rxso2() const {\n    return static_cast<Derived const*>(this)->rxso2();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC Scalar scale() const { return rxso2().scale(); }\n\n  /// Setter of complex number using rotation matrix ``R``, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix2<Scalar>& R) {\n    rxso2().setRotationMatrix(R);\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  /// Note: This function as a significant computational cost, since it has to\n  /// call the square root twice.\n  ///\n  SOPHUS_FUNC void setScale(Scalar const& scale) { rxso2().setScale(scale); }\n\n  /// Setter of complexnumber using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 2x2 matrix must be \"scaled orthogonal\"\n  ///               and have a positive 
determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Matrix2<Scalar> const& sR) {\n    rxso2().setScaledRotationMatrix(sR);\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n};\n\n/// Sim2 using default storage; derived from Sim2Base.\ntemplate <class Scalar_, int Options>\nclass Sim2 : public Sim2Base<Sim2<Scalar_, Options>> {\n public:\n  using Base = Sim2Base<Sim2<Scalar_, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using RxSo2Member = RxSO2<Scalar, Options>;\n  using TranslationMember = Vector2<Scalar, Options>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes similarity transform to the identity.\n  ///\n  SOPHUS_FUNC Sim2();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC Sim2(Sim2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim2(Sim2Base<OtherDerived> const& other)\n      : rxso2_(other.rxso2()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from RxSO2 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC Sim2(RxSO2Base<OtherDerived> const& rxso2,\n                   Eigen::MatrixBase<D> const& translation)\n      : rxso2_(rxso2), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n         
         \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from complex number and translation vector.\n  ///\n  /// Precondition: complex number must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC Sim2(Vector2<Scalar> const& complex_number,\n                   Eigen::MatrixBase<D> const& translation)\n      : rxso2_(complex_number), translation_(translation) {\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from 3x3 matrix\n  ///\n  /// Precondition: Top-left 2x2 matrix needs to be \"scaled-orthogonal\" with\n  ///               positive determinant. The last row must be ``(0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit Sim2(Matrix<Scalar, 3, 3> const& T)\n      : rxso2_((T.template topLeftCorner<2, 2>()).eval()),\n        translation_(T.template block<2, 1>(0, 2)) {}\n\n  /// This provides unsafe read/write access to internal data. Sim(2) is\n  /// represented by a complex number (two parameters) and a 2-vector. 
When\n  /// using direct write access, the user needs to take care of that the\n  /// complex number is not set close to zero.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // rxso2_ and translation_ are laid out sequentially with no padding\n    return rxso2_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    // rxso2_ and translation_ are laid out sequentially with no padding\n    return rxso2_.data();\n  }\n\n  /// Accessor of RxSO2\n  ///\n  SOPHUS_FUNC RxSo2Member& rxso2() { return rxso2_; }\n\n  /// Mutator of RxSO2\n  ///\n  SOPHUS_FUNC RxSo2Member const& rxso2() const { return rxso2_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Derivative of Lie bracket with respect to first element.\n  ///\n  /// This function returns ``D_a [a, b]`` with ``D_a`` being the\n  /// differential operator with respect to ``a``, ``[a, b]`` being the lie\n  /// bracket of the Lie algebra sim(2).\n  /// See ``lieBracket()`` below.\n  ///\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space and returns the\n  /// corresponding element of the group Sim(2).\n  ///\n  /// The first two components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of Sim(2), the following two components\n  /// of ``a`` represents the rotation ``theta`` and the final component\n  /// represents the logarithm of the scaling factor ``sigma``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the 
hat-operator\n  /// of Sim(2), see below.\n  ///\n  SOPHUS_FUNC static Sim2<Scalar> exp(Tangent const& a) {\n    // For the derivation of the exponential map of Sim(N) see\n    // H. Strasdat, \"Local Accuracy and Global Consistency for Efficient Visual\n    // SLAM\", PhD thesis, 2012.\n    // http:///hauke.strasdat.net/files/strasdat_thesis_2012.pdf (A.5, pp. 186)\n    Vector2<Scalar> const upsilon = a.segment(0, 2);\n    Scalar const theta = a[2];\n    Scalar const sigma = a[3];\n    RxSO2<Scalar> rxso2 = RxSO2<Scalar>::exp(a.template tail<2>());\n    Matrix2<Scalar> const Omega = SO2<Scalar>::hat(theta);\n    Matrix2<Scalar> const W = details::calcW<Scalar, 2>(Omega, theta, sigma);\n    return Sim2<Scalar>(rxso2, W * upsilon);\n  }\n\n  /// Returns the ith infinitesimal generators of Sim(2).\n  ///\n  /// The infinitesimal generators of Sim(2) are:\n  ///\n  /// ```\n  ///         |  0  0  1 |\n  ///   G_0 = |  0  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0  0  0 |\n  ///   G_1 = |  0  0  1 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  1  0  0 |\n  ///   G_3 = |  0  1  0 |\n  ///         |  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, 3].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 || i <= 3, \"i should be in range [0,3].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 4-vector representation and returns the corresponding\n  /// matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of Sim(2) is defined as\n  ///\n  ///   ``hat(.): R^4 -> R^{3x3},  hat(a) = sum_i a_i * G_i``  (for i=0,...,6)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of Sim(2).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  
SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.template topLeftCorner<2, 2>() =\n        RxSO2<Scalar>::hat(a.template tail<2>());\n    Omega.col(2).template head<2>() = a.template head<2>();\n    Omega.row(2).setZero();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of Sim(2). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_sim2 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of Sim(2).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector2<Scalar> const upsilon1 = a.template head<2>();\n    Vector2<Scalar> const upsilon2 = b.template head<2>();\n    Scalar const theta1 = a[2];\n    Scalar const theta2 = b[2];\n    Scalar const sigma1 = a[3];\n    Scalar const sigma2 = b[3];\n\n    Tangent res;\n    res[0] = -theta1 * upsilon2[1] + theta2 * upsilon1[1] +\n             sigma1 * upsilon2[0] - sigma2 * upsilon1[0];\n    res[1] = theta1 * upsilon2[0] - theta2 * upsilon1[0] +\n             sigma1 * upsilon2[1] - sigma2 * upsilon1[1];\n    res[2] = Scalar(0);\n    res[3] = Scalar(0);\n\n    return res;\n  }\n\n  /// Draw uniform sample from Sim(2) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2].\n  ///\n  template <class UniformRandomBitGenerator>\n  static Sim2 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    return Sim2(RxSO2<Scalar>::sampleUniform(generator),\n                Vector2<Scalar>(uniform(generator), uniform(generator)));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding 
4-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  d -c  a |\n  ///                |  c  d  b |\n  ///                |  0  0  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    Tangent upsilon_omega_sigma;\n    upsilon_omega_sigma.template head<2>() = Omega.col(2).template head<2>();\n    upsilon_omega_sigma.template tail<2>() =\n        RxSO2<Scalar>::vee(Omega.template topLeftCorner<2, 2>());\n    return upsilon_omega_sigma;\n  }\n\n protected:\n  RxSo2Member rxso2_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSim2<Scalar, Options>::Sim2() : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<Sim2>::value,\n                \"Assume standard layout for the use of offsetof check below.\");\n  static_assert(\n      offsetof(Sim2, rxso2_) + sizeof(Scalar) * RxSO2<Scalar>::num_parameters ==\n          offsetof(Sim2, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``Sim2``; derived from Sim2Base.\n///\n/// Allows us to wrap Sim2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim2<Scalar_>, Options>\n    : public Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_>, Options>> {\n public:\n  using Base = Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = 
typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar* coeffs)\n      : rxso2_(coeffs),\n        translation_(coeffs + Sophus::RxSO2<Scalar>::num_parameters) {}\n\n  /// Mutator of RxSO2\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO2<Scalar>, Options>& rxso2() { return rxso2_; }\n\n  /// Accessor of RxSO2\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO2<Scalar>, Options> const& rxso2() const {\n    return rxso2_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO2<Scalar>, Options> rxso2_;\n  Map<Sophus::Vector2<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``Sim2 const``; derived from Sim2Base.\n///\n/// Allows us to wrap RxSO2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim2<Scalar_> const, Options>\n    : public Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs)\n      : rxso2_(coeffs),\n        translation_(coeffs + Sophus::RxSO2<Scalar>::num_parameters) {}\n\n  /// Accessor of RxSO2\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO2<Scalar> const, Options> const& rxso2() const {\n    return 
rxso2_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO2<Scalar> const, Options> const rxso2_;\n  Map<Sophus::Vector2<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/sim3.hpp",
    "content": "/// @file\n/// Similarity group Sim(3) - scaling, rotation and translation in 3d.\n\n#ifndef SOPHUS_SIM3_HPP\n#define SOPHUS_SIM3_HPP\n\n#include \"rxso3.hpp\"\n#include \"sim_details.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass Sim3;\nusing Sim3d = Sim3<double>;\nusing Sim3f = Sim3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::Sim3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector3<Scalar, Options>;\n  using RxSO3Type = Sophus::RxSO3<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim3<Scalar_>, Options>>\n    : traits<Sophus::Sim3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar>, Options>;\n  using RxSO3Type = Map<Sophus::RxSO3<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim3<Scalar_> const, Options>>\n    : traits<Sophus::Sim3<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar> const, Options>;\n  using RxSO3Type = Map<Sophus::RxSO3<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// Sim3 base type - implements Sim3 class but is storage agnostic.\n///\n/// Sim(3) is the group of rotations  and translation and scaling in 3d. It is\n/// the semi-direct product of R+xSO(3) and the 3d Euclidean vector space.  
The\n/// class is represented using a composition of RxSO3  for scaling plus\n/// rotation and a 3-vector for translation.\n///\n/// Sim(3) is neither compact, nor a commutative group.\n///\n/// See RxSO3 for more details of the scaling + rotation representation in 3d.\n///\ntemplate <class Derived>\nclass Sim3Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using RxSO3Type = typename Eigen::internal::traits<Derived>::RxSO3Type;\n  using QuaternionType = typename RxSO3Type::QuaternionType;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (three for translation, three for rotation and one for scaling).\n  static int constexpr DoF = 7;\n  /// Number of internal parameters used (4-tuple for quaternion, three for\n  /// translation).\n  static int constexpr num_parameters = 7;\n  /// Group transformations are 4x4 matrices.\n  static int constexpr N = 4;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with Sim3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using Sim3Product = Sim3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Matrix3<Scalar> const R = rxso3().rotationMatrix();\n    Adjoint res;\n    res.setZero();\n    res.template block<3, 3>(0, 0) = rxso3().matrix();\n    res.template block<3, 3>(0, 3) = SO3<Scalar>::hat(translation()) * R;\n    res.template block<3, 1>(0, 6) = -translation();\n\n    res.template block<3, 3>(3, 3) = R;\n\n    res(6, 6) = Scalar(1);\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC Sim3<NewScalarType> cast() const {\n    return Sim3<NewScalarType>(rxso3().template cast<NewScalarType>(),\n                               translation().template cast<NewScalarType>());\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC Sim3<Scalar> inverse() const {\n    RxSO3<Scalar> invR = rxso3().inverse();\n    return Sim3<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent 
space (twist).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of Sim(3).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    // The derivation of the closed-form Sim(3) logarithm for is done\n    // analogously to the closed-form solution of the SE(3) logarithm, see\n    // J. Gallier, D. Xu, \"Computing exponentials of skew symmetric matrices\n    // and logarithms of orthogonal matrices\", IJRA 2002.\n    // https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf\n    // (Sec. 6., pp. 8)\n    Tangent res;\n    auto omega_sigma_and_theta = rxso3().logAndTheta();\n    Vector3<Scalar> const omega =\n        omega_sigma_and_theta.tangent.template head<3>();\n    Scalar sigma = omega_sigma_and_theta.tangent[3];\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n    Matrix3<Scalar> const W_inv = details::calcWInv<Scalar, 3>(\n        Omega, omega_sigma_and_theta.theta, sigma, scale());\n\n    res.segment(0, 3) = W_inv * translation();\n    res.segment(3, 3) = omega;\n    res[6] = sigma;\n    return res;\n  }\n\n  /// Returns 4x4 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///     | s*R t |\n  ///     |  o  1 |\n  ///\n  /// where ``R`` is a 3x3 rotation matrix, ``s`` a scale factor, ``t`` a\n  /// translation 3-vector and ``o`` a 3-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogenious_matrix;\n    homogenious_matrix.template topLeftCorner<3, 4>() = matrix3x4();\n    homogenious_matrix.row(3) =\n        Matrix<Scalar, 4, 1>(Scalar(0), Scalar(0), Scalar(0), Scalar(1));\n    return homogenious_matrix;\n  }\n\n  /// Returns the significant first three rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 3, 4> matrix3x4() const {\n    Matrix<Scalar, 3, 4> matrix;\n    matrix.template topLeftCorner<3, 3>() = 
rxso3().matrix();\n    matrix.col(3) = translation();\n    return matrix;\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC Sim3Base& operator=(Sim3Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim3Base<Derived>& operator=(\n      Sim3Base<OtherDerived> const& other) {\n    rxso3() = other.rxso3();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation plus scaling concatenation.\n  ///\n  /// Note: That scaling is calculated with saturation. See RxSO3 for\n  /// details.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC Sim3Product<OtherDerived> operator*(\n      Sim3Base<OtherDerived> const& other) const {\n    return Sim3Product<OtherDerived>(\n        rxso3() * other.rxso3(), translation() + rxso3() * other.translation());\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates, scales and translates a three dimensional point\n  /// ``p`` by the Sim(3) element ``(bar_sR_foo, t_bar)`` (= similarity\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_sR_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return rxso3() * p + translation();\n  }\n\n  /// Group action on homogeneous 3-points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        rxso3() * p.template head<3>() + p(3) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), tp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates, scales and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the Sim(3) element:\n  ///\n  /// Origin ``o`` is rotated, scaled and translated\n  /// Direction ``d`` is rotated\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    Line rotatedLine = rxso3() * l;\n    return Line(rotatedLine.origin() + translation(), rotatedLine.direction());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO3's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC Sim3Base<Derived>& operator*=(\n      Sim3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns internal parameters of Sim(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real, t[0], t[1], t[2]),\n  /// with q being the quaternion, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << rxso3().params(), translation();\n    return p;\n  }\n\n  /// Setter of non-zero quaternion.\n  ///\n  /// Precondition: ``quat`` must not be close to zero.\n  ///\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> 
const& quat) {\n    rxso3().setQuaternion(quat);\n  }\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& quaternion() const {\n    return rxso3().quaternion();\n  }\n\n  /// Returns Rotation matrix\n  ///\n  SOPHUS_FUNC Matrix3<Scalar> rotationMatrix() const {\n    return rxso3().rotationMatrix();\n  }\n\n  /// Mutator of SO3 group.\n  ///\n  SOPHUS_FUNC RxSO3Type& rxso3() {\n    return static_cast<Derived*>(this)->rxso3();\n  }\n\n  /// Accessor of SO3 group.\n  ///\n  SOPHUS_FUNC RxSO3Type const& rxso3() const {\n    return static_cast<Derived const*>(this)->rxso3();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC Scalar scale() const { return rxso3().scale(); }\n\n  /// Setter of quaternion using rotation matrix ``R``, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix3<Scalar>& R) {\n    rxso3().setRotationMatrix(R);\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  /// Note: This function as a significant computational cost, since it has to\n  /// call the square root twice.\n  ///\n  SOPHUS_FUNC void setScale(Scalar const& scale) { rxso3().setScale(scale); }\n\n  /// Setter of quaternion using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 3x3 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Matrix3<Scalar> const& sR) {\n    rxso3().setScaledRotationMatrix(sR);\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n};\n\n/// Sim3 using default storage; derived from Sim3Base.\ntemplate <class Scalar_, int Options>\nclass Sim3 : public Sim3Base<Sim3<Scalar_, Options>> {\n public:\n  using Base = Sim3Base<Sim3<Scalar_, 
Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using RxSo3Member = RxSO3<Scalar, Options>;\n  using TranslationMember = Vector3<Scalar, Options>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes similarity transform to the identity.\n  ///\n  SOPHUS_FUNC Sim3();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC Sim3(Sim3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim3(Sim3Base<OtherDerived> const& other)\n      : rxso3_(other.rxso3()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from RxSO3 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC Sim3(RxSO3Base<OtherDerived> const& rxso3,\n                   Eigen::MatrixBase<D> const& translation)\n      : rxso3_(rxso3), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from quaternion and translation vector.\n  ///\n  /// Precondition: quaternion must not be close to zero.\n  ///\n  template <class D1, class D2>\n  SOPHUS_FUNC Sim3(Eigen::QuaternionBase<D1> const& quaternion,\n                   Eigen::MatrixBase<D2> const& translation)\n      : rxso3_(quaternion), translation_(translation) {\n    static_assert(std::is_same<typename D1::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename 
D2::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from 4x4 matrix\n  ///\n  /// Precondition: Top-left 3x3 matrix needs to be \"scaled-orthogonal\" with\n  ///               positive determinant. The last row must be ``(0, 0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit Sim3(Matrix<Scalar, 4, 4> const& T)\n      : rxso3_(T.template topLeftCorner<3, 3>()),\n        translation_(T.template block<3, 1>(0, 3)) {}\n\n  /// This provides unsafe read/write access to internal data. Sim(3) is\n  /// represented by an Eigen::Quaternion (four parameters) and a 3-vector. When\n  /// using direct write access, the user needs to take care of that the\n  /// quaternion is not set close to zero.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // rxso3_ and translation_ are laid out sequentially with no padding\n    return rxso3_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    // rxso3_ and translation_ are laid out sequentially with no padding\n    return rxso3_.data();\n  }\n\n  /// Accessor of RxSO3\n  ///\n  SOPHUS_FUNC RxSo3Member& rxso3() { return rxso3_; }\n\n  /// Mutator of RxSO3\n  ///\n  SOPHUS_FUNC RxSo3Member const& rxso3() const { return rxso3_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. 
``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space and returns the\n  /// corresponding element of the group Sim(3).\n  ///\n  /// The first three components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of Sim(3), the following three components\n  /// of ``a`` represents the rotation vector ``omega`` and the final component\n  /// represents the logarithm of the scaling factor ``sigma``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of Sim(3), see below.\n  ///\n  SOPHUS_FUNC static Sim3<Scalar> exp(Tangent const& a) {\n    // For the derivation of the exponential map of Sim(3) see\n    // H. Strasdat, \"Local Accuracy and Global Consistency for Efficient Visual\n    // SLAM\", PhD thesis, 2012.\n    // http:///hauke.strasdat.net/files/strasdat_thesis_2012.pdf (A.5, pp. 
186)\n    Vector3<Scalar> const upsilon = a.segment(0, 3);\n    Vector3<Scalar> const omega = a.segment(3, 3);\n    Scalar const sigma = a[6];\n    Scalar theta;\n    RxSO3<Scalar> rxso3 =\n        RxSO3<Scalar>::expAndTheta(a.template tail<4>(), &theta);\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n\n    Matrix3<Scalar> const W = details::calcW<Scalar, 3>(Omega, theta, sigma);\n    return Sim3<Scalar>(rxso3, W * upsilon);\n  }\n\n  /// Returns the ith infinitesimal generators of Sim(3).\n  ///\n  /// The infinitesimal generators of Sim(3) are:\n  ///\n  /// ```\n  ///         |  0  0  0  1 |\n  ///   G_0 = |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_1 = |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_2 = |  0  0  0  0 |\n  ///         |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_3 = |  0  0 -1  0 |\n  ///         |  0  1  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  1  0 |\n  ///   G_4 = |  0  0  0  0 |\n  ///         | -1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0 -1  0  0 |\n  ///   G_5 = |  1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  1  0  0  0 |\n  ///   G_6 = |  0  1  0  0 |\n  ///         |  0  0  1  0 |\n  ///         |  0  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, 6].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 || i <= 6, \"i should be in range [0,6].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 7-vector representation and returns the corresponding\n  /// matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of Sim(3) is defined 
as\n  ///\n  ///   ``hat(.): R^7 -> R^{4x4},  hat(a) = sum_i a_i * G_i``  (for i=0,...,6)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of Sim(3).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.template topLeftCorner<3, 3>() =\n        RxSO3<Scalar>::hat(a.template tail<4>());\n    Omega.col(3).template head<3>() = a.template head<3>();\n    Omega.row(3).setZero();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of Sim(3). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_sim3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of Sim(3).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector3<Scalar> const upsilon1 = a.template head<3>();\n    Vector3<Scalar> const upsilon2 = b.template head<3>();\n    Vector3<Scalar> const omega1 = a.template segment<3>(3);\n    Vector3<Scalar> const omega2 = b.template segment<3>(3);\n    Scalar sigma1 = a[6];\n    Scalar sigma2 = b[6];\n\n    Tangent res;\n    res.template head<3>() = SO3<Scalar>::hat(omega1) * upsilon2 +\n                             SO3<Scalar>::hat(upsilon1) * omega2 +\n                             sigma1 * upsilon2 - sigma2 * upsilon1;\n    res.template segment<3>(3) = omega1.cross(omega2);\n    res[6] = Scalar(0);\n\n    return res;\n  }\n\n  /// Draw uniform sample from Sim(3) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2].\n  ///\n  template <class UniformRandomBitGenerator>\n  static Sim3 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> 
uniform(Scalar(-1), Scalar(1));\n    return Sim3(RxSO3<Scalar>::sampleUniform(generator),\n                Vector3<Scalar>(uniform(generator), uniform(generator),\n                                uniform(generator)));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 4x4-matrix representation ``Omega`` and maps it to the\n  /// corresponding 7-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  g -f  e  a |\n  ///                |  f  g -d  b |\n  ///                | -e  d  g  c |\n  ///                |  0  0  0  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    Tangent upsilon_omega_sigma;\n    upsilon_omega_sigma.template head<3>() = Omega.col(3).template head<3>();\n    upsilon_omega_sigma.template tail<4>() =\n        RxSO3<Scalar>::vee(Omega.template topLeftCorner<3, 3>());\n    return upsilon_omega_sigma;\n  }\n\n protected:\n  RxSo3Member rxso3_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSim3<Scalar, Options>::Sim3() : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<Sim3>::value,\n                \"Assume standard layout for the use of offsetof check below.\");\n  static_assert(\n      offsetof(Sim3, rxso3_) + sizeof(Scalar) * RxSO3<Scalar>::num_parameters ==\n          offsetof(Sim3, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``Sim3``; derived from Sim3Base.\n///\n/// Allows us to wrap Sim3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass 
Map<Sophus::Sim3<Scalar_>, Options>\n    : public Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar* coeffs)\n      : rxso3_(coeffs),\n        translation_(coeffs + Sophus::RxSO3<Scalar>::num_parameters) {}\n\n  /// Mutator of RxSO3\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO3<Scalar>, Options>& rxso3() { return rxso3_; }\n\n  /// Accessor of RxSO3\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO3<Scalar>, Options> const& rxso3() const {\n    return rxso3_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar>, Options>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar>, Options> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO3<Scalar>, Options> rxso3_;\n  Map<Sophus::Vector3<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``Sim3 const``; derived from Sim3Base.\n///\n/// Allows us to wrap RxSO3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim3<Scalar_> const, Options>\n    : public Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_> const, Options>> {\n  using Base = Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_> const, Options>>;\n\n public:\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = 
typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs)\n      : rxso3_(coeffs),\n        translation_(coeffs + Sophus::RxSO3<Scalar>::num_parameters) {}\n\n  /// Accessor of RxSO3\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO3<Scalar> const, Options> const& rxso3() const {\n    return rxso3_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO3<Scalar> const, Options> const rxso3_;\n  Map<Sophus::Vector3<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/sim_details.hpp",
    "content": "#ifndef SOPHUS_SIM_DETAILS_HPP\n#define SOPHUS_SIM_DETAILS_HPP\n\n#include \"types.hpp\"\n\nnamespace Sophus {\nnamespace details {\n\ntemplate <class Scalar, int N>\nMatrix<Scalar, N, N> calcW(Matrix<Scalar, N, N> const& Omega,\n                           Scalar const theta, Scalar const sigma) {\n  using std::abs;\n  using std::cos;\n  using std::exp;\n  using std::sin;\n  static Matrix<Scalar, N, N> const I = Matrix<Scalar, N, N>::Identity();\n  static Scalar const one(1);\n  static Scalar const half(0.5);\n  Matrix<Scalar, N, N> const Omega2 = Omega * Omega;\n  Scalar const scale = exp(sigma);\n  Scalar A, B, C;\n  if (abs(sigma) < Constants<Scalar>::epsilon()) {\n    C = one;\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      A = half;\n      B = Scalar(1. / 6.);\n    } else {\n      Scalar theta_sq = theta * theta;\n      A = (one - cos(theta)) / theta_sq;\n      B = (theta - sin(theta)) / (theta_sq * theta);\n    }\n  } else {\n    C = (scale - one) / sigma;\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Scalar sigma_sq = sigma * sigma;\n      A = ((sigma - one) * scale + one) / sigma_sq;\n      B = (scale * half * sigma_sq + scale - one - sigma * scale) /\n          (sigma_sq * sigma);\n    } else {\n      Scalar theta_sq = theta * theta;\n      Scalar a = scale * sin(theta);\n      Scalar b = scale * cos(theta);\n      Scalar c = theta_sq + sigma * sigma;\n      A = (a * sigma + (one - b) * theta) / (theta * c);\n      B = (C - ((b - one) * sigma + a * theta) / (c)) * one / (theta_sq);\n    }\n  }\n  return A * Omega + B * Omega2 + C * I;\n}\n\ntemplate <class Scalar, int N>\nMatrix<Scalar, N, N> calcWInv(Matrix<Scalar, N, N> const& Omega,\n                              Scalar const theta, Scalar const sigma,\n                              Scalar const scale) {\n  using std::abs;\n  using std::cos;\n  using std::sin;\n  static Matrix<Scalar, N, N> const I = Matrix<Scalar, N, N>::Identity();\n  static Scalar const 
half(0.5);\n  static Scalar const one(1);\n  static Scalar const two(2);\n  Matrix<Scalar, N, N> const Omega2 = Omega * Omega;\n  Scalar const scale_sq = scale * scale;\n  Scalar const theta_sq = theta * theta;\n  Scalar const sin_theta = sin(theta);\n  Scalar const cos_theta = cos(theta);\n\n  Scalar a, b, c;\n  if (abs(sigma * sigma) < Constants<Scalar>::epsilon()) {\n    c = one - half * sigma;\n    a = -half;\n    if (abs(theta_sq) < Constants<Scalar>::epsilon()) {\n      b = Scalar(1. / 12.);\n    } else {\n      b = (theta * sin_theta + two * cos_theta - two) /\n          (two * theta_sq * (cos_theta - one));\n    }\n  } else {\n    Scalar const scale_cu = scale_sq * scale;\n    c = sigma / (scale - one);\n    if (abs(theta_sq) < Constants<Scalar>::epsilon()) {\n      a = (-sigma * scale + scale - one) / ((scale - one) * (scale - one));\n      b = (scale_sq * sigma - two * scale_sq + scale * sigma + two * scale) /\n          (two * scale_cu - Scalar(6) * scale_sq + Scalar(6) * scale - two);\n    } else {\n      Scalar const s_sin_theta = scale * sin_theta;\n      Scalar const s_cos_theta = scale * cos_theta;\n      a = (theta * s_cos_theta - theta - sigma * s_sin_theta) /\n          (theta * (scale_sq - two * s_cos_theta + one));\n      b = -scale *\n          (theta * s_sin_theta - theta * sin_theta + sigma * s_cos_theta -\n           scale * sigma + sigma * cos_theta - sigma) /\n          (theta_sq * (scale_cu - two * scale * s_cos_theta - scale_sq +\n                       two * s_cos_theta + scale - one));\n    }\n  }\n  return a * Omega + b * Omega2 + c * I;\n}\n\n}  // namespace details\n}  // namespace Sophus\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/so2.hpp",
    "content": "/// @file\n/// Special orthogonal group SO(2) - rotation in 2d.\n\n#ifndef SOPHUS_SO2_HPP\n#define SOPHUS_SO2_HPP\n\n#include <complex>\n#include <type_traits>\n\n// Include only the selective set of Eigen headers that we need.\n// This helps when using Sophus with unusual compilers, like nvcc.\n#include <Eigen/LU>\n\n#include \"rotation_matrix.hpp\"\n#include \"types.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SO2;\nusing SO2d = SO2<double>;\nusing SO2f = SO2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::SO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Sophus::Vector2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO2<Scalar_>, Options_>>\n    : traits<Sophus::SO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO2<Scalar_> const, Options_>>\n    : traits<Sophus::SO2<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SO2 base type - implements SO2 class but is storage agnostic.\n///\n/// SO(2) is the group of rotations in 2d. As a matrix group, it is the set of\n/// matrices which are orthogonal such that ``R * R' = I`` (with ``R'`` being\n/// the transpose of ``R``) and have a positive determinant. In particular, the\n/// determinant is 1. 
Let ``theta`` be the rotation angle, the rotation matrix\n/// can be written in close form:\n///\n///      | cos(theta) -sin(theta) |\n///      | sin(theta)  cos(theta) |\n///\n/// As a matter of fact, the first column of those matrices is isomorph to the\n/// set of unit complex numbers U(1). Thus, internally, SO2 is represented as\n/// complex number with length 1.\n///\n/// SO(2) is a compact and commutative group. First it is compact since the set\n/// of rotation matrices is a closed and bounded set. Second it is commutative\n/// since ``R(alpha) * R(beta) = R(beta) * R(alpha)``,  simply because ``alpha +\n/// beta = beta + alpha`` with ``alpha`` and ``beta`` being rotation angles\n/// (about the same axis).\n///\n/// Class invariant: The 2-norm of ``unit_complex`` must be close to 1.\n/// Technically speaking, it must hold that:\n///\n///   ``|unit_complex().squaredNorm() - 1| <= Constants::epsilon()``.\ntemplate <class Derived>\nclass SO2Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using ComplexT = typename Eigen::internal::traits<Derived>::ComplexType;\n  using ComplexTemporaryType = Sophus::Vector2<Scalar, Options>;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space (one\n  /// since we only have in-plane rotations).\n  static int constexpr DoF = 1;\n  /// Number of internal parameters used (complex numbers are a tuples).\n  static int constexpr num_parameters = 2;\n  /// Group transformations are 2x2 matrices.\n  static int constexpr N = 2;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Tangent = Scalar;\n  using Adjoint = Scalar;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SO2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SO2Product = SO2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  /// It simply ``1``, since ``SO(2)`` is a commutative group.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const { return Scalar(1); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SO2<NewScalarType> cast() const {\n    return SO2<NewScalarType>(unit_complex().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. SO(2) is\n  /// represented by a unit complex number (two parameters). 
When using direct\n  /// write access, the user needs to take care of that the complex number stays\n  /// normalized.\n  ///\n  SOPHUS_FUNC Scalar* data() { return unit_complex_nonconst().data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const { return unit_complex().data(); }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SO2<Scalar> inverse() const {\n    return SO2<Scalar>(unit_complex().x(), -unit_complex().y());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rotation matrices) to elements of the tangent space\n  /// (rotation angles).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SO(2).\n  ///\n  SOPHUS_FUNC Scalar log() const {\n    using std::atan2;\n    return atan2(unit_complex().y(), unit_complex().x());\n  }\n\n  /// It re-normalizes ``unit_complex`` to unit length.\n  ///\n  /// Note: Because of the class invariant, there is typically no need to call\n  /// this function directly.\n  ///\n  SOPHUS_FUNC void normalize() {\n    using std::sqrt;\n    Scalar length = sqrt(unit_complex().x() * unit_complex().x() +\n                         unit_complex().y() * unit_complex().y());\n    SOPHUS_ENSURE(length >= Constants<Scalar>::epsilon(),\n                  \"Complex number should not be close to zero!\");\n    unit_complex_nonconst().x() /= length;\n    unit_complex_nonconst().y() /= length;\n  }\n\n  /// Returns 2x2 matrix representation of the instance.\n  ///\n  /// For SO(2), the matrix representation is an orthogonal matrix ``R`` with\n  /// ``det(R)=1``, thus the so-called \"rotation matrix\".\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Scalar const& real = unit_complex().x();\n    Scalar const& imag = unit_complex().y();\n    Transformation R;\n    // clang-format off\n  
  R <<\n      real, -imag,\n      imag,  real;\n    // clang-format on\n    return R;\n  }\n\n  /// Assignment operator\n  ///\n  SOPHUS_FUNC SO2Base& operator=(SO2Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO2Base<Derived>& operator=(SO2Base<OtherDerived> const& other) {\n    unit_complex_nonconst() = other.unit_complex();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SO2Product<OtherDerived> operator*(\n      SO2Base<OtherDerived> const& other) const {\n    using ResultT = ReturnScalar<OtherDerived>;\n    Scalar const lhs_real = unit_complex().x();\n    Scalar const lhs_imag = unit_complex().y();\n    typename OtherDerived::Scalar const& rhs_real = other.unit_complex().x();\n    typename OtherDerived::Scalar const& rhs_imag = other.unit_complex().y();\n    // complex multiplication\n    ResultT const result_real = lhs_real * rhs_real - lhs_imag * rhs_imag;\n    ResultT const result_imag = lhs_real * rhs_imag + lhs_imag * rhs_real;\n\n    ResultT const squared_norm =\n        result_real * result_real + result_imag * result_imag;\n    // We can assume that the squared-norm is close to 1 since we deal with a\n    // unit complex number. Due to numerical precision issues, there might\n    // be a small drift after pose concatenation. 
Hence, we need to renormalizes\n    // the complex number here.\n    // Since squared-norm is close to 1, we do not need to calculate the costly\n    // square-root, but can use an approximation around 1 (see\n    // http://stackoverflow.com/a/12934750 for details).\n    if (squared_norm != ResultT(1.0)) {\n      ResultT const scale = ResultT(2.0) / (ResultT(1.0) + squared_norm);\n      return SO2Product<OtherDerived>(result_real * scale, result_imag * scale);\n    }\n    return SO2Product<OtherDerived>(result_real, result_imag);\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates a 2 dimensional point ``p`` by the SO2 element\n  ///  ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    Scalar const& real = unit_complex().x();\n    Scalar const& imag = unit_complex().y();\n    return PointProduct<PointDerived>(real * p[0] - imag * p[1],\n                                      imag * p[0] + real * p[1]);\n  }\n\n  /// Group action on homogeneous 2-points.\n  ///\n  /// This function rotates a homogeneous 2 dimensional point ``p`` by the SO2\n  /// element ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    Scalar const& real = unit_complex().x();\n    Scalar const& imag = unit_complex().y();\n    return HomogeneousPointProduct<HPointDerived>(\n        real * p[0] - imag * p[1], imag * p[0] + real * p[1], p[2]);\n  }\n\n  /// Group action on lines.\n  ///\n  /// 
This function rotates a parametrized line ``l(t) = o + t * d`` by the SO2\n  /// element:\n  ///\n  /// Both direction ``d`` and origin ``o`` are rotated as a 2 dimensional point\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), (*this) * l.direction());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SO2Base<Derived> operator*=(SO2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns derivative of  this * SO2::exp(x)  wrt. x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    return Matrix<Scalar, num_parameters, DoF>(-unit_complex()[1],\n                                               unit_complex()[0]);\n  }\n\n  /// Returns internal parameters of SO(2).\n  ///\n  /// It returns (c[0], c[1]), with c being the unit complex number.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return unit_complex();\n  }\n\n  /// Takes in complex number / tuple and normalizes it.\n  ///\n  /// Precondition: The complex number must not be close to zero.\n  ///\n  SOPHUS_FUNC void setComplex(Point const& complex) {\n    unit_complex_nonconst() = complex;\n    normalize();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC\n  ComplexT const& unit_complex() const {\n    return static_cast<Derived const*>(this)->unit_complex();\n  }\n\n private:\n  /// Mutator of unit_complex is private to ensure class invariant. 
That is\n  /// the complex number must stay close to unit length.\n  ///\n  SOPHUS_FUNC\n  ComplexT& unit_complex_nonconst() {\n    return static_cast<Derived*>(this)->unit_complex_nonconst();\n  }\n};\n\n/// SO2 using  default storage; derived from SO2Base.\ntemplate <class Scalar_, int Options>\nclass SO2 : public SO2Base<SO2<Scalar_, Options>> {\n public:\n  using Base = SO2Base<SO2<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using ComplexMember = Vector2<Scalar, Options>;\n\n  /// ``Base`` is friend so unit_complex_nonconst can be accessed from ``Base``.\n  friend class SO2Base<SO2<Scalar, Options>>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes unit complex number to identity rotation.\n  ///\n  SOPHUS_FUNC SO2() : unit_complex_(Scalar(1), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SO2(SO2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO2(SO2Base<OtherDerived> const& other)\n      : unit_complex_(other.unit_complex()) {}\n\n  /// Constructor from rotation matrix\n  ///\n  /// Precondition: rotation matrix need to be orthogonal with determinant of 1.\n  ///\n  SOPHUS_FUNC explicit SO2(Transformation const& R)\n      : unit_complex_(Scalar(0.5) * (R(0, 0) + R(1, 1)),\n                      Scalar(0.5) * (R(1, 0) - R(0, 1))) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n %\", R);\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: %\",\n                  R.determinant());\n  }\n\n  /// Constructor from pair of real and imaginary number.\n  
///\n  /// Precondition: The pair must not be close to zero.\n  ///\n  SOPHUS_FUNC SO2(Scalar const& real, Scalar const& imag)\n      : unit_complex_(real, imag) {\n    Base::normalize();\n  }\n\n  /// Constructor from 2-vector.\n  ///\n  /// Precondition: The vector must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit SO2(Eigen::MatrixBase<D> const& complex)\n      : unit_complex_(complex) {\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    Base::normalize();\n  }\n\n  /// Constructor from an rotation angle.\n  ///\n  SOPHUS_FUNC explicit SO2(Scalar theta) {\n    unit_complex_nonconst() = SO2<Scalar>::exp(theta).unit_complex();\n  }\n\n  /// Accessor of unit complex number\n  ///\n  SOPHUS_FUNC ComplexMember const& unit_complex() const {\n    return unit_complex_;\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation angle\n  /// ``theta``) and returns the corresponding element of the group SO(2).\n  ///\n  /// To be more specific, this function computes ``expmat(hat(omega))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of SO(2).\n  ///\n  SOPHUS_FUNC static SO2<Scalar> exp(Tangent const& theta) {\n    using std::cos;\n    using std::sin;\n    return SO2<Scalar>(cos(theta), sin(theta));\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& theta) {\n    using std::cos;\n    using std::sin;\n    return Sophus::Matrix<Scalar, num_parameters, DoF>(-sin(theta), cos(theta));\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    return Sophus::Matrix<Scalar, num_parameters, DoF>(Scalar(0), Scalar(1));\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. 
``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int) {\n    return generator();\n  }\n\n  /// Returns the infinitesimal generators of SO(2).\n  ///\n  /// The infinitesimal generators of SO(2) is:\n  ///\n  ///     |  0  1 |\n  ///     | -1  0 |\n  ///\n  SOPHUS_FUNC static Transformation generator() { return hat(Scalar(1)); }\n\n  /// hat-operator\n  ///\n  /// It takes in the scalar representation ``theta`` (= rotation angle) and\n  /// returns the corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SO(2) is defined as\n  ///\n  ///   ``hat(.): R^2 -> R^{2x2},  hat(theta) = theta * G``\n  ///\n  /// with ``G`` being the infinitesimal generator of SO(2).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& theta) {\n    Transformation Omega;\n    // clang-format off\n    Omega <<\n        Scalar(0),   -theta,\n            theta, Scalar(0);\n    // clang-format on\n    return Omega;\n  }\n\n  /// Returns closed SO2 given arbitrary 2x2 matrix.\n  ///\n  template <class S = Scalar>\n  static SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, SO2>\n  fitToSO2(Transformation const& R) {\n    return SO2(makeRotationMatrix(R));\n  }\n\n  /// Lie bracket\n  ///\n  /// It returns the Lie bracket of SO(2). 
Since SO(2) is a commutative group,\n  /// the Lie bracket is simple ``0``.\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) {\n    return Scalar(0);\n  }\n\n  /// Draw uniform sample from SO(2) manifold.\n  ///\n  template <class UniformRandomBitGenerator>\n  static SO2 sampleUniform(UniformRandomBitGenerator& generator) {\n    static_assert(IsUniformRandomBitGenerator<UniformRandomBitGenerator>::value,\n                  \"generator must meet the UniformRandomBitGenerator concept\");\n    std::uniform_real_distribution<Scalar> uniform(-Constants<Scalar>::pi(),\n                                                   Constants<Scalar>::pi());\n    return SO2(uniform(generator));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 2x2-matrix representation ``Omega`` and maps it to the\n  /// corresponding scalar representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -a |\n  ///                |  a  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    using std::abs;\n    return Omega(1, 0);\n  }\n\n protected:\n  /// Mutator of complex number is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC ComplexMember& unit_complex_nonconst() { return unit_complex_; }\n\n  ComplexMember unit_complex_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``SO2``; derived from SO2Base.\n///\n/// Allows us to wrap SO2 objects around POD array (e.g. 
external c style\n/// complex number / tuple).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO2<Scalar_>, Options>\n    : public Sophus::SO2Base<Map<Sophus::SO2<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SO2Base<Map<Sophus::SO2<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so unit_complex_nonconst can be accessed from ``Base``.\n  friend class Sophus::SO2Base<Map<Sophus::SO2<Scalar_>, Options>>;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  Map(Scalar* coeffs) : unit_complex_(coeffs) {}\n\n  /// Accessor of unit complex number.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar>, Options> const& unit_complex() const {\n    return unit_complex_;\n  }\n\n protected:\n  /// Mutator of unit_complex is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar>, Options>& unit_complex_nonconst() {\n    return unit_complex_;\n  }\n\n  Map<Matrix<Scalar, 2, 1>, Options> unit_complex_;\n};\n\n/// Specialization of Eigen::Map for ``SO2 const``; derived from SO2Base.\n///\n/// Allows us to wrap SO2 objects around POD array (e.g. 
external c style\n/// complex number / tuple).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO2<Scalar_> const, Options>\n    : public Sophus::SO2Base<Map<Sophus::SO2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SO2Base<Map<Sophus::SO2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs) : unit_complex_(coeffs) {}\n\n  /// Accessor of unit complex number.\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& unit_complex()\n      const {\n    return unit_complex_;\n  }\n\n protected:\n  /// Mutator of unit_complex is protected to ensure class invariant.\n  ///\n  Map<Matrix<Scalar, 2, 1> const, Options> const unit_complex_;\n};\n}  // namespace Eigen\n\n#endif  // SOPHUS_SO2_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/so3.hpp",
    "content": "/// @file\n/// Special orthogonal group SO(3) - rotation in 3d.\n\n#ifndef SOPHUS_SO3_HPP\n#define SOPHUS_SO3_HPP\n\n#include \"rotation_matrix.hpp\"\n#include \"so2.hpp\"\n#include \"types.hpp\"\n\n// Include only the selective set of Eigen headers that we need.\n// This helps when using Sophus with unusual compilers, like nvcc.\n#include <Eigen/src/Geometry/OrthoMethods.h>\n#include <Eigen/src/Geometry/Quaternion.h>\n#include <Eigen/src/Geometry/RotationBase.h>\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SO3;\nusing SO3d = SO3<double>;\nusing SO3f = SO3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::SO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Eigen::Quaternion<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO3<Scalar_>, Options_>>\n    : traits<Sophus::SO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO3<Scalar_> const, Options_>>\n    : traits<Sophus::SO3<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SO3 base type - implements SO3 class but is storage agnostic.\n///\n/// SO(3) is the group of rotations in 3d. As a matrix group, it is the set of\n/// matrices which are orthogonal such that ``R * R' = I`` (with ``R'`` being\n/// the transpose of ``R``) and have a positive determinant. In particular, the\n/// determinant is 1. 
Internally, the group is represented as a unit quaternion.\n/// Unit quaternion can be seen as members of the special unitary group SU(2).\n/// SU(2) is a double cover of SO(3). Hence, for every rotation matrix ``R``,\n/// there exist two unit quaternions: ``(r, v)`` and ``(-r, -v)``, with ``r``\n/// the real part and ``v`` being the imaginary 3-vector part of the quaternion.\n///\n/// SO(3) is a compact, but non-commutative group. First it is compact since the\n/// set of rotation matrices is a closed and bounded set. Second it is\n/// non-commutative since the equation ``R_1 * R_2 = R_2 * R_1`` does not hold\n/// in general. For example rotating an object by some degrees about its\n/// ``x``-axis and then by some degrees about its y axis, does not lead to the\n/// same orientation when rotation first about ``y`` and then about ``x``.\n///\n/// Class invariant: The 2-norm of ``unit_quaternion`` must be close to 1.\n/// Technically speaking, it must hold that:\n///\n///   ``|unit_quaternion().squaredNorm() - 1| <= Constants::epsilon()``.\ntemplate <class Derived>\nclass SO3Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using QuaternionType =\n      typename Eigen::internal::traits<Derived>::QuaternionType;\n  using QuaternionTemporaryType = Eigen::Quaternion<Scalar, Options>;\n\n  /// Degrees of freedom of group, number of dimensions in tangent space.\n  static int constexpr DoF = 3;\n  /// Number of internal parameters used (quaternion is a 4-tuple).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  struct TangentAndTheta 
{\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n    Tangent tangent;\n    Scalar theta;\n  };\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SO3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SO3Product = SO3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  //\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. 
See hat-operator below.\n  //\n  /// For SO(3), it simply returns the rotation matrix corresponding to ``A``.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const { return matrix(); }\n\n  /// Extract rotation angle about canonical X-axis\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, S> angleX() const {\n    Sophus::Matrix3<Scalar> R = matrix();\n    Sophus::Matrix2<Scalar> Rx = R.template block<2, 2>(1, 1);\n    return SO2<Scalar>(makeRotationMatrix(Rx)).log();\n  }\n\n  /// Extract rotation angle about canonical Y-axis\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, S> angleY() const {\n    Sophus::Matrix3<Scalar> R = matrix();\n    Sophus::Matrix2<Scalar> Ry;\n    // clang-format off\n    Ry <<\n      R(0, 0), R(2, 0),\n      R(0, 2), R(2, 2);\n    // clang-format on\n    return SO2<Scalar>(makeRotationMatrix(Ry)).log();\n  }\n\n  /// Extract rotation angle about canonical Z-axis\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, S> angleZ() const {\n    Sophus::Matrix3<Scalar> R = matrix();\n    Sophus::Matrix2<Scalar> Rz = R.template block<2, 2>(0, 0);\n    return SO2<Scalar>(makeRotationMatrix(Rz)).log();\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SO3<NewScalarType> cast() const {\n    return SO3<NewScalarType>(unit_quaternion().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. SO(3) is\n  /// represented by an Eigen::Quaternion (four parameters). 
When using direct\n  /// write access, the user needs to take care of that the quaternion stays\n  /// normalized.\n  ///\n  /// Note: The first three Scalars represent the imaginary parts, while the\n  /// forth Scalar represent the real part.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    return unit_quaternion_nonconst().coeffs().data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    return unit_quaternion().coeffs().data();\n  }\n\n  /// Returns derivative of  this * SO3::exp(x)  wrt. x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    Eigen::Quaternion<Scalar> const q = unit_quaternion();\n    Scalar const c0 = Scalar(0.5) * q.w();\n    Scalar const c1 = Scalar(0.5) * q.z();\n    Scalar const c2 = -c1;\n    Scalar const c3 = Scalar(0.5) * q.y();\n    Scalar const c4 = Scalar(0.5) * q.x();\n    Scalar const c5 = -c4;\n    Scalar const c6 = -c3;\n    J(0, 0) = c0;\n    J(0, 1) = c2;\n    J(0, 2) = c3;\n    J(1, 0) = c1;\n    J(1, 1) = c0;\n    J(1, 2) = c5;\n    J(2, 0) = c6;\n    J(2, 1) = c4;\n    J(2, 2) = c0;\n    J(3, 0) = c5;\n    J(3, 1) = c6;\n    J(3, 2) = c2;\n\n    return J;\n  }\n\n  /// Returns internal parameters of SO(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real), with q being the\n  /// unit quaternion.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return unit_quaternion().coeffs();\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SO3<Scalar> inverse() const {\n    return SO3<Scalar>(unit_quaternion().conjugate());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rotation matrices) to elements of the tangent space\n  /// (rotation-vector).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// 
``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SO(3).\n  ///\n  SOPHUS_FUNC Tangent log() const { return logAndTheta().tangent; }\n\n  /// As above, but also returns ``theta = |omega|``.\n  ///\n  SOPHUS_FUNC TangentAndTheta logAndTheta() const {\n    TangentAndTheta J;\n    using std::abs;\n    using std::atan;\n    using std::sqrt;\n    Scalar squared_n = unit_quaternion().vec().squaredNorm();\n    Scalar w = unit_quaternion().w();\n\n    Scalar two_atan_nbyw_by_n;\n\n    /// Atan-based log thanks to\n    ///\n    /// C. Hertzberg et al.:\n    /// \"Integrating Generic Sensor Fusion Algorithms with Sound State\n    /// Representation through Encapsulation of Manifolds\"\n    /// Information Fusion, 2011\n\n    if (squared_n < Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      // If quaternion is normalized and n=0, then w should be 1;\n      // w=0 should never happen here!\n      SOPHUS_ENSURE(abs(w) >= Constants<Scalar>::epsilon(),\n                    \"Quaternion (%) should be normalized!\",\n                    unit_quaternion().coeffs().transpose());\n      Scalar squared_w = w * w;\n      two_atan_nbyw_by_n =\n          Scalar(2) / w - Scalar(2.0/3.0) * (squared_n) / (w * squared_w);\n      J.theta = Scalar(2) * squared_n / w;\n    } else {\n      Scalar n = sqrt(squared_n);\n      if (abs(w) < Constants<Scalar>::epsilon()) {\n        if (w > Scalar(0)) {\n          two_atan_nbyw_by_n = Constants<Scalar>::pi() / n;\n        } else {\n          two_atan_nbyw_by_n = -Constants<Scalar>::pi() / n;\n        }\n      } else {\n        two_atan_nbyw_by_n = Scalar(2) * atan(n / w) / n;\n      }\n      J.theta = two_atan_nbyw_by_n * n;\n    }\n\n    J.tangent = two_atan_nbyw_by_n * unit_quaternion().vec();\n    return J;\n  }\n\n  /// It re-normalizes ``unit_quaternion`` to unit length.\n  ///\n  /// Note: Because of the class invariant, there is typically no need to call\n  /// this function directly.\n  ///\n 
 SOPHUS_FUNC void normalize() {\n    Scalar length = unit_quaternion_nonconst().norm();\n    SOPHUS_ENSURE(length >= Constants<Scalar>::epsilon(),\n                  \"Quaternion (%) should not be close to zero!\",\n                  unit_quaternion_nonconst().coeffs().transpose());\n    unit_quaternion_nonconst().coeffs() /= length;\n  }\n\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// For SO(3), the matrix representation is an orthogonal matrix ``R`` with\n  /// ``det(R)=1``, thus the so-called \"rotation matrix\".\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    return unit_quaternion().toRotationMatrix();\n  }\n\n  /// Assignment operator.\n  ///\n  SOPHUS_FUNC SO3Base& operator=(SO3Base const& other) = default;\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO3Base<Derived>& operator=(SO3Base<OtherDerived> const& other) {\n    unit_quaternion_nonconst() = other.unit_quaternion();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SO3Product<OtherDerived> operator*(\n      SO3Base<OtherDerived> const& other) const {\n    using QuaternionProductType =\n        typename SO3Product<OtherDerived>::QuaternionType;\n    const QuaternionType& a = unit_quaternion();\n    const typename OtherDerived::QuaternionType& b = other.unit_quaternion();\n    /// NOTE: We cannot use Eigen's Quaternion multiplication because it always\n    /// returns a Quaternion of the same Scalar as this object, so it is not\n    /// able to multiple Jets and doubles correctly.\n    return SO3Product<OtherDerived>(QuaternionProductType(\n        a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),\n        a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),\n        a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),\n        a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - 
a.y() * b.x()));\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates a 3 dimensional point ``p`` by the SO3 element\n  ///  ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``.\n  ///\n  /// Since SO3 is internally represented by a unit quaternion ``q``, it is\n  /// implemented as ``p_bar = q * p_foo * q^{*}``\n  /// with ``q^{*}`` being the quaternion conjugate of ``q``.\n  ///\n  /// Geometrically, ``p``  is rotated by angle ``|omega|`` around the\n  /// axis ``omega/|omega|`` with ``omega := vee(log(bar_R_foo))``.\n  ///\n  /// For ``vee``-operator, see below.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    /// NOTE: We cannot use Eigen's Quaternion transformVector because it always\n    /// returns a Vector3 of the same Scalar as this quaternion, so it is not\n    /// able to be applied to Jets and doubles correctly.\n    const QuaternionType& q = unit_quaternion();\n    PointProduct<PointDerived> uv = q.vec().cross(p);\n    uv += uv;\n    return p + q.w() * uv + q.vec().cross(uv);\n  }\n\n  /// Group action on homogeneous 3-points. 
See above for more details.\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rp = *this * p.template head<3>();\n    return HomogeneousPointProduct<HPointDerived>(rp(0), rp(1), rp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO3\n  /// element:\n  ///\n  /// Both direction ``d`` and origin ``o`` are rotated as a 3 dimensional point\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), (*this) * l.direction());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO3's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SO3Base<Derived>& operator*=(SO3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Takes in quaternion, and normalizes it.\n  ///\n  /// Precondition: The quaternion must not be close to zero.\n  ///\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quaternion) {\n    unit_quaternion_nonconst() = quaternion;\n    normalize();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& unit_quaternion() const {\n    return static_cast<Derived const*>(this)->unit_quaternion();\n  }\n\n private:\n  /// Mutator of unit_quaternion is private to ensure class invariant. 
That is\n  /// the quaternion must stay close to unit length.\n  ///\n  SOPHUS_FUNC QuaternionType& unit_quaternion_nonconst() {\n    return static_cast<Derived*>(this)->unit_quaternion_nonconst();\n  }\n};\n\n/// SO3 using default storage; derived from SO3Base.\ntemplate <class Scalar_, int Options>\nclass SO3 : public SO3Base<SO3<Scalar_, Options>> {\n public:\n  using Base = SO3Base<SO3<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using QuaternionMember = Eigen::Quaternion<Scalar, Options>;\n\n  /// ``Base`` is friend so unit_quaternion_nonconst can be accessed from\n  /// ``Base``.\n  friend class SO3Base<SO3<Scalar, Options>>;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes unit quaternion to identity rotation.\n  ///\n  SOPHUS_FUNC SO3()\n      : unit_quaternion_(Scalar(1), Scalar(0), Scalar(0), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SO3(SO3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO3(SO3Base<OtherDerived> const& other)\n      : unit_quaternion_(other.unit_quaternion()) {}\n\n  /// Constructor from rotation matrix\n  ///\n  /// Precondition: rotation matrix needs to be orthogonal with determinant\n  /// of 1.\n  ///\n  SOPHUS_FUNC SO3(Transformation const& R) : unit_quaternion_(R) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n %\",\n                  R * R.transpose());\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: %\",\n                  R.determinant());\n  }\n\n  /// Constructor from quaternion\n  ///\n  /// Precondition: 
quaternion must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit SO3(Eigen::QuaternionBase<D> const& quat)\n      : unit_quaternion_(quat) {\n    static_assert(\n        std::is_same<typename Eigen::QuaternionBase<D>::Scalar, Scalar>::value,\n        \"Input must be of same scalar type\");\n    Base::normalize();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC QuaternionMember const& unit_quaternion() const {\n    return unit_quaternion_;\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& omega) {\n    using std::cos;\n    using std::exp;\n    using std::sin;\n    using std::sqrt;\n    Scalar const c0 = omega[0] * omega[0];\n    Scalar const c1 = omega[1] * omega[1];\n    Scalar const c2 = omega[2] * omega[2];\n    Scalar const c3 = c0 + c1 + c2;\n\n    if (c3 < Constants<Scalar>::epsilon()) {\n      return Dx_exp_x_at_0();\n    }\n\n    Scalar const c4 = sqrt(c3);\n    Scalar const c5 = 1.0 / c4;\n    Scalar const c6 = 0.5 * c4;\n    Scalar const c7 = sin(c6);\n    Scalar const c8 = c5 * c7;\n    Scalar const c9 = pow(c3, -3.0L / 2.0L);\n    Scalar const c10 = c7 * c9;\n    Scalar const c11 = Scalar(1.0) / c3;\n    Scalar const c12 = cos(c6);\n    Scalar const c13 = Scalar(0.5) * c11 * c12;\n    Scalar const c14 = c7 * c9 * omega[0];\n    Scalar const c15 = Scalar(0.5) * c11 * c12 * omega[0];\n    Scalar const c16 = -c14 * omega[1] + c15 * omega[1];\n    Scalar const c17 = -c14 * omega[2] + c15 * omega[2];\n    Scalar const c18 = omega[1] * omega[2];\n    Scalar const c19 = -c10 * c18 + c13 * c18;\n    Scalar const c20 = Scalar(0.5) * c5 * c7;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    J(0, 0) = -c0 * c10 + c0 * c13 + c8;\n    J(0, 1) = c16;\n    J(0, 2) = c17;\n    J(1, 0) = c16;\n    J(1, 1) = -c1 * c10 + c1 * c13 + c8;\n    J(1, 2) = c19;\n    J(2, 0) = c17;\n    J(2, 1) = c19;\n    J(2, 2) = -c10 * c2 
+ c13 * c2 + c8;\n    J(3, 0) = -c20 * omega[0];\n    J(3, 1) = -c20 * omega[1];\n    J(3, 2) = -c20 * omega[2];\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    // clang-format off\n    J <<  Scalar(0.5),   Scalar(0),   Scalar(0),\n            Scalar(0), Scalar(0.5),   Scalar(0),\n            Scalar(0),   Scalar(0), Scalar(0.5),\n            Scalar(0),   Scalar(0),   Scalar(0);\n    // clang-format on\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation vector\n  /// ``omega``) and returns the corresponding element of the group SO(3).\n  ///\n  /// To be more specific, this function computes ``expmat(hat(omega))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of SO(3).\n  ///\n  SOPHUS_FUNC static SO3<Scalar> exp(Tangent const& omega) {\n    Scalar theta;\n    return expAndTheta(omega, &theta);\n  }\n\n  /// As above, but also returns ``theta = |omega|`` as out-parameter.\n  ///\n  /// Precondition: ``theta`` must not be ``nullptr``.\n  ///\n  SOPHUS_FUNC static SO3<Scalar> expAndTheta(Tangent const& omega,\n                                             Scalar* theta) {\n    SOPHUS_ENSURE(theta != nullptr, \"must not be nullptr.\");\n    using std::abs;\n    using std::cos;\n    using std::sin;\n    using std::sqrt;\n    Scalar theta_sq = omega.squaredNorm();\n\n    Scalar imag_factor;\n    Scalar real_factor;\n    if (theta_sq <\n        Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      *theta = Scalar(0);\n      Scalar theta_po4 = theta_sq * theta_sq;\n      imag_factor = 
Scalar(0.5) - Scalar(1.0 / 48.0) * theta_sq +\n                    Scalar(1.0 / 3840.0) * theta_po4;\n      real_factor = Scalar(1) - Scalar(1.0 / 8.0) * theta_sq +\n                    Scalar(1.0 / 384.0) * theta_po4;\n    } else {\n      *theta = sqrt(theta_sq);\n      Scalar half_theta = Scalar(0.5) * (*theta);\n      Scalar sin_half_theta = sin(half_theta);\n      imag_factor = sin_half_theta / (*theta);\n      real_factor = cos(half_theta);\n    }\n\n    SO3 q;\n    q.unit_quaternion_nonconst() =\n        QuaternionMember(real_factor, imag_factor * omega.x(),\n                         imag_factor * omega.y(), imag_factor * omega.z());\n    SOPHUS_ENSURE(abs(q.unit_quaternion().squaredNorm() - Scalar(1)) <\n                      Sophus::Constants<Scalar>::epsilon(),\n                  \"SO3::exp failed! omega: %, real: %, img: %\",\n                  omega.transpose(), real_factor, imag_factor);\n    return q;\n  }\n\n  /// Returns closest SO3 given arbitrary 3x3 matrix.\n  ///\n  template <class S = Scalar>\n  static SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, SO3>\n  fitToSO3(Transformation const& R) {\n    return SO3(::Sophus::makeRotationMatrix(R));\n  }\n\n  /// Returns the ith infinitesimal generators of SO(3).\n  ///\n  /// The infinitesimal generators of SO(3) are:\n  ///\n  /// ```\n  ///         |  0  0  0 |\n  ///   G_0 = |  0  0 -1 |\n  ///         |  0  1  0 |\n  ///\n  ///         |  0  0  1 |\n  ///   G_1 = |  0  0  0 |\n  ///         | -1  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be 0, 1 or 2.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 2, \"i should be in range [0,2].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 3-vector representation ``omega`` (= rotation vector) and\n  /// 
returns the corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SO(3) is defined as\n  ///\n  ///   ``hat(.): R^3 -> R^{3x3},  hat(omega) = sum_i omega_i * G_i``\n  ///   (for i=0,1,2)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of SO(3).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& omega) {\n    Transformation Omega;\n    // clang-format off\n    Omega <<\n        Scalar(0), -omega(2),  omega(1),\n         omega(2), Scalar(0), -omega(0),\n        -omega(1),  omega(0), Scalar(0);\n    // clang-format on\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of SO(3). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_so3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of SO3.\n  ///\n  /// For the Lie algebra so3, the Lie bracket is simply the cross product:\n  ///\n  /// ``[omega_1, omega_2]_so3 = omega_1 x omega_2.``\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& omega1,\n                                        Tangent const& omega2) {\n    return omega1.cross(omega2);\n  }\n\n  /// Construct x-axis rotation.\n  ///\n  static SOPHUS_FUNC SO3 rotX(Scalar const& x) {\n    return SO3::exp(Sophus::Vector3<Scalar>(x, Scalar(0), Scalar(0)));\n  }\n\n  /// Construct y-axis rotation.\n  ///\n  static SOPHUS_FUNC SO3 rotY(Scalar const& y) {\n    return SO3::exp(Sophus::Vector3<Scalar>(Scalar(0), y, Scalar(0)));\n  }\n\n  /// Construct z-axis rotation.\n  ///\n  static SOPHUS_FUNC SO3 rotZ(Scalar const& z) {\n    return SO3::exp(Sophus::Vector3<Scalar>(Scalar(0), Scalar(0), z));\n  }\n\n  /// Draw uniform sample from SO(3) manifold.\n  /// Based on: http://planning.cs.uiuc.edu/node198.html\n  ///\n  template <class 
UniformRandomBitGenerator>\n  static SO3 sampleUniform(UniformRandomBitGenerator& generator) {\n    static_assert(IsUniformRandomBitGenerator<UniformRandomBitGenerator>::value,\n                  \"generator must meet the UniformRandomBitGenerator concept\");\n\n    std::uniform_real_distribution<Scalar> uniform(Scalar(0), Scalar(1));\n    std::uniform_real_distribution<Scalar> uniform_twopi(\n        Scalar(0), 2 * Constants<Scalar>::pi());\n\n    const Scalar u1 = uniform(generator);\n    const Scalar u2 = uniform_twopi(generator);\n    const Scalar u3 = uniform_twopi(generator);\n\n    const Scalar a = sqrt(1 - u1);\n    const Scalar b = sqrt(u1);\n\n    return SO3(\n        QuaternionMember(a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3)));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -c  b |\n  ///                |  c  0 -a |\n  ///                | -b  a  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    return Tangent(Omega(2, 1), Omega(0, 2), Omega(1, 0));\n  }\n\n  SOPHUS_FUNC static Transformation JacobianRInv(Tangent const& w) {\n    Transformation Jrinv = Transformation::Identity();\n    Scalar theta = w.norm();\n    Scalar theta2 = theta * theta;\n    Scalar s = ( 1.0 - (1.0+cos(theta))*theta / (2.0*sin(theta)) );\n\n    // very small angle\n    if(theta < 0.00001)\n    {\n      return Jrinv;\n    }\n    else\n    {\n      Tangent k = w.normalized();\n      Transformation K = SO3<Scalar>::hat(k);\n      Jrinv = Transformation::Identity()\n              + 0.5*SO3<Scalar>::hat(w)\n              + ( 1.0 - (1.0+cos(theta))*theta / (2.0*sin(theta)) ) *K*K;\n    }\n    return Jrinv;\n  }\n\n protected:\n  /// Mutator of 
unit_quaternion is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC QuaternionMember& unit_quaternion_nonconst() {\n    return unit_quaternion_;\n  }\n\n  QuaternionMember unit_quaternion_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n/// Specialization of Eigen::Map for ``SO3``; derived from SO3Base.\n///\n/// Allows us to wrap SO3 objects around POD array (e.g. external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO3<Scalar_>, Options>\n    : public Sophus::SO3Base<Map<Sophus::SO3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SO3Base<Map<Sophus::SO3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so unit_quaternion_nonconst can be accessed from\n  /// ``Base``.\n  friend class Sophus::SO3Base<Map<Sophus::SO3<Scalar_>, Options>>;\n\n  // LCOV_EXCL_START\n  SOPHUS_INHERIT_ASSIGNMENT_OPERATORS(Map);\n  // LCOV_EXCL_STOP\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar* coeffs) : unit_quaternion_(coeffs) {}\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar>, Options> const& unit_quaternion()\n      const {\n    return unit_quaternion_;\n  }\n\n protected:\n  /// Mutator of unit_quaternion is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar>, Options>&\n  unit_quaternion_nonconst() {\n    return unit_quaternion_;\n  }\n\n  Map<Eigen::Quaternion<Scalar>, Options> unit_quaternion_;\n};\n\n/// Specialization of Eigen::Map for ``SO3 const``; derived from SO3Base.\n///\n/// Allows us to wrap SO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO3<Scalar_> const, Options>\n    : public Sophus::SO3Base<Map<Sophus::SO3<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SO3Base<Map<Sophus::SO3<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs) : unit_quaternion_(coeffs) {}\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar> const, Options> const&\n  unit_quaternion() const {\n    return unit_quaternion_;\n  }\n\n protected:\n  /// Mutator of unit_quaternion is protected to ensure class invariant.\n  ///\n  Map<Eigen::Quaternion<Scalar> const, Options> const unit_quaternion_;\n};\n}  // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "LIO-Livox/include/sophus/test_macros.hpp",
    "content": "#ifndef SOPUHS_TESTS_MACROS_HPP\n#define SOPUHS_TESTS_MACROS_HPP\n\n#include <sophus/types.hpp>\n#include <sophus/formatstring.hpp>\n\nnamespace Sophus {\nnamespace details {\n\ntemplate <class Scalar>\nclass Pretty {\n public:\n  static std::string impl(Scalar s) { return FormatString(\"%\", s); }\n};\n\ntemplate <class Scalar, int M, int N>\nclass Pretty<Eigen::Matrix<Scalar, M, N>> {\n public:\n  static std::string impl(Matrix<Scalar, M, N> const& v) {\n    return FormatString(\"\\n%\\n\", v);\n  }\n};\n\ntemplate <class T>\nstd::string pretty(T const& v) {\n  return Pretty<T>::impl(v);\n}\n\ntemplate <class... Args>\nvoid testFailed(bool& passed, char const* func, char const* file, int line,\n                std::string const& msg) {\n  std::cerr << FormatString(\"Test failed in function %, file %, line %\\n\", func,\n                            file, line);\n  std::cerr << msg << \"\\n\\n\";\n  passed = false;\n}\n}  // namespace details\n\nvoid processTestResult(bool passed) {\n  if (!passed) {\n    // LCOV_EXCL_START\n    std::cerr << \"failed!\" << std::endl << std::endl;\n    exit(-1);\n    // LCOV_EXCL_STOP\n  }\n  std::cerr << \"passed.\" << std::endl << std::endl;\n}\n}  // namespace Sophus\n\n#define SOPHUS_STRINGIFY(x) #x\n\n/// GenericTests whether condition is true.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST(passed, condition, ...)                                    
\\\n  do {                                                                         \\\n    if (!(condition)) {                                                        \\\n      std::string msg = Sophus::details::FormatString(                         \\\n          \"condition ``%`` is false\\n\", SOPHUS_STRINGIFY(condition));          \\\n      msg += Sophus::details::FormatString(__VA_ARGS__);                       \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// GenericTests whether left is equal to right given a threshold.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_EQUAL(passed, left, right, ...)                            \\\n  do {                                                                         \\\n    if (left != right) {                                                       \\\n      std::string msg = Sophus::details::FormatString(                         \\\n          \"% (=%) is not equal to % (=%)\\n\", SOPHUS_STRINGIFY(left),           \\\n          Sophus::details::pretty(left), SOPHUS_STRINGIFY(right),              \\\n          Sophus::details::pretty(right));                                     \\\n      msg += Sophus::details::FormatString(__VA_ARGS__);                       \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// GenericTests whether left is equal to right given a threshold.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_NEQ(passed, left, right, ...)                              
\\\n  do {                                                                         \\\n    if (left == right) {                                                       \\\n      std::string msg = Sophus::details::FormatString(                         \\\n          \"% (=%) should not be equal to % (=%)\\n\", SOPHUS_STRINGIFY(left),    \\\n          Sophus::details::pretty(left), SOPHUS_STRINGIFY(right),              \\\n          Sophus::details::pretty(right));                                     \\\n      msg += Sophus::details::FormatString(__VA_ARGS__);                       \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// GenericTests whether left is approximately equal to right given a threshold.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_APPROX(passed, left, right, thr, ...)                      
\\\n  do {                                                                         \\\n    auto nrm = Sophus::maxMetric((left), (right));                             \\\n    if (!(nrm < (thr))) {                                                      \\\n      std::string msg = Sophus::details::FormatString(                         \\\n          \"% (=%) is not approx % (=%); % is %; nrm is %\\n\",                   \\\n          SOPHUS_STRINGIFY(left), Sophus::details::pretty(left),               \\\n          SOPHUS_STRINGIFY(right), Sophus::details::pretty(right),             \\\n          SOPHUS_STRINGIFY(thr), Sophus::details::pretty(thr), nrm);           \\\n      msg += Sophus::details::FormatString(__VA_ARGS__);                       \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// GenericTests whether left is NOT approximately equal to right given a\n/// threshold. The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_NOT_APPROX(passed, left, right, thr, ...)                  
\\\n  do {                                                                         \\\n    auto nrm = Sophus::maxMetric((left), (right));                             \\\n    if (nrm < (thr)) {                                                         \\\n      std::string msg = Sophus::details::FormatString(                         \\\n          \"% (=%) is approx % (=%), but it should not!\\n % is %; nrm is %\\n\",  \\\n          SOPHUS_STRINGIFY(left), Sophus::details::pretty(left),               \\\n          SOPHUS_STRINGIFY(right), Sophus::details::pretty(right),             \\\n          SOPHUS_STRINGIFY(thr), Sophus::details::pretty(thr), nrm);           \\\n      msg += Sophus::details::FormatString(__VA_ARGS__);                       \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n#endif  // SOPUHS_TESTS_MACROS_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/types.hpp",
    "content": "/// @file\n/// Common type aliases.\n\n#ifndef SOPHUS_TYPES_HPP\n#define SOPHUS_TYPES_HPP\n\n#include <type_traits>\n#include \"common.hpp\"\n\nnamespace Sophus {\n\ntemplate <class Scalar, int M, int Options = 0>\nusing Vector = Eigen::Matrix<Scalar, M, 1, Options>;\n\ntemplate <class Scalar, int Options = 0>\nusing Vector2 = Vector<Scalar, 2, Options>;\nusing Vector2f = Vector2<float>;\nusing Vector2d = Vector2<double>;\n\ntemplate <class Scalar, int Options = 0>\nusing Vector3 = Vector<Scalar, 3, Options>;\nusing Vector3f = Vector3<float>;\nusing Vector3d = Vector3<double>;\n\ntemplate <class Scalar>\nusing Vector4 = Vector<Scalar, 4>;\nusing Vector4f = Vector4<float>;\nusing Vector4d = Vector4<double>;\n\ntemplate <class Scalar>\nusing Vector6 = Vector<Scalar, 6>;\nusing Vector6f = Vector6<float>;\nusing Vector6d = Vector6<double>;\n\ntemplate <class Scalar>\nusing Vector7 = Vector<Scalar, 7>;\nusing Vector7f = Vector7<float>;\nusing Vector7d = Vector7<double>;\n\ntemplate <class Scalar, int M, int N>\nusing Matrix = Eigen::Matrix<Scalar, M, N>;\n\ntemplate <class Scalar>\nusing Matrix2 = Matrix<Scalar, 2, 2>;\nusing Matrix2f = Matrix2<float>;\nusing Matrix2d = Matrix2<double>;\n\ntemplate <class Scalar>\nusing Matrix3 = Matrix<Scalar, 3, 3>;\nusing Matrix3f = Matrix3<float>;\nusing Matrix3d = Matrix3<double>;\n\ntemplate <class Scalar>\nusing Matrix4 = Matrix<Scalar, 4, 4>;\nusing Matrix4f = Matrix4<float>;\nusing Matrix4d = Matrix4<double>;\n\ntemplate <class Scalar>\nusing Matrix6 = Matrix<Scalar, 6, 6>;\nusing Matrix6f = Matrix6<float>;\nusing Matrix6d = Matrix6<double>;\n\ntemplate <class Scalar>\nusing Matrix7 = Matrix<Scalar, 7, 7>;\nusing Matrix7f = Matrix7<float>;\nusing Matrix7d = Matrix7<double>;\n\ntemplate <class Scalar, int N, int Options = 0>\nusing ParametrizedLine = Eigen::ParametrizedLine<Scalar, N, Options>;\n\ntemplate <class Scalar, int Options = 0>\nusing ParametrizedLine3 = ParametrizedLine<Scalar, 3, Options>;\nusing 
ParametrizedLine3f = ParametrizedLine3<float>;\nusing ParametrizedLine3d = ParametrizedLine3<double>;\n\ntemplate <class Scalar, int Options = 0>\nusing ParametrizedLine2 = ParametrizedLine<Scalar, 2, Options>;\nusing ParametrizedLine2f = ParametrizedLine2<float>;\nusing ParametrizedLine2d = ParametrizedLine2<double>;\n\nnamespace details {\ntemplate <class Scalar>\nclass MaxMetric {\n public:\n  static Scalar impl(Scalar s0, Scalar s1) {\n    using std::abs;\n    return abs(s0 - s1);\n  }\n};\n\ntemplate <class Scalar, int M, int N>\nclass MaxMetric<Matrix<Scalar, M, N>> {\n public:\n  static Scalar impl(Matrix<Scalar, M, N> const& p0,\n                     Matrix<Scalar, M, N> const& p1) {\n    return (p0 - p1).template lpNorm<Eigen::Infinity>();\n  }\n};\n\ntemplate <class Scalar>\nclass SetToZero {\n public:\n  static void impl(Scalar& s) { s = Scalar(0); }\n};\n\ntemplate <class Scalar, int M, int N>\nclass SetToZero<Matrix<Scalar, M, N>> {\n public:\n  static void impl(Matrix<Scalar, M, N>& v) { v.setZero(); }\n};\n\ntemplate <class T1, class Scalar>\nclass SetElementAt;\n\ntemplate <class Scalar>\nclass SetElementAt<Scalar, Scalar> {\n public:\n  static void impl(Scalar& s, Scalar value, int at) {\n    SOPHUS_ENSURE(at == 0, \"is %\", at);\n    s = value;\n  }\n};\n\ntemplate <class Scalar, int N>\nclass SetElementAt<Vector<Scalar, N>, Scalar> {\n public:\n  static void impl(Vector<Scalar, N>& v, Scalar value, int at) {\n    SOPHUS_ENSURE(at >= 0 && at < N, \"is %\", at);\n    v[at] = value;\n  }\n};\n\ntemplate <class Scalar>\nclass SquaredNorm {\n public:\n  static Scalar impl(Scalar const& s) { return s * s; }\n};\n\ntemplate <class Scalar, int N>\nclass SquaredNorm<Matrix<Scalar, N, 1>> {\n public:\n  static Scalar impl(Matrix<Scalar, N, 1> const& s) { return s.squaredNorm(); }\n};\n\ntemplate <class Scalar>\nclass Transpose {\n public:\n  static Scalar impl(Scalar const& s) { return s; }\n};\n\ntemplate <class Scalar, int M, int N>\nclass 
Transpose<Matrix<Scalar, M, N>> {\n public:\n  static Matrix<Scalar, M, N> impl(Matrix<Scalar, M, N> const& s) {\n    return s.transpose();\n  }\n};\n}  // namespace details\n\n/// Returns maximum metric between two points ``p0`` and ``p1``, with ``p0, p1``\n/// being matrices or a scalars.\n///\ntemplate <class T>\nauto maxMetric(T const& p0, T const& p1)\n    -> decltype(details::MaxMetric<T>::impl(p0, p1)) {\n  return details::MaxMetric<T>::impl(p0, p1);\n}\n\n/// Sets point ``p`` to zero, with ``p`` being a matrix or a scalar.\n///\ntemplate <class T>\nvoid setToZero(T& p) {\n  return details::SetToZero<T>::impl(p);\n}\n\n/// Sets ``i``th component of ``p`` to ``value``, with ``p`` being a\n/// matrix or a scalar. If ``p`` is a scalar, ``i`` must be ``0``.\n///\ntemplate <class T, class Scalar>\nvoid setElementAt(T& p, Scalar value, int i) {\n  return details::SetElementAt<T, Scalar>::impl(p, value, i);\n}\n\n/// Returns the squared 2-norm of ``p``, with ``p`` being a vector or a scalar.\n///\ntemplate <class T>\nauto squaredNorm(T const& p) -> decltype(details::SquaredNorm<T>::impl(p)) {\n  return details::SquaredNorm<T>::impl(p);\n}\n\n/// Returns ``p.transpose()`` if ``p`` is a matrix, and simply ``p`` if m is a\n/// scalar.\n///\ntemplate <class T>\nauto transpose(T const& p) -> decltype(details::Transpose<T>::impl(T())) {\n  return details::Transpose<T>::impl(p);\n}\n\ntemplate <class Scalar>\nstruct IsFloatingPoint {\n  static bool const value = std::is_floating_point<Scalar>::value;\n};\n\ntemplate <class Scalar, int M, int N>\nstruct IsFloatingPoint<Matrix<Scalar, M, N>> {\n  static bool const value = std::is_floating_point<Scalar>::value;\n};\n\ntemplate <class Scalar_>\nstruct GetScalar {\n  using Scalar = Scalar_;\n};\n\ntemplate <class Scalar_, int M, int N>\nstruct GetScalar<Matrix<Scalar_, M, N>> {\n  using Scalar = Scalar_;\n};\n\n/// If the Vector type is of fixed size, then IsFixedSizeVector::value will be\n/// true.\ntemplate <typename Vector, 
int NumDimensions,\n          typename = typename std::enable_if<\n              Vector::RowsAtCompileTime == NumDimensions &&\n              Vector::ColsAtCompileTime == 1>::type>\nstruct IsFixedSizeVector : std::true_type {};\n\n/// Planes in 3d are hyperplanes.\ntemplate <class T>\nusing Plane3 = Eigen::Hyperplane<T, 3>;\nusing Plane3d = Plane3<double>;\nusing Plane3f = Plane3<float>;\n\n/// Lines in 2d are hyperplanes.\ntemplate <class T>\nusing Line2 = Eigen::Hyperplane<T, 2>;\nusing Line2d = Line2<double>;\nusing Line2f = Line2<float>;\n\n}  // namespace Sophus\n\n#endif  // SOPHUS_TYPES_HPP\n"
  },
  {
    "path": "LIO-Livox/include/sophus/velocities.hpp",
    "content": "#ifndef SOPHUS_VELOCITIES_HPP\n#define SOPHUS_VELOCITIES_HPP\n\n#include <functional>\n\n#include \"num_diff.hpp\"\n#include \"se3.hpp\"\n\nnamespace Sophus {\nnamespace experimental {\n// Experimental since the API will certainly change drastically in the future.\n\n// Transforms velocity vector by rotation ``foo_R_bar``.\n//\n// Note: vel_bar can be either a linear or a rotational velocity vector.\n//\ntemplate <class Scalar>\nVector3<Scalar> transformVelocity(SO3<Scalar> const& foo_R_bar,\n                                  Vector3<Scalar> const& vel_bar) {\n  // For rotational velocities note that:\n  //\n  //   vel_bar = vee(foo_R_bar * hat(vel_bar) * foo_R_bar^T)\n  //           = vee(hat(Adj(foo_R_bar) * vel_bar))\n  //           = Adj(foo_R_bar) * vel_bar\n  //           = foo_R_bar * vel_bar.\n  //\n  return foo_R_bar * vel_bar;\n}\n\n// Transforms velocity vector by pose ``foo_T_bar``.\n//\n// Note: vel_bar can be either a linear or a rotational velocity vector.\n//\ntemplate <class Scalar>\nVector3<Scalar> transformVelocity(SE3<Scalar> const& foo_T_bar,\n                                  Vector3<Scalar> const& vel_bar) {\n  return transformVelocity(foo_T_bar.so3(), vel_bar);\n}\n\n// finite difference approximation of instantanious velocity in frame foo\n//\ntemplate <class Scalar>\nVector3<Scalar> finiteDifferenceRotationalVelocity(\n    std::function<SO3<Scalar>(Scalar)> const& foo_R_bar, Scalar t,\n    Scalar h = Constants<Scalar>::epsilon()) {\n  // https://en.wikipedia.org/w/index.php?title=Angular_velocity&oldid=791867792#Angular_velocity_tensor\n  //\n  // W = dR(t)/dt * R^{-1}(t)\n  Matrix3<Scalar> dR_dt_in_frame_foo = curveNumDiff(\n      [&foo_R_bar](Scalar t0) -> Matrix3<Scalar> {\n        return foo_R_bar(t0).matrix();\n      },\n      t, h);\n  // velocity tensor\n  Matrix3<Scalar> W_in_frame_foo =\n      dR_dt_in_frame_foo * (foo_R_bar(t)).inverse().matrix();\n  return SO3<Scalar>::vee(W_in_frame_foo);\n}\n\n// finite 
difference approximation of instantaneous velocity in frame foo\n//\ntemplate <class Scalar>\nVector3<Scalar> finiteDifferenceRotationalVelocity(\n    std::function<SE3<Scalar>(Scalar)> const& foo_T_bar, Scalar t,\n    Scalar h = Constants<Scalar>::epsilon()) {\n  return finiteDifferenceRotationalVelocity<Scalar>(\n      [&foo_T_bar](Scalar t) -> SO3<Scalar> { return foo_T_bar(t).so3(); }, t,\n      h);\n}\n\n}  // namespace experimental\n}  // namespace Sophus\n\n#endif  // SOPHUS_VELOCITIES_HPP\n"
  },
  {
    "path": "LIO-Livox/include/utils/ceresfunc.h",
    "content": "#ifndef LIO_LIVOX_CERESFUNC_H\n#define LIO_LIVOX_CERESFUNC_H\n#include <ceres/ceres.h>\n#include <glog/logging.h>\n#include <utility>\n#include <pthread.h>\n#include <unordered_map>\n#include \"sophus/so3.hpp\"\n#include \"IMUIntegrator/IMUIntegrator.h\"\n#include \"math_utils.hpp\"\nconst int NUM_THREADS = 4;\n\n/** \\brief Residual Block Used for marginalization\n */\nstruct ResidualBlockInfo\n{\n\tResidualBlockInfo(ceres::CostFunction *_cost_function, ceres::LossFunction *_loss_function, std::vector<double *> _parameter_blocks, std::vector<int> _drop_set)\n\t\t\t\t\t: cost_function(_cost_function), loss_function(_loss_function), parameter_blocks(std::move(_parameter_blocks)), drop_set(std::move(_drop_set)) {}\n\n\tvoid Evaluate(){\n\t\tresiduals.resize(cost_function->num_residuals());\n\n\t\tstd::vector<int> block_sizes = cost_function->parameter_block_sizes();\n\t\traw_jacobians = new double *[block_sizes.size()];\n\t\tjacobians.resize(block_sizes.size());\n\n\t\tfor (int i = 0; i < static_cast<int>(block_sizes.size()); i++)\n\t\t{\n\t\t\tjacobians[i].resize(cost_function->num_residuals(), block_sizes[i]);\n\t\t\traw_jacobians[i] = jacobians[i].data();\n\t\t}\n\t\tcost_function->Evaluate(parameter_blocks.data(), residuals.data(), raw_jacobians);\n\n\t\tif (loss_function)\n\t\t{\n\t\t\tdouble residual_scaling_, alpha_sq_norm_;\n\n\t\t\tdouble sq_norm, rho[3];\n\n\t\t\tsq_norm = residuals.squaredNorm();\n\t\t\tloss_function->Evaluate(sq_norm, rho);\n\n\t\t\tdouble sqrt_rho1_ = sqrt(rho[1]);\n\n\t\t\tif ((sq_norm == 0.0) || (rho[2] <= 0.0))\n\t\t\t{\n\t\t\t\tresidual_scaling_ = sqrt_rho1_;\n\t\t\t\talpha_sq_norm_ = 0.0;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tconst double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];\n\t\t\t\tconst double alpha = 1.0 - sqrt(D);\n\t\t\t\tresidual_scaling_ = sqrt_rho1_ / (1 - alpha);\n\t\t\t\talpha_sq_norm_ = alpha / sq_norm;\n\t\t\t}\n\n\t\t\tfor (int i = 0; i < static_cast<int>(parameter_blocks.size()); 
i++)\n\t\t\t{\n\t\t\t\tjacobians[i] = sqrt_rho1_ * (jacobians[i] - alpha_sq_norm_ * residuals * (residuals.transpose() * jacobians[i]));\n\t\t\t}\n\n\t\t\tresiduals *= residual_scaling_;\n\t\t}\n\t}\n\n\tceres::CostFunction *cost_function;\n\tceres::LossFunction *loss_function;\n\tstd::vector<double *> parameter_blocks;\n\tstd::vector<int> drop_set;\n\n\tdouble **raw_jacobians{};\n\tstd::vector<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobians;\n\tEigen::VectorXd residuals;\n\n};\n\nstruct ThreadsStruct\n{\n\tstd::vector<ResidualBlockInfo *> sub_factors;\n\tEigen::MatrixXd A;\n\tEigen::VectorXd b;\n\tstd::unordered_map<long, int> parameter_block_size;\n\tstd::unordered_map<long, int> parameter_block_idx;\n};\n\n/** \\brief Multi-thread to process marginalization\n */\nvoid* ThreadsConstructA(void* threadsstruct);\n\n/** \\brief marginalization infomation\n */\nclass MarginalizationInfo\n{\npublic:\n\t~MarginalizationInfo(){\n//\t\t\tROS_WARN(\"release marginlizationinfo\");\n\n\t\tfor (auto it = parameter_block_data.begin(); it != parameter_block_data.end(); ++it)\n\t\t\tdelete[] it->second;\n\n\t\tfor (int i = 0; i < (int)factors.size(); i++)\n\t\t{\n\t\t\tdelete[] factors[i]->raw_jacobians;\n\t\t\tdelete factors[i]->cost_function;\n\t\t\tdelete factors[i];\n\t\t}\n\t}\n\n\tvoid addResidualBlockInfo(ResidualBlockInfo *residual_block_info){\n\t\tfactors.emplace_back(residual_block_info);\n\n\t\tstd::vector<double *> &parameter_blocks = residual_block_info->parameter_blocks;\n\t\tstd::vector<int> parameter_block_sizes = residual_block_info->cost_function->parameter_block_sizes();\n\n\t\tfor (int i = 0; i < static_cast<int>(residual_block_info->parameter_blocks.size()); i++)\n\t\t{\n\t\t\tdouble *addr = parameter_blocks[i];\n\t\t\tint size = parameter_block_sizes[i];\n\t\t\tparameter_block_size[reinterpret_cast<long>(addr)] = size;\n\t\t}\n\n\t\tfor (int i = 0; i < static_cast<int>(residual_block_info->drop_set.size()); 
i++)\n\t\t{\n\t\t\tdouble *addr = parameter_blocks[residual_block_info->drop_set[i]];\n\t\t\tparameter_block_idx[reinterpret_cast<long>(addr)] = 0;\n\t\t}\n\t}\n\n\tvoid preMarginalize(){\n\t\tfor (auto it : factors)\n\t\t{\n\t\t\tit->Evaluate();\n\n\t\t\tstd::vector<int> block_sizes = it->cost_function->parameter_block_sizes();\n\t\t\tfor (int i = 0; i < static_cast<int>(block_sizes.size()); i++)\n\t\t\t{\n\t\t\t\tlong addr = reinterpret_cast<long>(it->parameter_blocks[i]);\n\t\t\t\tint size = block_sizes[i];\n\t\t\t\tif (parameter_block_data.find(addr) == parameter_block_data.end())\n\t\t\t\t{\n\t\t\t\t\tdouble *data = new double[size];\n\t\t\t\t\tmemcpy(data, it->parameter_blocks[i], sizeof(double) * size);\n\t\t\t\t\tparameter_block_data[addr] = data;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tvoid marginalize(){\n\t\tint pos = 0;\n\t\tfor (auto &it : parameter_block_idx)\n\t\t{\n\t\t\tit.second = pos;\n\t\t\tpos += parameter_block_size[it.first];\n\t\t}\n\n\t\tm = pos;\n\n\t\tfor (const auto &it : parameter_block_size)\n\t\t{\n\t\t\tif (parameter_block_idx.find(it.first) == parameter_block_idx.end())\n\t\t\t{\n\t\t\t\tparameter_block_idx[it.first] = pos;\n\t\t\t\tpos += it.second;\n\t\t\t}\n\t\t}\n\n\t\tn = pos - m;\n\n\t\tEigen::MatrixXd A(pos, pos);\n\t\tEigen::VectorXd b(pos);\n\t\tA.setZero();\n\t\tb.setZero();\n\n\t\tpthread_t tids[NUM_THREADS];\n\t\tThreadsStruct threadsstruct[NUM_THREADS];\n\t\tint i = 0;\n\t\tfor (auto it : factors)\n\t\t{\n\t\t\tthreadsstruct[i].sub_factors.push_back(it);\n\t\t\ti++;\n\t\t\ti = i % NUM_THREADS;\n\t\t}\n\t\tfor (int i = 0; i < NUM_THREADS; i++)\n\t\t{\n\t\t\tthreadsstruct[i].A = Eigen::MatrixXd::Zero(pos,pos);\n\t\t\tthreadsstruct[i].b = Eigen::VectorXd::Zero(pos);\n\t\t\tthreadsstruct[i].parameter_block_size = parameter_block_size;\n\t\t\tthreadsstruct[i].parameter_block_idx = parameter_block_idx;\n\t\t\tint ret = pthread_create( &tids[i], NULL, ThreadsConstructA ,(void*)&(threadsstruct[i]));\n\t\t\tif (ret != 
0)\n\t\t\t{\n\t\t\t\tstd::cout<<\"pthread_create error\"<<std::endl;\n\t\t\t\texit(1);\n\t\t\t}\n\t\t}\n\t\tfor( int i = NUM_THREADS - 1; i >= 0; i--)\n\t\t{\n\t\t\tpthread_join( tids[i], NULL );\n\t\t\tA += threadsstruct[i].A;\n\t\t\tb += threadsstruct[i].b;\n\t\t}\n\t\tEigen::MatrixXd Amm = 0.5 * (A.block(0, 0, m, m) + A.block(0, 0, m, m).transpose());\n\t\tEigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes(Amm);\n\n\t\tEigen::MatrixXd Amm_inv = saes.eigenvectors() * Eigen::VectorXd((saes.eigenvalues().array() > eps).select(saes.eigenvalues().array().inverse(), 0)).asDiagonal() * saes.eigenvectors().transpose();\n\n\t\tEigen::VectorXd bmm = b.segment(0, m);\n\t\tEigen::MatrixXd Amr = A.block(0, m, m, n);\n\t\tEigen::MatrixXd Arm = A.block(m, 0, n, m);\n\t\tEigen::MatrixXd Arr = A.block(m, m, n, n);\n\t\tEigen::VectorXd brr = b.segment(m, n);\n\t\tA = Arr - Arm * Amm_inv * Amr;\n\t\tb = brr - Arm * Amm_inv * bmm;\n\n\t\tEigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes2(A);\n\t\tEigen::VectorXd S = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array(), 0));\n\t\tEigen::VectorXd S_inv = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array().inverse(), 0));\n\n\t\tEigen::VectorXd S_sqrt = S.cwiseSqrt();\n\t\tEigen::VectorXd S_inv_sqrt = S_inv.cwiseSqrt();\n\n\t\tlinearized_jacobians = S_sqrt.asDiagonal() * saes2.eigenvectors().transpose();\n\t\tlinearized_residuals = S_inv_sqrt.asDiagonal() * saes2.eigenvectors().transpose() * b;\n\t}\n\n\tstd::vector<double *> getParameterBlocks(std::unordered_map<long, double *> &addr_shift){\n\t\tstd::vector<double *> keep_block_addr;\n\t\tkeep_block_size.clear();\n\t\tkeep_block_idx.clear();\n\t\tkeep_block_data.clear();\n\n\t\tfor (const auto &it : parameter_block_idx)\n\t\t{\n\t\t\tif (it.second >= 
m)\n\t\t\t{\n\t\t\t\tkeep_block_size.push_back(parameter_block_size[it.first]);\n\t\t\t\tkeep_block_idx.push_back(parameter_block_idx[it.first]);\n\t\t\t\tkeep_block_data.push_back(parameter_block_data[it.first]);\n\t\t\t\tkeep_block_addr.push_back(addr_shift[it.first]);\n\t\t\t}\n\t\t}\n\t\tsum_block_size = std::accumulate(std::begin(keep_block_size), std::end(keep_block_size), 0);\n\n\t\treturn keep_block_addr;\n\t}\n\n\tstd::vector<ResidualBlockInfo *> factors;\n\tint m, n;\n\tstd::unordered_map<long, int> parameter_block_size;\n\tint sum_block_size;\n\tstd::unordered_map<long, int> parameter_block_idx;\n\tstd::unordered_map<long, double *> parameter_block_data;\n\n\tstd::vector<int> keep_block_size;\n\tstd::vector<int> keep_block_idx;\n\tstd::vector<double *> keep_block_data;\n\n\tEigen::MatrixXd linearized_jacobians;\n\tEigen::VectorXd linearized_residuals;\n\tconst double eps = 1e-8;\n\n};\n\n/** \\brief Ceres Cost Funtion Used for Marginalization\n */\nclass MarginalizationFactor : public ceres::CostFunction\n{\npublic:\n\texplicit MarginalizationFactor(MarginalizationInfo* _marginalization_info):marginalization_info(_marginalization_info){\n\t\tint cnt = 0;\n\t\tfor (auto it : marginalization_info->keep_block_size)\n\t\t{\n\t\t\tmutable_parameter_block_sizes()->push_back(it);\n\t\t\tcnt += it;\n\t\t}\n\t\tset_num_residuals(marginalization_info->n);\n\t};\n\n\tbool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const override{\n\t\tint n = marginalization_info->n;\n\t\tint m = marginalization_info->m;\n\t\tEigen::VectorXd dx(n);\n\t\tfor (int i = 0; i < static_cast<int>(marginalization_info->keep_block_size.size()); i++)\n\t\t{\n\t\t\tint size = marginalization_info->keep_block_size[i];\n\t\t\tint idx = marginalization_info->keep_block_idx[i] - m;\n\t\t\tEigen::VectorXd x = Eigen::Map<const Eigen::VectorXd>(parameters[i], size);\n\t\t\tEigen::VectorXd x0 = Eigen::Map<const 
Eigen::VectorXd>(marginalization_info->keep_block_data[i], size);\n\t\t\tif(size == 6){\n\t\t\t\tdx.segment<3>(idx + 0) = x.segment<3>(0) - x0.segment<3>(0);\n\t\t\t\tdx.segment<3>(idx + 3) = (Sophus::SO3d::exp(x.segment<3>(3)).inverse() * Sophus::SO3d::exp(x0.segment<3>(3))).log();\n\t\t\t}else{\n\t\t\t\tdx.segment(idx, size) = x - x0;\n\t\t\t}\n\t\t}\n\t\tEigen::Map<Eigen::VectorXd>(residuals, n) = marginalization_info->linearized_residuals + marginalization_info->linearized_jacobians * dx;\n\t\tif (jacobians)\n\t\t{\n\n\t\t\tfor (int i = 0; i < static_cast<int>(marginalization_info->keep_block_size.size()); i++)\n\t\t\t{\n\t\t\t\tif (jacobians[i])\n\t\t\t\t{\n\t\t\t\t\tint size = marginalization_info->keep_block_size[i];\n\t\t\t\t\tint idx = marginalization_info->keep_block_idx[i] - m;\n\t\t\t\t\tEigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobian(jacobians[i], n, size);\n\t\t\t\t\tjacobian.setZero();\n\t\t\t\t\tjacobian.leftCols(size) = marginalization_info->linearized_jacobians.middleCols(idx, size);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n\n\tMarginalizationInfo* marginalization_info;\n};\n\n/** \\brief Ceres Cost Funtion between Lidar Pose and Ground\n */\nstruct Cost_NavState_PR_Ground\n{\n\tCost_NavState_PR_Ground(Eigen::VectorXf ground_plane_coeff_):ground_plane_coeff(ground_plane_coeff_) {\n\t\t// std::cout << ground_plane_coeff.transpose() << std::endl;\n\t}\n\n\ttemplate <typename T>\n\tbool operator()( const T *pri_, T *residual) const {\n\t\tEigen::Map<const Eigen::Matrix<T, 6, 1>> PRi(pri_);\n\t\tEigen::Matrix<T, 3, 1> Pi = PRi.template segment<3>(0);\n\t\tSophus::SO3<T> SO3_Ri = Sophus::SO3<T>::exp(PRi.template segment<3>(3));\n\t\tEigen::Map<Eigen::Matrix<T, 3, 1> > eResiduals(residual);\n\t\teResiduals = Eigen::Matrix<T, 3, 1>::Zero();\n\n    Eigen::Matrix<T, 4, 4> T_wl = Eigen::Matrix<T, 4, 4>::Identity();\n    T_wl.topLeftCorner(3,3) = SO3_Ri.matrix();\n    T_wl.topRightCorner(3,1) = Pi;\n    
Eigen::Matrix<T, 4, 4> T_lw = T_wl.inverse();\n    Eigen::Matrix<T, 3, 3> R_lw = T_lw.topLeftCorner(3,3);\n    Eigen::Matrix<T, 3, 1> t_lw = T_lw.topRightCorner(3,1);\n\n    Eigen::Matrix<T, 4, 1> ground_plane_coeff_temp = ground_plane_coeff.cast<T>().template segment<4>(0);\n    Eigen::Matrix<T, 4, 1> local_ground_plane;\n    local_ground_plane.template segment<3>(0) = R_lw * init_ground_plane_coeff.cast<T>().template segment<3>(0);\n    local_ground_plane.template segment<1>(3) = init_ground_plane_coeff.cast<T>().template segment<1>(3) - \n                                                t_lw.transpose() * local_ground_plane.template segment<3>(0);\n    eResiduals = livox_slam_ware::ominus(ground_plane_coeff_temp, local_ground_plane);\n\t\teResiduals.applyOnTheLeft(sqrt_information.template cast<T>());\n\t\treturn true;\n\t}\n\n\tstatic ceres::CostFunction *Create(Eigen::VectorXf ground_plane_coeff) {\n\t\treturn (new ceres::AutoDiffCostFunction<Cost_NavState_PR_Ground, 3, 6>(\n\t\t\t\t\t\tnew Cost_NavState_PR_Ground(ground_plane_coeff)));\n\t}\n  Eigen::VectorXf ground_plane_coeff;\n\tstatic Eigen::Matrix<double, 3, 3> sqrt_information;\n  static Eigen::VectorXf init_ground_plane_coeff;\n};\n\n/** \\brief Ceres Cost Funtion between Lidar Pose and IMU Preintegration\n */\nstruct Cost_NavState_PRV_Bias\n{\n\tCost_NavState_PRV_Bias(IMUIntegrator& measure_,\n\t\t\t\t\t\t\tEigen::Vector3d& GravityVec_,\n\t\t\t\t\t\t\tEigen::Matrix<double, 15, 15>  sqrt_information_):\n\t\t\t\t\timu_measure(measure_),\n\t\t\t\t\tGravityVec(GravityVec_),\n\t\t\t\t\tsqrt_information(std::move(sqrt_information_)){}\n\n\ttemplate <typename T>\n\tbool operator()( const T *pri_, const T *velobiasi_, const T *prj_, const T *velobiasj_, T *residual) const {\n\t\tEigen::Map<const Eigen::Matrix<T, 6, 1>> PRi(pri_);\n\t\tEigen::Matrix<T, 3, 1> Pi = PRi.template segment<3>(0);\n\t\tSophus::SO3<T> SO3_Ri = Sophus::SO3<T>::exp(PRi.template segment<3>(3));\n\n\t\tEigen::Map<const Eigen::Matrix<T, 6, 
1>> PRj(prj_);\n\t\tEigen::Matrix<T, 3, 1> Pj = PRj.template segment<3>(0);\n\t\tSophus::SO3<T> SO3_Rj = Sophus::SO3<T>::exp(PRj.template segment<3>(3));\n\n\t\tEigen::Map<const Eigen::Matrix<T, 9, 1>> velobiasi(velobiasi_);\n\t\tEigen::Matrix<T, 3, 1> Vi = velobiasi.template segment<3>(0);\n\t\tEigen::Matrix<T, 3, 1> dbgi = velobiasi.template segment<3>(3) - imu_measure.GetBiasGyr().cast<T>();\n\t\tEigen::Matrix<T, 3, 1> dbai = velobiasi.template segment<3>(6) - imu_measure.GetBiasAcc().cast<T>();\n\n\t\tEigen::Map<const Eigen::Matrix<T, 9, 1>> velobiasj(velobiasj_);\n\t\tEigen::Matrix<T, 3, 1> Vj = velobiasj.template segment<3>(0);\n\n\t\tEigen::Map<Eigen::Matrix<T, 15, 1> > eResiduals(residual);\n\t\teResiduals = Eigen::Matrix<T, 15, 1>::Zero();\n\n\t\tT dTij = T(imu_measure.GetDeltaTime());\n\t\tT dT2 = dTij*dTij;\n\t\tEigen::Matrix<T, 3, 1> dPij = imu_measure.GetDeltaP().cast<T>();\n\t\tEigen::Matrix<T, 3, 1> dVij = imu_measure.GetDeltaV().cast<T>();\n\t\tSophus::SO3<T> dRij = Sophus::SO3<T>(imu_measure.GetDeltaQ().cast<T>());\n\t\tSophus::SO3<T> RiT = SO3_Ri.inverse();\n\n\t\tEigen::Matrix<T, 3, 1> rPij = RiT*(Pj - Pi - Vi*dTij - 0.5*GravityVec.cast<T>()*dT2) -\n\t\t\t\t\t\t(dPij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BG).cast<T>()*dbgi +\n\t\t\t\t\t\timu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BA).cast<T>()*dbai);\n\n\t\tSophus::SO3<T> dR_dbg = Sophus::SO3<T>::exp(\n\t\t\t\t\t\timu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_R, IMUIntegrator::O_BG).cast<T>()*dbgi);\n\t\tSophus::SO3<T> rRij = (dRij * dR_dbg).inverse() * RiT * SO3_Rj;\n\t\tEigen::Matrix<T, 3, 1> rPhiij = rRij.log();\n\n\t\tEigen::Matrix<T, 3, 1> rVij = RiT*(Vj - Vi - GravityVec.cast<T>()*dTij) -\n\t\t\t\t\t\t(dVij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BG).cast<T>()*dbgi +\n\t\t\t\t\t\t\t\t\t\timu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, 
IMUIntegrator::O_BA).cast<T>()*dbai);\n\n\t\teResiduals.template segment<3>(0) = rPij;\n\t\teResiduals.template segment<3>(3) = rPhiij;\n\t\teResiduals.template segment<3>(6) = rVij;\n\t\teResiduals.template segment<6>(9) = velobiasj.template segment<6>(3) - velobiasi.template segment<6>(3);\n\n\t\teResiduals.applyOnTheLeft(sqrt_information.template cast<T>());\n\t\treturn true;\n\t}\n\n\tstatic ceres::CostFunction *Create(IMUIntegrator& measure_,\n\t\t\t\t\t\t\t\t\t\tEigen::Vector3d& GravityVec_,\n\t\t\t\t\t\t\t\t\t\tEigen::Matrix<double, 15, 15>  sqrt_information_) {\n\t\treturn (new ceres::AutoDiffCostFunction<Cost_NavState_PRV_Bias, 15, 6, 9, 6, 9>(\n\t\t\t\t\t\tnew Cost_NavState_PRV_Bias(measure_,\n\t\t\t\t\t\t\t\t\t\t\t\t\tGravityVec_,\n\t\t\t\t\t\t\t\t\t\t\t\t\tstd::move(sqrt_information_))));\n\t}\n\n\tIMUIntegrator imu_measure;\n\tEigen::Vector3d GravityVec;\n\tEigen::Matrix<double, 15, 15> sqrt_information;\n};\n\n/** \\brief Ceres Cost Funtion between PointCloud Sharp Feature and Map Cloud\n */\nstruct Cost_NavState_IMU_Line\n{\n    Cost_NavState_IMU_Line(Eigen::Vector3d  _p, Eigen::Vector3d  _vtx1, Eigen::Vector3d  _vtx2,\n                           const Eigen::Matrix4d& Tbl, Eigen::Matrix<double, 1, 1>  sqrt_information_):\n            point(std::move(_p)), vtx1(std::move(_vtx1)), vtx2(std::move(_vtx2)),\n            sqrt_information(std::move(sqrt_information_)){\n      l12 = std::sqrt((vtx1(0) - vtx2(0))*(vtx1(0) - vtx2(0)) + (vtx1(1) - vtx2(1))*\n                                                                (vtx1(1) - vtx2(1)) + (vtx1(2) - vtx2(2))*(vtx1(2) - vtx2(2)));\n      Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3);\n      qbl = Eigen::Quaterniond(m3d).normalized();\n      qlb = qbl.conjugate();\n      Pbl = Tbl.topRightCorner(3,1);\n      Plb = -(qlb * Pbl);\n    }\n\n    template <typename T>\n    bool operator()(const T *PRi, T *residual) const {\n      Eigen::Matrix<T, 3, 1> cp{T(point.x()), T(point.y()), T(point.z())};\n      
Eigen::Matrix<T, 3, 1> lpa{T(vtx1.x()), T(vtx1.y()), T(vtx1.z())};\n      Eigen::Matrix<T, 3, 1> lpb{T(vtx2.x()), T(vtx2.y()), T(vtx2.z())};\n\n      Eigen::Map<const Eigen::Matrix<T, 6, 1>> pri_wb(PRi);\n      Eigen::Quaternion<T> q_wb = Sophus::SO3<T>::exp(pri_wb.template segment<3>(3)).unit_quaternion();\n      Eigen::Matrix<T, 3, 1> t_wb = pri_wb.template segment<3>(0);\n      Eigen::Quaternion<T> q_wl = q_wb * qbl.cast<T>();\n      Eigen::Matrix<T, 3, 1> t_wl = q_wb * Pbl.cast<T>() + t_wb;\n      Eigen::Matrix<T, 3, 1> P_to_Map = q_wl * cp + t_wl;\n\n      T a012 = ceres::sqrt(\n              ((P_to_Map(0) - lpa(0)) * (P_to_Map(1) - lpb(1)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(1) - lpa(1)))\n              * ((P_to_Map(0) - lpa(0)) * (P_to_Map(1) - lpb(1)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(1) - lpa(1)))\n              + ((P_to_Map(0) - lpa(0)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(2) - lpa(2)))\n                * ((P_to_Map(0) - lpa(0)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(0) - lpb(0)) * (P_to_Map(2) - lpa(2)))\n              + ((P_to_Map(1) - lpa(1)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(1) - lpb(1)) * (P_to_Map(2) - lpa(2)))\n                * ((P_to_Map(1) - lpa(1)) * (P_to_Map(2) - lpb(2)) - (P_to_Map(1) - lpb(1)) * (P_to_Map(2) - lpa(2))));\n      T ld2 = a012 / T(l12);\n      T _weight = T(1) - T(0.9) * ceres::abs(ld2) / ceres::sqrt(\n              ceres::sqrt( P_to_Map(0) * P_to_Map(0) +\n                           P_to_Map(1) * P_to_Map(1) +\n                           P_to_Map(2) * P_to_Map(2) ));\n      residual[0] = T(sqrt_information(0)) * _weight * ld2;\n      return true;\n    }\n\n    static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_,\n                                       const Eigen::Vector3d& last_point_a_,\n                                       const Eigen::Vector3d& last_point_b_,\n                                       const Eigen::Matrix4d& Tbl,\n                                       
Eigen::Matrix<double, 1, 1>  sqrt_information_) {\n      return (new ceres::AutoDiffCostFunction<Cost_NavState_IMU_Line, 1, 6>(\n              new Cost_NavState_IMU_Line(curr_point_, last_point_a_, last_point_b_, Tbl, std::move(sqrt_information_))));\n    }\n\n    Eigen::Vector3d point;\n    Eigen::Vector3d vtx1;\n    Eigen::Vector3d vtx2;\n    double l12;\n    Eigen::Quaterniond qbl, qlb;\n    Eigen::Vector3d Pbl, Plb;\n    Eigen::Matrix<double, 1, 1> sqrt_information;\n};\n\n/** \\brief Ceres Cost Funtion between PointCloud Flat Feature and Map Cloud\n */\nstruct Cost_NavState_IMU_Plan\n{\n    Cost_NavState_IMU_Plan(Eigen::Vector3d  _p, double _pa, double _pb, double _pc, double _pd,\n\n                           const Eigen::Matrix4d& Tbl, Eigen::Matrix<double, 1, 1>  sqrt_information_):\n            point(std::move(_p)), pa(_pa), pb(_pb), pc(_pc), pd(_pd), sqrt_information(std::move(sqrt_information_)){\n      Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3);\n      qbl = Eigen::Quaterniond(m3d).normalized();\n      qlb = qbl.conjugate();\n      Pbl = Tbl.topRightCorner(3,1);\n      Plb = -(qlb * Pbl);\n    }\n\n    template <typename T>\n    bool operator()(const T *PRi, T *residual) const {\n      Eigen::Matrix<T, 3, 1> cp{T(point.x()), T(point.y()), T(point.z())};\n\n      Eigen::Map<const Eigen::Matrix<T, 6, 1>> pri_wb(PRi);\n      Eigen::Quaternion<T> q_wb = Sophus::SO3<T>::exp(pri_wb.template segment<3>(3)).unit_quaternion();\n      Eigen::Matrix<T, 3, 1> t_wb = pri_wb.template segment<3>(0);\n      Eigen::Quaternion<T> q_wl = q_wb * qbl.cast<T>();\n      Eigen::Matrix<T, 3, 1> t_wl = q_wb * Pbl.cast<T>() + t_wb;\n      Eigen::Matrix<T, 3, 1> P_to_Map = q_wl * cp + t_wl;\n\n      T pd2 = T(pa) * P_to_Map(0) + T(pb) * P_to_Map(1) + T(pc) * P_to_Map(2) + T(pd);\n      T _weight = T(1) - T(0.9) * ceres::abs(pd2) /ceres::sqrt(\n              ceres::sqrt( P_to_Map(0) * P_to_Map(0) +\n                           P_to_Map(1) * P_to_Map(1) +\n                          
 P_to_Map(2) * P_to_Map(2) ));\n      residual[0] = T(sqrt_information(0)) * _weight * pd2;\n      return true;\n    }\n\n    static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_,\n                                       const double& pa_,\n                                       const double& pb_,\n                                       const double& pc_,\n                                       const double& pd_,\n                                       const Eigen::Matrix4d& Tbl,\n                                       Eigen::Matrix<double, 1, 1>  sqrt_information_) {\n      return (new ceres::AutoDiffCostFunction<Cost_NavState_IMU_Plan, 1, 6>(\n              new Cost_NavState_IMU_Plan(curr_point_, pa_, pb_, pc_, pd_, Tbl, std::move(sqrt_information_))));\n    }\n\n    double pa, pb, pc, pd;\n    Eigen::Vector3d point;\n    Eigen::Quaterniond qbl, qlb;\n    Eigen::Vector3d Pbl, Plb;\n    Eigen::Matrix<double, 1, 1> sqrt_information;\n};\n\n\n/** \\brief Ceres Cost Funtion between PointCloud Flat Feature and Map Cloud\n */\nstruct Cost_NavState_IMU_Plan_Vec\n{\n    Cost_NavState_IMU_Plan_Vec(Eigen::Vector3d  _p, \n\t\t\t\t\t\t\t   Eigen::Vector3d  _p_proj,\n\t\t\t\t\t\t\t   const Eigen::Matrix4d& Tbl,\n\t\t\t\t\t\t\t   Eigen::Matrix<double, 3, 3> _sqrt_information):\n                               point(std::move(_p)),\n\t\t\t\t\t\t\t   point_proj(std::move(_p_proj)), \n\t\t\t\t\t\t\t   sqrt_information(std::move(_sqrt_information)){\n      Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3);\n      qbl = Eigen::Quaterniond(m3d).normalized();\n      qlb = qbl.conjugate();\n      Pbl = Tbl.topRightCorner(3,1);\n      Plb = -(qlb * Pbl);\n    }\n\n    template <typename T>\n    bool operator()(const T *PRi, T *residual) const {\n      Eigen::Matrix<T, 3, 1> cp{T(point.x()), T(point.y()), T(point.z())};\n\t    Eigen::Matrix<T, 3, 1> cp_proj{T(point_proj.x()), T(point_proj.y()), T(point_proj.z())};\n\n      Eigen::Map<const Eigen::Matrix<T, 6, 1>> 
pri_wb(PRi);\n      Eigen::Quaternion<T> q_wb = Sophus::SO3<T>::exp(pri_wb.template segment<3>(3)).unit_quaternion();\n      Eigen::Matrix<T, 3, 1> t_wb = pri_wb.template segment<3>(0);\n      Eigen::Quaternion<T> q_wl = q_wb * qbl.cast<T>();\n      Eigen::Matrix<T, 3, 1> t_wl = q_wb * Pbl.cast<T>() + t_wb;\n      Eigen::Matrix<T, 3, 1> P_to_Map = q_wl * cp + t_wl;\n\n\t    Eigen::Map<Eigen::Matrix<T, 3, 1> > eResiduals(residual);\n      eResiduals = P_to_Map - cp_proj;\n\t    T _weight = T(1) - T(0.9) * (P_to_Map - cp_proj).norm() /ceres::sqrt(\n              ceres::sqrt( P_to_Map(0) * P_to_Map(0) +\n                           P_to_Map(1) * P_to_Map(1) +\n                           P_to_Map(2) * P_to_Map(2) ));\n      eResiduals *= _weight;\n      eResiduals.applyOnTheLeft(sqrt_information.template cast<T>());\n      return true;\n    }\n\n    static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_,\n                                       const Eigen::Vector3d&  p_proj_,\n                                       const Eigen::Matrix4d& Tbl,\n\t\t\t\t\t\t\t\t\t   const Eigen::Matrix<double, 3, 3> sqrt_information_) {\n      return (new ceres::AutoDiffCostFunction<Cost_NavState_IMU_Plan_Vec, 3, 6>(\n              new Cost_NavState_IMU_Plan_Vec(curr_point_, p_proj_, Tbl, sqrt_information_)));\n    }\n\n    Eigen::Vector3d point;\n\t  Eigen::Vector3d point_proj;\n    Eigen::Quaterniond qbl, qlb;\n    Eigen::Vector3d Pbl, Plb;\n\t  Eigen::Matrix<double, 3, 3> sqrt_information;\n};\n\n\nstruct Cost_NonFeature_ICP\n{\n    Cost_NonFeature_ICP(Eigen::Vector3d  _p, double _pa, double _pb, double _pc, double _pd,\n                        const Eigen::Matrix4d& Tbl, Eigen::Matrix<double, 1, 1>  sqrt_information_):\n            \t\t\tpoint(std::move(_p)), pa(_pa), pb(_pb), pc(_pc), pd(_pd), sqrt_information(std::move(sqrt_information_)){\n      Eigen::Matrix3d m3d = Tbl.topLeftCorner(3,3);\n      qbl = Eigen::Quaterniond(m3d).normalized();\n      qlb = 
qbl.conjugate();\n      Pbl = Tbl.topRightCorner(3,1);\n      Plb = -(qlb * Pbl);\n    }\n\n    template <typename T>\n    bool operator()(const T *PRi, T *residual) const {\n      Eigen::Matrix<T, 3, 1> cp{T(point.x()), T(point.y()), T(point.z())};\n\n      Eigen::Map<const Eigen::Matrix<T, 6, 1>> pri_wb(PRi);\n      Eigen::Quaternion<T> q_wb = Sophus::SO3<T>::exp(pri_wb.template segment<3>(3)).unit_quaternion();\n      Eigen::Matrix<T, 3, 1> t_wb = pri_wb.template segment<3>(0);\n      Eigen::Quaternion<T> q_wl = q_wb * qbl.cast<T>();\n      Eigen::Matrix<T, 3, 1> t_wl = q_wb * Pbl.cast<T>() + t_wb;\n      Eigen::Matrix<T, 3, 1> P_to_Map = q_wl * cp + t_wl;\n\n      T pd2 = T(pa) * P_to_Map(0) + T(pb) * P_to_Map(1) + T(pc) * P_to_Map(2) + T(pd);\n      T _weight = T(1) - T(0.9) * ceres::abs(pd2) /ceres::sqrt(\n              ceres::sqrt( P_to_Map(0) * P_to_Map(0) +\n                           P_to_Map(1) * P_to_Map(1) +\n                           P_to_Map(2) * P_to_Map(2) ));\n      residual[0] = T(sqrt_information(0)) * _weight * pd2;\n\n      return true;\n    }\n\n    static ceres::CostFunction *Create(const Eigen::Vector3d& curr_point_,\n                                       const double& pa_,\n                                       const double& pb_,\n                                       const double& pc_,\n                                       const double& pd_,\n                                       const Eigen::Matrix4d& Tbl,\n                                       Eigen::Matrix<double, 1, 1>  sqrt_information_) {\n      return (new ceres::AutoDiffCostFunction<Cost_NonFeature_ICP, 1, 6>(\n              new Cost_NonFeature_ICP(curr_point_, pa_, pb_, pc_, pd_, Tbl, std::move(sqrt_information_))));\n    }\n\n    double pa, pb, pc, pd;\n    Eigen::Vector3d point;\n    Eigen::Quaterniond qbl, qlb;\n    Eigen::Vector3d Pbl, Plb;\n    Eigen::Matrix<double, 1, 1> sqrt_information;\n};\n\n/** \\brief Ceres Cost Funtion for Initial Gravity Direction\n 
*/\nstruct Cost_Initial_G\n{\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tCost_Initial_G(Eigen::Vector3d acc_): acc(acc_){}\n\n\ttemplate <typename T>\n\tbool operator()( const T *q, T *residual) const {\n\t\tEigen::Matrix<T, 3, 1> acc_T = acc.cast<T>();\n\t\tEigen::Quaternion<T> q_wg{q[0], q[1], q[2], q[3]};\n\t\tEigen::Matrix<T, 3, 1> g_I{T(0), T(0), T(-9.805)};\n\t\tEigen::Matrix<T, 3, 1> resi = q_wg * g_I - acc_T;\n\t\tresidual[0] = resi[0];\n\t\tresidual[1] = resi[1];\n\t\tresidual[2] = resi[2];\n\n\t\treturn true;\n\t}\n\n\tstatic ceres::CostFunction *Create(Eigen::Vector3d acc_) {\n\t\treturn (new ceres::AutoDiffCostFunction<Cost_Initial_G, 3, 4>(\n\t\t\t\t\t\tnew Cost_Initial_G(acc_)));\n\t}\n\n\tEigen::Vector3d acc;\n};\n\n/** \\brief Ceres Cost Funtion of IMU Factor in LIO Initialization\n */\nstruct Cost_Initialization_IMU\n{\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tCost_Initialization_IMU(IMUIntegrator& measure_,\n\t\t\t\t\t\t\t\t\tEigen::Vector3d ri_,\n\t\t\t\t\t\t\t\t\tEigen::Vector3d rj_,\n\t\t\t\t\t\t\t\t\tEigen::Vector3d dp_,\n\t\t\t\t\t\t\t\t\tEigen::Matrix<double, 9, 9>  sqrt_information_):\n\t\t\t\t\t\t\t\t\timu_measure(measure_),\n\t\t\t\t\t\t\t\t\tri(ri_),\n\t\t\t\t\t\t\t\t\trj(rj_),\n\t\t\t\t\t\t\t\t\tdp(dp_),\n\t\t\t\t\t\t\t\t\tsqrt_information(std::move(sqrt_information_)){}\n\n\ttemplate <typename T>\n\tbool operator()(const T *rwg_, const T *vi_, const T *vj_, const T *ba_, const T *bg_, T *residual) const {\n\t\tEigen::Matrix<T, 3, 1> G_I{T(0), T(0), T(-9.805)};\n\t\t\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 1>> ba(ba_);\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 1>> bg(bg_);\n\t\tEigen::Matrix<T, 3, 1> dbg = bg - imu_measure.GetBiasGyr().cast<T>();\n\t\tEigen::Matrix<T, 3, 1> dba = ba - imu_measure.GetBiasAcc().cast<T>();\n\t\t\n\t\tSophus::SO3<T> SO3_Ri = Sophus::SO3<T>::exp(ri.cast<T>());\n\t\tSophus::SO3<T> SO3_Rj = Sophus::SO3<T>::exp(rj.cast<T>());\n\n\t\tEigen::Matrix<T, 3, 1> dP = dp.cast<T>();\n\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 
1>> rwg(rwg_);\n\t\tSophus::SO3<T> SO3_Rwg = Sophus::SO3<T>::exp(rwg);\n\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 1>> vi(vi_);\n\t\tEigen::Matrix<T, 3, 1> Vi = vi;\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 1>> vj(vj_);\n\t\tEigen::Matrix<T, 3, 1> Vj = vj;\n\n\t\tEigen::Map<Eigen::Matrix<T, 9, 1> > eResiduals(residual);\n\t\teResiduals = Eigen::Matrix<T, 9, 1>::Zero();\n\n\t\tT dTij = T(imu_measure.GetDeltaTime());\n\t\tT dT2 = dTij*dTij;\n\t\tEigen::Matrix<T, 3, 1> dPij = imu_measure.GetDeltaP().cast<T>();\n\t\tEigen::Matrix<T, 3, 1> dVij = imu_measure.GetDeltaV().cast<T>();\n\t\tSophus::SO3<T> dRij = Sophus::SO3<T>(imu_measure.GetDeltaQ().cast<T>());\n\t\tSophus::SO3<T> RiT = SO3_Ri.inverse();\n\n\t\tEigen::Matrix<T, 3, 1> rPij = RiT*(dP - Vi*dTij - SO3_Rwg*G_I*dT2*T(0.5)) -\n\t\t\t\t\t\t(dPij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BG).cast<T>()*dbg +\n\t\t\t\t\t\timu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_P, IMUIntegrator::O_BA).cast<T>()*dba);\n\n\t\tSophus::SO3<T> dR_dbg = Sophus::SO3<T>::exp(\n\t\t\t\t\t\timu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_R, IMUIntegrator::O_BG).cast<T>()*dbg);\n\t\tSophus::SO3<T> rRij = (dRij * dR_dbg).inverse() * RiT * SO3_Rj;\n\t\tEigen::Matrix<T, 3, 1> rPhiij = rRij.log();\n\n\t\tEigen::Matrix<T, 3, 1> rVij = RiT*(Vj - Vi - SO3_Rwg*G_I*dTij) -\n\t\t\t\t\t\t(dVij + imu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BG).cast<T>()*dbg +\n\t\t\t\t\t\t\t\t\t\timu_measure.GetJacobian().block<3,3>(IMUIntegrator::O_V, IMUIntegrator::O_BA).cast<T>()*dba);\n\n\t\teResiduals.template segment<3>(0) = rPij;\n\t\teResiduals.template segment<3>(3) = rPhiij;\n\t\teResiduals.template segment<3>(6) = rVij;\n\n\t\teResiduals.applyOnTheLeft(sqrt_information.template cast<T>());\n\n\t\treturn true;\n\t}\n\n\tstatic ceres::CostFunction *Create(IMUIntegrator& measure_,\n\t\t\t\t\t\t\t\t\t\tEigen::Vector3d ri_,\n\t\t\t\t\t\t\t\t\t\tEigen::Vector3d 
rj_,\n\t\t\t\t\t\t\t\t\t\tEigen::Vector3d dp_,\n\t\t\t\t\t\t\t\t\t\tEigen::Matrix<double, 9, 9>  sqrt_information_) {\n\t\treturn (new ceres::AutoDiffCostFunction<Cost_Initialization_IMU, 9, 3, 3, 3, 3, 3>(\n\t\t\t\t\t\tnew Cost_Initialization_IMU(measure_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tri_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trj_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdp_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstd::move(sqrt_information_))));\n\t}\n\n\tIMUIntegrator imu_measure;\n\tEigen::Vector3d ri;\n\tEigen::Vector3d rj;\n\tEigen::Vector3d dp;\n\tEigen::Matrix<double, 9, 9> sqrt_information;\n};\n\n/** \\brief Ceres Cost Funtion of IMU Biases and Velocity Prior Factor in LIO Initialization\n */\nstruct Cost_Initialization_Prior_bv\n{\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tCost_Initialization_Prior_bv(Eigen::Vector3d prior_, \n\t\t\t\t\t\t\t\t\tEigen::Matrix3d sqrt_information_):\n\t\t\t\t\t\t\t\t\tprior(prior_),\n\t\t\t\t\t\t\t\t\tsqrt_information(std::move(sqrt_information_)){}\n\n\ttemplate <typename T>\n\tbool operator()(const T *bv_, T *residual) const {\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 1>> bv(bv_);\n\t\tEigen::Matrix<T, 3, 1> Bv = bv;\n\n\t\tEigen::Matrix<T, 3, 1> prior_T(prior.cast<T>());\n\t\tEigen::Matrix<T, 3, 1> prior_Bv = prior_T;\n\n\t\tEigen::Map<Eigen::Matrix<T, 3, 1> > eResiduals(residual);\n\t\teResiduals = Eigen::Matrix<T, 3, 1>::Zero();\n\n\t\teResiduals = Bv - prior_Bv;\n\n\t\teResiduals.applyOnTheLeft(sqrt_information.template cast<T>());\n\n\t\treturn true;\n\t}\n\n\tstatic ceres::CostFunction *Create(Eigen::Vector3d prior_, Eigen::Matrix3d sqrt_information_) {\n\t\treturn (new ceres::AutoDiffCostFunction<Cost_Initialization_Prior_bv, 3, 3>(\n\t\t\t\t\t\tnew Cost_Initialization_Prior_bv(prior_, std::move(sqrt_information_))));\n\t}\n\n\tEigen::Vector3d prior;\n\tEigen::Matrix3d sqrt_information;\n};\n\n/** \\brief Ceres Cost Funtion of Rwg Prior Factor in LIO Initialization\n */\nstruct 
Cost_Initialization_Prior_R\n{\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tCost_Initialization_Prior_R(Eigen::Vector3d prior_, \n\t\t\t\t\t\t\t\tEigen::Matrix3d sqrt_information_):\n\t\t\t\t\t\t\t\tprior(prior_),\n\t\t\t\t\t\t\t\tsqrt_information(std::move(sqrt_information_)){}\n\n\ttemplate <typename T>\n\tbool operator()( const T *r_wg_, T *residual) const {\n\t\tEigen::Map<const Eigen::Matrix<T, 3, 1>> r_wg(r_wg_);\n\t\tEigen::Matrix<T, 3, 1> R_wg = r_wg;\n\t\tSophus::SO3<T> SO3_R_wg = Sophus::SO3<T>::exp(R_wg);\n\n\t\tEigen::Matrix<T, 3, 1> prior_T(prior.cast<T>());\n\t\tSophus::SO3<T> prior_R_wg = Sophus::SO3<T>::exp(prior_T);\n\n\t\tSophus::SO3<T> d_R = SO3_R_wg.inverse() * prior_R_wg;\n\t\tEigen::Matrix<T, 3, 1> d_Phi = d_R.log();\n\n\t\tEigen::Map<Eigen::Matrix<T, 3, 1> > eResiduals(residual);\n\t\teResiduals = Eigen::Matrix<T, 3, 1>::Zero();\n\n\t\teResiduals = d_Phi;\n\n\t\teResiduals.applyOnTheLeft(sqrt_information.template cast<T>());\n\n\t\treturn true;\n\t}\n\n\tstatic ceres::CostFunction *Create(Eigen::Vector3d prior_, Eigen::Matrix3d sqrt_information_) {\n\t\treturn (new ceres::AutoDiffCostFunction<Cost_Initialization_Prior_R, 3, 3>(\n\t\t\t\t\t\tnew Cost_Initialization_Prior_R(prior_, std::move(sqrt_information_))));\n\t}\n\n\tEigen::Vector3d prior;\n\tEigen::Matrix3d sqrt_information;\n};\n\n#endif //LIO_LIVOX_CERESFUNC_H\n"
  },
  {
    "path": "LIO-Livox/include/utils/math_utils.hpp",
    "content": "/*\n * COPYRIGHT AND PERMISSION NOTICE\n * Penn Software MSCKF_VIO\n * Copyright (C) 2017 The Trustees of the University of Pennsylvania\n * All rights reserved.\n */\n\n// The original file belongs to MSCKF_VIO (https://github.com/KumarRobotics/msckf_vio/)\n// Some changes have been made to use it in livox_slam_ware\n\n#ifndef MATH_UTILS_HPP\n#define MATH_UTILS_HPP\n\n#include <cmath>\n#include <Eigen/Dense>\n\nnamespace livox_slam_ware {\n\n/*\n *  @brief Create a skew-symmetric matrix from a 3-element vector.\n *  @note Performs the operation:\n *  w   ->  [  0 -w3  w2]\n *          [ w3   0 -w1]\n *          [-w2  w1   0]\n */\ninline Eigen::Matrix3d skewSymmetric(const Eigen::Vector3d& w) {\n  Eigen::Matrix3d w_hat;\n  w_hat(0, 0) = 0;\n  w_hat(0, 1) = -w(2);\n  w_hat(0, 2) = w(1);\n  w_hat(1, 0) = w(2);\n  w_hat(1, 1) = 0;\n  w_hat(1, 2) = -w(0);\n  w_hat(2, 0) = -w(1);\n  w_hat(2, 1) = w(0);\n  w_hat(2, 2) = 0;\n  return w_hat;\n}\n\n/*\n * @brief Normalize the given quaternion to unit quaternion.\n */\ninline void quaternionNormalize(Eigen::Vector4d& q) {\n  double norm = q.norm();\n  q = q / norm;\n  return;\n}\n\n/*\n * @brief Perform q1 * q2.\n *  \n *    Format of q1 and q2 is as [x,y,z,w]\n */\ninline Eigen::Vector4d quaternionMultiplication(\n    const Eigen::Vector4d& q1,\n    const Eigen::Vector4d& q2) {\n  Eigen::Matrix4d L;\n\n  // QXC: Hamilton\n  L(0, 0) =  q1(3); L(0, 1) = -q1(2); L(0, 2) =  q1(1); L(0, 3) =  q1(0);\n  L(1, 0) =  q1(2); L(1, 1) =  q1(3); L(1, 2) = -q1(0); L(1, 3) =  q1(1);\n  L(2, 0) = -q1(1); L(2, 1) =  q1(0); L(2, 2) =  q1(3); L(2, 3) =  q1(2);\n  L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) =  q1(3);\n\n  Eigen::Vector4d q = L * q2;\n  quaternionNormalize(q);\n  return q;\n}\n\n/*\n * @brief Convert the vector part of a quaternion to a\n *    full quaternion.\n * @note This function is useful to convert delta quaternion\n *    which is usually a 3x1 vector to a full quaternion.\n *    For 
more details, check Section 3.2 \"Kalman Filter Update\" in\n *    \"Indirect Kalman Filter for 3D Attitude Estimation:\n *    A Tutorial for quaternion Algebra\".\n */\ninline Eigen::Vector4d smallAngleQuaternion(\n    const Eigen::Vector3d& dtheta) {\n\n  Eigen::Vector3d dq = dtheta / 2.0;\n  Eigen::Vector4d q;\n  double dq_square_norm = dq.squaredNorm();\n\n  if (dq_square_norm <= 1) {\n    q.head<3>() = dq;\n    q(3) = std::sqrt(1-dq_square_norm);\n  } else {\n    q.head<3>() = dq;\n    q(3) = 1;\n    q = q / std::sqrt(1+dq_square_norm);\n  }\n\n  return q;\n}\n\n/*\n * @brief Convert the vector part of a quaternion to a\n *    full quaternion.\n * @note This function is useful to convert delta quaternion\n *    which is usually a 3x1 vector to a full quaternion.\n *    For more details, check Section 3.2 \"Kalman Filter Update\" in\n *    \"Indirect Kalman Filter for 3D Attitude Estimation:\n *    A Tutorial for quaternion Algebra\".\n */\ninline Eigen::Quaterniond getSmallAngleQuaternion(\n    const Eigen::Vector3d& dtheta) {\n\n  Eigen::Vector3d dq = dtheta / 2.0;\n  Eigen::Quaterniond q;\n  double dq_square_norm = dq.squaredNorm();\n\n  if (dq_square_norm <= 1) {\n    q.x() = dq(0);\n    q.y() = dq(1);\n    q.z() = dq(2);\n    q.w() = std::sqrt(1-dq_square_norm);\n  } else {\n    q.x() = dq(0);\n    q.y() = dq(1);\n    q.z() = dq(2);\n    q.w() = 1;\n    q.normalize();\n  }\n\n  return q;\n}\n\n/*\n * @brief Convert a quaternion to the corresponding rotation matrix\n * @note Pay attention to the convention used. 
The function follows the\n *    conversion in \"Indirect Kalman Filter for 3D Attitude Estimation:\n *    A Tutorial for Quaternion Algebra\", Equation (78).\n *\n *    The input quaternion should be in the form\n *      [q1, q2, q3, q4(scalar)]^T\n */\ninline Eigen::Matrix3d quaternionToRotation(\n    const Eigen::Vector4d& q) {\n  // QXC: Hamilton\n  const double& qw = q(3);\n  const double& qx = q(0);\n  const double& qy = q(1);\n  const double& qz = q(2);\n  Eigen::Matrix3d R;\n  R(0, 0) = 1-2*(qy*qy+qz*qz);  R(0, 1) =   2*(qx*qy-qw*qz);  R(0, 2) =   2*(qx*qz+qw*qy);\n  R(1, 0) =   2*(qx*qy+qw*qz);  R(1, 1) = 1-2*(qx*qx+qz*qz);  R(1, 2) =   2*(qy*qz-qw*qx);\n  R(2, 0) =   2*(qx*qz-qw*qy);  R(2, 1) =   2*(qy*qz+qw*qx);  R(2, 2) = 1-2*(qx*qx+qy*qy);\n\n  return R;\n}\n\n/*\n * @brief Convert a rotation matrix to a quaternion.\n * @note Pay attention to the convention used. The function follows the\n *    conversion in \"Indirect Kalman Filter for 3D Attitude Estimation:\n *    A Tutorial for Quaternion Algebra\", Equation (78).\n *\n *    The input quaternion should be in the form\n *      [q1, q2, q3, q4(scalar)]^T\n */\ninline Eigen::Vector4d rotationToQuaternion(\n    const Eigen::Matrix3d& R) {\n  Eigen::Vector4d score;\n  score(0) = R(0, 0);\n  score(1) = R(1, 1);\n  score(2) = R(2, 2);\n  score(3) = R.trace();\n\n  int max_row = 0, max_col = 0;\n  score.maxCoeff(&max_row, &max_col);\n\n  Eigen::Vector4d q = Eigen::Vector4d::Zero();\n\n  // QXC: Hamilton\n  if (max_row == 0) {\n    q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0;\n    q(1) = (R(0, 1)+R(1, 0)) / (4*q(0));\n    q(2) = (R(0, 2)+R(2, 0)) / (4*q(0));\n    q(3) = (R(2, 1)-R(1, 2)) / (4*q(0));\n  } else if (max_row == 1) {\n    q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0;\n    q(0) = (R(0, 1)+R(1, 0)) / (4*q(1));\n    q(2) = (R(1, 2)+R(2, 1)) / (4*q(1));\n    q(3) = (R(0, 2)-R(2, 0)) / (4*q(1));\n  } else if (max_row == 2) {\n    q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0;\n    q(0) = (R(0, 
2)+R(2, 0)) / (4*q(2));\n    q(1) = (R(1, 2)+R(2, 1)) / (4*q(2));\n    q(3) = (R(1, 0)-R(0, 1)) / (4*q(2));\n  } else {\n    q(3) = std::sqrt(1+R.trace()) / 2.0;\n    q(0) = (R(2, 1)-R(1, 2)) / (4*q(3));\n    q(1) = (R(0, 2)-R(2, 0)) / (4*q(3));\n    q(2) = (R(1, 0)-R(0, 1)) / (4*q(3));\n  }\n\n  if (q(3) < 0) q = -q;\n  quaternionNormalize(q);\n  return q;\n}\n\n/*\n * @brief Convert a rotation matrix to a eulerAngles.\n */\ntemplate <typename T>\nvoid R2ypr(const Eigen::Matrix<T, 3, 3>& R, Eigen::Matrix<T, 3, 1> &ypr) {\n  Eigen::Matrix<T, 3, 1> n = R.col(0);\n  Eigen::Matrix<T, 3, 1> o = R.col(1);\n  Eigen::Matrix<T, 3, 1> a = R.col(2);\n\n  T y = atan2(n(1), n(0));\n  T p = atan2(-n(2), n(0) * cos(y) + n(1) * sin(y));\n  T r = atan2(a(0) * sin(y) - a(1) * cos(y), -o(0) * sin(y) + o(1) * cos(y));\n  ypr(0) = y;\n  ypr(1) = p;\n  ypr(2) = r;\n}\n/*\n * @brief Calculate the error between the two planes.\n */\ntemplate <typename T>\nEigen::Matrix<T, 3, 1> ominus(const Eigen::Matrix<T, 4, 1>& ground_plane_coeff, Eigen::Matrix<T, 4, 1> &local_ground_plane) {\n  Eigen::Matrix<T, 3, 1> n_ground_plane_coeff = ground_plane_coeff.template segment<3>(0);\n  Eigen::Matrix<T, 3, 1> n_local_ground_plane = local_ground_plane.template segment<3>(0);\n  Eigen::Matrix<float, 3, 1> x(1, 0, 0);\n  Eigen::Matrix<T, 3, 1> normal_x = x.template cast<T>();\n  Eigen::Matrix<T, 3, 3> R = Eigen::Quaternion<T>::FromTwoVectors(n_local_ground_plane, normal_x).toRotationMatrix();\n  Eigen::Matrix<T, 3, 1> n_ground_plane_coeff_after_rotation = R * n_ground_plane_coeff;\n  Eigen::Matrix<T, 3, 1> result;\n  result(0) = atan2(n_ground_plane_coeff_after_rotation(1), n_ground_plane_coeff_after_rotation(0));\n  result(1) = atan2(n_ground_plane_coeff_after_rotation(2), n_ground_plane_coeff_after_rotation.template head<2>().norm());\n  result(2) = -local_ground_plane(3) + ground_plane_coeff(3);\n  return result;\n}\n} // end namespace livox_slam_ware\n\n#endif // MATH_UTILS_HPP\n"
  },
  {
    "path": "LIO-Livox/include/utils/pcl_utils.hpp",
    "content": "#ifndef PCL_UTILS_HPP\n#define PCL_UTILS_HPP\n\n#include <pcl/sample_consensus/ransac.h>\n#include <pcl/sample_consensus/sac_model_plane.h>\nnamespace livox_slam_ware {\n/*\n * @brief Get the plane coeffs.\n */\ntemplate <typename PointType>\nEigen::VectorXf get_plane_coeffs (typename pcl::PointCloud<PointType>::Ptr lidar_cloud) {\n  Eigen::VectorXf coeff;\n  typename pcl::PointCloud<PointType>::Ptr ground_cloud;\n  ground_cloud.reset(new pcl::PointCloud<PointType>);\n  for(const auto& p : lidar_cloud->points){\n    if(std::fabs(p.normal_y + 1.0) < 1e-5) {\n      ground_cloud->push_back(p);\n    }\n  }\n  typename pcl::SampleConsensusModelPlane<PointType>::Ptr model(\n    new pcl::SampleConsensusModelPlane<PointType>(ground_cloud));//定义待拟合平面的model，并使用待拟合点云初始化\n  pcl::RandomSampleConsensus<PointType> ransac(model);//定义RANSAC算法模型\n  ransac.setDistanceThreshold(0.05);//设定阈值\n  ransac.computeModel();//拟合\n  ransac.getModelCoefficients(coeff);//获取拟合平面参数，对于平面ax+by_cz_d=0，coeff分别按顺序保存a,b,c,d\n  // make the normal upward\n  // 法向量颠倒个方向\n  if(coeff.head<3>().dot(Eigen::Vector3f::UnitZ()) < 0.0f) {\n    coeff *= -1.0f;\n  }\n  return coeff;\n}\n}\n\n#endif // PCL_UTILS_HPP"
  },
  {
    "path": "LIO-Livox/launch/livox_odometry.launch",
    "content": "<launch>\n\n    <!-- Required: file path where PoseEstimation saves the odometry result -->\n    <arg name=\"save_path\"/>\n\n    <node pkg=\"livox_odometry\" type=\"ScanRegistration\" name=\"ScanRegistration\" output=\"screen\">\n\n        <param name=\"config_file\" value=\"$(find livox_odometry)/config/horizon_config.yaml\"/>\n    </node>\n\n    <node pkg=\"livox_odometry\" type=\"PoseEstimation\" name=\"PoseEstimation\" output=\"screen\">\n        <!-- 0-Not Use IMU, 1-Use IMU remove Rotation Distort, 2-Tightly Coupled IMU -->\n        <param name=\"IMU_Mode\" type=\"int\" value=\"2\" />\n        <!-- Voxel Filter Size Use to Downsize Map Cloud -->\n        <param name=\"filter_parameter_corner\" type=\"double\" value=\"0.2\" />\n        <param name=\"filter_parameter_surf\" type=\"double\" value=\"0.4\" />\n        <!-- Save path Parameter of odometry -->\n        <param name=\"save_path\" value=\"$(arg save_path)\" />\n        <!-- Extrinsic Parameter between Lidar & IMU -->\n        <rosparam param=\"Extrinsic_Tlb\"> [1.0, 0.0, 0.0, -0.05512,\n                                          0.0, 1.0, 0.0, -0.02226,\n                                          0.0, 0.0, 1.0,  0.0297,\n                                          0.0, 0.0, 0.0,  1.0]</rosparam>\n\n    </node>\n\n    <!-- <node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find lio_livox)/rviz_cfg/lio.rviz\" /> -->\n</launch>\n"
  },
  {
    "path": "LIO-Livox/package.xml",
    "content": "<?xml version=\"1.0\"?>\n<package>\n  <name>livox_odometry</name>\n  <version>0.0.0</version>\n\n  <description>\n    This is the modified version of LIO-Livox, where several new factors are added. For original version \n    of LIO-Livox, please refer to: https://github.com/Livox-SDK/LIO-Livox\n  </description>\n\n  <maintainer email=\"dev@livoxtech.com\">livox</maintainer>\n\n  <license>BSD</license>\n\n  <author email=\"dev@livoxtech.com\">livox</author>\n  <author email=\"huangsiyuan@pjlab.org.cn\">Siyuan with Xudong</author>\n\n  <buildtool_depend>catkin</buildtool_depend>\n  <build_depend>message_generation</build_depend>\n  <build_depend>geometry_msgs</build_depend>\n  <build_depend>nav_msgs</build_depend>\n  <build_depend>roscpp</build_depend>\n  <build_depend>rospy</build_depend>\n  <build_depend>std_msgs</build_depend>\n  <build_depend>sensor_msgs</build_depend>\n  <build_depend>tf</build_depend>\n  <build_depend>livox_ros_driver</build_depend>\n  <build_depend>tf_conversions</build_depend>\n  <build_depend>eigen_conversions</build_depend>\n  <build_depend>pcl_conversions</build_depend>\n  <build_depend>pcl_ros</build_depend>\n  <build_depend>message_filters</build_depend>\n  <build_depend>std_srvs</build_depend>\n\n  <run_depend>message_runtime</run_depend>\n  <run_depend>geometry_msgs</run_depend>\n  <run_depend>nav_msgs</run_depend>\n  <run_depend>sensor_msgs</run_depend>\n  <run_depend>roscpp</run_depend>\n  <run_depend>rospy</run_depend>\n  <run_depend>std_msgs</run_depend>\n  <run_depend>tf</run_depend>\n  <run_depend>livox_ros_driver</run_depend>\n  <run_depend>tf_conversions</run_depend>\n  <run_depend>eigen_conversions</run_depend>\n  <run_depend>pcl_conversions</run_depend>\n  <run_depend>pcl_ros</run_depend>\n  <run_depend>message_filters</run_depend>\n  <run_depend>std_srvs</run_depend>\n\n  <test_depend>rostest</test_depend>\n  <test_depend>rosbag</test_depend>\n\n  <export>\n  </export>\n</package>\n"
  },
  {
    "path": "LIO-Livox/rviz_cfg/lio.rviz",
    "content": "Panels:\n  - Class: rviz/Displays\n    Help Height: 85\n    Name: Displays\n    Property Tree Widget:\n      Expanded:\n        - /Global Options1\n        - /odometry1\n        - /odometry1/TF1\n        - /mapping1\n        - /mapping1/Path1\n        - /mapping1/PointCloud21\n        - /Grid1\n      Splitter Ratio: 0.5166666507720947\n    Tree Height: 1207\n  - Class: rviz/Selection\n    Name: Selection\n  - Class: rviz/Tool Properties\n    Expanded:\n      - /2D Pose Estimate1\n      - /2D Nav Goal1\n      - /Publish Point1\n    Name: Tool Properties\n    Splitter Ratio: 0.5886790156364441\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: Views\n    Splitter Ratio: 0.49433961510658264\n  - Class: rviz/Time\n    Experimental: false\n    Name: Time\n    SyncMode: 0\n    SyncSource: PointCloud2\nPreferences:\n  PromptSaveOnExit: true\nToolbars:\n  toolButtonStyle: 2\nVisualization Manager:\n  Class: \"\"\n  Displays:\n    - Class: rviz/Group\n      Displays:\n        - Class: rviz/TF\n          Enabled: true\n          Frame Timeout: 5000\n          Frames:\n            All Enabled: true\n            livox_frame:\n              Value: true\n            world:\n              Value: true\n          Marker Scale: 10\n          Name: TF\n          Show Arrows: false\n          Show Axes: true\n          Show Names: true\n          Tree:\n            world:\n              livox_frame:\n                {}\n          Update Interval: 0\n          Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: FlatColor\n          Decay Time: 0\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Max 
Intensity: 255\n          Min Color: 0; 0; 0\n          Min Intensity: 0\n          Name: full_cloud\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 1.5\n          Size (m): 0.10000000149011612\n          Style: Points\n          Topic: /livox_full_cloud_mapped\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n      Enabled: true\n      Name: odometry\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 255; 85; 255\n          Enabled: true\n          Head Diameter: 0.30000001192092896\n          Head Length: 0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Lines\n          Line Width: 0.029999999329447746\n          Name: Path\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /livox_odometry_path_mapped\n          Unreliable: false\n          Value: true\n        - Alpha: 0.05000000074505806\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 37.26676559448242\n            Min Value: -14.624585151672363\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 0; 255; 127\n          Color Transformer: Intensity\n          Decay Time: 9999\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Max Intensity: 187\n          Min Color: 0; 0; 0\n          Min Intensity: 0\n          Name: PointCloud2\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 
1\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /livox_full_cloud_mapped\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n      Enabled: true\n      Name: mapping\n    - Alpha: 0.5\n      Cell Size: 1\n      Class: rviz/Grid\n      Color: 160; 160; 164\n      Enabled: true\n      Line Style:\n        Line Width: 0.029999999329447746\n        Value: Lines\n      Name: Grid\n      Normal Cell Count: 0\n      Offset:\n        X: 0\n        Y: 0\n        Z: 0\n      Plane: XY\n      Plane Cell Count: 120\n      Reference Frame: world\n      Value: true\n  Enabled: true\n  Global Options:\n    Background Color: 0; 0; 0\n    Default Light: true\n    Fixed Frame: world\n    Frame Rate: 30\n  Name: root\n  Tools:\n    - Class: rviz/Interact\n      Hide Inactive Objects: true\n    - Class: rviz/MoveCamera\n    - Class: rviz/Select\n    - Class: rviz/FocusCamera\n    - Class: rviz/Measure\n    - Class: rviz/SetInitialPose\n      Theta std deviation: 0.2617993950843811\n      Topic: /initialpose\n      X std deviation: 0.5\n      Y std deviation: 0.5\n    - Class: rviz/SetGoal\n      Topic: /move_base_simple/goal\n    - Class: rviz/PublishPoint\n      Single click: true\n      Topic: /clicked_point\n  Value: true\n  Views:\n    Current:\n      Class: rviz/Orbit\n      Distance: 167.27032470703125\n      Enable Stereo Rendering:\n        Stereo Eye Separation: 0.05999999865889549\n        Stereo Focal Distance: 1\n        Swap Stereo Eyes: false\n        Value: false\n      Focal Point:\n        X: -0.31172746419906616\n        Y: 27.33951187133789\n        Z: 0.9463337659835815\n      Focal Shape Fixed Size: true\n      Focal Shape Size: 0.05000000074505806\n      Invert Z Axis: false\n      Name: Current View\n      Near Clip Distance: 0.009999999776482582\n      Pitch: 1.5697963237762451\n      Target Frame: world\n      Value: Orbit (rviz)\n      Yaw: 
3.1316657066345215\n    Saved: ~\nWindow Geometry:\n  Displays:\n    collapsed: true\n  Height: 1410\n  Hide Left Dock: true\n  Hide Right Dock: true\n  QMainWindow State: 000000ff00000000fd00000004000000000000016a0000054dfc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006b00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007300000000170000054d000000eb00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006502000001dd00000169000004900000024500000001000002870000054dfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000170000054d000000b900fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d00000039fc0100000002fb0000000800540069006d006500000000000000073d000004f900fffffffb0000000800540069006d00650100000000000004500000000000000000000009af0000054d00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730000000000ffffffff0000000000000000\n  Selection:\n    collapsed: false\n  Time:\n    collapsed: false\n  Tool Properties:\n    collapsed: false\n  Views:\n    collapsed: true\n  Width: 2479\n  X: 81\n  Y: 30\n"
  },
  {
    "path": "LIO-Livox/src/lio/Estimator.cpp",
    "content": "#include \"Estimator/Estimator.h\"\n\nEstimator::Estimator(const float& filter_corner, const float& filter_surf){\n  laserCloudCornerFromLocal.reset(new pcl::PointCloud<PointType>);\n  laserCloudSurfFromLocal.reset(new pcl::PointCloud<PointType>);\n  laserCloudNonFeatureFromLocal.reset(new pcl::PointCloud<PointType>);\n  // 初始化起始地面点云\n  initGroundCloud.reset(new pcl::PointCloud<PointType>);\n  init_ground_count = 0;\n\n  laserCloudCornerLast.resize(SLIDEWINDOWSIZE);\n  for(auto& p:laserCloudCornerLast)\n    p.reset(new pcl::PointCloud<PointType>);\n  laserCloudSurfLast.resize(SLIDEWINDOWSIZE);\n  for(auto& p:laserCloudSurfLast)\n    p.reset(new pcl::PointCloud<PointType>);\n  laserCloudNonFeatureLast.resize(SLIDEWINDOWSIZE);\n  for(auto& p:laserCloudNonFeatureLast)\n    p.reset(new pcl::PointCloud<PointType>);\n  laserCloudCornerStack.resize(SLIDEWINDOWSIZE);\n  for(auto& p:laserCloudCornerStack)\n    p.reset(new pcl::PointCloud<PointType>);\n  laserCloudSurfStack.resize(SLIDEWINDOWSIZE);\n  for(auto& p:laserCloudSurfStack)\n    p.reset(new pcl::PointCloud<PointType>);\n  laserCloudNonFeatureStack.resize(SLIDEWINDOWSIZE);\n  for(auto& p:laserCloudNonFeatureStack)\n    p.reset(new pcl::PointCloud<PointType>);\n  laserCloudCornerForMap.reset(new pcl::PointCloud<PointType>);\n  laserCloudSurfForMap.reset(new pcl::PointCloud<PointType>);\n  laserCloudNonFeatureForMap.reset(new pcl::PointCloud<PointType>);\n  transformForMap.setIdentity();\n  kdtreeCornerFromLocal.reset(new pcl::KdTreeFLANN<PointType>);\n  kdtreeSurfFromLocal.reset(new pcl::KdTreeFLANN<PointType>);\n  kdtreeNonFeatureFromLocal.reset(new pcl::KdTreeFLANN<PointType>);\n\n  for(int i = 0; i < localMapWindowSize; i++){\n    localCornerMap[i].reset(new pcl::PointCloud<PointType>);\n    localSurfMap[i].reset(new pcl::PointCloud<PointType>);\n    localNonFeatureMap[i].reset(new pcl::PointCloud<PointType>);\n  }\n\n  downSizeFilterCorner.setLeafSize(filter_corner, filter_corner, 
filter_corner);\n  downSizeFilterSurf.setLeafSize(filter_surf, filter_surf, filter_surf);\n  downSizeFilterNonFeature.setLeafSize(0.4, 0.4, 0.4);\n  map_manager = new MAP_MANAGER(filter_corner, filter_surf);\n  threadMap = std::thread(&Estimator::threadMapIncrement, this);\n}\n\nEstimator::~Estimator(){\n  delete map_manager;\n}\n\n[[noreturn]] void Estimator::threadMapIncrement(){\n  pcl::PointCloud<PointType>::Ptr laserCloudCorner(new pcl::PointCloud<PointType>);\n  pcl::PointCloud<PointType>::Ptr laserCloudSurf(new pcl::PointCloud<PointType>);\n  pcl::PointCloud<PointType>::Ptr laserCloudNonFeature(new pcl::PointCloud<PointType>);\n  pcl::PointCloud<PointType>::Ptr laserCloudCorner_to_map(new pcl::PointCloud<PointType>);\n  pcl::PointCloud<PointType>::Ptr laserCloudSurf_to_map(new pcl::PointCloud<PointType>);\n  pcl::PointCloud<PointType>::Ptr laserCloudNonFeature_to_map(new pcl::PointCloud<PointType>);\n  Eigen::Matrix4d transform;\n  while(true){\n    std::unique_lock<std::mutex> locker(mtx_Map);\n    if(!laserCloudCornerForMap->empty()){\n\n      map_update_ID ++;\n\n      map_manager->featureAssociateToMap(laserCloudCornerForMap,\n                                         laserCloudSurfForMap,\n                                         laserCloudNonFeatureForMap,\n                                         laserCloudCorner,\n                                         laserCloudSurf,\n                                         laserCloudNonFeature,\n                                         transformForMap);\n      laserCloudCornerForMap->clear();\n      laserCloudSurfForMap->clear();\n      laserCloudNonFeatureForMap->clear();\n      transform = transformForMap;\n      locker.unlock();\n\n      *laserCloudCorner_to_map += *laserCloudCorner;\n      *laserCloudSurf_to_map += *laserCloudSurf;\n      *laserCloudNonFeature_to_map += *laserCloudNonFeature;\n\n      laserCloudCorner->clear();\n      laserCloudSurf->clear();\n      laserCloudNonFeature->clear();\n\n      
if(map_update_ID % map_skip_frame == 0){\n        map_manager->MapIncrement(laserCloudCorner_to_map, \n                                  laserCloudSurf_to_map, \n                                  laserCloudNonFeature_to_map,\n                                  transform);\n\n        laserCloudCorner_to_map->clear();\n        laserCloudSurf_to_map->clear();\n        laserCloudNonFeature_to_map->clear();\n      }\n      \n    }else\n      locker.unlock();\n\n    std::chrono::milliseconds dura(2);\n    std::this_thread::sleep_for(dura);\n  }\n}\n\nvoid Estimator::processPointToLine(std::vector<ceres::CostFunction *>& edges,\n                                   std::vector<FeatureLine>& vLineFeatures,\n                                   const pcl::PointCloud<PointType>::Ptr& laserCloudCorner,\n                                   const pcl::PointCloud<PointType>::Ptr& laserCloudCornerLocal,\n                                   const pcl::KdTreeFLANN<PointType>::Ptr& kdtreeLocal,\n                                   const Eigen::Matrix4d& exTlb,\n                                   const Eigen::Matrix4d& m4d){\n\n  Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity();\n  Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose();\n  Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1);\n  if(!vLineFeatures.empty()){\n    for(const auto& l : vLineFeatures){\n      auto* e = Cost_NavState_IMU_Line::Create(l.pointOri,\n                                               l.lineP1,\n                                               l.lineP2,\n                                               Tbl,\n                                               Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));\n      edges.push_back(e);\n    }\n    return;\n  }\n  PointType _pointOri, _pointSel, _coeff;\n  std::vector<int> _pointSearchInd;\n  std::vector<float> _pointSearchSqDis;\n  std::vector<int> _pointSearchInd2;\n  std::vector<float> _pointSearchSqDis2;\n\n  
Eigen::Matrix< double, 3, 3 > _matA1;\n  _matA1.setZero();\n\n  int laserCloudCornerStackNum = laserCloudCorner->points.size();\n  pcl::PointCloud<PointType>::Ptr kd_pointcloud(new pcl::PointCloud<PointType>);\n  int debug_num1 = 0;\n  int debug_num2 = 0;\n  int debug_num12 = 0;\n  int debug_num22 = 0;\n  for (int i = 0; i < laserCloudCornerStackNum; i++) {\n    _pointOri = laserCloudCorner->points[i];\n    MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d);\n    int id = map_manager->FindUsedCornerMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last);\n\n    if(id == 5000) continue;\n\n    if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue;\n\n    if(GlobalCornerMap[id].points.size() > 100) {\n      CornerKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis);\n      \n      if (_pointSearchSqDis[4] < thres_dist) {\n\n        debug_num1 ++;\n      float cx = 0;\n      float cy = 0;\n      float cz = 0;\n      for (int j = 0; j < 5; j++) {\n        cx += GlobalCornerMap[id].points[_pointSearchInd[j]].x;\n        cy += GlobalCornerMap[id].points[_pointSearchInd[j]].y;\n        cz += GlobalCornerMap[id].points[_pointSearchInd[j]].z;\n      }\n      cx /= 5;\n      cy /= 5;\n      cz /= 5;\n\n      float a11 = 0;\n      float a12 = 0;\n      float a13 = 0;\n      float a22 = 0;\n      float a23 = 0;\n      float a33 = 0;\n      for (int j = 0; j < 5; j++) {\n        float ax = GlobalCornerMap[id].points[_pointSearchInd[j]].x - cx;\n        float ay = GlobalCornerMap[id].points[_pointSearchInd[j]].y - cy;\n        float az = GlobalCornerMap[id].points[_pointSearchInd[j]].z - cz;\n\n        a11 += ax * ax;\n        a12 += ax * ay;\n        a13 += ax * az;\n        a22 += ay * ay;\n        a23 += ay * az;\n        a33 += az * az;\n      }\n      a11 /= 5;\n      a12 /= 5;\n      a13 /= 5;\n      a22 /= 5;\n      a23 /= 5;\n      a33 /= 5;\n\n      _matA1(0, 0) = a11;\n      
_matA1(0, 1) = a12;\n      _matA1(0, 2) = a13;\n      _matA1(1, 0) = a12;\n      _matA1(1, 1) = a22;\n      _matA1(1, 2) = a23;\n      _matA1(2, 0) = a13;\n      _matA1(2, 1) = a23;\n      _matA1(2, 2) = a33;\n\n      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> saes(_matA1);\n      Eigen::Vector3d unit_direction = saes.eigenvectors().col(2);\n\n      if (saes.eigenvalues()[2] > 3 * saes.eigenvalues()[1]) {\n        debug_num12 ++;\n        float x1 = cx + 0.1 * unit_direction[0];\n        float y1 = cy + 0.1 * unit_direction[1];\n        float z1 = cz + 0.1 * unit_direction[2];\n        float x2 = cx - 0.1 * unit_direction[0];\n        float y2 = cy - 0.1 * unit_direction[1];\n        float z2 = cz - 0.1 * unit_direction[2];\n\n        Eigen::Vector3d tripod1(x1, y1, z1);\n        Eigen::Vector3d tripod2(x2, y2, z2);\n        auto* e = Cost_NavState_IMU_Line::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                                 tripod1,\n                                                 tripod2,\n                                                 Tbl,\n                                                 Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));\n        edges.push_back(e);\n        vLineFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                   tripod1,\n                                   tripod2);\n        vLineFeatures.back().ComputeError(m4d);\n\n        continue;\n      }\n      \n    }\n    \n    }\n\n    if(laserCloudCornerLocal->points.size() > 20 ){\n      kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2);\n      if (_pointSearchSqDis2[4] < thres_dist) {\n\n        debug_num2 ++;\n        float cx = 0;\n        float cy = 0;\n        float cz = 0;\n        for (int j = 0; j < 5; j++) {\n          cx += laserCloudCornerLocal->points[_pointSearchInd2[j]].x;\n          cy += 
laserCloudCornerLocal->points[_pointSearchInd2[j]].y;\n          cz += laserCloudCornerLocal->points[_pointSearchInd2[j]].z;\n        }\n        cx /= 5;\n        cy /= 5;\n        cz /= 5;\n\n        float a11 = 0;\n        float a12 = 0;\n        float a13 = 0;\n        float a22 = 0;\n        float a23 = 0;\n        float a33 = 0;\n        for (int j = 0; j < 5; j++) {\n          float ax = laserCloudCornerLocal->points[_pointSearchInd2[j]].x - cx;\n          float ay = laserCloudCornerLocal->points[_pointSearchInd2[j]].y - cy;\n          float az = laserCloudCornerLocal->points[_pointSearchInd2[j]].z - cz;\n\n          a11 += ax * ax;\n          a12 += ax * ay;\n          a13 += ax * az;\n          a22 += ay * ay;\n          a23 += ay * az;\n          a33 += az * az;\n        }\n        a11 /= 5;\n        a12 /= 5;\n        a13 /= 5;\n        a22 /= 5;\n        a23 /= 5;\n        a33 /= 5;\n\n        _matA1(0, 0) = a11;\n        _matA1(0, 1) = a12;\n        _matA1(0, 2) = a13;\n        _matA1(1, 0) = a12;\n        _matA1(1, 1) = a22;\n        _matA1(1, 2) = a23;\n        _matA1(2, 0) = a13;\n        _matA1(2, 1) = a23;\n        _matA1(2, 2) = a33;\n\n      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> saes(_matA1);\n      Eigen::Vector3d unit_direction = saes.eigenvectors().col(2);\n\n        if (saes.eigenvalues()[2] > 3 * saes.eigenvalues()[1]) {\n          debug_num22++;\n          float x1 = cx + 0.1 * unit_direction[0];\n          float y1 = cy + 0.1 * unit_direction[1];\n          float z1 = cz + 0.1 * unit_direction[2];\n          float x2 = cx - 0.1 * unit_direction[0];\n          float y2 = cy - 0.1 * unit_direction[1];\n          float z2 = cz - 0.1 * unit_direction[2];\n\n          Eigen::Vector3d tripod1(x1, y1, z1);\n          Eigen::Vector3d tripod2(x2, y2, z2);\n          auto* e = Cost_NavState_IMU_Line::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                                  tripod1,\n                     
                             tripod2,\n                                                  Tbl,\n                                                  Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));\n          edges.push_back(e);\n          vLineFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                    tripod1,\n                                    tripod2);\n          vLineFeatures.back().ComputeError(m4d);\n        }\n      }\n    }\n     \n  }\n}\n\nvoid Estimator::processPointToPlan(std::vector<ceres::CostFunction *>& edges,\n                                   std::vector<FeaturePlan>& vPlanFeatures,\n                                   const pcl::PointCloud<PointType>::Ptr& laserCloudSurf,\n                                   const pcl::PointCloud<PointType>::Ptr& laserCloudSurfLocal,\n                                   const pcl::KdTreeFLANN<PointType>::Ptr& kdtreeLocal,\n                                   const Eigen::Matrix4d& exTlb,\n                                   const Eigen::Matrix4d& m4d){\n  Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity();\n  Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose();\n  Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1);\n  if(!vPlanFeatures.empty()){\n    for(const auto& p : vPlanFeatures){\n      auto* e = Cost_NavState_IMU_Plan::Create(p.pointOri,\n                                               p.pa,\n                                               p.pb,\n                                               p.pc,\n                                               p.pd,\n                                               Tbl,\n                                               Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));\n      edges.push_back(e);\n    }\n    return;\n  }\n  PointType _pointOri, _pointSel, _coeff;\n  std::vector<int> _pointSearchInd;\n  std::vector<float> _pointSearchSqDis;\n  std::vector<int> 
_pointSearchInd2;\n  std::vector<float> _pointSearchSqDis2;\n\n  Eigen::Matrix< double, 5, 3 > _matA0;\n  _matA0.setZero();\n  Eigen::Matrix< double, 5, 1 > _matB0;\n  _matB0.setOnes();\n  _matB0 *= -1;\n  Eigen::Matrix< double, 3, 1 > _matX0;\n  _matX0.setZero();\n  int laserCloudSurfStackNum = laserCloudSurf->points.size();\n\n  int debug_num1 = 0;\n  int debug_num2 = 0;\n  int debug_num12 = 0;\n  int debug_num22 = 0;\n  for (int i = 0; i < laserCloudSurfStackNum; i++) {\n    _pointOri = laserCloudSurf->points[i];\n    MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d);\n\n    int id = map_manager->FindUsedSurfMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last);\n\n    if(id == 5000) continue;\n\n    if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue;\n\n    if(GlobalSurfMap[id].points.size() > 50) {\n      SurfKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis);\n\n      if (_pointSearchSqDis[4] < 1.0) {\n        debug_num1 ++;\n        for (int j = 0; j < 5; j++) {\n          _matA0(j, 0) = GlobalSurfMap[id].points[_pointSearchInd[j]].x;\n          _matA0(j, 1) = GlobalSurfMap[id].points[_pointSearchInd[j]].y;\n          _matA0(j, 2) = GlobalSurfMap[id].points[_pointSearchInd[j]].z;\n        }\n        _matX0 = _matA0.colPivHouseholderQr().solve(_matB0);\n\n        float pa = _matX0(0, 0);\n        float pb = _matX0(1, 0);\n        float pc = _matX0(2, 0);\n        float pd = 1;\n\n        float ps = std::sqrt(pa * pa + pb * pb + pc * pc);\n        pa /= ps;\n        pb /= ps;\n        pc /= ps;\n        pd /= ps;\n\n        bool planeValid = true;\n        for (int j = 0; j < 5; j++) {\n          if (std::fabs(pa * GlobalSurfMap[id].points[_pointSearchInd[j]].x +\n                        pb * GlobalSurfMap[id].points[_pointSearchInd[j]].y +\n                        pc * GlobalSurfMap[id].points[_pointSearchInd[j]].z + pd) > 0.2) {\n            planeValid = false;\n   
         break;\n          }\n        }\n\n        if (planeValid) {\n          debug_num12 ++;\n          auto* e = Cost_NavState_IMU_Plan::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                                  pa,\n                                                  pb,\n                                                  pc,\n                                                  pd,\n                                                  Tbl,\n                                                  Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));\n          edges.push_back(e);\n          vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                    pa,\n                                    pb,\n                                    pc,\n                                    pd);\n          vPlanFeatures.back().ComputeError(m4d);\n\n          continue;\n        }\n        \n      }\n    }\n    if(laserCloudSurfLocal->points.size() > 20 ){\n    kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2);\n    if (_pointSearchSqDis2[4] < 1.0) {\n      debug_num2++;\n      for (int j = 0; j < 5; j++) { \n        _matA0(j, 0) = laserCloudSurfLocal->points[_pointSearchInd2[j]].x;\n        _matA0(j, 1) = laserCloudSurfLocal->points[_pointSearchInd2[j]].y;\n        _matA0(j, 2) = laserCloudSurfLocal->points[_pointSearchInd2[j]].z;\n      }\n      _matX0 = _matA0.colPivHouseholderQr().solve(_matB0);\n\n      float pa = _matX0(0, 0);\n      float pb = _matX0(1, 0);\n      float pc = _matX0(2, 0);\n      float pd = 1;\n\n      float ps = std::sqrt(pa * pa + pb * pb + pc * pc);\n      pa /= ps;\n      pb /= ps;\n      pc /= ps;\n      pd /= ps;\n\n      bool planeValid = true;\n      for (int j = 0; j < 5; j++) {\n        if (std::fabs(pa * laserCloudSurfLocal->points[_pointSearchInd2[j]].x +\n                      pb * laserCloudSurfLocal->points[_pointSearchInd2[j]].y +\n   
                   pc * laserCloudSurfLocal->points[_pointSearchInd2[j]].z + pd) > 0.2) {
          planeValid = false;
          break;
        }
      }

      if (planeValid) {
        // Local-map plane accepted: emit a scalar point-to-plane residual and cache
        // the plane coefficients so later optimizer iterations can rebuild the cost.
        debug_num22 ++;
        auto* e = Cost_NavState_IMU_Plan::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                                pa,
                                                pb,
                                                pc,
                                                pd,
                                                Tbl,
                                                Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));
        edges.push_back(e);
        vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                  pa,
                                  pb,
                                  pc,
                                  pd);
        vPlanFeatures.back().ComputeError(m4d);
      }
    }
  }

  }

}

// Build point-to-plane residuals (vector / anisotropic form) for one frame's surf points.
//   edges         : output Ceres cost functions, one per accepted plane feature.
//   vPlanFeatures : feature cache; when non-empty the costs are rebuilt from it directly.
//   laserCloudSurf      : down-sampled surf points of the frame (lidar frame).
//   laserCloudSurfLocal / kdtreeLocal : local sliding-window surf map and its kd-tree
//                                       (fallback match source when the global map misses).
//   exTlb         : lidar-to-body extrinsic; inverted into Tbl below.
//   m4d           : current frame pose used to project points into the map frame.
void Estimator::processPointToPlanVec(std::vector<ceres::CostFunction *>& edges,
                                   std::vector<FeaturePlanVec>& vPlanFeatures,
                                   const pcl::PointCloud<PointType>::Ptr& laserCloudSurf,
                                   const pcl::PointCloud<PointType>::Ptr& laserCloudSurfLocal,
                                   const pcl::KdTreeFLANN<PointType>::Ptr& kdtreeLocal,
                                   const Eigen::Matrix4d& exTlb,
                                   const Eigen::Matrix4d& m4d){
  // Tbl = inverse of exTlb (body <- lidar).
  Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity();
  Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose();
  Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1);
  // Fast path: features were already extracted in a previous optimizer iteration.
  if(!vPlanFeatures.empty()){
    for(const auto& p : vPlanFeatures){
      auto* e = Cost_NavState_IMU_Plan_Vec::Create(p.pointOri,
                                                   p.pointProj,
                                                   Tbl,
                                                   p.sqrt_info);
      edges.push_back(e);
    }
    return;
  }
  PointType _pointOri, _pointSel, _coeff;
  std::vector<int> _pointSearchInd;
  std::vector<float> _pointSearchSqDis;
  std::vector<int> _pointSearchInd2;
  std::vector<float> _pointSearchSqDis2;

  // Least-squares plane fit: solve A * n = -1, rows of A being the 5 neighbour points.
  Eigen::Matrix< double, 5, 3 > _matA0;
  _matA0.setZero();
  Eigen::Matrix< double, 5, 1 > _matB0;
  _matB0.setOnes();
  _matB0 *= -1;
  Eigen::Matrix< double, 3, 1 > _matX0;
  _matX0.setZero();
  int laserCloudSurfStackNum = laserCloudSurf->points.size();

  int debug_num1 = 0;
  int debug_num2 = 0;
  int debug_num12 = 0;
  int debug_num22 = 0;
  for (int i = 0; i < laserCloudSurfStackNum; i++) {
    _pointOri = laserCloudSurf->points[i];
    // Project the raw point into the map frame with the current pose estimate.
    MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d);

    // Locate the global-map cell containing the point; 5000 marks "outside map".
    int id = map_manager->FindUsedSurfMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last);

    if(id == 5000) continue;

    if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue;

    if(GlobalSurfMap[id].points.size() > 50) {
      SurfKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis);

      // Accept the neighbourhood only if the farthest of the 5 matches is close enough.
      if (_pointSearchSqDis[4] < thres_dist) {
        debug_num1 ++;
        for (int j = 0; j < 5; j++) {
          _matA0(j, 0) = GlobalSurfMap[id].points[_pointSearchInd[j]].x;
          _matA0(j, 1) = GlobalSurfMap[id].points[_pointSearchInd[j]].y;
          _matA0(j, 2) = GlobalSurfMap[id].points[_pointSearchInd[j]].z;
        }
        _matX0 = _matA0.colPivHouseholderQr().solve(_matB0);

        float pa = _matX0(0, 0);
        float pb = _matX0(1, 0);
        float pc = _matX0(2, 0);
        float pd = 1;

        // Normalize the plane equation pa*x + pb*y + pc*z + pd = 0.
        float ps = std::sqrt(pa * pa + pb * pb + pc * pc);
        pa /= ps;
        pb /= ps;
        pc /= ps;
        pd /= ps;

        bool planeValid = true;
        for (int j = 0; j < 5; j++) {
          // Reject the fit if any neighbour lies more than 0.2 off the plane.
          if (std::fabs(pa * GlobalSurfMap[id].points[_pointSearchInd[j]].x +
                        pb * GlobalSurfMap[id].points[_pointSearchInd[j]].y +
                        pc * GlobalSurfMap[id].points[_pointSearchInd[j]].z + pd) > 0.2) {
            planeValid = false;
            break;
          }
        }

        if (planeValid) {
          debug_num12 ++;
          // Signed distance of the query point to the fitted plane.
          double dist = pa * _pointSel.x +
                        pb * _pointSel.y +
                        pc * _pointSel.z + pd;
          Eigen::Vector3d omega(pa, pb, pc);
          // Projection of the point onto the plane (the residual target).
          Eigen::Vector3d point_proj = Eigen::Vector3d(_pointSel.x,_pointSel.y,_pointSel.z) - (dist * omega);
          Eigen::Vector3d e1(1, 0, 0);
          Eigen::Matrix3d J = e1 * omega.transpose();
          // R_svd aligns the residual frame with the plane normal so the two
          // tangential components can be down-weighted by plan_weight_tan.
          Eigen::JacobiSVD<Eigen::Matrix3d> svd(J, Eigen::ComputeThinU | Eigen::ComputeThinV);
          Eigen::Matrix3d R_svd = svd.matrixV() * svd.matrixU().transpose();
          Eigen::Matrix3d info = (1.0/IMUIntegrator::lidar_m) * Eigen::Matrix3d::Identity();
          info(1, 1) *= plan_weight_tan;
          info(2, 2) *= plan_weight_tan;
          Eigen::Matrix3d sqrt_info = info * R_svd.transpose();

          auto* e = Cost_NavState_IMU_Plan_Vec::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                                       point_proj,
                                                       Tbl,
                                                       sqrt_info);
          edges.push_back(e);
          vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                     point_proj,
                                     sqrt_info);
          vPlanFeatures.back().ComputeError(m4d);

          // Matched against the global map; skip the local-map fallback.
          continue;
        }
        
      }
    }


    // Fallback: match against the local sliding-window surf map.
    if(laserCloudSurfLocal->points.size() > 20 ){
    kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2);
    if (_pointSearchSqDis2[4] < thres_dist) {
      debug_num2++;
      for (int j = 0; j < 5; j++) { 
        _matA0(j, 0) = laserCloudSurfLocal->points[_pointSearchInd2[j]].x;
        _matA0(j, 1) = laserCloudSurfLocal->points[_pointSearchInd2[j]].y;
        _matA0(j, 2) = laserCloudSurfLocal->points[_pointSearchInd2[j]].z;
      }
      _matX0 = _matA0.colPivHouseholderQr().solve(_matB0);

      float pa = _matX0(0, 0);
      float pb = _matX0(1, 0);
      float pc = _matX0(2, 0);
      float pd = 1;

      float ps = std::sqrt(pa * pa + pb * pb + pc * pc);
      pa /= ps;
      pb /= ps;
      pc /= ps;
      pd /= ps;

      bool planeValid = true;
      for (int j = 0; j < 5; j++) {
        if (std::fabs(pa * laserCloudSurfLocal->points[_pointSearchInd2[j]].x +
                      pb * laserCloudSurfLocal->points[_pointSearchInd2[j]].y +
                      pc * laserCloudSurfLocal->points[_pointSearchInd2[j]].z + pd) > 0.2) {
          planeValid = false;
          break;
        }
      }

      if (planeValid) {
        debug_num22 ++;
        double dist = pa * _pointSel.x +
                      pb * _pointSel.y +
                      pc * _pointSel.z + pd;
        Eigen::Vector3d omega(pa, pb, pc);
        Eigen::Vector3d point_proj = Eigen::Vector3d(_pointSel.x,_pointSel.y,_pointSel.z) - (dist * omega);
        Eigen::Vector3d e1(1, 0, 0);
        Eigen::Matrix3d J = e1 * omega.transpose();
        Eigen::JacobiSVD<Eigen::Matrix3d> svd(J, Eigen::ComputeThinU | Eigen::ComputeThinV);
        Eigen::Matrix3d R_svd = svd.matrixV() * svd.matrixU().transpose();
        Eigen::Matrix3d info = (1.0/IMUIntegrator::lidar_m) * Eigen::Matrix3d::Identity();
        info(1, 1) *= plan_weight_tan;
        info(2, 2) *= plan_weight_tan;
        Eigen::Matrix3d sqrt_info = info * R_svd.transpose();

        auto* e = 
Cost_NavState_IMU_Plan_Vec::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                                      point_proj,
                                                      Tbl,
                                                      sqrt_info);
        edges.push_back(e);
        vPlanFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                    point_proj,
                                    sqrt_info);
        vPlanFeatures.back().ComputeError(m4d);
      }
    }
  }

  }

}


// Build point-to-plane ICP residuals for the "non-feature" points (neither corner nor surf).
// Mirrors processPointToPlanVec, but emits scalar Cost_NonFeature_ICP residuals and caches
// the fitted plane coefficients (pa..pd) in vNonFeatures for reuse by later iterations.
//   edges                     : output Ceres cost functions.
//   vNonFeatures              : feature cache; when non-empty, costs are rebuilt directly.
//   laserCloudNonFeature      : down-sampled non-feature points of the frame (lidar frame).
//   laserCloudNonFeatureLocal / kdtreeLocal : local sliding-window map fallback.
//   exTlb, m4d                : extrinsic and current frame pose (see processPointToPlanVec).
void Estimator::processNonFeatureICP(std::vector<ceres::CostFunction *>& edges,
                                     std::vector<FeatureNon>& vNonFeatures,
                                     const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeature,
                                     const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureLocal,
                                     const pcl::KdTreeFLANN<PointType>::Ptr& kdtreeLocal,
                                     const Eigen::Matrix4d& exTlb,
                                     const Eigen::Matrix4d& m4d){
  // Tbl = inverse of exTlb (body <- lidar).
  Eigen::Matrix4d Tbl = Eigen::Matrix4d::Identity();
  Tbl.topLeftCorner(3,3) = exTlb.topLeftCorner(3,3).transpose();
  Tbl.topRightCorner(3,1) = -1.0 * Tbl.topLeftCorner(3,3) * exTlb.topRightCorner(3,1);
  // Fast path: rebuild costs from cached plane coefficients.
  if(!vNonFeatures.empty()){
    for(const auto& p : vNonFeatures){
      auto* e = Cost_NonFeature_ICP::Create(p.pointOri,
                                            p.pa,
                                            p.pb,
                                            p.pc,
                                            p.pd,
                                            Tbl,
                                            Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));
      edges.push_back(e);
    }
    return;
  }

  PointType _pointOri, _pointSel, _coeff;
  std::vector<int> _pointSearchInd;
  std::vector<float> _pointSearchSqDis;
  std::vector<int> _pointSearchInd2;
  std::vector<float> _pointSearchSqDis2;

  // Least-squares plane fit: solve A * n = -1 (rows of A are the 5 neighbours).
  Eigen::Matrix< double, 5, 3 > _matA0;
  _matA0.setZero();
  Eigen::Matrix< double, 5, 1 > _matB0;
  _matB0.setOnes();
  _matB0 *= -1;
  Eigen::Matrix< double, 3, 1 > _matX0;
  _matX0.setZero();

  int laserCloudNonFeatureStackNum = laserCloudNonFeature->points.size();
  for (int i = 0; i < laserCloudNonFeatureStackNum; i++) {
    _pointOri = laserCloudNonFeature->points[i];
    MAP_MANAGER::pointAssociateToMap(&_pointOri, &_pointSel, m4d);
    // Locate the global-map cell; 5000 marks "outside map".
    int id = map_manager->FindUsedNonFeatureMap(&_pointSel,laserCenWidth_last,laserCenHeight_last,laserCenDepth_last);

    if(id == 5000) continue;

    if(std::isnan(_pointSel.x) || std::isnan(_pointSel.y) ||std::isnan(_pointSel.z)) continue;

    if(GlobalNonFeatureMap[id].points.size() > 100) {
      NonFeatureKdMap[id].nearestKSearch(_pointSel, 5, _pointSearchInd, _pointSearchSqDis);
      if (_pointSearchSqDis[4] < 1 * thres_dist) {
        for (int j = 0; j < 5; j++) {
          _matA0(j, 0) = GlobalNonFeatureMap[id].points[_pointSearchInd[j]].x;
          _matA0(j, 1) = GlobalNonFeatureMap[id].points[_pointSearchInd[j]].y;
          _matA0(j, 2) = GlobalNonFeatureMap[id].points[_pointSearchInd[j]].z;
        }
        _matX0 = _matA0.colPivHouseholderQr().solve(_matB0);

        float pa = _matX0(0, 0);
        float pb = _matX0(1, 0);
        float pc = _matX0(2, 0);
        float pd = 1;

        // Normalize the plane equation.
        float ps = std::sqrt(pa * pa + pb * pb + pc * pc);
        pa /= ps;
        pb /= ps;
        pc /= ps;
        pd /= ps;

        bool planeValid = true;
        for (int j = 0; j < 5; j++) {
          // Reject if any neighbour lies more than 0.2 off the plane.
          if (std::fabs(pa * GlobalNonFeatureMap[id].points[_pointSearchInd[j]].x +
                        pb * GlobalNonFeatureMap[id].points[_pointSearchInd[j]].y +
                        pc * GlobalNonFeatureMap[id].points[_pointSearchInd[j]].z + pd) > 0.2) {
            planeValid = false;
            break;
          }
        }

        if(planeValid) {

          auto* e = Cost_NonFeature_ICP::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                                pa,
                                                pb,
                                                pc,
                                                pd,
                                                Tbl,
                                                Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));
          edges.push_back(e);
          vNonFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),
                                    pa,
                                    pb,
                                    pc,
                                    pd);
          vNonFeatures.back().ComputeError(m4d);

          // Matched against the global map; skip the local-map fallback.
          continue;
        }
      }
    
    }

    // Fallback: match against the local sliding-window non-feature map.
    if(laserCloudNonFeatureLocal->points.size() > 20 ){
      kdtreeLocal->nearestKSearch(_pointSel, 5, _pointSearchInd2, _pointSearchSqDis2);
      if (_pointSearchSqDis2[4] < 1 * thres_dist) {
        for (int j = 0; j < 5; j++) { 
          _matA0(j, 0) = laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].x;
          _matA0(j, 1) = laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].y;
          _matA0(j, 2) = laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].z;
        }
        _matX0 = _matA0.colPivHouseholderQr().solve(_matB0);

        float pa = _matX0(0, 0);
        float pb = _matX0(1, 0);
        float pc = _matX0(2, 0);
        float pd = 1;

        float ps = std::sqrt(pa * pa + pb * pb + pc * pc);
        pa /= ps;
        pb /= ps;
        pc /= ps;
        pd /= ps;

        bool planeValid = true;
        for (int j = 0; j < 5; j++) {
          if (std::fabs(pa * laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].x +
                        pb * 
laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].y +\n                        pc * laserCloudNonFeatureLocal->points[_pointSearchInd2[j]].z + pd) > 0.2) {\n            planeValid = false;\n            break;\n          }\n        }\n\n        if(planeValid) {\n\n          auto* e = Cost_NonFeature_ICP::Create(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                                pa,\n                                                pb,\n                                                pc,\n                                                pd,\n                                                Tbl,\n                                                Eigen::Matrix<double, 1, 1>(1/IMUIntegrator::lidar_m));\n          edges.push_back(e);\n          vNonFeatures.emplace_back(Eigen::Vector3d(_pointOri.x,_pointOri.y,_pointOri.z),\n                                    pa,\n                                    pb,\n                                    pc,\n                                    pd);\n          vNonFeatures.back().ComputeError(m4d);\n        }\n      }\n    }\n  }\n\n}\n\n\nvoid Estimator::vector2double(const std::list<LidarFrame>& lidarFrameList){\n  int i = 0;\n  for(const auto& l : lidarFrameList){\n    Eigen::Map<Eigen::Matrix<double, 6, 1>> PR(para_PR[i]);\n    PR.segment<3>(0) = l.P;\n    PR.segment<3>(3) = Sophus::SO3d(l.Q).log();\n\n    Eigen::Map<Eigen::Matrix<double, 9, 1>> VBias(para_VBias[i]);\n    VBias.segment<3>(0) = l.V;\n    VBias.segment<3>(3) = l.bg;\n    VBias.segment<3>(6) = l.ba;\n    i++;\n  }\n}\n\nvoid Estimator::double2vector(std::list<LidarFrame>& lidarFrameList){\n  int i = 0;\n  for(auto& l : lidarFrameList){\n    Eigen::Map<const Eigen::Matrix<double, 6, 1>> PR(para_PR[i]);\n    Eigen::Map<const Eigen::Matrix<double, 9, 1>> VBias(para_VBias[i]);\n    l.P = PR.segment<3>(0);\n    l.Q = Sophus::SO3d::exp(PR.segment<3>(3)).unit_quaternion();\n    l.V = VBias.segment<3>(0);\n    l.bg = VBias.segment<3>(3);\n   
    l.ba = VBias.segment<3>(6);
    i++;
  }
}

// Top-level per-sweep entry point: splits each frame's labeled cloud into
// corner / surf / non-feature stacks, runs the sliding-window optimization
// (Estimate) when enough map points exist, accumulates the initial ground
// cloud, and finally publishes the newest clouds into the local map.
// debugInfo is currently unused in the visible body.
void Estimator::EstimateLidarPose(std::list<LidarFrame>& lidarFrameList,
                           const Eigen::Matrix4d& exTlb,
                           const Eigen::Vector3d& gravity,
                           nav_msgs::Odometry& debugInfo){
  
  // exRbl/exPbl: inverse of the lidar-to-body extrinsic.
  Eigen::Matrix3d exRbl = exTlb.topLeftCorner(3,3).transpose();
  Eigen::Vector3d exPbl = -1.0 * exRbl * exTlb.topRightCorner(3,1);
  Eigen::Matrix4d transformTobeMapped = Eigen::Matrix4d::Identity();
  transformTobeMapped.topLeftCorner(3,3) = lidarFrameList.back().Q * exRbl;
  transformTobeMapped.topRightCorner(3,1) = lidarFrameList.back().Q * exPbl + lidarFrameList.back().P;

  int laserCloudCornerFromMapNum = map_manager->get_corner_map()->points.size();
  int laserCloudSurfFromMapNum = map_manager->get_surf_map()->points.size();
  int laserCloudCornerFromLocalNum = laserCloudCornerFromLocal->points.size();
  int laserCloudSurfFromLocalNum = laserCloudSurfFromLocal->points.size();
  int stack_count = 0;
  // Split each frame's cloud by the feature label stored in normal_z
  // (1 = corner, 2 = surf, 3 = non-feature), then voxel-downsample each stack.
  for(const auto& l : lidarFrameList){
    laserCloudCornerLast[stack_count]->clear();
    for(const auto& p : l.laserCloud->points){
      if(std::fabs(p.normal_z - 1.0) < 1e-5)
        laserCloudCornerLast[stack_count]->push_back(p);
    }
    laserCloudSurfLast[stack_count]->clear();
    for(const auto& p : l.laserCloud->points){
      if(std::fabs(p.normal_z - 2.0) < 1e-5)
        laserCloudSurfLast[stack_count]->push_back(p);
    }

    laserCloudNonFeatureLast[stack_count]->clear();
    for(const auto& p : l.laserCloud->points){
      if(std::fabs(p.normal_z - 3.0) < 1e-5)
        laserCloudNonFeatureLast[stack_count]->push_back(p);
    }
    laserCloudCornerStack[stack_count]->clear();
    downSizeFilterCorner.setInputCloud(laserCloudCornerLast[stack_count]);
    downSizeFilterCorner.filter(*laserCloudCornerStack[stack_count]);

    laserCloudSurfStack[stack_count]->clear();
    downSizeFilterSurf.setInputCloud(laserCloudSurfLast[stack_count]);
    downSizeFilterSurf.filter(*laserCloudSurfStack[stack_count]);

    laserCloudNonFeatureStack[stack_count]->clear();
    downSizeFilterNonFeature.setInputCloud(laserCloudNonFeatureLast[stack_count]);
    downSizeFilterNonFeature.filter(*laserCloudNonFeatureStack[stack_count]);
    stack_count++;
  }
  // Only optimize once either the global or the local map has enough surf points.
  if ( ((laserCloudCornerFromMapNum > 0 && laserCloudSurfFromMapNum > 100) || 
       (laserCloudCornerFromLocalNum > 0 && laserCloudSurfFromLocalNum > 100))) {
    Estimate(lidarFrameList, exTlb, gravity);
  }

  transformTobeMapped = Eigen::Matrix4d::Identity();
  transformTobeMapped.topLeftCorner(3,3) = lidarFrameList.front().Q * exRbl;
  transformTobeMapped.topRightCorner(3,1) = lidarFrameList.front().Q * exPbl + lidarFrameList.front().P;
  // Accumulate ground points (label normal_y == -1) until ten frames have been collected.
  for(const auto& l : lidarFrameList) {
    if (init_ground_count <= 10) {
      PointType temp_point;
      for(const auto& p : l.laserCloud->points){
        if(std::fabs(p.normal_y + 1.0) < 1e-5) {
          MAP_MANAGER::pointAssociateToMap(&p, &temp_point, transformTobeMapped);
          initGroundCloud->push_back(temp_point);
        }
      }
      init_ground_count++;
      if (init_ground_count > 10) {
        // The first time more than ten frames of ground points exist,
        // fit the initial ground plane used by the ground-prior cost.
        init_ground_plane_coeff = livox_slam_ware::get_plane_coeffs<PointType>(initGroundCloud);
        Cost_NavState_PR_Ground::init_ground_plane_coeff = init_ground_plane_coeff;
      }
    }
  }
  // std::cout << "the size of init_ground_plane_coeff " << init_ground_plane_coeff.size() << std::endl;
  // Publish the oldest frame's clouds and pose to the mapping thread, then
  // rebuild the local map (the FromLocal clouds are refilled by MapIncrementLocal).
  std::unique_lock<std::mutex> locker(mtx_Map);
  *laserCloudCornerForMap = *laserCloudCornerStack[0];
  *laserCloudSurfForMap = *laserCloudSurfStack[0];
  *laserCloudNonFeatureForMap = *laserCloudNonFeatureStack[0];
  transformForMap = transformTobeMapped;
  laserCloudCornerFromLocal->clear();
  laserCloudSurfFromLocal->clear();
laserCloudNonFeatureFromLocal->clear();\n  MapIncrementLocal(laserCloudCornerForMap,laserCloudSurfForMap,laserCloudNonFeatureForMap,transformTobeMapped);\n  locker.unlock();\n}\n\nvoid Estimator::Estimate(std::list<LidarFrame>& lidarFrameList,\n                         const Eigen::Matrix4d& exTlb,\n                         const Eigen::Vector3d& gravity){\n\n  int num_corner_map = 0;\n  int num_surf_map = 0;\n\n  static uint32_t frame_count = 0;\n  int windowSize = lidarFrameList.size();\n  Eigen::Matrix4d transformTobeMapped = Eigen::Matrix4d::Identity();\n  Eigen::Matrix3d exRbl = exTlb.topLeftCorner(3,3).transpose();\n  Eigen::Vector3d exPbl = -1.0 * exRbl * exTlb.topRightCorner(3,1);\n  kdtreeCornerFromLocal->setInputCloud(laserCloudCornerFromLocal);\n  kdtreeSurfFromLocal->setInputCloud(laserCloudSurfFromLocal);\n  kdtreeNonFeatureFromLocal->setInputCloud(laserCloudNonFeatureFromLocal);\n\n  std::unique_lock<std::mutex> locker3(map_manager->mtx_MapManager);\n  for(int i = 0; i < 4851; i++){\n    CornerKdMap[i] = map_manager->getCornerKdMap(i);\n    SurfKdMap[i] = map_manager->getSurfKdMap(i);\n    NonFeatureKdMap[i] = map_manager->getNonFeatureKdMap(i);\n\n    GlobalSurfMap[i] = map_manager->laserCloudSurf_for_match[i];\n    GlobalCornerMap[i] = map_manager->laserCloudCorner_for_match[i];\n    GlobalNonFeatureMap[i] = map_manager->laserCloudNonFeature_for_match[i];\n  }\n  laserCenWidth_last = map_manager->get_laserCloudCenWidth_last();\n  laserCenHeight_last = map_manager->get_laserCloudCenHeight_last();\n  laserCenDepth_last = map_manager->get_laserCloudCenDepth_last();\n\n  locker3.unlock();\n\n  // store point to line features\n  std::vector<std::vector<FeatureLine>> vLineFeatures(windowSize);\n  for(auto& v : vLineFeatures){\n    v.reserve(2000);\n  }\n\n  // store point to plan features\n  std::vector<std::vector<FeaturePlanVec>> vPlanFeatures(windowSize);\n  for(auto& v : vPlanFeatures){\n    v.reserve(2000);\n  }\n\n  
std::vector<std::vector<FeatureNon>> vNonFeatures(windowSize);\n  for(auto& v : vNonFeatures){\n    v.reserve(2000);\n  }\n\n  if(windowSize == SLIDEWINDOWSIZE) {\n    plan_weight_tan = 0.0003;\n    thres_dist = 1.0;\n  } else {\n    plan_weight_tan = 0.0;\n    thres_dist = 25.0;\n  }\n\n  // excute optimize process\n  const int max_iters = 5;\n  for(int iterOpt=0; iterOpt<max_iters; ++iterOpt){\n\n    vector2double(lidarFrameList);\n\n    //create huber loss function\n    ceres::LossFunction* loss_function = NULL;\n    loss_function = new ceres::HuberLoss(0.1 / IMUIntegrator::lidar_m);\n    if(windowSize == SLIDEWINDOWSIZE) {\n      loss_function = NULL;\n    } else {\n      loss_function = new ceres::HuberLoss(0.1 / IMUIntegrator::lidar_m);\n    }\n\n    ceres::Problem::Options problem_options;\n    ceres::Problem problem(problem_options);\n\n    for(int i=0; i<windowSize; ++i) {\n      problem.AddParameterBlock(para_PR[i], 6);\n    }\n\n    for(int i=0; i<windowSize; ++i)\n      problem.AddParameterBlock(para_VBias[i], 9);\n\n    // add Ground CostFunction\n    if (init_ground_plane_coeff.size()) {\n      for(int f=0; f<windowSize; ++f) {\n        auto frame_curr = lidarFrameList.begin();\n        std::advance(frame_curr, f);\n        problem.AddResidualBlock(Cost_NavState_PR_Ground::Create(frame_curr->ground_plane_coeff), \n                                 nullptr, para_PR[f]);\n      }\n    }\n    // add IMU CostFunction\n    for(int f=1; f<windowSize; ++f){\n      auto frame_curr = lidarFrameList.begin();\n      std::advance(frame_curr, f);\n      problem.AddResidualBlock(Cost_NavState_PRV_Bias::Create(frame_curr->imuIntegrator,\n                                                              const_cast<Eigen::Vector3d&>(gravity),\n                                                              Eigen::LLT<Eigen::Matrix<double, 15, 15>>\n                                                                      (frame_curr->imuIntegrator.GetCovariance().inverse())\n    
                                                                  .matrixL().transpose()),\n                               nullptr,\n                               para_PR[f-1],\n                               para_VBias[f-1],\n                               para_PR[f],\n                               para_VBias[f]);\n    }\n\n    if (last_marginalization_info){\n      // construct new marginlization_factor\n      auto *marginalization_factor = new MarginalizationFactor(last_marginalization_info);\n      problem.AddResidualBlock(marginalization_factor, nullptr,\n                               last_marginalization_parameter_blocks);\n    }\n\n    Eigen::Quaterniond q_before_opti = lidarFrameList.back().Q;\n    Eigen::Vector3d t_before_opti = lidarFrameList.back().P;\n\n    std::vector<std::vector<ceres::CostFunction *>> edgesLine(windowSize);\n    std::vector<std::vector<ceres::CostFunction *>> edgesPlan(windowSize);\n    std::vector<std::vector<ceres::CostFunction *>> edgesNon(windowSize);\n    std::thread threads[3];\n    for(int f=0; f<windowSize; ++f) {\n      auto frame_curr = lidarFrameList.begin();\n      std::advance(frame_curr, f);\n      transformTobeMapped = Eigen::Matrix4d::Identity();\n      transformTobeMapped.topLeftCorner(3,3) = frame_curr->Q * exRbl;\n      transformTobeMapped.topRightCorner(3,1) = frame_curr->Q * exPbl + frame_curr->P;\n\n      threads[0] = std::thread(&Estimator::processPointToLine, this,\n                               std::ref(edgesLine[f]),\n                               std::ref(vLineFeatures[f]),\n                               std::ref(laserCloudCornerStack[f]),\n                               std::ref(laserCloudCornerFromLocal),\n                               std::ref(kdtreeCornerFromLocal),\n                               std::ref(exTlb),\n                               std::ref(transformTobeMapped));\n\n      threads[1] = std::thread(&Estimator::processPointToPlanVec, this,\n                               
std::ref(edgesPlan[f]),\n                               std::ref(vPlanFeatures[f]),\n                               std::ref(laserCloudSurfStack[f]),\n                               std::ref(laserCloudSurfFromLocal),\n                               std::ref(kdtreeSurfFromLocal),\n                               std::ref(exTlb),\n                               std::ref(transformTobeMapped));\n\n      threads[2] = std::thread(&Estimator::processNonFeatureICP, this,\n                               std::ref(edgesNon[f]),\n                               std::ref(vNonFeatures[f]),\n                               std::ref(laserCloudNonFeatureStack[f]),\n                               std::ref(laserCloudNonFeatureFromLocal),\n                               std::ref(kdtreeNonFeatureFromLocal),\n                               std::ref(exTlb),\n                               std::ref(transformTobeMapped));\n\n      threads[0].join();\n      threads[1].join();\n      threads[2].join();\n    }\n\n    int cntSurf = 0;\n    int cntCorner = 0;\n    int cntNon = 0;\n    if(windowSize == SLIDEWINDOWSIZE) {\n      thres_dist = 1.0;\n      if(iterOpt == 0){\n        for(int f=0; f<windowSize; ++f){\n          int cntFtu = 0;\n          // std::cout << \"edgesLine[f].size = \" << edgesLine[f].size() << std::endl;\n          for (auto &e : edgesLine[f]) {\n            if(std::fabs(vLineFeatures[f][cntFtu].error) > 1e-5){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n              vLineFeatures[f][cntFtu].valid = true;\n            }else{\n              vLineFeatures[f][cntFtu].valid = false;\n            }\n            cntFtu++;\n            cntCorner++;\n          }\n\n          cntFtu = 0;\n          // std::cout << \"edgesPlan[f]\" << edgesPlan[f].size() << std::endl;\n          for (auto &e : edgesPlan[f]) {\n            if(std::fabs(vPlanFeatures[f][cntFtu].error) > 1e-5){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n              
vPlanFeatures[f][cntFtu].valid = true;\n            }else{\n              vPlanFeatures[f][cntFtu].valid = false;\n            }\n            cntFtu++;\n            cntSurf++;\n          }\n\n          cntFtu = 0;\n          for (auto &e : edgesNon[f]) {\n            if(std::fabs(vNonFeatures[f][cntFtu].error) > 1e-5){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n              vNonFeatures[f][cntFtu].valid = true;\n            }else{\n              vNonFeatures[f][cntFtu].valid = false;\n            }\n            cntFtu++;\n            cntNon++;\n          }\n        }\n      }else{\n        for(int f=0; f<windowSize; ++f){\n          int cntFtu = 0;\n          for (auto &e : edgesLine[f]) {\n            if(vLineFeatures[f][cntFtu].valid) {\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n            }\n            cntFtu++;\n            cntCorner++;\n          }\n          cntFtu = 0;\n          for (auto &e : edgesPlan[f]) {\n            if(vPlanFeatures[f][cntFtu].valid){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n            }\n            cntFtu++;\n            cntSurf++;\n          }\n\n          cntFtu = 0;\n          for (auto &e : edgesNon[f]) {\n            if(vNonFeatures[f][cntFtu].valid){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n            }\n            cntFtu++;\n            cntNon++;\n          }\n        }\n      }\n    } else {\n        if(iterOpt == 0) {\n          thres_dist = 10.0;\n        } else {\n          thres_dist = 1.0;\n        }\n        for(int f=0; f<windowSize; ++f){\n          int cntFtu = 0;\n          for (auto &e : edgesLine[f]) {\n            if(std::fabs(vLineFeatures[f][cntFtu].error) > 1e-5){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n              vLineFeatures[f][cntFtu].valid = true;\n            }else{\n              vLineFeatures[f][cntFtu].valid = false;\n            }\n          
  cntFtu++;\n            cntCorner++;\n          }\n          cntFtu = 0;\n          for (auto &e : edgesPlan[f]) {\n            if(std::fabs(vPlanFeatures[f][cntFtu].error) > 1e-5){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n              vPlanFeatures[f][cntFtu].valid = true;\n            }else{\n              vPlanFeatures[f][cntFtu].valid = false;\n            }\n            cntFtu++;\n            cntSurf++;\n          }\n\n          cntFtu = 0;\n          for (auto &e : edgesNon[f]) {\n            if(std::fabs(vNonFeatures[f][cntFtu].error) > 1e-5){\n              problem.AddResidualBlock(e, loss_function, para_PR[f]);\n              vNonFeatures[f][cntFtu].valid = true;\n            }else{\n              vNonFeatures[f][cntFtu].valid = false;\n            }\n            cntFtu++;\n            cntNon++;\n          }\n        }\n    }\n\n    ceres::Solver::Options options;\n    options.linear_solver_type = ceres::DENSE_SCHUR;\n    options.trust_region_strategy_type = ceres::DOGLEG;\n    options.max_num_iterations = 10;\n    options.minimizer_progress_to_stdout = false;\n    options.num_threads = 6;\n    ceres::Solver::Summary summary;\n    ceres::Solve(options, &problem, &summary);\n\n    double2vector(lidarFrameList);\n\n    Eigen::Quaterniond q_after_opti = lidarFrameList.back().Q;\n    Eigen::Vector3d t_after_opti = lidarFrameList.back().P;\n    Eigen::Vector3d V_after_opti = lidarFrameList.back().V;\n    double deltaR = (q_before_opti.angularDistance(q_after_opti)) * 180.0 / M_PI;\n    double deltaT = (t_before_opti - t_after_opti).norm();\n\n    if (deltaR < 0.05 && deltaT < 0.05 || (iterOpt+1) == max_iters){\n      ROS_INFO(\"Frame: %d\\n\",frame_count++);\n      if(windowSize != SLIDEWINDOWSIZE) break;\n      // apply marginalization\n      auto *marginalization_info = new MarginalizationInfo();\n      if (last_marginalization_info){\n        std::vector<int> drop_set;\n        for (int i = 0; i < 
static_cast<int>(last_marginalization_parameter_blocks.size()); i++)\n        {\n          if (last_marginalization_parameter_blocks[i] == para_PR[0] ||\n              last_marginalization_parameter_blocks[i] == para_VBias[0])\n            drop_set.push_back(i);\n        }\n\n        auto *marginalization_factor = new MarginalizationFactor(last_marginalization_info);\n        auto *residual_block_info = new ResidualBlockInfo(marginalization_factor, nullptr,\n                                                          last_marginalization_parameter_blocks,\n                                                          drop_set);\n        marginalization_info->addResidualBlockInfo(residual_block_info);\n      }\n      \n      auto frame_curr = lidarFrameList.begin();\n      std::advance(frame_curr, 1);\n      ceres::CostFunction* IMU_Cost = Cost_NavState_PRV_Bias::Create(frame_curr->imuIntegrator,\n                                                                     const_cast<Eigen::Vector3d&>(gravity),\n                                                                     Eigen::LLT<Eigen::Matrix<double, 15, 15>>\n                                                                             (frame_curr->imuIntegrator.GetCovariance().inverse())\n                                                                             .matrixL().transpose());\n      auto *residual_block_info = new ResidualBlockInfo(IMU_Cost, nullptr,\n                                                        std::vector<double *>{para_PR[0], para_VBias[0], para_PR[1], para_VBias[1]},\n                                                        std::vector<int>{0, 1});\n      marginalization_info->addResidualBlockInfo(residual_block_info);\n\n      // marginalize the Ground constraint\n      if (init_ground_plane_coeff.size()) {\n        ceres::CostFunction* Ground_Cost = Cost_NavState_PR_Ground::Create(lidarFrameList.begin()->ground_plane_coeff);\n        auto *ground_residual_block_info = new 
ResidualBlockInfo(Ground_Cost, nullptr, \n                                                          std::vector<double *>{para_PR[0]},\n                                                          std::vector<int>{0});\n        marginalization_info->addResidualBlockInfo(ground_residual_block_info);     \n      }\n\n      int f = 0;\n      transformTobeMapped = Eigen::Matrix4d::Identity();\n      transformTobeMapped.topLeftCorner(3,3) = frame_curr->Q * exRbl;\n      transformTobeMapped.topRightCorner(3,1) = frame_curr->Q * exPbl + frame_curr->P;\n      edgesLine[f].clear();\n      edgesPlan[f].clear();\n      edgesNon[f].clear();\n      threads[0] = std::thread(&Estimator::processPointToLine, this,\n                               std::ref(edgesLine[f]),\n                               std::ref(vLineFeatures[f]),\n                               std::ref(laserCloudCornerStack[f]),\n                               std::ref(laserCloudCornerFromLocal),\n                               std::ref(kdtreeCornerFromLocal),\n                               std::ref(exTlb),\n                               std::ref(transformTobeMapped));\n\n      threads[1] = std::thread(&Estimator::processPointToPlanVec, this,\n                               std::ref(edgesPlan[f]),\n                               std::ref(vPlanFeatures[f]),\n                               std::ref(laserCloudSurfStack[f]),\n                               std::ref(laserCloudSurfFromLocal),\n                               std::ref(kdtreeSurfFromLocal),\n                               std::ref(exTlb),\n                               std::ref(transformTobeMapped));\n\n      threads[2] = std::thread(&Estimator::processNonFeatureICP, this,\n                               std::ref(edgesNon[f]),\n                               std::ref(vNonFeatures[f]),\n                               std::ref(laserCloudNonFeatureStack[f]),\n                               std::ref(laserCloudNonFeatureFromLocal),\n                               
std::ref(kdtreeNonFeatureFromLocal),\n                               std::ref(exTlb),\n                               std::ref(transformTobeMapped));      \n                      \n      threads[0].join();\n      threads[1].join();\n      threads[2].join();\n      int cntFtu = 0;\n      for (auto &e : edgesLine[f]) {\n        if(vLineFeatures[f][cntFtu].valid){\n          auto *residual_block_info = new ResidualBlockInfo(e, nullptr,\n                                                            std::vector<double *>{para_PR[0]},\n                                                            std::vector<int>{0});\n          marginalization_info->addResidualBlockInfo(residual_block_info);\n        }\n        cntFtu++;\n      }\n      cntFtu = 0;\n      for (auto &e : edgesPlan[f]) {\n        if(vPlanFeatures[f][cntFtu].valid){\n          auto *residual_block_info = new ResidualBlockInfo(e, nullptr,\n                                                            std::vector<double *>{para_PR[0]},\n                                                            std::vector<int>{0});\n          marginalization_info->addResidualBlockInfo(residual_block_info);\n        }\n        cntFtu++;\n      }\n\n      cntFtu = 0;\n      for (auto &e : edgesNon[f]) {\n        if(vNonFeatures[f][cntFtu].valid){\n          auto *residual_block_info = new ResidualBlockInfo(e, nullptr,\n                                                            std::vector<double *>{para_PR[0]},\n                                                            std::vector<int>{0});\n          marginalization_info->addResidualBlockInfo(residual_block_info);\n        }\n        cntFtu++;\n      }\n\n      marginalization_info->preMarginalize();\n      marginalization_info->marginalize();\n\n      std::unordered_map<long, double *> addr_shift;\n      for (int i = 1; i < SLIDEWINDOWSIZE; i++)\n      {\n        addr_shift[reinterpret_cast<long>(para_PR[i])] = para_PR[i - 1];\n        
addr_shift[reinterpret_cast<long>(para_VBias[i])] = para_VBias[i - 1];\n      }\n      std::vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);\n\n      delete last_marginalization_info;\n      last_marginalization_info = marginalization_info;\n      last_marginalization_parameter_blocks = parameter_blocks;\n      break;\n    }\n\n    if(windowSize != SLIDEWINDOWSIZE) {\n      for(int f=0; f<windowSize; ++f){\n        edgesLine[f].clear();\n        edgesPlan[f].clear();\n        edgesNon[f].clear();\n        vLineFeatures[f].clear();\n        vPlanFeatures[f].clear();\n        vNonFeatures[f].clear();\n      }\n    }\n  }\n\n}\nvoid Estimator::MapIncrementLocal(const pcl::PointCloud<PointType>::Ptr& laserCloudCornerStack,\n                                  const pcl::PointCloud<PointType>::Ptr& laserCloudSurfStack,\n                                  const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureStack,\n                                  const Eigen::Matrix4d& transformTobeMapped){\n  int laserCloudCornerStackNum = laserCloudCornerStack->points.size();\n  int laserCloudSurfStackNum = laserCloudSurfStack->points.size();\n  int laserCloudNonFeatureStackNum = laserCloudNonFeatureStack->points.size();\n  PointType pointSel;\n  PointType pointSel2;\n  size_t Id = localMapID % localMapWindowSize;\n  localCornerMap[Id]->clear();\n  localSurfMap[Id]->clear();\n  localNonFeatureMap[Id]->clear();\n  for (int i = 0; i < laserCloudCornerStackNum; i++) {\n    MAP_MANAGER::pointAssociateToMap(&laserCloudCornerStack->points[i], &pointSel, transformTobeMapped);\n    localCornerMap[Id]->push_back(pointSel);\n  }\n  for (int i = 0; i < laserCloudSurfStackNum; i++) {\n    MAP_MANAGER::pointAssociateToMap(&laserCloudSurfStack->points[i], &pointSel2, transformTobeMapped);\n    localSurfMap[Id]->push_back(pointSel2);\n  }\n  for (int i = 0; i < laserCloudNonFeatureStackNum; i++) {\n    
MAP_MANAGER::pointAssociateToMap(&laserCloudNonFeatureStack->points[i], &pointSel2, transformTobeMapped);\n    localNonFeatureMap[Id]->push_back(pointSel2);\n  }\n\n  for (int i = 0; i < localMapWindowSize; i++) {\n    *laserCloudCornerFromLocal += *localCornerMap[i];\n    *laserCloudSurfFromLocal += *localSurfMap[i];\n    *laserCloudNonFeatureFromLocal += *localNonFeatureMap[i];\n  }\n  pcl::PointCloud<PointType>::Ptr temp(new pcl::PointCloud<PointType>());\n  downSizeFilterCorner.setInputCloud(laserCloudCornerFromLocal);\n  downSizeFilterCorner.filter(*temp);\n  laserCloudCornerFromLocal = temp;\n  pcl::PointCloud<PointType>::Ptr temp2(new pcl::PointCloud<PointType>());\n  downSizeFilterSurf.setInputCloud(laserCloudSurfFromLocal);\n  downSizeFilterSurf.filter(*temp2);\n  laserCloudSurfFromLocal = temp2;\n  pcl::PointCloud<PointType>::Ptr temp3(new pcl::PointCloud<PointType>());\n  downSizeFilterNonFeature.setInputCloud(laserCloudNonFeatureFromLocal);\n  downSizeFilterNonFeature.filter(*temp3);\n  laserCloudNonFeatureFromLocal = temp3;\n  localMapID ++;\n}"
  },
  {
    "path": "LIO-Livox/src/lio/IMUIntegrator.cpp",
    "content": "#include \"IMUIntegrator/IMUIntegrator.h\"\n\nIMUIntegrator::IMUIntegrator(){\n  Reset();\n  noise.setZero();\n  noise.block<3, 3>(0, 0) =  Eigen::Matrix3d::Identity() * gyr_n * gyr_n;\n  noise.block<3, 3>(3, 3) =  Eigen::Matrix3d::Identity() * acc_n * acc_n;\n  noise.block<3, 3>(6, 6) =  Eigen::Matrix3d::Identity() * gyr_w * gyr_w;\n  noise.block<3, 3>(9, 9) =  Eigen::Matrix3d::Identity() * acc_w * acc_w;\n}\n\n/** \\brief constructor of IMUIntegrator\n * \\param[in] vIMU: IMU messages need to be integrated\n */\nIMUIntegrator::IMUIntegrator(std::vector<sensor_msgs::ImuConstPtr> vIMU):\nvimuMsg(std::move(vIMU)){\n  Reset();\n  noise.setZero();\n  noise.block<3, 3>(0, 0) =  Eigen::Matrix3d::Identity() * gyr_n * gyr_n;\n  noise.block<3, 3>(3, 3) =  Eigen::Matrix3d::Identity() * acc_n * acc_n;\n  noise.block<3, 3>(6, 6) =  Eigen::Matrix3d::Identity() * gyr_w * gyr_w;\n  noise.block<3, 3>(9, 9) =  Eigen::Matrix3d::Identity() * acc_w * acc_w;\n}\n\nvoid IMUIntegrator::Reset(){\n  dq.setIdentity();\n  dp.setZero();\n  dv.setZero();\n  dtime = 0;\n  covariance.setZero();\n  jacobian.setIdentity();\n  linearized_bg.setZero();\n  linearized_ba.setZero();\n}\n\nconst Eigen::Quaterniond & IMUIntegrator::GetDeltaQ() const {return dq;}\n\nconst Eigen::Vector3d & IMUIntegrator::GetDeltaP() const {return dp;}\n\nconst Eigen::Vector3d & IMUIntegrator::GetDeltaV() const {return dv;}\n\nconst double & IMUIntegrator::GetDeltaTime() const {return dtime;}\n\nconst Eigen::Vector3d & IMUIntegrator::GetBiasGyr() const {return linearized_bg;}\n\nconst Eigen::Vector3d& IMUIntegrator::GetBiasAcc() const {return linearized_ba;}\n\nconst Eigen::Matrix<double, 15, 15>& IMUIntegrator::GetCovariance(){return covariance;}\n\nconst Eigen::Matrix<double, 15, 15> & IMUIntegrator::GetJacobian() const {return jacobian;}\n\nvoid IMUIntegrator::PushIMUMsg(const sensor_msgs::ImuConstPtr& imu){\n  vimuMsg.push_back(imu);\n}\nvoid IMUIntegrator::PushIMUMsg(const 
std::vector<sensor_msgs::ImuConstPtr>& vimu){\n  vimuMsg.insert(vimuMsg.end(), vimu.begin(), vimu.end());\n}\nconst std::vector<sensor_msgs::ImuConstPtr> & IMUIntegrator::GetIMUMsg() const {return vimuMsg;}\n\nvoid IMUIntegrator::GyroIntegration(double lastTime){\n  double current_time = lastTime;\n  for(auto & imu : vimuMsg){\n    Eigen::Vector3d gyr;\n    gyr << imu->angular_velocity.x,\n            imu->angular_velocity.y,\n            imu->angular_velocity.z;\n    double dt = imu->header.stamp.toSec() - current_time;\n    ROS_ASSERT(dt >= 0);\n    Eigen::Matrix3d dR = Sophus::SO3d::exp(gyr*dt).matrix();\n    Eigen::Quaterniond qr(dq*dR);\n    if (qr.w()<0)\n      qr.coeffs() *= -1;\n    dq = qr.normalized();\n    current_time = imu->header.stamp.toSec();\n  }\n}\n\nvoid IMUIntegrator::PreIntegration(double lastTime, const Eigen::Vector3d& bg, const Eigen::Vector3d& ba){\n  Reset();\n  linearized_bg = bg;\n  linearized_ba = ba;\n  double current_time = lastTime;\n  for(auto & imu : vimuMsg){\n    Eigen::Vector3d gyr;\n    gyr <<  imu->angular_velocity.x,\n            imu->angular_velocity.y,\n            imu->angular_velocity.z;\n    Eigen::Vector3d acc;\n    acc << imu->linear_acceleration.x * gnorm,\n            imu->linear_acceleration.y * gnorm,\n            imu->linear_acceleration.z * gnorm;\n    double dt = imu->header.stamp.toSec() - current_time;\n    if(dt <= 0 )\n      ROS_WARN(\"dt <= 0\");\n    gyr -= bg;\n    acc -= ba;\n    double dt2 = dt*dt;\n    Eigen::Vector3d gyr_dt = gyr*dt;\n    Eigen::Matrix3d dR = Sophus::SO3d::exp(gyr_dt).matrix();\n    Eigen::Matrix3d Jr = Eigen::Matrix3d::Identity();\n    double gyr_dt_norm = gyr_dt.norm();\n    if(gyr_dt_norm > 0.00001){\n      Eigen::Vector3d k = gyr_dt.normalized();\n      Eigen::Matrix3d K = Sophus::SO3d::hat(k);\n      Jr =   Eigen::Matrix3d::Identity()\n             - (1-cos(gyr_dt_norm))/gyr_dt_norm*K\n             + (1-sin(gyr_dt_norm)/gyr_dt_norm)*K*K;\n    }\n    Eigen::Matrix<double,15,15> A 
= Eigen::Matrix<double,15,15>::Identity();\n    A.block<3,3>(0,3) = -0.5*dq.matrix()*Sophus::SO3d::hat(acc)*dt2;\n    A.block<3,3>(0,6) = Eigen::Matrix3d::Identity()*dt;\n    A.block<3,3>(0,12) = -0.5*dq.matrix()*dt2;\n    A.block<3,3>(3,3) = dR.transpose();\n    A.block<3,3>(3,9) = - Jr*dt;\n    A.block<3,3>(6,3) = -dq.matrix()*Sophus::SO3d::hat(acc)*dt;\n    A.block<3,3>(6,12) = -dq.matrix()*dt;\n    Eigen::Matrix<double,15,12> B = Eigen::Matrix<double,15,12>::Zero();\n    B.block<3,3>(0,3) = 0.5*dq.matrix()*dt2;\n    B.block<3,3>(3,0) = Jr*dt;\n    B.block<3,3>(6,3) = dq.matrix()*dt;\n    B.block<3,3>(9,6) = Eigen::Matrix3d::Identity()*dt;\n    B.block<3,3>(12,9) = Eigen::Matrix3d::Identity()*dt;\n    jacobian = A * jacobian;\n    covariance = A * covariance * A.transpose() + B * noise * B.transpose();\n    dp += dv*dt + 0.5*dq.matrix()*acc*dt2;\n    dv += dq.matrix()*acc*dt;\n    Eigen::Matrix3d m3dR = dq.matrix()*dR;\n    Eigen::Quaterniond qtmp(m3dR);\n    if (qtmp.w()<0)\n      qtmp.coeffs() *= -1;\n    dq = qtmp.normalized();\n    dtime += dt;\n    current_time = imu->header.stamp.toSec();\n  }\n}\n\nEigen::Vector3d IMUIntegrator::GetAverageAcc() {\n  int i = 0;\n  Eigen::Vector3d sum_acc(0, 0, 0);\n  for(auto & imu : vimuMsg){\n    Eigen::Vector3d acc;\n    acc << imu->linear_acceleration.x * gnorm,\n           imu->linear_acceleration.y * gnorm,\n           imu->linear_acceleration.z * gnorm;\n    sum_acc += acc;\n    i++;\n    if(i > 30) break;\n  }\n  return sum_acc / i;\n}\n\n"
  },
  {
    "path": "LIO-Livox/src/lio/LidarFeatureExtractor.cpp",
    "content": "#include \"LidarFeatureExtractor/LidarFeatureExtractor.h\"\n\nLidarFeatureExtractor::LidarFeatureExtractor(int n_scans,int NumCurvSize,float DistanceFaraway,int NumFlat,\n                                             int PartNum,float FlatThreshold,float BreakCornerDis,float LidarNearestDis,float KdTreeCornerOutlierDis)\n                                             :N_SCANS(n_scans),\n                                              thNumCurvSize(NumCurvSize),\n                                              thDistanceFaraway(DistanceFaraway),\n                                              thNumFlat(NumFlat),\n                                              thPartNum(PartNum),\n                                              thFlatThreshold(FlatThreshold),\n                                              thBreakCornerDis(BreakCornerDis),\n                                              thLidarNearestDis(LidarNearestDis){\n  vlines.resize(N_SCANS);\n  for(auto & ptr : vlines){\n    ptr.reset(new pcl::PointCloud<PointType>());\n  }\n  vcorner.resize(N_SCANS);\n  vsurf.resize(N_SCANS);\n}\n\nbool LidarFeatureExtractor::plane_judge(const std::vector<PointType>& point_list,const int plane_threshold)\n{\n  int num = point_list.size();\n  float cx = 0;\n  float cy = 0;\n  float cz = 0;\n  for (int j = 0; j < num; j++) {\n    cx += point_list[j].x;\n    cy += point_list[j].y;\n    cz += point_list[j].z;\n  }\n  cx /= num;\n  cy /= num;\n  cz /= num;\n  //mean square error\n  float a11 = 0;\n  float a12 = 0;\n  float a13 = 0;\n  float a22 = 0;\n  float a23 = 0;\n  float a33 = 0;\n  for (int j = 0; j < num; j++) {\n    float ax = point_list[j].x - cx;\n    float ay = point_list[j].y - cy;\n    float az = point_list[j].z - cz;\n\n    a11 += ax * ax;\n    a12 += ax * ay;\n    a13 += ax * az;\n    a22 += ay * ay;\n    a23 += ay * az;\n    a33 += az * az;\n  }\n  a11 /= num;\n  a12 /= num;\n  a13 /= num;\n  a22 /= num;\n  a23 /= num;\n  a33 /= num;\n\n  Eigen::Matrix< double, 
3, 3 > _matA1;\n  _matA1.setZero();\n  Eigen::Matrix< double, 3, 1 > _matD1;\n  _matD1.setZero();\n  Eigen::Matrix< double, 3, 3 > _matV1;\n  _matV1.setZero();\n\n  _matA1(0, 0) = a11;\n  _matA1(0, 1) = a12;\n  _matA1(0, 2) = a13;\n  _matA1(1, 0) = a12;\n  _matA1(1, 1) = a22;\n  _matA1(1, 2) = a23;\n  _matA1(2, 0) = a13;\n  _matA1(2, 1) = a23;\n  _matA1(2, 2) = a33;\n\n  Eigen::JacobiSVD<Eigen::Matrix3d> svd(_matA1, Eigen::ComputeThinU | Eigen::ComputeThinV);\n  _matD1 = svd.singularValues();\n  _matV1 = svd.matrixU();\n  if (_matD1(0, 0) < plane_threshold * _matD1(1, 0)) {\n    return true;\n  }\n  else{\n    return false;\n  }\n}\n\nvoid LidarFeatureExtractor::detectFeaturePoint(pcl::PointCloud<PointType>::Ptr& cloud,\n                                                std::vector<int>& pointsLessSharp,\n                                                std::vector<int>& pointsLessFlat){\n  int CloudFeatureFlag[20000];\n  float cloudCurvature[20000];\n  float cloudDepth[20000];\n  int cloudSortInd[20000];\n  float cloudReflect[20000];\n  int reflectSortInd[20000];\n  int cloudAngle[20000];\n\n  pcl::PointCloud<PointType>::Ptr& laserCloudIn = cloud;\n\n  int cloudSize = laserCloudIn->points.size();\n\n  PointType point;\n  pcl::PointCloud<PointType>::Ptr _laserCloud(new pcl::PointCloud<PointType>());\n  _laserCloud->reserve(cloudSize);\n\n  for (int i = 0; i < cloudSize; i++) {\n    point.x = laserCloudIn->points[i].x;\n    point.y = laserCloudIn->points[i].y;\n    point.z = laserCloudIn->points[i].z;\n#ifdef UNDISTORT\n    point.normal_x = laserCloudIn.points[i].normal_x;\n#else\n    point.normal_x = 1.0;\n#endif\n    point.intensity = laserCloudIn->points[i].intensity;\n\n    if (!pcl_isfinite(point.x) ||\n        !pcl_isfinite(point.y) ||\n        !pcl_isfinite(point.z)) {\n      continue;\n    }\n\n    _laserCloud->push_back(point);\n    CloudFeatureFlag[i] = 0;\n  }\n\n  cloudSize = _laserCloud->size();\n\n  int debugnum1 = 0;\n  int debugnum2 = 0;\n  int 
debugnum3 = 0;\n  int debugnum4 = 0;\n  int debugnum5 = 0;\n\n  int count_num = 1;\n  bool left_surf_flag = false;\n  bool right_surf_flag = false;\n\n  //---------------------------------------- surf feature extract ---------------------------------------------\n  int scanStartInd = 5;\n  int scanEndInd = cloudSize - 6;\n\n  int thDistanceFaraway_fea = 0;\n\n  for (int i = 5; i < cloudSize - 5; i ++ ) {\n\n    float diffX = 0;\n    float diffY = 0;\n    float diffZ = 0;\n\n    float dis = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x +\n                     _laserCloud->points[i].y * _laserCloud->points[i].y +\n                     _laserCloud->points[i].z * _laserCloud->points[i].z);\n\n    Eigen::Vector3d pt_last(_laserCloud->points[i-1].x, _laserCloud->points[i-1].y, _laserCloud->points[i-1].z);\n    Eigen::Vector3d pt_cur(_laserCloud->points[i].x, _laserCloud->points[i].y, _laserCloud->points[i].z);\n    Eigen::Vector3d pt_next(_laserCloud->points[i+1].x, _laserCloud->points[i+1].y, _laserCloud->points[i+1].z);\n\n    double angle_last = (pt_last-pt_cur).dot(pt_cur) / ((pt_last-pt_cur).norm()*pt_cur.norm());\n    double angle_next = (pt_next-pt_cur).dot(pt_cur) / ((pt_next-pt_cur).norm()*pt_cur.norm());\n \n    if (dis > thDistanceFaraway || (fabs(angle_last) > 0.966 && fabs(angle_next) > 0.966)) {\n      thNumCurvSize = 2;\n    } else {\n      thNumCurvSize = 3;\n    }\n\n    if(fabs(angle_last) > 0.966 && fabs(angle_next) > 0.966) {\n      cloudAngle[i] = 1;\n    }\n\n    float diffR = -2 * thNumCurvSize * _laserCloud->points[i].intensity;\n    for (int j = 1; j <= thNumCurvSize; ++j) {\n      diffX += _laserCloud->points[i - j].x + _laserCloud->points[i + j].x;\n      diffY += _laserCloud->points[i - j].y + _laserCloud->points[i + j].y;\n      diffZ += _laserCloud->points[i - j].z + _laserCloud->points[i + j].z;\n      diffR += _laserCloud->points[i - j].intensity + _laserCloud->points[i + j].intensity;\n    }\n    diffX -= 2 * thNumCurvSize * 
_laserCloud->points[i].x;\n    diffY -= 2 * thNumCurvSize * _laserCloud->points[i].y;\n    diffZ -= 2 * thNumCurvSize * _laserCloud->points[i].z;\n\n    cloudDepth[i] = dis;\n    cloudCurvature[i] = diffX * diffX + diffY * diffY + diffZ * diffZ;// / (2 * thNumCurvSize * dis + 1e-3);\n    cloudSortInd[i] = i;\n    cloudReflect[i] = diffR;\n    reflectSortInd[i] = i;\n\n  }\n\n  for (int j = 0; j < thPartNum; j++) {\n    int sp = scanStartInd + (scanEndInd - scanStartInd) * j / thPartNum;\n    int ep = scanStartInd + (scanEndInd - scanStartInd) * (j + 1) / thPartNum - 1;\n\n    // sort the curvatures from small to large\n    for (int k = sp + 1; k <= ep; k++) {\n      for (int l = k; l >= sp + 1; l--) {\n        if (cloudCurvature[cloudSortInd[l]] <\n            cloudCurvature[cloudSortInd[l - 1]]) {\n          int temp = cloudSortInd[l - 1];\n          cloudSortInd[l - 1] = cloudSortInd[l];\n          cloudSortInd[l] = temp;\n        }\n      }\n    }\n\n    // sort the reflectivity from small to large\n    for (int k = sp + 1; k <= ep; k++) {\n      for (int l = k; l >= sp + 1; l--) {\n        if (cloudReflect[reflectSortInd[l]] <\n            cloudReflect[reflectSortInd[l - 1]]) {\n          int temp = reflectSortInd[l - 1];\n          reflectSortInd[l - 1] = reflectSortInd[l];\n          reflectSortInd[l] = temp;\n        }\n      }\n    }\n\n    int smallestPickedNum = 1;\n    int sharpestPickedNum = 1;\n    for (int k = sp; k <= ep; k++) {\n      int ind = cloudSortInd[k];\n\n      if (CloudFeatureFlag[ind] != 0) continue;\n\n      if (cloudCurvature[ind] < thFlatThreshold * cloudDepth[ind] * thFlatThreshold * cloudDepth[ind]) {\n        \n        CloudFeatureFlag[ind] = 3;\n\n        for (int l = 1; l <= thNumCurvSize; l++) {\n          float diffX = _laserCloud->points[ind + l].x -\n                        _laserCloud->points[ind + l - 1].x;\n          float diffY = _laserCloud->points[ind + l].y -\n                        _laserCloud->points[ind + l - 
1].y;\n          float diffZ = _laserCloud->points[ind + l].z -\n                        _laserCloud->points[ind + l - 1].z;\n          if (diffX * diffX + diffY * diffY + diffZ * diffZ > 0.02 || cloudDepth[ind] > thDistanceFaraway) {\n            break;\n          }\n\n          CloudFeatureFlag[ind + l] = 1;\n        }\n        for (int l = -1; l >= -thNumCurvSize; l--) {\n          float diffX = _laserCloud->points[ind + l].x -\n                        _laserCloud->points[ind + l + 1].x;\n          float diffY = _laserCloud->points[ind + l].y -\n                        _laserCloud->points[ind + l + 1].y;\n          float diffZ = _laserCloud->points[ind + l].z -\n                        _laserCloud->points[ind + l + 1].z;\n          if (diffX * diffX + diffY * diffY + diffZ * diffZ > 0.02 || cloudDepth[ind] > thDistanceFaraway) {\n            break;\n          }\n\n          CloudFeatureFlag[ind + l] = 1;\n        }\n      }\n    }\n    \n    for (int k = sp; k <= ep; k++) {\n      int ind = cloudSortInd[k];\n      if(((CloudFeatureFlag[ind] == 3) && (smallestPickedNum <= thNumFlat)) || \n          ((CloudFeatureFlag[ind] == 3) && (cloudDepth[ind] > thDistanceFaraway)) ||\n          cloudAngle[ind] == 1){\n        smallestPickedNum ++;\n        CloudFeatureFlag[ind] = 2;\n        if(cloudDepth[ind] > thDistanceFaraway) {\n          thDistanceFaraway_fea++;\n        }\n      }\n\n      int idx = reflectSortInd[k];\n      if(cloudCurvature[idx] < 0.7 * thFlatThreshold * cloudDepth[idx] * thFlatThreshold * cloudDepth[idx]\n         && sharpestPickedNum <= 3 && cloudReflect[idx] > 20.0){\n        sharpestPickedNum ++;\n        CloudFeatureFlag[idx] = 300;\n      }\n    }\n    \n  }\n\n  //---------------------------------------- line feature where surfaces meet -------------------------------------\n  for (int i = 5; i < cloudSize - 5; i += count_num ) {\n    float depth = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x +\n                       
_laserCloud->points[i].y * _laserCloud->points[i].y +\n                       _laserCloud->points[i].z * _laserCloud->points[i].z);\n    //left curvature\n    float ldiffX =\n            _laserCloud->points[i - 4].x + _laserCloud->points[i - 3].x\n            - 4 * _laserCloud->points[i - 2].x\n            + _laserCloud->points[i - 1].x + _laserCloud->points[i].x;\n\n    float ldiffY =\n            _laserCloud->points[i - 4].y + _laserCloud->points[i - 3].y\n            - 4 * _laserCloud->points[i - 2].y\n            + _laserCloud->points[i - 1].y + _laserCloud->points[i].y;\n\n    float ldiffZ =\n            _laserCloud->points[i - 4].z + _laserCloud->points[i - 3].z\n            - 4 * _laserCloud->points[i - 2].z\n            + _laserCloud->points[i - 1].z + _laserCloud->points[i].z;\n\n    float left_curvature = ldiffX * ldiffX + ldiffY * ldiffY + ldiffZ * ldiffZ;\n\n    if(left_curvature < thFlatThreshold * depth){\n\n      std::vector<PointType> left_list;\n\n      for(int j = -4; j < 0; j++){\n        left_list.push_back(_laserCloud->points[i + j]);\n      }\n\n      left_surf_flag = true;\n    }\n    else{\n      left_surf_flag = false;\n    }\n\n    //right curvature\n    float rdiffX =\n            _laserCloud->points[i + 4].x + _laserCloud->points[i + 3].x\n            - 4 * _laserCloud->points[i + 2].x\n            + _laserCloud->points[i + 1].x + _laserCloud->points[i].x;\n\n    float rdiffY =\n            _laserCloud->points[i + 4].y + _laserCloud->points[i + 3].y\n            - 4 * _laserCloud->points[i + 2].y\n            + _laserCloud->points[i + 1].y + _laserCloud->points[i].y;\n\n    float rdiffZ =\n            _laserCloud->points[i + 4].z + _laserCloud->points[i + 3].z\n            - 4 * _laserCloud->points[i + 2].z\n            + _laserCloud->points[i + 1].z + _laserCloud->points[i].z;\n\n    float right_curvature = rdiffX * rdiffX + rdiffY * rdiffY + rdiffZ * rdiffZ;\n\n    if(right_curvature < thFlatThreshold * depth){\n      
std::vector<PointType> right_list;\n\n      for(int j = 1; j < 5; j++){\n        right_list.push_back(_laserCloud->points[i + j]);\n      }\n      count_num = 4;\n      right_surf_flag = true;\n    }\n    else{\n      count_num = 1;\n      right_surf_flag = false;\n    }\n\n    //calculate the included angle\n    if(left_surf_flag && right_surf_flag){\n      debugnum4 ++;\n\n      Eigen::Vector3d norm_left(0,0,0);\n      Eigen::Vector3d norm_right(0,0,0);\n      for(int k = 1;k<5;k++){\n        Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i - k].x - _laserCloud->points[i].x,\n                                              _laserCloud->points[i - k].y - _laserCloud->points[i].y,\n                                              _laserCloud->points[i - k].z - _laserCloud->points[i].z);\n        tmp.normalize();\n        norm_left += (k/10.0)* tmp;\n      }\n      for(int k = 1;k<5;k++){\n        Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i + k].x - _laserCloud->points[i].x,\n                                              _laserCloud->points[i + k].y - _laserCloud->points[i].y,\n                                              _laserCloud->points[i + k].z - _laserCloud->points[i].z);\n        tmp.normalize();\n        norm_right += (k/10.0)* tmp;\n      }\n\n      //calculate the angle between this group and the previous group\n      double cc = fabs( norm_left.dot(norm_right) / (norm_left.norm()*norm_right.norm()) );\n      //calculate the maximum distance, the distance cannot be too small\n      Eigen::Vector3d last_tmp = Eigen::Vector3d(_laserCloud->points[i - 4].x - _laserCloud->points[i].x,\n                                                 _laserCloud->points[i - 4].y - _laserCloud->points[i].y,\n                                                 _laserCloud->points[i - 4].z - _laserCloud->points[i].z);\n      Eigen::Vector3d current_tmp = Eigen::Vector3d(_laserCloud->points[i + 4].x - _laserCloud->points[i].x,\n                                  
                  _laserCloud->points[i + 4].y - _laserCloud->points[i].y,\n                                                    _laserCloud->points[i + 4].z - _laserCloud->points[i].z);\n      double last_dis = last_tmp.norm();\n      double current_dis = current_tmp.norm();\n\n      if(cc < 0.5 && last_dis > 0.05 && current_dis > 0.05 ){ //\n        debugnum5 ++;\n        CloudFeatureFlag[i] = 150;\n      }\n    }\n\n  }\n\n  //--------------------------------------------------- break points ---------------------------------------------\n  for(int i = 5; i < cloudSize - 5; i ++){\n    float diff_left[2];\n    float diff_right[2];\n    float depth = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x +\n                       _laserCloud->points[i].y * _laserCloud->points[i].y +\n                       _laserCloud->points[i].z * _laserCloud->points[i].z);\n\n    for(int count = 1; count < 3; count++ ){\n      float diffX1 = _laserCloud->points[i + count].x - _laserCloud->points[i].x;\n      float diffY1 = _laserCloud->points[i + count].y - _laserCloud->points[i].y;\n      float diffZ1 = _laserCloud->points[i + count].z - _laserCloud->points[i].z;\n      diff_right[count - 1] = sqrt(diffX1 * diffX1 + diffY1 * diffY1 + diffZ1 * diffZ1);\n\n      float diffX2 = _laserCloud->points[i - count].x - _laserCloud->points[i].x;\n      float diffY2 = _laserCloud->points[i - count].y - _laserCloud->points[i].y;\n      float diffZ2 = _laserCloud->points[i - count].z - _laserCloud->points[i].z;\n      diff_left[count - 1] = sqrt(diffX2 * diffX2 + diffY2 * diffY2 + diffZ2 * diffZ2);\n    }\n\n    float depth_right = sqrt(_laserCloud->points[i + 1].x * _laserCloud->points[i + 1].x +\n                             _laserCloud->points[i + 1].y * _laserCloud->points[i + 1].y +\n                             _laserCloud->points[i + 1].z * _laserCloud->points[i + 1].z);\n    float depth_left = sqrt(_laserCloud->points[i - 1].x * _laserCloud->points[i - 1].x +\n                       
     _laserCloud->points[i - 1].y * _laserCloud->points[i - 1].y +\n                            _laserCloud->points[i - 1].z * _laserCloud->points[i - 1].z);\n    \n    if(fabs(diff_right[0] - diff_left[0]) > thBreakCornerDis){\n      if(diff_right[0] > diff_left[0]){\n\n        Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i - 1].x - _laserCloud->points[i].x,\n                                                      _laserCloud->points[i - 1].y - _laserCloud->points[i].y,\n                                                      _laserCloud->points[i - 1].z - _laserCloud->points[i].z);\n        Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x,\n                                                       _laserCloud->points[i].y,\n                                                       _laserCloud->points[i].z);\n        double left_surf_dis = surf_vector.norm();\n        //calculate the angle between the laser direction and the surface\n        double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) );\n\n        std::vector<PointType> left_list;\n        double min_dis = 10000;\n        double max_dis = 0;\n        for(int j = 0; j < 4; j++){   //TODO: change the plane window size and add thin rod support\n          left_list.push_back(_laserCloud->points[i - j]);\n          Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i - j].x - _laserCloud->points[i - j - 1].x,\n                                                        _laserCloud->points[i - j].y - _laserCloud->points[i - j - 1].y,\n                                                        _laserCloud->points[i - j].z - _laserCloud->points[i - j - 1].z);\n\n          if(j == 3) break;\n          double temp_dis = temp_vector.norm();\n          if(temp_dis < min_dis) min_dis = temp_dis;\n          if(temp_dis > max_dis) max_dis = temp_dis;\n        }\n        bool left_is_plane = plane_judge(left_list,100);\n\n        if( cc < 
0.95 ){//(max_dis < 2*min_dis) && left_surf_dis < 0.05 * depth  && left_is_plane &&\n          if(depth_right > depth_left){\n            CloudFeatureFlag[i] = 100;\n          }\n          else{\n            if(depth_right == 0) CloudFeatureFlag[i] = 100;\n          }\n        }\n      }\n      else{\n\n        Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i + 1].x - _laserCloud->points[i].x,\n                                                      _laserCloud->points[i + 1].y - _laserCloud->points[i].y,\n                                                      _laserCloud->points[i + 1].z - _laserCloud->points[i].z);\n        Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x,\n                                                       _laserCloud->points[i].y,\n                                                       _laserCloud->points[i].z);\n        double right_surf_dis = surf_vector.norm();\n        //calculate the angle between the laser direction and the surface\n        double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) );\n\n        std::vector<PointType> right_list;\n        double min_dis = 10000;\n        double max_dis = 0;\n        for(int j = 0; j < 4; j++){ //TODO: change the plane window size and add thin rod support\n          right_list.push_back(_laserCloud->points[i - j]);\n          Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i + j].x - _laserCloud->points[i + j + 1].x,\n                                                        _laserCloud->points[i + j].y - _laserCloud->points[i + j + 1].y,\n                                                        _laserCloud->points[i + j].z - _laserCloud->points[i + j + 1].z);\n\n          if(j == 3) break;\n          double temp_dis = temp_vector.norm();\n          if(temp_dis < min_dis) min_dis = temp_dis;\n          if(temp_dis > max_dis) max_dis = temp_dis;\n        }\n        bool right_is_plane = 
plane_judge(right_list,100);\n\n        if( cc < 0.95){ //right_is_plane  && (max_dis < 2*min_dis) && right_surf_dis < 0.05 * depth &&\n\n          if(depth_right < depth_left){\n            CloudFeatureFlag[i] = 100;\n          }\n          else{\n            if(depth_left == 0) CloudFeatureFlag[i] = 100;\n          }\n        }\n      }\n    }\n\n    // break points select\n    if(CloudFeatureFlag[i] == 100){\n      debugnum2++;\n      std::vector<Eigen::Vector3d> front_norms;\n      Eigen::Vector3d norm_front(0,0,0);\n      Eigen::Vector3d norm_back(0,0,0);\n\n      for(int k = 1;k<4;k++){\n\n        float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x +\n                        _laserCloud->points[i - k].y * _laserCloud->points[i - k].y +\n                        _laserCloud->points[i - k].z * _laserCloud->points[i - k].z);\n\n        if(temp_depth < 1){\n          continue;\n        }\n\n        Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i - k].x - _laserCloud->points[i].x,\n                                              _laserCloud->points[i - k].y - _laserCloud->points[i].y,\n                                              _laserCloud->points[i - k].z - _laserCloud->points[i].z);\n        tmp.normalize();\n        front_norms.push_back(tmp);\n        norm_front += (k/6.0)* tmp;\n      }\n      std::vector<Eigen::Vector3d> back_norms;\n      for(int k = 1;k<4;k++){\n\n        float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x +\n                        _laserCloud->points[i - k].y * _laserCloud->points[i - k].y +\n                        _laserCloud->points[i - k].z * _laserCloud->points[i - k].z);\n\n        if(temp_depth < 1){\n          continue;\n        }\n\n        Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i + k].x - _laserCloud->points[i].x,\n                                              _laserCloud->points[i + k].y - _laserCloud->points[i].y,\n                  
                            _laserCloud->points[i + k].z - _laserCloud->points[i].z);\n        tmp.normalize();\n        back_norms.push_back(tmp);\n        norm_back += (k/6.0)* tmp;\n      }\n      double cc = fabs( norm_front.dot(norm_back) / (norm_front.norm()*norm_back.norm()) );\n      if(cc < 0.95){\n        debugnum3++;\n      }else{\n        CloudFeatureFlag[i] = 101;\n      }\n\n    }\n\n  }\n\n  pcl::PointCloud<PointType>::Ptr laserCloudCorner(new pcl::PointCloud<PointType>());\n  pcl::PointCloud<PointType> cornerPointsSharp;\n\n  std::vector<int> pointsLessSharp_ori;\n\n  int num_surf = 0;\n  int num_corner = 0;\n\n  //push_back feature\n\n  for(int i = 5; i < cloudSize - 5; i ++){\n\n    float dis = _laserCloud->points[i].x * _laserCloud->points[i].x\n            + _laserCloud->points[i].y * _laserCloud->points[i].y\n            + _laserCloud->points[i].z * _laserCloud->points[i].z;\n\n    if(dis < thLidarNearestDis*thLidarNearestDis) continue;\n\n    if(CloudFeatureFlag[i] == 2){\n      pointsLessFlat.push_back(i);\n      num_surf++;\n      continue;\n    }\n\n    if(CloudFeatureFlag[i] == 100 || CloudFeatureFlag[i] == 150){ //\n      pointsLessSharp_ori.push_back(i);\n      laserCloudCorner->push_back(_laserCloud->points[i]);\n    }\n\n  }\n\n  for(int i = 0; i < laserCloudCorner->points.size();i++){\n      pointsLessSharp.push_back(pointsLessSharp_ori[i]);\n      num_corner++;\n  }\n\n}\n\nvoid LidarFeatureExtractor::FeatureExtract_with_segment(const livox_ros_driver::CustomMsgConstPtr &msg,\n                                                        pcl::PointCloud<PointType>::Ptr& laserCloud,\n                                                        pcl::PointCloud<PointType>::Ptr& laserConerFeature,\n                                                        pcl::PointCloud<PointType>::Ptr& laserSurfFeature,\n                                                        pcl::PointCloud<PointType>::Ptr& laserNonFeature,\n                                        
                sensor_msgs::PointCloud2 &msg_seg,\n                                                        const int Used_Line){\n  laserCloud->clear();\n  laserConerFeature->clear();\n  laserSurfFeature->clear();\n  laserCloud->clear();\n  laserCloud->reserve(15000*N_SCANS);\n  for(auto & ptr : vlines){\n    ptr->clear();\n  }\n  for(auto & v : vcorner){\n    v.clear();\n  }\n  for(auto & v : vsurf){\n    v.clear();\n  }\n\n  int dnum = msg->points.size();\n\n  int *idtrans = (int*)calloc(dnum, sizeof(int));\n  float *data=(float*)calloc(dnum*4,sizeof(float));\n  int point_num = 0;\n\n  double timeSpan = ros::Time().fromNSec(msg->points.back().offset_time).toSec();\n  PointType point;\n  for(const auto& p : msg->points){\n\n    int line_num = (int)p.line;\n    if(line_num > Used_Line-1) continue;\n    if(p.x < 0.01) continue;\n    if (!pcl_isfinite(p.x) ||\n        !pcl_isfinite(p.y) ||\n        !pcl_isfinite(p.z)) {\n      continue;\n    }\n    point.x = p.x;\n    point.y = p.y;\n    point.z = p.z;\n    point.intensity = p.reflectivity;\n    point.normal_x = ros::Time().fromNSec(p.offset_time).toSec() /timeSpan;\n    point.normal_y = _int_as_float(line_num);\n    laserCloud->push_back(point);\n\n    data[point_num*4+0] = point.x;\n    data[point_num*4+1] = point.y;\n    data[point_num*4+2] = point.z;\n    data[point_num*4+3] = point.intensity;\n\n\n    point_num++;\n  }\n\n  PCSeg pcseg;\n  pcseg.DoSeg(idtrans,data,dnum);\n  int estimate_gnd_count = 0;\n  std::size_t cloud_num = laserCloud->size();\n  for(std::size_t i=0; i<cloud_num; ++i){\n    int line_idx = _float_as_int(laserCloud->points[i].normal_y);\n    laserCloud->points[i].normal_z = _int_as_float(i);\n    vlines[line_idx]->push_back(laserCloud->points[i]);\n  }\n\n  std::thread threads[N_SCANS];\n  for(int i=0; i<N_SCANS; ++i){\n    threads[i] = std::thread(&LidarFeatureExtractor::detectFeaturePoint3, this, std::ref(vlines[i]),std::ref(vcorner[i]));\n  }\n\n  for(int i=0; i<N_SCANS; ++i){\n    
threads[i].join();\n  }\n\n  int num_corner = 0;\n  for(int i=0; i<N_SCANS; ++i){\n    for(int j=0; j<vcorner[i].size(); ++j){\n      laserCloud->points[_float_as_int(vlines[i]->points[vcorner[i][j]].normal_z)].normal_z = 1.0; \n      num_corner++;\n    }\n  }\n\n  detectFeaturePoint2(laserCloud, laserSurfFeature, laserNonFeature);\n\n  for(std::size_t i=0; i<cloud_num; ++i){\n    float dis = laserCloud->points[i].x * laserCloud->points[i].x\n                + laserCloud->points[i].y * laserCloud->points[i].y\n                + laserCloud->points[i].z * laserCloud->points[i].z;\n    if( idtrans[i] > 9 && dis < 50*50){\n      laserCloud->points[i].normal_z = 0;\n    }\n    if (idtrans[i] == 0) {\n      laserCloud->points[i].normal_y = -1.0;\n    }\n  }\n\n  pcl::PointCloud<PointType>::Ptr laserConerFeature_filter;\n  laserConerFeature_filter.reset(new pcl::PointCloud<PointType>());\n  laserConerFeature.reset(new pcl::PointCloud<PointType>());\n  laserSurfFeature.reset(new pcl::PointCloud<PointType>());\n  laserNonFeature.reset(new pcl::PointCloud<PointType>());\n  for(const auto& p : laserCloud->points){\n    if(std::fabs(p.normal_z - 1.0) < 1e-5)\n      laserConerFeature->push_back(p);\n  }\n\n  for(const auto& p : laserCloud->points){\n    if(std::fabs(p.normal_z - 2.0) < 1e-5)\n      laserSurfFeature->push_back(p);\n    if(std::fabs(p.normal_z - 3.0) < 1e-5)\n      laserNonFeature->push_back(p);\n  }\n\n}\n\n\nvoid LidarFeatureExtractor::detectFeaturePoint2(pcl::PointCloud<PointType>::Ptr& cloud,\n                                                pcl::PointCloud<PointType>::Ptr& pointsLessFlat,\n                                                pcl::PointCloud<PointType>::Ptr& pointsNonFeature){\n\n  int cloudSize = cloud->points.size();\n\n  pointsLessFlat.reset(new pcl::PointCloud<PointType>());\n  pointsNonFeature.reset(new pcl::PointCloud<PointType>());\n\n  pcl::KdTreeFLANN<PointType>::Ptr KdTreeCloud;\n  KdTreeCloud.reset(new pcl::KdTreeFLANN<PointType>);\n  
KdTreeCloud->setInputCloud(cloud);\n\n  std::vector<int> _pointSearchInd;\n  std::vector<float> _pointSearchSqDis;\n\n  int num_near = 10;\n  int stride = 1;\n  int interval = 4;\n\n  for(int i = 5; i < cloudSize - 5; i = i+stride) {\n    if(fabs(cloud->points[i].normal_z - 1.0) < 1e-5) {\n      continue;\n    }\n\n    double thre1d = 0.5;\n    double thre2d = 0.8;\n    double thre3d = 0.5;\n    double thre3d2 = 0.13;\n\n    double disti = sqrt(cloud->points[i].x * cloud->points[i].x + \n                        cloud->points[i].y * cloud->points[i].y + \n                        cloud->points[i].z * cloud->points[i].z);\n\n    if(disti < 30.0) {\n      thre1d = 0.5;\n      thre2d = 0.8;\n      thre3d2 = 0.07;\n      stride = 14;\n      interval = 4;\n    } else if(disti < 60.0) {\n      stride = 10;\n      interval = 3;\n    } else {\n      stride = 1;\n      interval = 0;\n    }\n\n    if(disti > 100.0) {\n      num_near = 6;\n\n      cloud->points[i].normal_z = 3.0;\n      pointsNonFeature->points.push_back(cloud->points[i]);\n      continue;\n    } else if(disti > 60.0) {\n      num_near = 8;\n    } else {\n      num_near = 10;\n    }\n\n    KdTreeCloud->nearestKSearch(cloud->points[i], num_near, _pointSearchInd, _pointSearchSqDis);\n\n    if (_pointSearchSqDis[num_near-1] > 5.0 && disti < 90.0) {\n      continue;\n    }\n\n    Eigen::Matrix< double, 3, 3 > _matA1;\n    _matA1.setZero();\n\n    float cx = 0;\n    float cy = 0;\n    float cz = 0;\n    for (int j = 0; j < num_near; j++) {\n      cx += cloud->points[_pointSearchInd[j]].x;\n      cy += cloud->points[_pointSearchInd[j]].y;\n      cz += cloud->points[_pointSearchInd[j]].z;\n    }\n    cx /= num_near;\n    cy /= num_near;\n    cz /= num_near;\n\n    float a11 = 0;\n    float a12 = 0;\n    float a13 = 0;\n    float a22 = 0;\n    float a23 = 0;\n    float a33 = 0;\n    for (int j = 0; j < num_near; j++) {\n      float ax = cloud->points[_pointSearchInd[j]].x - cx;\n      float ay = 
cloud->points[_pointSearchInd[j]].y - cy;\n      float az = cloud->points[_pointSearchInd[j]].z - cz;\n\n      a11 += ax * ax;\n      a12 += ax * ay;\n      a13 += ax * az;\n      a22 += ay * ay;\n      a23 += ay * az;\n      a33 += az * az;\n    }\n    a11 /= num_near;\n    a12 /= num_near;\n    a13 /= num_near;\n    a22 /= num_near;\n    a23 /= num_near;\n    a33 /= num_near;\n\n    _matA1(0, 0) = a11;\n    _matA1(0, 1) = a12;\n    _matA1(0, 2) = a13;\n    _matA1(1, 0) = a12;\n    _matA1(1, 1) = a22;\n    _matA1(1, 2) = a23;\n    _matA1(2, 0) = a13;\n    _matA1(2, 1) = a23;\n    _matA1(2, 2) = a33;\n\n    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> saes(_matA1);\n    double a1d = (sqrt(saes.eigenvalues()[2]) - sqrt(saes.eigenvalues()[1])) / sqrt(saes.eigenvalues()[2]);\n    double a2d = (sqrt(saes.eigenvalues()[1]) - sqrt(saes.eigenvalues()[0])) / sqrt(saes.eigenvalues()[2]);\n    double a3d = sqrt(saes.eigenvalues()[0]) / sqrt(saes.eigenvalues()[2]);\n\n    if(a2d > thre2d || (a3d < thre3d2 && a1d < thre1d)) {\n      for(int k = 1; k < interval; k++) {\n        cloud->points[i-k].normal_z = 2.0;\n        pointsLessFlat->points.push_back(cloud->points[i-k]);\n        cloud->points[i+k].normal_z = 2.0;\n        pointsLessFlat->points.push_back(cloud->points[i+k]);\n      }\n      cloud->points[i].normal_z = 2.0;\n      pointsLessFlat->points.push_back(cloud->points[i]);\n    } else if(a3d > thre3d) {\n      for(int k = 1; k < interval; k++) {\n        cloud->points[i-k].normal_z = 3.0;\n        pointsNonFeature->points.push_back(cloud->points[i-k]);\n        cloud->points[i+k].normal_z = 3.0;\n        pointsNonFeature->points.push_back(cloud->points[i+k]);\n      }\n      cloud->points[i].normal_z = 3.0;\n      pointsNonFeature->points.push_back(cloud->points[i]);\n    }\n  }  \n}\n\n\nvoid LidarFeatureExtractor::detectFeaturePoint3(pcl::PointCloud<PointType>::Ptr& cloud,\n                                                std::vector<int>& pointsLessSharp){\n  
int CloudFeatureFlag[20000];\n  float cloudCurvature[20000];\n  float cloudDepth[20000];\n  int cloudSortInd[20000];\n  float cloudReflect[20000];\n  int reflectSortInd[20000];\n  int cloudAngle[20000];\n\n  pcl::PointCloud<PointType>::Ptr& laserCloudIn = cloud;\n\n  int cloudSize = laserCloudIn->points.size();\n\n  PointType point;\n  pcl::PointCloud<PointType>::Ptr _laserCloud(new pcl::PointCloud<PointType>());\n  _laserCloud->reserve(cloudSize);\n\n  for (int i = 0; i < cloudSize; i++) {\n    point.x = laserCloudIn->points[i].x;\n    point.y = laserCloudIn->points[i].y;\n    point.z = laserCloudIn->points[i].z;\n    point.normal_x = 1.0;\n    point.intensity = laserCloudIn->points[i].intensity;\n\n    if (!pcl_isfinite(point.x) ||\n        !pcl_isfinite(point.y) ||\n        !pcl_isfinite(point.z)) {\n      continue;\n    }\n\n    _laserCloud->push_back(point);\n    CloudFeatureFlag[i] = 0;\n  }\n\n  cloudSize = _laserCloud->size();\n\n  int count_num = 1;\n  bool left_surf_flag = false;\n  bool right_surf_flag = false;\n\n  //--------------------------------------------------- break points ---------------------------------------------\n  for(int i = 5; i < cloudSize - 5; i ++){\n    float diff_left[2];\n    float diff_right[2];\n    float depth = sqrt(_laserCloud->points[i].x * _laserCloud->points[i].x +\n                       _laserCloud->points[i].y * _laserCloud->points[i].y +\n                       _laserCloud->points[i].z * _laserCloud->points[i].z);\n\n    for(int count = 1; count < 3; count++ ){\n      float diffX1 = _laserCloud->points[i + count].x - _laserCloud->points[i].x;\n      float diffY1 = _laserCloud->points[i + count].y - _laserCloud->points[i].y;\n      float diffZ1 = _laserCloud->points[i + count].z - _laserCloud->points[i].z;\n      diff_right[count - 1] = sqrt(diffX1 * diffX1 + diffY1 * diffY1 + diffZ1 * diffZ1);\n\n      float diffX2 = _laserCloud->points[i - count].x - _laserCloud->points[i].x;\n      float diffY2 = 
_laserCloud->points[i - count].y - _laserCloud->points[i].y;\n      float diffZ2 = _laserCloud->points[i - count].z - _laserCloud->points[i].z;\n      diff_left[count - 1] = sqrt(diffX2 * diffX2 + diffY2 * diffY2 + diffZ2 * diffZ2);\n    }\n\n    float depth_right = sqrt(_laserCloud->points[i + 1].x * _laserCloud->points[i + 1].x +\n                             _laserCloud->points[i + 1].y * _laserCloud->points[i + 1].y +\n                             _laserCloud->points[i + 1].z * _laserCloud->points[i + 1].z);\n    float depth_left = sqrt(_laserCloud->points[i - 1].x * _laserCloud->points[i - 1].x +\n                            _laserCloud->points[i - 1].y * _laserCloud->points[i - 1].y +\n                            _laserCloud->points[i - 1].z * _laserCloud->points[i - 1].z);\n\n    \n    if(fabs(diff_right[0] - diff_left[0]) > thBreakCornerDis){\n      if(diff_right[0] > diff_left[0]){\n\n        Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i - 1].x - _laserCloud->points[i].x,\n                                                      _laserCloud->points[i - 1].y - _laserCloud->points[i].y,\n                                                      _laserCloud->points[i - 1].z - _laserCloud->points[i].z);\n        Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x,\n                                                       _laserCloud->points[i].y,\n                                                       _laserCloud->points[i].z);\n        double left_surf_dis = surf_vector.norm();\n        //calculate the angle between the laser direction and the surface\n        double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) );\n\n        std::vector<PointType> left_list;\n        double min_dis = 10000;\n        double max_dis = 0;\n        for(int j = 0; j < 4; j++){   //TODO: change the plane window size and add thin rod support\n          left_list.push_back(_laserCloud->points[i - j]);\n          
Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i - j].x - _laserCloud->points[i - j - 1].x,\n                                                        _laserCloud->points[i - j].y - _laserCloud->points[i - j - 1].y,\n                                                        _laserCloud->points[i - j].z - _laserCloud->points[i - j - 1].z);\n\n          if(j == 3) break;\n          double temp_dis = temp_vector.norm();\n          if(temp_dis < min_dis) min_dis = temp_dis;\n          if(temp_dis > max_dis) max_dis = temp_dis;\n        }\n        // bool left_is_plane = plane_judge(left_list,0.3);\n\n        if(cc < 0.93){//(max_dis < 2*min_dis) && left_surf_dis < 0.05 * depth  && left_is_plane &&\n          if(depth_right > depth_left){\n            CloudFeatureFlag[i] = 100;\n          }\n          else{\n            if(depth_right == 0) CloudFeatureFlag[i] = 100;\n          }\n        }\n      }\n      else{\n\n        Eigen::Vector3d surf_vector = Eigen::Vector3d(_laserCloud->points[i + 1].x - _laserCloud->points[i].x,\n                                                      _laserCloud->points[i + 1].y - _laserCloud->points[i].y,\n                                                      _laserCloud->points[i + 1].z - _laserCloud->points[i].z);\n        Eigen::Vector3d lidar_vector = Eigen::Vector3d(_laserCloud->points[i].x,\n                                                       _laserCloud->points[i].y,\n                                                       _laserCloud->points[i].z);\n        double right_surf_dis = surf_vector.norm();\n        //calculate the angle between the laser direction and the surface\n        double cc = fabs( surf_vector.dot(lidar_vector) / (surf_vector.norm()*lidar_vector.norm()) );\n\n        std::vector<PointType> right_list;\n        double min_dis = 10000;\n        double max_dis = 0;\n        for(int j = 0; j < 4; j++){ //TODO: change the plane window size and add thin rod support\n          
right_list.push_back(_laserCloud->points[i - j]);\n          Eigen::Vector3d temp_vector = Eigen::Vector3d(_laserCloud->points[i + j].x - _laserCloud->points[i + j + 1].x,\n                                                        _laserCloud->points[i + j].y - _laserCloud->points[i + j + 1].y,\n                                                        _laserCloud->points[i + j].z - _laserCloud->points[i + j + 1].z);\n\n          if(j == 3) break;\n          double temp_dis = temp_vector.norm();\n          if(temp_dis < min_dis) min_dis = temp_dis;\n          if(temp_dis > max_dis) max_dis = temp_dis;\n        }\n        // bool right_is_plane = plane_judge(right_list,0.3);\n\n        if(cc < 0.93){ //right_is_plane  && (max_dis < 2*min_dis) && right_surf_dis < 0.05 * depth &&\n\n          if(depth_right < depth_left){\n            CloudFeatureFlag[i] = 100;\n          }\n          else{\n            if(depth_left == 0) CloudFeatureFlag[i] = 100;\n          }\n        }\n      }\n    }\n\n    // break points select\n    if(CloudFeatureFlag[i] == 100){\n      std::vector<Eigen::Vector3d> front_norms;\n      Eigen::Vector3d norm_front(0,0,0);\n      Eigen::Vector3d norm_back(0,0,0);\n\n      for(int k = 1;k<4;k++){\n\n        float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x +\n                        _laserCloud->points[i - k].y * _laserCloud->points[i - k].y +\n                        _laserCloud->points[i - k].z * _laserCloud->points[i - k].z);\n\n        if(temp_depth < 1){\n          continue;\n        }\n\n        Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i - k].x - _laserCloud->points[i].x,\n                                              _laserCloud->points[i - k].y - _laserCloud->points[i].y,\n                                              _laserCloud->points[i - k].z - _laserCloud->points[i].z);\n        tmp.normalize();\n        front_norms.push_back(tmp);\n        norm_front += (k/6.0)* tmp;\n      }\n      
std::vector<Eigen::Vector3d> back_norms;\n      for(int k = 1;k<4;k++){\n\n        float temp_depth = sqrt(_laserCloud->points[i - k].x * _laserCloud->points[i - k].x +\n                        _laserCloud->points[i - k].y * _laserCloud->points[i - k].y +\n                        _laserCloud->points[i - k].z * _laserCloud->points[i - k].z);\n\n        if(temp_depth < 1){\n          continue;\n        }\n\n        Eigen::Vector3d tmp = Eigen::Vector3d(_laserCloud->points[i + k].x - _laserCloud->points[i].x,\n                                              _laserCloud->points[i + k].y - _laserCloud->points[i].y,\n                                              _laserCloud->points[i + k].z - _laserCloud->points[i].z);\n        tmp.normalize();\n        back_norms.push_back(tmp);\n        norm_back += (k/6.0)* tmp;\n      }\n      double cc = fabs( norm_front.dot(norm_back) / (norm_front.norm()*norm_back.norm()) );\n      if(cc < 0.93){\n      }else{\n        CloudFeatureFlag[i] = 101;\n      }\n\n    }\n\n  }\n\n  pcl::PointCloud<PointType>::Ptr laserCloudCorner(new pcl::PointCloud<PointType>());\n  pcl::PointCloud<PointType> cornerPointsSharp;\n\n  std::vector<int> pointsLessSharp_ori;\n\n  int num_surf = 0;\n  int num_corner = 0;\n\n  for(int i = 5; i < cloudSize - 5; i ++){\n    Eigen::Vector3d left_pt = Eigen::Vector3d(_laserCloud->points[i - 1].x,\n                                              _laserCloud->points[i - 1].y,\n                                              _laserCloud->points[i - 1].z);\n    Eigen::Vector3d right_pt = Eigen::Vector3d(_laserCloud->points[i + 1].x,\n                                               _laserCloud->points[i + 1].y,\n                                               _laserCloud->points[i + 1].z);\n\n    Eigen::Vector3d cur_pt = Eigen::Vector3d(_laserCloud->points[i].x,\n                                             _laserCloud->points[i].y,\n                                             _laserCloud->points[i].z);\n\n    float dis = 
_laserCloud->points[i].x * _laserCloud->points[i].x +\n                _laserCloud->points[i].y * _laserCloud->points[i].y +\n                _laserCloud->points[i].z * _laserCloud->points[i].z;\n\n    double clr = fabs(left_pt.dot(right_pt) / (left_pt.norm()*right_pt.norm()));\n    double cl = fabs(left_pt.dot(cur_pt) / (left_pt.norm()*cur_pt.norm()));\n    double cr = fabs(right_pt.dot(cur_pt) / (right_pt.norm()*cur_pt.norm()));\n\n    if(clr < 0.999){\n      CloudFeatureFlag[i] = 200;\n    }\n\n    if(dis < thLidarNearestDis*thLidarNearestDis) continue;\n\n    if(CloudFeatureFlag[i] == 100 || CloudFeatureFlag[i] == 200){ //\n      pointsLessSharp_ori.push_back(i);\n      laserCloudCorner->push_back(_laserCloud->points[i]);\n    }\n  }\n\n  for(int i = 0; i < laserCloudCorner->points.size();i++){\n      pointsLessSharp.push_back(pointsLessSharp_ori[i]);\n      num_corner++;\n  }\n\n}\n\n\nvoid LidarFeatureExtractor::FeatureExtract(const livox_ros_driver::CustomMsgConstPtr &msg,\n                                           pcl::PointCloud<PointType>::Ptr& laserCloud,\n                                           pcl::PointCloud<PointType>::Ptr& laserConerFeature,\n                                           pcl::PointCloud<PointType>::Ptr& laserSurfFeature,\n                                           const int Used_Line){\n  laserCloud->clear();\n  laserConerFeature->clear();\n  laserSurfFeature->clear();\n  laserCloud->reserve(15000*N_SCANS);\n  for(auto & ptr : vlines){\n  ptr->clear();\n  }\n  for(auto & v : vcorner){\n  v.clear();\n  }\n  for(auto & v : vsurf){\n  v.clear();\n  }\n  double timeSpan = ros::Time().fromNSec(msg->points.back().offset_time).toSec();\n  PointType point;\n  for(const auto& p : msg->points){\n  int line_num = (int)p.line;\n  if(line_num > Used_Line-1) continue;\n  if(p.x < 0.01) continue;\n  point.x = p.x;\n  point.y = p.y;\n  point.z = p.z;\n  point.intensity = p.reflectivity;\n  point.normal_x = 
ros::Time().fromNSec(p.offset_time).toSec() /timeSpan;\n  point.normal_y = _int_as_float(line_num);\n  laserCloud->push_back(point);\n  }\n  std::size_t cloud_num = laserCloud->size();\n  for(std::size_t i=0; i<cloud_num; ++i){\n  int line_idx = _float_as_int(laserCloud->points[i].normal_y);\n  laserCloud->points[i].normal_z = _int_as_float(i);\n  vlines[line_idx]->push_back(laserCloud->points[i]);\n  laserCloud->points[i].normal_z = 0;\n  }\n  std::thread threads[N_SCANS];\n  for(int i=0; i<N_SCANS; ++i){\n  threads[i] = std::thread(&LidarFeatureExtractor::detectFeaturePoint, this, std::ref(vlines[i]),\n                     std::ref(vcorner[i]), std::ref(vsurf[i]));\n  }\n  for(int i=0; i<N_SCANS; ++i){\n  threads[i].join();\n  }\n  for(int i=0; i<N_SCANS; ++i){\n  for(int j=0; j<vcorner[i].size(); ++j){\n  laserCloud->points[_float_as_int(vlines[i]->points[vcorner[i][j]].normal_z)].normal_z = 1.0;\n  }\n  for(int j=0; j<vsurf[i].size(); ++j){\n  laserCloud->points[_float_as_int(vlines[i]->points[vsurf[i][j]].normal_z)].normal_z = 2.0;\n  }\n  }\n\n  for(const auto& p : laserCloud->points){\n  if(std::fabs(p.normal_z - 1.0) < 1e-5)\n  laserConerFeature->push_back(p);\n  }\n  for(const auto& p : laserCloud->points){\n  if(std::fabs(p.normal_z - 2.0) < 1e-5)\n  laserSurfFeature->push_back(p);\n  }\n}"
  },
  {
    "path": "LIO-Livox/src/lio/Map_Manager.cpp",
    "content": "#include \"MapManager/Map_Manager.h\"\n#include <fstream>\n\nMAP_MANAGER::MAP_MANAGER(const float& filter_corner, const float& filter_surf){\n  for (int i = 0; i < laserCloudNum; i++) {\n    laserCloudCornerArray[i].reset(new pcl::PointCloud<PointType>());\n    laserCloudSurfArray[i].reset(new pcl::PointCloud<PointType>());\n    laserCloudNonFeatureArray[i].reset(new pcl::PointCloud<PointType>());\n    laserCloudCornerArrayStack[i].reset(new pcl::PointCloud<PointType>());\n    laserCloudSurfArrayStack[i].reset(new pcl::PointCloud<PointType>());\n    laserCloudNonFeatureArrayStack[i].reset(new pcl::PointCloud<PointType>());\n\n    laserCloudCornerKdMap[i].reset(new pcl::KdTreeFLANN<PointType>);\n    laserCloudSurfKdMap[i].reset(new pcl::KdTreeFLANN<PointType>);\n    laserCloudNonFeatureKdMap[i].reset(new pcl::KdTreeFLANN<PointType>);\n  }\n  for (int i = 0; i < localMapWindowSize; i++) {\n    localCornerMap[i].reset(new pcl::PointCloud<PointType>());\n    localSurfMap[i].reset(new pcl::PointCloud<PointType>());\n    localNonFeatureMap[i].reset(new pcl::PointCloud<PointType>());\n  }\n  laserCloudCornerFromMap.reset(new pcl::PointCloud<PointType>());\n  laserCloudSurfFromMap.reset(new pcl::PointCloud<PointType>());\n  laserCloudNonFeatureFromMap.reset(new pcl::PointCloud<PointType>());\n  downSizeFilterCorner.setLeafSize(0.4, 0.4, 0.4);\n  downSizeFilterSurf.setLeafSize(0.4, 0.4, 0.4);\n  downSizeFilterNonFeature.setLeafSize(0.4, 0.4, 0.4);\n}\n\nsize_t MAP_MANAGER::ToIndex(int i, int j, int k)  {\n  return i + laserCloudDepth * j + laserCloudDepth * laserCloudWidth * k;\n}\n\n/** \\brief transform point pi to the MAP coordinate\n * \\param[in] pi: point to be transformed\n * \\param[in] po: point after transfomation\n * \\param[in] _transformTobeMapped: transform matrix between pi and po\n */\nvoid MAP_MANAGER::pointAssociateToMap(PointType const * const pi,\n                                      PointType * const po,\n                                
      const Eigen::Matrix4d& _transformTobeMapped){\n        Eigen::Vector3d pin, pout;\n        pin.x() = pi->x;\n        pin.y() = pi->y;\n        pin.z() = pi->z;\n        pout = _transformTobeMapped.topLeftCorner(3,3) * pin + _transformTobeMapped.topRightCorner(3,1);\n        po->x = pout.x();\n        po->y = pout.y();\n        po->z = pout.z();\n        po->intensity = pi->intensity;\n        po->normal_z = pi->normal_z;\n        po->normal_y = pi->normal_y;\n      }\nvoid MAP_MANAGER::featureAssociateToMap(const pcl::PointCloud<PointType>::Ptr& laserCloudCorner,\n                                        const pcl::PointCloud<PointType>::Ptr& laserCloudSurf,\n                                        const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeature,\n                                        const pcl::PointCloud<PointType>::Ptr& laserCloudCornerToMap,\n                                        const pcl::PointCloud<PointType>::Ptr& laserCloudSurfToMap,\n                                        const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureToMap,\n                                        const Eigen::Matrix4d& transformTobeMapped){\n\n  int laserCloudCornerNum = laserCloudCorner->points.size();\n  int laserCloudSurfNum = laserCloudSurf->points.size();\n  int laserCloudNonFeatureNum = laserCloudNonFeature->points.size();\n  PointType pointSel1,pointSel2,pointSel3;\n  for (int i = 0; i < laserCloudCornerNum; i++) {\n    pointAssociateToMap(&laserCloudCorner->points[i], &pointSel1, transformTobeMapped);\n    laserCloudCornerToMap->push_back(pointSel1);\n  }\n  for (int i = 0; i < laserCloudSurfNum; i++) {\n    pointAssociateToMap(&laserCloudSurf->points[i], &pointSel2, transformTobeMapped);\n    laserCloudSurfToMap->push_back(pointSel2);\n  }\n  for (int i = 0; i < laserCloudNonFeatureNum; i++) {\n    pointAssociateToMap(&laserCloudNonFeature->points[i], &pointSel3, transformTobeMapped);\n    laserCloudNonFeatureToMap->push_back(pointSel3);\n  }\n 
 \n}\n/** \\brief add new lidar points to the map\n * \\param[in] laserCloudCornerStack: coner feature points that need to be added to map\n * \\param[in] laserCloudSurfStack: surf feature points that need to be added to map\n * \\param[in] transformTobeMapped: transform matrix of the lidar pose\n */\nvoid MAP_MANAGER::MapIncrement(const pcl::PointCloud<PointType>::Ptr& laserCloudCornerStack,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudSurfStack,\n                               const pcl::PointCloud<PointType>::Ptr& laserCloudNonFeatureStack,\n                               const Eigen::Matrix4d& transformTobeMapped){\n  \n  clock_t t0,t1,t2,t3,t4,t5;\n  t0 = clock();\n  std::unique_lock<std::mutex> locker2(mtx_MapManager);\n  for(int i = 0; i < laserCloudNum; i++){\n    CornerKdMap_last[i] = *laserCloudCornerKdMap[i];\n    SurfKdMap_last[i] = *laserCloudSurfKdMap[i];\n    NonFeatureKdMap_last[i] = *laserCloudNonFeatureKdMap[i];\n    laserCloudSurf_for_match[i] = *laserCloudSurfArray[i];\n    laserCloudCorner_for_match[i] = *laserCloudCornerArray[i];\n    laserCloudNonFeature_for_match[i] = *laserCloudNonFeatureArray[i];\n  }\n\n  laserCloudCenWidth_last = laserCloudCenWidth;\n  laserCloudCenHeight_last = laserCloudCenHeight;\n  laserCloudCenDepth_last = laserCloudCenDepth;\n\n  locker2.unlock();\n  \n  t1 = clock();\n  MapMove(transformTobeMapped);\n\n  t2 = clock();\n  int laserCloudCornerStackNum = laserCloudCornerStack->points.size();\n  int laserCloudSurfStackNum = laserCloudSurfStack->points.size();\n  int laserCloudNonFeatureStackNum = laserCloudNonFeatureStack->points.size();\n  bool CornerChangeFlag[laserCloudNum] = {false};\n  bool SurfChangeFlag[laserCloudNum] = {false};\n  bool NonFeatureChangeFlag[laserCloudNum] = {false};\n  PointType pointSel;\n  for (int i = 0; i < laserCloudCornerStackNum; i++) {\n\n    pointSel = laserCloudCornerStack->points[i];\n\n    int cubeI = int((pointSel.x + 25.0) / 50.0) + 
laserCloudCenDepth;\n    int cubeJ = int((pointSel.y + 25.0) / 50.0) + laserCloudCenWidth;\n    int cubeK = int((pointSel.z + 25.0) / 50.0) + laserCloudCenHeight;\n\n    if (pointSel.x + 25.0 < 0) cubeI--;\n    if (pointSel.y + 25.0 < 0) cubeJ--;\n    if (pointSel.z + 25.0 < 0) cubeK--;\n\n    if (cubeI >= 0 && cubeI < laserCloudDepth &&\n        cubeJ >= 0 && cubeJ < laserCloudWidth &&\n        cubeK >= 0 &&\n        cubeK < laserCloudHeight) {\n      size_t cubeInd = ToIndex(cubeI, cubeJ, cubeK);\n      laserCloudCornerArray[cubeInd]->push_back(pointSel);\n      CornerChangeFlag[cubeInd] = true;\n    }\n  }\n\n  for (int i = 0; i < laserCloudSurfStackNum; i++) {\n    pointSel = laserCloudSurfStack->points[i];\n    int cubeI = int((pointSel.x + 25.0) / 50.0) + laserCloudCenDepth;\n    int cubeJ = int((pointSel.y + 25.0) / 50.0) + laserCloudCenWidth;\n    int cubeK = int((pointSel.z + 25.0) / 50.0) + laserCloudCenHeight;\n\n    if (pointSel.x + 25.0 < 0) cubeI--;\n    if (pointSel.y + 25.0 < 0) cubeJ--;\n    if (pointSel.z + 25.0 < 0) cubeK--;\n\n    if (cubeI >= 0 && cubeI < laserCloudDepth &&\n        cubeJ >= 0 && cubeJ < laserCloudWidth &&\n        cubeK >= 0 && cubeK < laserCloudHeight) {\n      size_t cubeInd = ToIndex(cubeI, cubeJ, cubeK);\n      laserCloudSurfArray[cubeInd]->push_back(pointSel);\n      SurfChangeFlag[cubeInd] = true;\n    }\n  }\n\n  for (int i = 0; i < laserCloudNonFeatureStackNum; i++) {\n    pointSel = laserCloudNonFeatureStack->points[i];\n    int cubeI = int((pointSel.x + 25.0) / 50.0) + laserCloudCenDepth;\n    int cubeJ = int((pointSel.y + 25.0) / 50.0) + laserCloudCenWidth;\n    int cubeK = int((pointSel.z + 25.0) / 50.0) + laserCloudCenHeight;\n\n    if (pointSel.x + 25.0 < 0) cubeI--;\n    if (pointSel.y + 25.0 < 0) cubeJ--;\n    if (pointSel.z + 25.0 < 0) cubeK--;\n\n    if (cubeI >= 0 && cubeI < laserCloudDepth &&\n        cubeJ >= 0 && cubeJ < laserCloudWidth &&\n        cubeK >= 0 && cubeK < laserCloudHeight) {\n      size_t 
cubeInd = ToIndex(cubeI, cubeJ, cubeK);\n      laserCloudNonFeatureArray[cubeInd]->push_back(pointSel);\n      NonFeatureChangeFlag[cubeInd] = true;\n    }\n  }\n\n  t3 = clock();\n  \n  laserCloudCornerFromMap->clear();\n  laserCloudSurfFromMap->clear();\n  laserCloudNonFeatureFromMap->clear();\n  for(int i = 0; i < laserCloudNum; i++){\n    if(CornerChangeFlag[i]){\n      if(laserCloudCornerArray[i]->points.size() > 300){\n        downSizeFilterCorner.setInputCloud(laserCloudCornerArray[i]);\n        laserCloudCornerArrayStack[i]->clear();\n        downSizeFilterCorner.filter(*laserCloudCornerArrayStack[i]);\n        pcl::PointCloud<PointType>::Ptr tmp = laserCloudCornerArrayStack[i];\n        laserCloudCornerArrayStack[i] = laserCloudCornerArray[i];\n        laserCloudCornerArray[i] = tmp;\n      }\n\n      laserCloudCornerKdMap[i]->setInputCloud(laserCloudCornerArray[i]); \n      *laserCloudCornerFromMap += *laserCloudCornerKdMap[i]->getInputCloud();\n    }\n\n    if(SurfChangeFlag[i]){\n      if(laserCloudSurfArray[i]->points.size() > 300){\n        downSizeFilterSurf.setInputCloud(laserCloudSurfArray[i]);\n        laserCloudSurfArrayStack[i]->clear();\n        downSizeFilterSurf.filter(*laserCloudSurfArrayStack[i]);\n        pcl::PointCloud<PointType>::Ptr tmp = laserCloudSurfArrayStack[i];\n        laserCloudSurfArrayStack[i] = laserCloudSurfArray[i];\n        laserCloudSurfArray[i] = tmp;\n      }\n\n      laserCloudSurfKdMap[i]->setInputCloud(laserCloudSurfArray[i]);\n      *laserCloudSurfFromMap += *laserCloudSurfKdMap[i]->getInputCloud();\n    }\n\n    if(NonFeatureChangeFlag[i]){\n      if(laserCloudNonFeatureArray[i]->points.size() > 300){\n        downSizeFilterNonFeature.setInputCloud(laserCloudNonFeatureArray[i]);\n        laserCloudNonFeatureArrayStack[i]->clear();\n        downSizeFilterNonFeature.filter(*laserCloudNonFeatureArrayStack[i]);\n        pcl::PointCloud<PointType>::Ptr tmp = laserCloudNonFeatureArrayStack[i];\n        
laserCloudNonFeatureArrayStack[i] = laserCloudNonFeatureArray[i];\n        laserCloudNonFeatureArray[i] = tmp;\n      }\n\n      laserCloudNonFeatureKdMap[i]->setInputCloud(laserCloudNonFeatureArray[i]);\n      *laserCloudNonFeatureFromMap += *laserCloudNonFeatureKdMap[i]->getInputCloud();\n    }\n      \n  }\n\n  t4 = clock();\n  std::unique_lock<std::mutex> locker(mtx_MapManager);\n  for(int i = 0; i < laserCloudNum; i++){\n    CornerKdMap_copy[i] = *laserCloudCornerKdMap[i];\n    SurfKdMap_copy[i] = *laserCloudSurfKdMap[i];\n    NonFeatureKdMap_copy[i] = *laserCloudNonFeatureKdMap[i];\n  }\n\n  locker.unlock();\n  t5 = clock();\n\n  currentUpdatePos ++;\n\n}\n\n/** \\brief move the map index if need\n * \\param[in] transformTobeMapped: transform matrix of the lidar pose\n */\nvoid MAP_MANAGER::MapMove(const Eigen::Matrix4d& transformTobeMapped){\n  const Eigen::Matrix3d transformTobeMapped_R = transformTobeMapped.topLeftCorner(3, 3);\n  const Eigen::Vector3d transformTobeMapped_t = transformTobeMapped.topRightCorner(3, 1);\n\n  PointType pointOnYAxis;\n  pointOnYAxis.x = 0.0;\n  pointOnYAxis.y = 0.0;\n  pointOnYAxis.z = 10.0;\n\n  pointAssociateToMap(&pointOnYAxis, &pointOnYAxis, transformTobeMapped);\n\n  int centerCubeI = int((transformTobeMapped_t.x() + 25.0) / 50.0) + laserCloudCenDepth;\n  int centerCubeJ = int((transformTobeMapped_t.y() + 25.0) / 50.0) + laserCloudCenWidth;\n  int centerCubeK = int((transformTobeMapped_t.z() + 25.0) / 50.0) + laserCloudCenHeight;\n\n  if (transformTobeMapped_t.x() + 25.0 < 0) centerCubeI--;\n  if (transformTobeMapped_t.y() + 25.0 < 0) centerCubeJ--;\n  if (transformTobeMapped_t.z() + 25.0 < 0) centerCubeK--;\n\n  while (centerCubeI < 8) {\n    for (int j = 0; j < laserCloudWidth; j++) {\n      for (int k = 0; k < laserCloudHeight; k++) {\n        int i = laserCloudDepth - 1;\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeCornerPointerKd =\n                laserCloudCornerKdMap[ToIndex(i, j, k)];\n        
pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeSurfPointerKd =\n                laserCloudSurfKdMap[ToIndex(i, j, k)];\n\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeNonFeaturePointerKd =\n                laserCloudNonFeatureKdMap[ToIndex(i, j, k)];\n\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeCornerPointer =\n                laserCloudCornerArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeSurfPointer =\n                laserCloudSurfArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeNonFeaturePointer =\n                laserCloudNonFeatureArray[ToIndex(i, j, k)];\n\n        for (; i >= 1; i--) {\n          const size_t index_a = ToIndex(i, j, k);\n          const size_t index_b = ToIndex(i - 1, j, k);\n          laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b];\n          laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b];\n          laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b];\n\n          laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b];\n          laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b];\n          laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b];\n        }\n        //此时i已经移动至0\n        laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd;\n        laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd;\n        laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd;\n\n        laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer;\n        laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer;\n        laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer;\n        laserCloudCubeCornerPointer->clear();\n        laserCloudCubeSurfPointer->clear();\n        laserCloudCubeNonFeaturePointer->clear();\n      }\n    }\n\n    centerCubeI++;\n    laserCloudCenDepth++;\n  
}\n\n  while (centerCubeI >= laserCloudDepth - 8) {\n    for (int j = 0; j < laserCloudWidth; j++) {\n      for (int k = 0; k < laserCloudHeight; k++) {\n        int i = 0;\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeCornerPointerKd =\n                laserCloudCornerKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeSurfPointerKd =\n                laserCloudSurfKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeNonFeaturePointerKd =\n                laserCloudNonFeatureKdMap[ToIndex(i, j, k)];\n\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeCornerPointer =\n                laserCloudCornerArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeSurfPointer =\n                laserCloudSurfArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeNonFeaturePointer =\n                laserCloudNonFeatureArray[ToIndex(i, j, k)];\n        \n        for (; i < laserCloudDepth - 1; i++) {\n          const size_t index_a = ToIndex(i, j, k);\n          const size_t index_b = ToIndex(i + 1, j, k);\n          laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b];\n          laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b];\n          laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b];\n\n          laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b];\n          laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b];\n          laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b];\n        }\n        laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd;\n        laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd;\n        laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd;\n\n        laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer;\n        laserCloudSurfArray[ToIndex(i, j, k)] = 
laserCloudCubeSurfPointer;\n        laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer;\n        laserCloudCubeCornerPointer->clear();\n        laserCloudCubeSurfPointer->clear();\n        laserCloudCubeNonFeaturePointer->clear();\n      }\n    }\n\n    centerCubeI--;\n    laserCloudCenDepth--;\n  }\n\n  while (centerCubeJ < 8) {\n    for (int i = 0; i < laserCloudDepth; i++) {\n      for (int k = 0; k < laserCloudHeight; k++) {\n        int j = laserCloudWidth - 1;\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeCornerPointerKd =\n                laserCloudCornerKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeSurfPointerKd =\n                laserCloudSurfKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeNonFeaturePointerKd =\n                laserCloudNonFeatureKdMap[ToIndex(i, j, k)];\n\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeCornerPointer =\n                laserCloudCornerArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeSurfPointer =\n                laserCloudSurfArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeNonFeaturePointer =\n                laserCloudNonFeatureArray[ToIndex(i, j, k)];\n        for (; j >= 1; j--) {\n          const size_t index_a = ToIndex(i, j, k);\n          const size_t index_b = ToIndex(i, j - 1, k);\n          laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b];\n          laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b];\n          laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b];\n\n          laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b];\n          laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b];\n          laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b];\n        }\n        laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd;\n        
laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd;\n        laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd;\n\n        laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer;\n        laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer;\n        laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer;\n        laserCloudCubeCornerPointer->clear();\n        laserCloudCubeSurfPointer->clear();\n        laserCloudCubeNonFeaturePointer->clear();\n      }\n    }\n\n    centerCubeJ++;\n    laserCloudCenWidth++;\n  }\n\n  while (centerCubeJ >= laserCloudWidth - 8) {\n    for (int i = 0; i < laserCloudDepth; i++) {\n      for (int k = 0; k < laserCloudHeight; k++) {\n        int j = 0;\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeCornerPointerKd =\n                laserCloudCornerKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeSurfPointerKd =\n                laserCloudSurfKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeNonFeaturePointerKd =\n                laserCloudNonFeatureKdMap[ToIndex(i, j, k)];\n\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeCornerPointer =\n                laserCloudCornerArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeSurfPointer =\n                laserCloudSurfArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeNonFeaturePointer =\n                laserCloudNonFeatureArray[ToIndex(i, j, k)];\n        for (; j < laserCloudWidth - 1; j++) {\n          const size_t index_a = ToIndex(i, j, k);\n          const size_t index_b = ToIndex(i, j + 1, k);\n          laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b];\n          laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b];\n          laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b];\n\n          
laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b];\n          laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b];\n          laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b];\n        }\n        laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd;\n        laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd;\n        laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd;\n\n        laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer;\n        laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer;\n        laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer;\n        laserCloudCubeCornerPointer->clear();\n        laserCloudCubeSurfPointer->clear();\n        laserCloudCubeNonFeaturePointer->clear();\n      }\n    }\n\n    centerCubeJ--;\n    laserCloudCenWidth--;\n  }\n\n  while (centerCubeK < 8) {\n    for (int i = 0; i < laserCloudDepth; i++) {\n      for (int j = 0; j < laserCloudWidth; j++) {\n        int k = laserCloudHeight - 1;\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeCornerPointerKd =\n                laserCloudCornerKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeSurfPointerKd =\n                laserCloudSurfKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeNonFeaturePointerKd =\n                laserCloudNonFeatureKdMap[ToIndex(i, j, k)];\n\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeCornerPointer =\n                laserCloudCornerArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeSurfPointer =\n                laserCloudSurfArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeNonFeaturePointer =\n                laserCloudNonFeatureArray[ToIndex(i, j, k)];\n        for (; k >= 1; k--) {\n          const size_t index_a = ToIndex(i, j, k);\n          
const size_t index_b = ToIndex(i, j, k - 1);\n          laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b];\n          laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b];\n          laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b];\n\n          laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b];\n          laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b];\n          laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b];\n        }\n        laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd;\n        laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd;\n        laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd;\n\n        laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer;\n        laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer;\n        laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer;\n        laserCloudCubeCornerPointer->clear();\n        laserCloudCubeSurfPointer->clear();\n        laserCloudCubeNonFeaturePointer->clear();\n      }\n    }\n\n    centerCubeK++;\n    laserCloudCenHeight++;\n  }\n\n  while (centerCubeK >= laserCloudHeight - 8) {\n    for (int i = 0; i < laserCloudDepth; i++) {\n      for (int j = 0; j < laserCloudWidth; j++) {\n        int k = 0;\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeCornerPointerKd =\n                laserCloudCornerKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeSurfPointerKd =\n                laserCloudSurfKdMap[ToIndex(i, j, k)];\n        pcl::KdTreeFLANN<PointType>::Ptr laserCloudCubeNonFeaturePointerKd =\n                laserCloudNonFeatureKdMap[ToIndex(i, j, k)];\n\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeCornerPointer =\n                laserCloudCornerArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr 
laserCloudCubeSurfPointer =\n                laserCloudSurfArray[ToIndex(i, j, k)];\n        pcl::PointCloud<PointType>::Ptr laserCloudCubeNonFeaturePointer =\n                laserCloudNonFeatureArray[ToIndex(i, j, k)];\n        for (; k < laserCloudHeight - 1; k++) {\n          const size_t index_a = ToIndex(i, j, k);\n          const size_t index_b = ToIndex(i, j, k + 1);\n          laserCloudCornerKdMap[index_a] = laserCloudCornerKdMap[index_b];\n          laserCloudSurfKdMap[index_a] = laserCloudSurfKdMap[index_b];\n          laserCloudNonFeatureKdMap[index_a] = laserCloudNonFeatureKdMap[index_b];\n\n          laserCloudCornerArray[index_a] = laserCloudCornerArray[index_b];\n          laserCloudSurfArray[index_a] = laserCloudSurfArray[index_b];\n          laserCloudNonFeatureArray[index_a] = laserCloudNonFeatureArray[index_b];\n        }\n        laserCloudCornerKdMap[ToIndex(i, j, k)] = laserCloudCubeCornerPointerKd;\n        laserCloudSurfKdMap[ToIndex(i, j, k)] = laserCloudCubeSurfPointerKd;\n        laserCloudNonFeatureKdMap[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointerKd;\n\n        laserCloudCornerArray[ToIndex(i, j, k)] = laserCloudCubeCornerPointer;\n        laserCloudSurfArray[ToIndex(i, j, k)] = laserCloudCubeSurfPointer;\n        laserCloudNonFeatureArray[ToIndex(i, j, k)] = laserCloudCubeNonFeaturePointer;\n        laserCloudCubeCornerPointer->clear();\n        laserCloudCubeSurfPointer->clear();\n        laserCloudCubeNonFeaturePointer->clear();\n      }\n    }\n\n    centerCubeK--;\n    laserCloudCenHeight--;\n  }\n\n}\n\nsize_t MAP_MANAGER::FindUsedCornerMap(const PointType *p,int a,int b, int c)\n{\n    int cubeI = int((p->x + 25.0) / 50.0) + c;\n    int cubeJ = int((p->y + 25.0) / 50.0) + a;\n    int cubeK = int((p->z + 25.0) / 50.0) + b;\n\n    size_t cubeInd = 0;\n\n    if (p->x + 25.0 < 0) cubeI--;\n    if (p->y + 25.0 < 0) cubeJ--;\n    if (p->z + 25.0 < 0) cubeK--;\n\n    if (cubeI >= 0 && cubeI < laserCloudDepth &&\n        cubeJ 
>= 0 && cubeJ < laserCloudWidth &&\n        cubeK >= 0 && cubeK < laserCloudHeight) {\n      cubeInd = ToIndex(cubeI, cubeJ, cubeK);\n    }\n    else{\n      cubeInd = 5000;\n    }\n\n    return cubeInd;  \n}\nsize_t MAP_MANAGER::FindUsedSurfMap(const PointType *p,int a,int b, int c)\n{\n    int cubeI = int((p->x + 25.0) / 50.0) + c;\n    int cubeJ = int((p->y + 25.0) / 50.0) + a;\n    int cubeK = int((p->z + 25.0) / 50.0) + b;\n\n    size_t cubeInd = 0;\n\n    if (p->x + 25.0 < 0) cubeI--;\n    if (p->y + 25.0 < 0) cubeJ--;\n    if (p->z + 25.0 < 0) cubeK--;\n\n    if (cubeI >= 0 && cubeI < laserCloudDepth &&\n        cubeJ >= 0 && cubeJ < laserCloudWidth &&\n        cubeK >= 0 && cubeK < laserCloudHeight) {\n      cubeInd = ToIndex(cubeI, cubeJ, cubeK);\n    }\n    else{\n      cubeInd = 5000;\n    }\n\n    return cubeInd;\n}\n\nsize_t MAP_MANAGER::FindUsedNonFeatureMap(const PointType *p,int a,int b, int c)\n{\n    int cubeI = int((p->x + 25.0) / 50.0) + c;\n    int cubeJ = int((p->y + 25.0) / 50.0) + a;\n    int cubeK = int((p->z + 25.0) / 50.0) + b;\n\n    size_t cubeInd = 0;\n\n    if (p->x + 25.0 < 0) cubeI--;\n    if (p->y + 25.0 < 0) cubeJ--;\n    if (p->z + 25.0 < 0) cubeK--;\n\n    if (cubeI >= 0 && cubeI < laserCloudDepth &&\n        cubeJ >= 0 && cubeJ < laserCloudWidth &&\n        cubeK >= 0 && cubeK < laserCloudHeight) {\n      cubeInd = ToIndex(cubeI, cubeJ, cubeK);\n    }\n    else{\n      cubeInd = 5000;\n    }\n\n    return cubeInd; \n}\n"
  },
  {
    "path": "LIO-Livox/src/lio/PoseEstimation.cpp",
    "content": "#include \"Estimator/Estimator.h\"\n#include <fstream>\n#include <memory>\n#include <string>\n#include <iomanip>\ntypedef pcl::PointXYZINormal PointType;\n\nint WINDOWSIZE;\nbool LidarIMUInited = false;\nboost::shared_ptr<std::list<Estimator::LidarFrame>> lidarFrameList;\npcl::PointCloud<PointType>::Ptr laserCloudFullRes;\nEstimator* estimator;\n\nros::Publisher pubLaserOdometry;\nros::Publisher pubLaserOdometryPath;\nros::Publisher pubFullLaserCloud;\nros::Publisher pubFullLaserCloudIMU;\ntf::StampedTransform laserOdometryTrans;\ntf::TransformBroadcaster* tfBroadcaster;\nros::Publisher pubGps;\n\nbool newfullCloud = false;\n\nEigen::Matrix4d transformAftMapped = Eigen::Matrix4d::Identity();\n\nstd::mutex _mutexLidarQueue;\nstd::queue<sensor_msgs::PointCloud2ConstPtr> _lidarMsgQueue;\nstd::mutex _mutexIMUQueue;\nstd::queue<sensor_msgs::ImuConstPtr> _imuMsgQueue;\nEigen::Matrix4d exTlb = Eigen::Matrix4d::Identity();\nEigen::Matrix3d exRlb, exRbl;\nEigen::Vector3d exPlb, exPbl;\nEigen::Vector3d GravityVector;\nfloat filter_parameter_corner = 0.2;\nfloat filter_parameter_surf = 0.4;\nint IMU_Mode = 2;\nsensor_msgs::NavSatFix gps;\nint pushCount = 0;\ndouble startTime = 0;\n\nnav_msgs::Path laserOdoPath;\n\n// save odometry\nstd::string save_path;\nstd::shared_ptr<std::ofstream> odo;\n\n/** \\brief publish odometry infomation\n  * \\param[in] newPose: pose to be published\n  * \\param[in] timefullCloud: time stamp\n  */\nvoid pubOdometry(const Eigen::Matrix4d& newPose, double& timefullCloud){\n  nav_msgs::Odometry laserOdometry;\n\n  Eigen::Matrix3d Rcurr = newPose.topLeftCorner(3, 3);\n  Eigen::Quaterniond newQuat(Rcurr);\n  Eigen::Vector3d newPosition = newPose.topRightCorner(3, 1);\n  laserOdometry.header.frame_id = \"/world\";\n  laserOdometry.child_frame_id = \"/livox_frame\";\n  laserOdometry.header.stamp = ros::Time().fromSec(timefullCloud);\n  laserOdometry.pose.pose.orientation.x = newQuat.x();\n  laserOdometry.pose.pose.orientation.y = 
newQuat.y();\n  laserOdometry.pose.pose.orientation.z = newQuat.z();\n  laserOdometry.pose.pose.orientation.w = newQuat.w();\n  laserOdometry.pose.pose.position.x = newPosition.x();\n  laserOdometry.pose.pose.position.y = newPosition.y();\n  laserOdometry.pose.pose.position.z = newPosition.z();\n  pubLaserOdometry.publish(laserOdometry);\n  // saveodometry\n  *odo //<< std::setw(20) \n    << laserOdometry.header.stamp << \" \" \n    << newPosition.x() << \" \" << newPosition.y() << \" \" << newPosition.z() << \" \"\n    // << gr[0] << \" \" << gr[1] << \" \" << gr[2] << \" \"\n    << 0 << \" \" << 0 << \" \" << 0 << \" \" << 1\n    << std::endl;\n  Eigen::Vector3d gr;\n  gr.segment<2>(0) = newQuat.toRotationMatrix().block<2, 1>(0, 2);\n  gr[2] = newPosition[2];\n\n  geometry_msgs::PoseStamped laserPose;\n  laserPose.header = laserOdometry.header;\n  laserPose.pose = laserOdometry.pose.pose;\n  laserOdoPath.header.stamp = laserOdometry.header.stamp;\n  laserOdoPath.poses.push_back(laserPose);\n  laserOdoPath.header.frame_id = \"/world\";\n  pubLaserOdometryPath.publish(laserOdoPath);\n\n  laserOdometryTrans.frame_id_ = \"/world\";\n  laserOdometryTrans.child_frame_id_ = \"/livox_frame\";\n  laserOdometryTrans.stamp_ = ros::Time().fromSec(timefullCloud);\n  laserOdometryTrans.setRotation(tf::Quaternion(newQuat.x(), newQuat.y(), newQuat.z(), newQuat.w()));\n  laserOdometryTrans.setOrigin(tf::Vector3(newPosition.x(), newPosition.y(), newPosition.z()));\n  tfBroadcaster->sendTransform(laserOdometryTrans);\n\n  gps.header.stamp = ros::Time().fromSec(timefullCloud);\n  gps.header.frame_id = \"world\";\n  gps.latitude = newPosition.x();\n  gps.longitude = newPosition.y();\n  gps.altitude = newPosition.z();\n  gps.position_covariance = {\n          Rcurr(0, 0), Rcurr(1, 0), Rcurr(2, 0),\n          Rcurr(0, 1), Rcurr(1, 1), Rcurr(2, 1),\n          Rcurr(0, 2), Rcurr(1, 2), Rcurr(2, 2)\n  };\n  pubGps.publish(gps);\n}\n\nvoid fullCallBack(const 
sensor_msgs::PointCloud2ConstPtr &msg){\n  // push lidar msg to queue\n\tstd::unique_lock<std::mutex> lock(_mutexLidarQueue);\n  _lidarMsgQueue.push(msg);\n}\n\nvoid imu_callback(const sensor_msgs::ImuConstPtr &imu_msg){\n  // push IMU msg to queue\n  std::unique_lock<std::mutex> lock(_mutexIMUQueue);\n  _imuMsgQueue.push(imu_msg);\n}\n\n/** \\brief get IMU messages in a certain time interval\n  * \\param[in] startTime: left boundary of time interval\n  * \\param[in] endTime: right boundary of time interval\n  * \\param[in] vimuMsg: store IMU messages\n  */\nbool fetchImuMsgs(double startTime, double endTime, std::vector<sensor_msgs::ImuConstPtr> &vimuMsg){\n  std::unique_lock<std::mutex> lock(_mutexIMUQueue);\n  double current_time = 0;\n  vimuMsg.clear();\n  while(true)\n  {\n    if(_imuMsgQueue.empty()) {\n      break;\n    }\n    if(_imuMsgQueue.back()->header.stamp.toSec()<endTime ||\n       _imuMsgQueue.front()->header.stamp.toSec()>=endTime) {\n      \n      // std::cout << \"imu back time is \" << std::setprecision(20) << _imuMsgQueue.back()->header.stamp.toSec() << \" \" \n      //           << \"endtime is \" << std::setprecision(20) << endTime << std::endl;\n      break;\n    }\n    sensor_msgs::ImuConstPtr& tmpimumsg = _imuMsgQueue.front();\n    double time = tmpimumsg->header.stamp.toSec();\n    if(time<=endTime && time>startTime){\n      vimuMsg.push_back(tmpimumsg);\n      current_time = time;\n      _imuMsgQueue.pop();\n      if(time == endTime) {\n        break;\n      }\n    } else{\n      if(time<=startTime){\n        _imuMsgQueue.pop();\n      } else{\n        double dt_1 = endTime - current_time;\n        double dt_2 = time - endTime;\n        ROS_ASSERT(dt_1 >= 0);\n        ROS_ASSERT(dt_2 >= 0);\n        ROS_ASSERT(dt_1 + dt_2 > 0);\n        double w1 = dt_2 / (dt_1 + dt_2);\n        double w2 = dt_1 / (dt_1 + dt_2);\n        sensor_msgs::ImuPtr theLastIMU(new sensor_msgs::Imu);\n        theLastIMU->linear_acceleration.x = w1 * 
vimuMsg.back()->linear_acceleration.x + w2 * tmpimumsg->linear_acceleration.x;\n        theLastIMU->linear_acceleration.y = w1 * vimuMsg.back()->linear_acceleration.y + w2 * tmpimumsg->linear_acceleration.y;\n        theLastIMU->linear_acceleration.z = w1 * vimuMsg.back()->linear_acceleration.z + w2 * tmpimumsg->linear_acceleration.z;\n        theLastIMU->angular_velocity.x = w1 * vimuMsg.back()->angular_velocity.x + w2 * tmpimumsg->angular_velocity.x;\n        theLastIMU->angular_velocity.y = w1 * vimuMsg.back()->angular_velocity.y + w2 * tmpimumsg->angular_velocity.y;\n        theLastIMU->angular_velocity.z = w1 * vimuMsg.back()->angular_velocity.z + w2 * tmpimumsg->angular_velocity.z;\n        theLastIMU->header.stamp.fromSec(endTime);\n        vimuMsg.emplace_back(theLastIMU);\n        break;\n      }\n    }\n  }\n  return !vimuMsg.empty();\n}\n\n/** \\brief Remove Lidar Distortion\n  * \\param[in] cloud: lidar cloud need to be undistorted\n  * \\param[in] dRlc: delta rotation\n  * \\param[in] dtlc: delta displacement\n  */\nvoid RemoveLidarDistortion(pcl::PointCloud<PointType>::Ptr& cloud,\n                           const Eigen::Matrix3d& dRlc, const Eigen::Vector3d& dtlc){\n  int PointsNum = cloud->points.size();\n  for (int i = 0; i < PointsNum; i++) {\n    Eigen::Vector3d startP;\n    float s = cloud->points[i].normal_x;\n    Eigen::Quaterniond qlc = Eigen::Quaterniond(dRlc).normalized();\n    Eigen::Quaterniond delta_qlc = Eigen::Quaterniond::Identity().slerp(s, qlc).normalized();\n    const Eigen::Vector3d delta_Plc = s * dtlc;\n    startP = delta_qlc * Eigen::Vector3d(cloud->points[i].x,cloud->points[i].y,cloud->points[i].z) + delta_Plc;\n    Eigen::Vector3d _po = dRlc.transpose() * (startP - dtlc);\n\n    cloud->points[i].x = _po(0);\n    cloud->points[i].y = _po(1);\n    cloud->points[i].z = _po(2);\n    cloud->points[i].normal_x = 1.0;\n  }\n}\n/** \\brief Convert the lidar point cloud from the odometer to the ground coordinate system\n  * 
\\param[in] cloud: lidar cloud need to be convert\n  * \\param[in] Rgo: rotation from odometer to ground\n  */\nvoid ConvertPointCloudFromOdometerToGround(pcl::PointCloud<PointType>::Ptr& cloud, \n                                           const Eigen::Matrix3f& Rgo){\n  int PointsNum = cloud->points.size();\n  for (int i = 0; i < PointsNum; i++) {\n    Eigen::Vector3f startP{cloud->points[i].x,cloud->points[i].y,cloud->points[i].z};\n    Eigen::Vector3f _po = Rgo * startP;\n    cloud->points[i].x = _po(0);\n    cloud->points[i].y = _po(1);\n    cloud->points[i].z = _po(2);\n    cloud->points[i].normal_x = 1.0;\n  }\n}\n/** \\brief Convert the lidar point cloud from the lidar to the IMU coordinate system\n  * \\param[in] pi: lidar point need to be convert\n  * \\param[in] po: lidar point after convert\n  * \\param[in] exTbl: transformation from lidar to imu\n  */\nvoid ConvertPointFromLidarToIMU(PointType const * const pi, \n                                PointType * const po, \n                                const Eigen::Matrix4d& exTbl){\n  Eigen::Vector3d pin, pout;\n  pin[0] = pi->x;\n  pin[1] = pi->y;\n  pin[2] = pi->z;\n  pout = exTbl.topLeftCorner(3,3) * pin + exTbl.topRightCorner(3,1);\n  po->x = pout[0];\n  po->y = pout[1];\n  po->z = pout[2];\n  po->intensity = pi->intensity;\n  po->normal_z = pi->normal_z;\n  po->normal_y = pi->normal_y;\n}\n\nbool TryMAPInitialization() {\n\n  Eigen::Vector3d average_acc = -lidarFrameList->begin()->imuIntegrator.GetAverageAcc();\n  double info_g = std::fabs(9.805 - average_acc.norm());\n  average_acc = average_acc * 9.805 / average_acc.norm();\n\n  // calculate the initial gravity direction\n  double para_quat[4];\n  para_quat[0] = 1;\n  para_quat[1] = 0;\n  para_quat[2] = 0;\n  para_quat[3] = 0;\n\n\n  ceres::LocalParameterization *quatParam = new ceres::QuaternionParameterization();\n  ceres::Problem problem_quat;\n  \n  problem_quat.AddParameterBlock(para_quat, 4, quatParam);\n\n  
problem_quat.AddResidualBlock(Cost_Initial_G::Create(average_acc),\n                                nullptr,\n                                para_quat);\n\n  ceres::Solver::Options options_quat;\n  ceres::Solver::Summary summary_quat;\n  ceres::Solve(options_quat, &problem_quat, &summary_quat);\n\n  Eigen::Quaterniond q_wg(para_quat[0], para_quat[1], para_quat[2], para_quat[3]);\n\n\n  //build prior factor of LIO initialization\n  Eigen::Vector3d prior_r = Eigen::Vector3d::Zero();\n  Eigen::Vector3d prior_ba = Eigen::Vector3d::Zero();\n  Eigen::Vector3d prior_bg = Eigen::Vector3d::Zero();\n  std::vector<Eigen::Vector3d> prior_v;\n  int v_size = lidarFrameList->size();\n  for(int i = 0; i < v_size; i++) {\n    prior_v.push_back(Eigen::Vector3d::Zero());\n  }\n  Sophus::SO3d SO3_R_wg(q_wg.toRotationMatrix());\n  prior_r = SO3_R_wg.log();\n  \n  for (int i = 1; i < v_size; i++){\n    auto iter = lidarFrameList->begin();\n    auto iter_next = lidarFrameList->begin();\n    std::advance(iter, i-1);\n    std::advance(iter_next, i);\n\n    Eigen::Vector3d velo_imu = (iter_next->P - iter->P + iter_next->Q*exPlb - iter->Q*exPlb) / (iter_next->timeStamp - iter->timeStamp);\n    prior_v[i] = velo_imu;\n  }\n  prior_v[0] = prior_v[1];\n\n  double para_v[v_size][3];\n  double para_r[3];\n  double para_ba[3];\n  double para_bg[3];\n\n  for(int i = 0; i < 3; i++) {\n    para_r[i] = 0;\n    para_ba[i] = 0;\n    para_bg[i] = 0;\n  }\n\n  for(int i = 0; i < v_size; i++) {\n    for(int j = 0; j < 3; j++) {\n      para_v[i][j] = prior_v[i][j];\n    }\n  }\n\n  Eigen::Matrix<double, 3, 3> sqrt_information_r = 2000.0 * Eigen::Matrix<double, 3, 3>::Identity();\n  Eigen::Matrix<double, 3, 3> sqrt_information_ba = 1000.0 * Eigen::Matrix<double, 3, 3>::Identity();\n  Eigen::Matrix<double, 3, 3> sqrt_information_bg = 4000.0 * Eigen::Matrix<double, 3, 3>::Identity();\n  Eigen::Matrix<double, 3, 3> sqrt_information_v = 4000.0 * Eigen::Matrix<double, 3, 3>::Identity();\n\n  
ceres::Problem::Options problem_options;\n  ceres::Problem problem(problem_options);\n  problem.AddParameterBlock(para_r, 3);\n  problem.AddParameterBlock(para_ba, 3);\n  problem.AddParameterBlock(para_bg, 3);\n  for(int i = 0; i < v_size; i++) {\n    problem.AddParameterBlock(para_v[i], 3);\n  }\n  \n  // add CostFunction\n  problem.AddResidualBlock(Cost_Initialization_Prior_R::Create(prior_r, sqrt_information_r),\n                           nullptr,\n                           para_r);\n  \n  problem.AddResidualBlock(Cost_Initialization_Prior_bv::Create(prior_ba, sqrt_information_ba),\n                           nullptr,\n                           para_ba);\n  problem.AddResidualBlock(Cost_Initialization_Prior_bv::Create(prior_bg, sqrt_information_bg),\n                           nullptr,\n                           para_bg);\n\n  for(int i = 0; i < v_size; i++) {\n    problem.AddResidualBlock(Cost_Initialization_Prior_bv::Create(prior_v[i], sqrt_information_v),\n                             nullptr,\n                             para_v[i]);\n  }\n\n  for(int i = 1; i < v_size; i++) {\n    auto iter = lidarFrameList->begin();\n    auto iter_next = lidarFrameList->begin();\n    std::advance(iter, i-1);\n    std::advance(iter_next, i);\n\n    Eigen::Vector3d pi = iter->P + iter->Q*exPlb;\n    Sophus::SO3d SO3_Ri(iter->Q*exRlb);\n    Eigen::Vector3d ri = SO3_Ri.log();\n    Eigen::Vector3d pj = iter_next->P + iter_next->Q*exPlb;\n    Sophus::SO3d SO3_Rj(iter_next->Q*exRlb);\n    Eigen::Vector3d rj = SO3_Rj.log();\n\n    problem.AddResidualBlock(Cost_Initialization_IMU::Create(iter_next->imuIntegrator,\n                                                                   ri,\n                                                                   rj,\n                                                                   pj-pi,\n                                                                   Eigen::LLT<Eigen::Matrix<double, 9, 9>>\n                                            
                        (iter_next->imuIntegrator.GetCovariance().block<9,9>(0,0).inverse())\n                                                                    .matrixL().transpose()),\n                             nullptr,\n                             para_r,\n                             para_v[i-1],\n                             para_v[i],\n                             para_ba,\n                             para_bg);\n  }\n\n  ceres::Solver::Options options;\n  options.minimizer_progress_to_stdout = false;\n  options.linear_solver_type = ceres::DENSE_QR;\n  options.num_threads = 6;\n  ceres::Solver::Summary summary;\n  ceres::Solve(options, &problem, &summary);\n\n  Eigen::Vector3d r_wg(para_r[0], para_r[1], para_r[2]);\n  GravityVector = Sophus::SO3d::exp(r_wg) * Eigen::Vector3d(0, 0, -9.805);\n\n  Eigen::Vector3d ba_vec(para_ba[0], para_ba[1], para_ba[2]);\n  Eigen::Vector3d bg_vec(para_bg[0], para_bg[1], para_bg[2]);\n\n  if(ba_vec.norm() > 0.5 || bg_vec.norm() > 0.5) {\n    ROS_WARN(\"Too Large Biases! Initialization Failed!\");\n    return false;\n  }\n\n  for(int i = 0; i < v_size; i++) {\n    auto iter = lidarFrameList->begin();\n    std::advance(iter, i);\n    iter->ba = ba_vec;\n    iter->bg = bg_vec;\n    Eigen::Vector3d bv_vec(para_v[i][0], para_v[i][1], para_v[i][2]);\n    if((bv_vec - prior_v[i]).norm() > 2.0) {\n      ROS_WARN(\"Too Large Velocity! 
Initialization Failed!\");\n      std::cout<<\"delta v norm: \"<<(bv_vec - prior_v[i]).norm()<<std::endl;\n      return false;\n    }\n    iter->V = bv_vec;\n  }\n\n  for(size_t i = 0; i < v_size - 1; i++){\n    auto laser_trans_i = lidarFrameList->begin();\n    auto laser_trans_j = lidarFrameList->begin();\n    std::advance(laser_trans_i, i);\n    std::advance(laser_trans_j, i+1);\n    laser_trans_j->imuIntegrator.PreIntegration(laser_trans_i->timeStamp, laser_trans_i->bg, laser_trans_i->ba);\n  }\n\n\n  // //if IMU success initialized\n  WINDOWSIZE = Estimator::SLIDEWINDOWSIZE;\n  while(lidarFrameList->size() > WINDOWSIZE){\n\t  lidarFrameList->pop_front();\n  }\n\tEigen::Vector3d Pwl = lidarFrameList->back().P;\n\tEigen::Quaterniond Qwl = lidarFrameList->back().Q;\n\tlidarFrameList->back().P = Pwl + Qwl*exPlb;\n\tlidarFrameList->back().Q = Qwl * exRlb;\n\n\t// std::cout << \"\\n=============================\\n| Initialization Successful |\"<<\"\\n=============================\\n\" << std::endl;\n  \n  return true;\n}\n\n\n\n/** \\brief Mapping main thread\n  */\nvoid process(){\n  double time_last_lidar = -1;\n  double time_curr_lidar = -1;\n  Eigen::Matrix3d delta_Rl = Eigen::Matrix3d::Identity();\n  Eigen::Vector3d delta_tl = Eigen::Vector3d::Zero();\n\tEigen::Matrix3d delta_Rb = Eigen::Matrix3d::Identity();\n\tEigen::Vector3d delta_tb = Eigen::Vector3d::Zero();\n  std::vector<sensor_msgs::ImuConstPtr> vimuMsg;\n  while(ros::ok()){\n    newfullCloud = false;\n    laserCloudFullRes.reset(new pcl::PointCloud<PointType>());\n\t  std::unique_lock<std::mutex> lock_lidar(_mutexLidarQueue);\n    if(!_lidarMsgQueue.empty()){\n      // get new lidar msg\n      time_curr_lidar = _lidarMsgQueue.front()->header.stamp.toSec();\n      pcl::fromROSMsg(*_lidarMsgQueue.front(), *laserCloudFullRes);\n      _lidarMsgQueue.pop();\n      newfullCloud = true;\n    }\n    lock_lidar.unlock();\n\n    if(newfullCloud){\n      nav_msgs::Odometry debugInfo;\n      
debugInfo.pose.pose.position.x = 0;\n      debugInfo.pose.pose.position.y = 0;\n      debugInfo.pose.pose.position.z = 0;\n      if(IMU_Mode > 0 && time_last_lidar > 0){\n        // get IMU msg int the Specified time interval\n        vimuMsg.clear();\n        int countFail = 0;\n        while (!fetchImuMsgs(time_last_lidar, time_curr_lidar, vimuMsg)) {\n          countFail++;\n          if (countFail > 100){\n            break;\n          }\n          std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );\n        }\n      }\n      // this lidar frame init\n      Estimator::LidarFrame lidarFrame;\n      lidarFrame.laserCloud = laserCloudFullRes;\n      lidarFrame.timeStamp = time_curr_lidar;\n\n\t    boost::shared_ptr<std::list<Estimator::LidarFrame>> lidar_list;\n\t    if(!vimuMsg.empty()){\n\t    \tif(!LidarIMUInited) {\n\t    \t\t// if get IMU msg successfully, use gyro integration to update delta_Rl\n\t\t\t    lidarFrame.imuIntegrator.PushIMUMsg(vimuMsg);\n\t\t\t    lidarFrame.imuIntegrator.GyroIntegration(time_last_lidar);\n\t\t\t    delta_Rb = lidarFrame.imuIntegrator.GetDeltaQ().toRotationMatrix();\n\t\t\t    delta_Rl = exTlb.topLeftCorner(3, 3) * delta_Rb * exTlb.topLeftCorner(3, 3).transpose();\n\n\t\t\t    // predict current lidar pose\n\t\t\t    lidarFrame.P = transformAftMapped.topLeftCorner(3,3) * delta_tb\n\t\t\t                   + transformAftMapped.topRightCorner(3,1);\n\t\t\t    Eigen::Matrix3d m3d = transformAftMapped.topLeftCorner(3,3) * delta_Rb;\n\t\t\t    lidarFrame.Q = m3d;\n\n\t\t\t    lidar_list.reset(new std::list<Estimator::LidarFrame>);\n\t\t\t    lidar_list->push_back(lidarFrame);\n\t\t    }else{\n\t\t\t    // if get IMU msg successfully, use pre-integration to update delta lidar pose\n\t\t\t    lidarFrame.imuIntegrator.PushIMUMsg(vimuMsg);\n\t\t\t    lidarFrame.imuIntegrator.PreIntegration(lidarFrameList->back().timeStamp, lidarFrameList->back().bg, lidarFrameList->back().ba);\n\n\t\t\t    const Eigen::Vector3d& Pwbpre = 
lidarFrameList->back().P;\n\t\t\t    const Eigen::Quaterniond& Qwbpre = lidarFrameList->back().Q;\n\t\t\t    const Eigen::Vector3d& Vwbpre = lidarFrameList->back().V;\n\n\t\t\t    const Eigen::Quaterniond& dQ =  lidarFrame.imuIntegrator.GetDeltaQ();\n\t\t\t    const Eigen::Vector3d& dP = lidarFrame.imuIntegrator.GetDeltaP();\n\t\t\t    const Eigen::Vector3d& dV = lidarFrame.imuIntegrator.GetDeltaV();\n\t\t\t    double dt = lidarFrame.imuIntegrator.GetDeltaTime();\n\n\t\t\t    lidarFrame.Q = Qwbpre * dQ;\n\t\t\t    lidarFrame.P = Pwbpre + Vwbpre*dt + 0.5*GravityVector*dt*dt + Qwbpre*(dP);\n\t\t\t    lidarFrame.V = Vwbpre + GravityVector*dt + Qwbpre*(dV);\n\t\t\t    lidarFrame.bg = lidarFrameList->back().bg;\n\t\t\t    lidarFrame.ba = lidarFrameList->back().ba;\n\n\t\t\t    Eigen::Quaterniond Qwlpre = Qwbpre * Eigen::Quaterniond(exRbl);\n\t\t\t    Eigen::Vector3d Pwlpre = Qwbpre * exPbl + Pwbpre;\n\n\t\t\t    Eigen::Quaterniond Qwl = lidarFrame.Q * Eigen::Quaterniond(exRbl);\n\t\t\t    Eigen::Vector3d Pwl = lidarFrame.Q * exPbl + lidarFrame.P;\n\n\t\t\t    delta_Rl = Qwlpre.conjugate() * Qwl;\n\t\t\t    delta_tl = Qwlpre.conjugate() * (Pwl - Pwlpre);\n\t\t\t    delta_Rb = dQ.toRotationMatrix();\n\t\t\t    delta_tb = dP;\n\n\t\t\t    lidarFrameList->push_back(lidarFrame);\n\t\t\t    lidarFrameList->pop_front();\n\t\t\t    lidar_list = lidarFrameList;\n\t    \t}\n\t    }else{\n\t    \tif(LidarIMUInited) {\n          std::cout << \"IMU is empty \" << std::endl;\n\t    \t  break;\n        }\n\t    \telse{\n\t\t\t    // predict current lidar pose\n\t\t\t    lidarFrame.P = transformAftMapped.topLeftCorner(3,3) * delta_tb\n\t\t\t                   + transformAftMapped.topRightCorner(3,1);\n\t\t\t    Eigen::Matrix3d m3d = transformAftMapped.topLeftCorner(3,3) * delta_Rb;\n\t\t\t    lidarFrame.Q = m3d;\n\n\t\t\t    lidar_list.reset(new std::list<Estimator::LidarFrame>);\n\t\t\t    lidar_list->push_back(lidarFrame);\n\t    \t}\n\t    }\n\n\t    // remove lidar distortion\n\t   
 RemoveLidarDistortion(laserCloudFullRes, delta_Rl, delta_tl);\n\n      \n      static bool first_flag = true;\n      Eigen::Matrix3f Rgo;\n      if (first_flag) {\n        lidar_list->back().ground_plane_coeff = livox_slam_ware::get_plane_coeffs<PointType>(laserCloudFullRes);\n        Eigen::Vector3f v1 = lidar_list->back().ground_plane_coeff.head<3>();\n        Eigen::Vector3f v2{0,0,1};\n        Rgo = Eigen::Quaternionf::FromTwoVectors(v1,v2).toRotationMatrix();\n        std:: cout << \"the rotation matrix is \" << Rgo << std::endl;\n        first_flag = false;\n      }\n      ConvertPointCloudFromOdometerToGround(laserCloudFullRes, Rgo);\n      // calculate the ground plane parameters\n      lidarFrame.ground_plane_coeff = livox_slam_ware::get_plane_coeffs<PointType>(laserCloudFullRes);\n      lidar_list->back().ground_plane_coeff = lidarFrame.ground_plane_coeff;\n      // std::cout << \"ground_plane_coeff \" << std::endl << lidar_list->back().ground_plane_coeff.transpose() << std::endl;\n\n      // optimize current lidar pose with IMU\n      estimator->EstimateLidarPose(*lidar_list, exTlb, GravityVector, debugInfo);\n\n      pcl::PointCloud<PointType>::Ptr laserCloudCornerMap(new pcl::PointCloud<PointType>());\n      pcl::PointCloud<PointType>::Ptr laserCloudSurfMap(new pcl::PointCloud<PointType>());\n\n\t    Eigen::Matrix4d transformTobeMapped = Eigen::Matrix4d::Identity();\n\t    transformTobeMapped.topLeftCorner(3,3) = lidar_list->front().Q * exRbl;\n\t    transformTobeMapped.topRightCorner(3,1) = lidar_list->front().Q * exPbl + lidar_list->front().P;\n\n\t    // update delta transformation\n\t    delta_Rb = transformAftMapped.topLeftCorner(3, 3).transpose() * lidar_list->front().Q.toRotationMatrix();\n\t    delta_tb = transformAftMapped.topLeftCorner(3, 3).transpose() * (lidar_list->front().P - transformAftMapped.topRightCorner(3, 1));\n\t    Eigen::Matrix3d Rwlpre = transformAftMapped.topLeftCorner(3, 3) * exRbl;\n\t    Eigen::Vector3d Pwlpre = 
transformAftMapped.topLeftCorner(3, 3) * exPbl + transformAftMapped.topRightCorner(3, 1);\n\t    delta_Rl = Rwlpre.transpose() * transformTobeMapped.topLeftCorner(3,3);\n\t    delta_tl = Rwlpre.transpose() * (transformTobeMapped.topRightCorner(3,1) - Pwlpre);\n\t    transformAftMapped.topLeftCorner(3,3) = lidar_list->front().Q.toRotationMatrix();\n\t    transformAftMapped.topRightCorner(3,1) = lidar_list->front().P;\n\n\n      if (LidarIMUInited) {\n        // publish odometry rostopic\n        pubOdometry(transformTobeMapped, lidar_list->front().timeStamp);\n        // publish lidar points\n        int laserCloudFullResNum = lidar_list->front().laserCloud->points.size();\n        pcl::PointCloud<PointType>::Ptr laserCloudAfterEstimate(new pcl::PointCloud<PointType>());\n        pcl::PointCloud<PointType>::Ptr laserCloudIMU(new pcl::PointCloud<PointType>());\n        laserCloudAfterEstimate->reserve(laserCloudFullResNum);\n        laserCloudIMU->reserve(laserCloudFullResNum);\n\n        Eigen::Matrix4d exTbl = exTlb.inverse();\n        for (int i = 0; i < laserCloudFullResNum; i++) {\n          PointType temp_point;\n          auto &p = lidar_list->front().laserCloud->points[i];\n          // if (std::fabs(p.normal_y + 1.0) < 1e-5) {\n          MAP_MANAGER::pointAssociateToMap(&lidar_list->front().laserCloud->points[i], &temp_point, transformAftMapped);\n          laserCloudAfterEstimate->push_back(temp_point);\n          ConvertPointFromLidarToIMU(&lidar_list->front().laserCloud->points[i], &temp_point, exTbl);\n          // temp_point = lidar_list->front().laserCloud->points[i];\n          laserCloudIMU->push_back(temp_point);\n          // }\n        }\n        // std::cout << \"laserCloudAfterEstimate size = \" << laserCloudAfterEstimate->size() << std::endl;\n        sensor_msgs::PointCloud2 laserCloudMsg, laserCloudIMUMsg;\n        pcl::toROSMsg(*laserCloudAfterEstimate, laserCloudMsg);\n        pcl::toROSMsg(*laserCloudIMU, laserCloudIMUMsg);\n        
laserCloudMsg.header.frame_id = \"/world\";\n        laserCloudIMUMsg.header.frame_id = \"/world\";\n        laserCloudMsg.header.stamp.fromSec(lidar_list->front().timeStamp);\n        laserCloudIMUMsg.header.stamp.fromSec(lidar_list->front().timeStamp);\n        pubFullLaserCloud.publish(laserCloudMsg);\n        pubFullLaserCloudIMU.publish(laserCloudIMUMsg);\n      }\n\t    // if tightly coupled IMU message, start IMU initialization\n\t    if(IMU_Mode > 1 && !LidarIMUInited){\n\t\t    // update lidar frame pose\n\t\t    lidarFrame.P = transformTobeMapped.topRightCorner(3,1);\n\t\t    Eigen::Matrix3d m3d = transformTobeMapped.topLeftCorner(3,3);\n\t\t    lidarFrame.Q = m3d;\n\n\t\t    // static int pushCount = 0;\n\t\t    if(pushCount == 0){\n\t\t\t    lidarFrameList->push_back(lidarFrame);\n\t\t\t    lidarFrameList->back().imuIntegrator.Reset();\n\t\t\t    if(lidarFrameList->size() > WINDOWSIZE)\n\t\t\t\t    lidarFrameList->pop_front();\n\t\t    }else{\n\t\t\t    lidarFrameList->back().laserCloud = lidarFrame.laserCloud;\n\t\t\t    lidarFrameList->back().imuIntegrator.PushIMUMsg(vimuMsg);\n\t\t\t    lidarFrameList->back().timeStamp = lidarFrame.timeStamp;\n\t\t\t    lidarFrameList->back().P = lidarFrame.P;\n\t\t\t    lidarFrameList->back().Q = lidarFrame.Q;\n\t\t    }\n\t\t    pushCount++;\n\t\t    if (pushCount >= 3){\n\t\t\t    pushCount = 0;\n\t\t\t    if(lidarFrameList->size() > 1){\n\t\t\t\t    auto iterRight = std::prev(lidarFrameList->end());\n\t\t\t\t    auto iterLeft = std::prev(std::prev(lidarFrameList->end()));\n\t\t\t\t    iterRight->imuIntegrator.PreIntegration(iterLeft->timeStamp, iterLeft->bg, iterLeft->ba);\n\t\t\t    }\n\n          if (lidarFrameList->size() == int(WINDOWSIZE / 1.5)) {\n\t\t\t\t\t  startTime = lidarFrameList->back().timeStamp;\n\t\t\t    }\n\n\t\t\t    if (!LidarIMUInited && lidarFrameList->size() == WINDOWSIZE && lidarFrameList->front().timeStamp >= startTime){\n            std::cout<<\"**************Start MAP 
Initialization!!!******************\"<<std::endl;\n\t\t\t\t    if(TryMAPInitialization()){\n              LidarIMUInited = true;\n\t\t\t\t\t    pushCount = 0;\n              startTime = 0;\n\t\t\t\t    }\n            std::cout<<\"**************Finish MAP Initialization!!!******************\"<<std::endl;\n\t\t\t    }\n\n\t\t    }\n\t    }\n      time_last_lidar = time_curr_lidar;\n\n    }\n  }\n  std::cout << \"finished this thread\" << std::endl;\n}\n\nint main(int argc, char** argv)\n{\n  ros::init(argc, argv, \"PoseEstimation\");\n  ros::NodeHandle nodeHandler(\"~\");\n\n  ros::param::get(\"~filter_parameter_corner\",filter_parameter_corner);\n  ros::param::get(\"~filter_parameter_surf\",filter_parameter_surf);\n  ros::param::get(\"~IMU_Mode\",IMU_Mode);\n\tstd::vector<double> vecTlb;\n\tros::param::get(\"~Extrinsic_Tlb\",vecTlb);\n  ros::param::get(\"~save_path\",save_path);\n  // set extrinsic matrix between lidar & IMU\n  Eigen::Matrix3d R;\n  Eigen::Vector3d t;\n\tR << vecTlb[0], vecTlb[1], vecTlb[2],\n\t     vecTlb[4], vecTlb[5], vecTlb[6],\n\t     vecTlb[8], vecTlb[9], vecTlb[10];\n\tt << vecTlb[3], vecTlb[7], vecTlb[11];\n  Eigen::Quaterniond qr(R);\n  R = qr.normalized().toRotationMatrix();\n  exTlb.topLeftCorner(3,3) = R;\n  exTlb.topRightCorner(3,1) = t;\n  exRlb = R;\n  exRbl = R.transpose();\n  exPlb = t;\n  exPbl = -1.0 * exRbl * exPlb;\n\n  ros::Subscriber subFullCloud = nodeHandler.subscribe<sensor_msgs::PointCloud2>(\"/livox_full_cloud\", 10000, fullCallBack);\n  ros::Subscriber sub_imu;\n  if(IMU_Mode > 0)\n    sub_imu = nodeHandler.subscribe(\"/livox/imu\", 200000, imu_callback, ros::TransportHints().unreliable());\n  if(IMU_Mode < 2)\n    WINDOWSIZE = 1;\n  else\n    WINDOWSIZE = 20;\n\n  pubFullLaserCloud = nodeHandler.advertise<sensor_msgs::PointCloud2>(\"/livox_full_cloud_mapped\", 10);\n  pubFullLaserCloudIMU = nodeHandler.advertise<sensor_msgs::PointCloud2>(\"/livox_full_cloud_imu\", 10);\n  pubLaserOdometry = 
nodeHandler.advertise<nav_msgs::Odometry> (\"/livox_odometry_mapped\", 5);\n  pubLaserOdometryPath = nodeHandler.advertise<nav_msgs::Path> (\"/livox_odometry_path_mapped\", 5);\n\tpubGps = nodeHandler.advertise<sensor_msgs::NavSatFix>(\"/lidar\", 1000);\n\n  tfBroadcaster = new tf::TransformBroadcaster();\n\n  laserCloudFullRes.reset(new pcl::PointCloud<PointType>);\n  estimator = new Estimator(filter_parameter_corner, filter_parameter_surf);\n\tlidarFrameList.reset(new std::list<Estimator::LidarFrame>);\n  odo.reset(new std::ofstream(save_path));\n  std::thread thread_process{process};\n  std::cout << \"finished\" << std::endl;\n  ros::spin();\n  std::cout << \"finished ros\" << std::endl;\n  return 0;\n}\n\n"
  },
  {
    "path": "LIO-Livox/src/lio/ScanRegistration.cpp",
    "content": "#include \"LidarFeatureExtractor/LidarFeatureExtractor.h\"\n\ntypedef pcl::PointXYZINormal PointType;\n\nros::Publisher pubFullLaserCloud;\nros::Publisher pubSharpCloud;\nros::Publisher pubFlatCloud;\nros::Publisher pubNonFeature;\n\nLidarFeatureExtractor* lidarFeatureExtractor;\npcl::PointCloud<PointType>::Ptr laserCloud;\npcl::PointCloud<PointType>::Ptr laserConerCloud;\npcl::PointCloud<PointType>::Ptr laserSurfCloud;\npcl::PointCloud<PointType>::Ptr laserNonFeatureCloud;\nint Lidar_Type = 0;\nint N_SCANS = 6;\nbool Feature_Mode = false;\nbool Use_seg = false;\n\nvoid lidarCallBackHorizon(const livox_ros_driver::CustomMsgConstPtr &msg) {\n\n  sensor_msgs::PointCloud2 msg2;\n\n  if(Use_seg){\n    lidarFeatureExtractor->FeatureExtract_with_segment(msg, laserCloud, laserConerCloud, laserSurfCloud, laserNonFeatureCloud, msg2,N_SCANS);\n  }\n  else{\n    lidarFeatureExtractor->FeatureExtract(msg, laserCloud, laserConerCloud, laserSurfCloud,N_SCANS);\n  } \n\n  sensor_msgs::PointCloud2 laserCloudMsg;\n  pcl::toROSMsg(*laserCloud, laserCloudMsg);\n  laserCloudMsg.header = msg->header;\n  laserCloudMsg.header.stamp.fromNSec(msg->timebase+msg->points.back().offset_time);\n  pubFullLaserCloud.publish(laserCloudMsg);\n  // std::cout << \"time in front is \" << laserCloudMsg.header.stamp << std::endl;\n}\n\nint main(int argc, char** argv)\n{\n  ros::init(argc, argv, \"ScanRegistration\");\n  ros::NodeHandle nodeHandler(\"~\");\n\n  ros::Subscriber customCloud;;\n\n  std::string config_file;\n  nodeHandler.getParam(\"config_file\", config_file);\n\n  cv::FileStorage fsSettings(config_file, cv::FileStorage::READ);\n  if (!fsSettings.isOpened()) {\n    std::cout << \"config_file error: cannot open \" << config_file << std::endl;\n    return false;\n  }\n  Lidar_Type = static_cast<int>(fsSettings[\"Lidar_Type\"]);\n  N_SCANS = static_cast<int>(fsSettings[\"Used_Line\"]);\n  Feature_Mode = static_cast<int>(fsSettings[\"Feature_Mode\"]);\n  Use_seg = 
static_cast<int>(fsSettings[\"Use_seg\"]);\n\n  int NumCurvSize = static_cast<int>(fsSettings[\"NumCurvSize\"]);\n  float DistanceFaraway = static_cast<float>(fsSettings[\"DistanceFaraway\"]);\n  int NumFlat = static_cast<int>(fsSettings[\"NumFlat\"]);\n  int PartNum = static_cast<int>(fsSettings[\"PartNum\"]);\n  float FlatThreshold = static_cast<float>(fsSettings[\"FlatThreshold\"]);\n  float BreakCornerDis = static_cast<float>(fsSettings[\"BreakCornerDis\"]);\n  float LidarNearestDis = static_cast<float>(fsSettings[\"LidarNearestDis\"]);\n  float KdTreeCornerOutlierDis = static_cast<float>(fsSettings[\"KdTreeCornerOutlierDis\"]);\n\n  laserCloud.reset(new pcl::PointCloud<PointType>);\n  laserConerCloud.reset(new pcl::PointCloud<PointType>);\n  laserSurfCloud.reset(new pcl::PointCloud<PointType>);\n  laserNonFeatureCloud.reset(new pcl::PointCloud<PointType>);\n\n  customCloud = nodeHandler.subscribe<livox_ros_driver::CustomMsg>(\"/livox/lidar\", 100, &lidarCallBackHorizon);\n\n  pubFullLaserCloud = nodeHandler.advertise<sensor_msgs::PointCloud2>(\"/livox_full_cloud\", 10);\n  pubSharpCloud = nodeHandler.advertise<sensor_msgs::PointCloud2>(\"/livox_less_sharp_cloud\", 10);\n  pubFlatCloud = nodeHandler.advertise<sensor_msgs::PointCloud2>(\"/livox_less_flat_cloud\", 10);\n  pubNonFeature = nodeHandler.advertise<sensor_msgs::PointCloud2>(\"/livox_nonfeature_cloud\", 10);\n\n  lidarFeatureExtractor = new LidarFeatureExtractor(N_SCANS,NumCurvSize,DistanceFaraway,NumFlat,PartNum,\n                                                    FlatThreshold,BreakCornerDis,LidarNearestDis,KdTreeCornerOutlierDis);\n\n  ros::spin();\n\n  return 0;\n}\n\n"
  },
  {
    "path": "LIO-Livox/src/lio/ceresfunc.cpp",
    "content": "#include \"utils/ceresfunc.h\"\n\nvoid* ThreadsConstructA(void* threadsstruct)\n{\n  ThreadsStruct* p = ((ThreadsStruct*)threadsstruct);\n  for (auto it : p->sub_factors)\n  {\n    for (int i = 0; i < static_cast<int>(it->parameter_blocks.size()); i++)\n    {\n      int idx_i = p->parameter_block_idx[reinterpret_cast<long>(it->parameter_blocks[i])];\n      int size_i = p->parameter_block_size[reinterpret_cast<long>(it->parameter_blocks[i])];\n      Eigen::MatrixXd jacobian_i = it->jacobians[i].leftCols(size_i);\n      for (int j = i; j < static_cast<int>(it->parameter_blocks.size()); j++)\n      {\n        int idx_j = p->parameter_block_idx[reinterpret_cast<long>(it->parameter_blocks[j])];\n        int size_j = p->parameter_block_size[reinterpret_cast<long>(it->parameter_blocks[j])];\n        Eigen::MatrixXd jacobian_j = it->jacobians[j].leftCols(size_j);\n        if (i == j)\n          p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j;\n        else\n        {\n          p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j;\n          p->A.block(idx_j, idx_i, size_j, size_i) = p->A.block(idx_i, idx_j, size_i, size_j).transpose();\n        }\n      }\n      p->b.segment(idx_i, size_i) += jacobian_i.transpose() * it->residuals;\n    }\n  }\n  return threadsstruct;\n}\n// Eigen::Vector3d sqrt_information_vec(10, 10, 10); // close\nEigen::Vector3d sqrt_information_vec(0.0000205, 0.0000205, 0.0001); // open 0.000205, 0.000205, 0.01\nEigen::Matrix<double, 3, 3> Cost_NavState_PR_Ground::sqrt_information = sqrt_information_vec.asDiagonal().inverse();\nEigen::VectorXf Cost_NavState_PR_Ground::init_ground_plane_coeff(4); \n"
  },
  {
    "path": "LIO-Livox/src/segment/pointsCorrect.cpp",
    "content": "#include \"segment/pointsCorrect.hpp\"\n\nfloat gnd_pos[6];\nint frame_count = 0;\nint frame_lenth_threshold = 5;//5 frames update\n\nint GetNeiborPCA_cor(SNeiborPCA_cor &npca, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> kdtree, pcl::PointXYZ searchPoint, float fSearchRadius)\n{\n    std::vector<float> k_dis;\n    pcl::PointCloud<pcl::PointXYZ>::Ptr subCloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    if(kdtree.radiusSearch(searchPoint,fSearchRadius,npca.neibors,k_dis)>5)\n    {\n        subCloud->width=npca.neibors.size();\n        subCloud->height=1;\n        subCloud->points.resize(subCloud->width*subCloud->height);\n\n        for (int pid=0;pid<subCloud->points.size();pid++)//搜索半径内的地面点云 sy\n        {\n            subCloud->points[pid].x=cloud->points[npca.neibors[pid]].x;\n            subCloud->points[pid].y=cloud->points[npca.neibors[pid]].y;\n            subCloud->points[pid].z=cloud->points[npca.neibors[pid]].z;\n        }\n        //利用PCA主元分析法获得点云的三个主方向，获取质心，计算协方差，获得协方差矩阵，求取协方差矩阵的特征值和特长向量，特征向量即为主方向。 sy\n        Eigen::Vector4f pcaCentroid;\n    \tpcl::compute3DCentroid(*subCloud, pcaCentroid);\n\t    Eigen::Matrix3f covariance;\n\t    pcl::computeCovarianceMatrixNormalized(*subCloud, pcaCentroid, covariance);\n\t    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> eigen_solver(covariance, Eigen::ComputeEigenvectors);\n\t    npca.eigenVectorsPCA = eigen_solver.eigenvectors();\n\t    npca.eigenValuesPCA = eigen_solver.eigenvalues();\n        float vsum=npca.eigenValuesPCA(0)+npca.eigenValuesPCA(1)+npca.eigenValuesPCA(2);\n        npca.eigenValuesPCA(0)=npca.eigenValuesPCA(0)/(vsum+0.000001);//单位化 sy\n        npca.eigenValuesPCA(1)=npca.eigenValuesPCA(1)/(vsum+0.000001);\n        npca.eigenValuesPCA(2)=npca.eigenValuesPCA(2)/(vsum+0.000001);\n    }\n    else\n    {\n        npca.neibors.clear();\n    }\n    //std::cout << \"in PCA2\\n\";\n    return npca.neibors.size();\n}\n\nint FilterGndForPos_cor(float* 
outPoints,float*inPoints,int inNum)\n{\n    int outNum=0;\n    float dx=2;\n    float dy=2;\n    int x_len = 20;\n    int y_len = 10;\n    int nx=2*x_len/dx; //80\n    int ny=2*y_len/dy; //10\n    float offx=-20,offy=-10;\n    float THR=0.4;\n    \n\n    float *imgMinZ=(float*)calloc(nx*ny,sizeof(float));\n    float *imgMaxZ=(float*)calloc(nx*ny,sizeof(float));\n    float *imgSumZ=(float*)calloc(nx*ny,sizeof(float));\n    float *imgMeanZ=(float*)calloc(nx*ny,sizeof(float));\n    int *imgNumZ=(int*)calloc(nx*ny,sizeof(int));\n    int *idtemp = (int*)calloc(inNum,sizeof(int));\n    for(int ii=0;ii<nx*ny;ii++)\n    {\n        imgMinZ[ii]=10;\n        imgMaxZ[ii]=-10;\n        imgMeanZ[ii] = -10;\n        imgSumZ[ii]=0;\n        imgNumZ[ii]=0;\n    }\n\n    for(int pid=0;pid<inNum;pid++)\n    {\n        idtemp[pid] = -1;\n        if((inPoints[pid*4] > -x_len) && (inPoints[pid*4]<x_len)&&(inPoints[pid*4+1]>-y_len)&&(inPoints[pid*4+1]<y_len))\n        {\n            int idx=(inPoints[pid*4]-offx)/dx;\n            int idy=(inPoints[pid*4+1]-offy)/dy;\n            idtemp[pid] = idx+idy*nx;\n            if (idtemp[pid] >= nx*ny)\n                continue;\n            imgSumZ[idx+idy*nx] += inPoints[pid*4+2];\n            imgNumZ[idx+idy*nx] +=1;\n            if(inPoints[pid*4+2]<imgMinZ[idx+idy*nx])\n            {\n                imgMinZ[idx+idy*nx]=inPoints[pid*4+2];\n            }\n            if(inPoints[pid*4+2]>imgMaxZ[idx+idy*nx]){\n                imgMaxZ[idx+idy*nx]=inPoints[pid*4+2];\n            }\n        }\n    }\n    for(int pid=0;pid<inNum;pid++)\n    {\n        if (outNum >= 60000)\n            break;\n        if(idtemp[pid] > 0 && idtemp[pid] < nx*ny)\n        {\n            imgMeanZ[idtemp[pid]] = float(imgSumZ[idtemp[pid]]/(imgNumZ[idtemp[pid]] + 0.0001));\n            //最高点与均值高度差小于阈值；点数大于3；均值高度小于1 \n            if((imgMaxZ[idtemp[pid]] - imgMeanZ[idtemp[pid]]) < THR && imgNumZ[idtemp[pid]] > 3 && imgMeanZ[idtemp[pid]] < 2)\n            {// 
imgMeanZ[idtemp[pid]]<0&&\n                outPoints[outNum*4]=inPoints[pid*4];\n                outPoints[outNum*4+1]=inPoints[pid*4+1];\n                outPoints[outNum*4+2]=inPoints[pid*4+2];\n                outPoints[outNum*4+3]=inPoints[pid*4+3];\n                outNum++;\n            }\n        }\n    }\n\n    free(imgMinZ);\n    free(imgMaxZ);\n    free(imgSumZ);\n    free(imgMeanZ);\n    free(imgNumZ);\n    free(idtemp);\n    return outNum;\n}\n\nint CalGndPos_cor(float *gnd, float *fPoints,int pointNum,float fSearchRadius)\n{\n    // 初始化gnd\n    for(int ii=0;ii<6;ii++)\n    {\n        gnd[ii]=0;\n    }\n    // 转换点云到pcl的格式\n    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    //去除异常点\n    if (pointNum <= 3)\n    {\n        return 0;\n    }\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        cloud->points[pid].x=fPoints[pid*4];\n        cloud->points[pid].y=fPoints[pid*4+1];\n        cloud->points[pid].z=fPoints[pid*4+2];\n    }\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n    int nNum=0;\n    unsigned char* pLabel = (unsigned char*)calloc(pointNum,sizeof(unsigned char));\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if ((nNum<1000)&&(pLabel[pid]==0))\n        {\n            SNeiborPCA_cor npca;\n            pcl::PointXYZ searchPoint;\n            searchPoint.x=cloud->points[pid].x;\n            searchPoint.y=cloud->points[pid].y;\n            searchPoint.z=cloud->points[pid].z;\n\n            if(GetNeiborPCA_cor(npca,cloud,kdtree,searchPoint,fSearchRadius)>0)\n            {\n                for(int ii=0;ii<npca.neibors.size();ii++)\n                {\n                    pLabel[npca.neibors[ii]]=1;\n                }\n\n                if(npca.eigenValuesPCA[1]/(npca.eigenValuesPCA[0] + 0.00001)>5000){ //指的是主方向与次方向差异较大。即这一小块接近平面 sy\n\n                   
     if(npca.eigenVectorsPCA(2,0)>0) //垂直向上？\n                        {\n                            gnd[0]+=npca.eigenVectorsPCA(0,0);\n                            gnd[1]+=npca.eigenVectorsPCA(1,0);\n                            gnd[2]+=npca.eigenVectorsPCA(2,0);\n\n                            gnd[3]+=searchPoint.x;\n                            gnd[4]+=searchPoint.y;\n                            gnd[5]+=searchPoint.z;\n                        }\n                        else\n                        {\n                            gnd[0]+=-npca.eigenVectorsPCA(0,0);\n                            gnd[1]+=-npca.eigenVectorsPCA(1,0);\n                            gnd[2]+=-npca.eigenVectorsPCA(2,0);\n\n                            gnd[3]+=searchPoint.x;\n                            gnd[4]+=searchPoint.y;\n                            gnd[5]+=searchPoint.z;\n                        }\n                        nNum++;\n\n                }\n            }\n        }\n    }\n    free(pLabel);\n    if(nNum>0)\n    {\n        for(int ii=0;ii<6;ii++)\n        {\n            gnd[ii]/=nNum; //平均法向量 & 地面点云的中心\n        }\n        if(abs(gnd[0])<0.1){\n            gnd[0]=gnd[0]*(1-abs(gnd[0]))*4.5;\n        }\n        else if(abs(gnd[0])<0.2){\n            gnd[0]=gnd[0]*(1-abs(gnd[0]))*3.2;\n        }\n        else{\n            gnd[0]=gnd[0]*(1-abs(gnd[0]))*2.8;\n        }\n        gnd[1] = gnd[1]*2.3;\n        \n    }\n    return nNum;\n}\n\nint GetRTMatrix_cor(float *RTM, float *v0, float *v1)\n{\n    // 归一化\n    float nv0=sqrt(v0[0]*v0[0]+v0[1]*v0[1]+v0[2]*v0[2]);\n    v0[0]/=(nv0+0.000001);\n    v0[1]/=(nv0+0.000001);\n    v0[2]/=(nv0+0.000001);\n\n    float nv1=sqrt(v1[0]*v1[0]+v1[1]*v1[1]+v1[2]*v1[2]);\n    v1[0]/=(nv1+0.000001);\n    v1[1]/=(nv1+0.000001);\n    v1[2]/=(nv1+0.000001);\n\n    // 叉乘\n    float v2[3];\n    v2[0]=v0[1]*v1[2]-v0[2]*v1[1];\n    v2[1]=v0[2]*v1[0]-v0[0]*v1[2];\n    v2[2]=v0[0]*v1[1]-v0[1]*v1[0];\n\n    // 正余弦\n    float cosAng=0,sinAng=0;\n    
cosAng=v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2];\n    sinAng=sqrt(1-cosAng*cosAng);\n\n    // 计算旋转矩阵\n    RTM[0]=v2[0]*v2[0]*(1-cosAng)+cosAng;\n    RTM[4]=v2[1]*v2[1]*(1-cosAng)+cosAng;\n    RTM[8]=v2[2]*v2[2]*(1-cosAng)+cosAng;\n\n    RTM[1]=RTM[3]=v2[0]*v2[1]*(1-cosAng);\n    RTM[2]=RTM[6]=v2[0]*v2[2]*(1-cosAng);\n    RTM[5]=RTM[7]=v2[1]*v2[2]*(1-cosAng);\n\n    RTM[1]+=(v2[2])*sinAng;\n    RTM[2]+=(-v2[1])*sinAng;\n    RTM[3]+=(-v2[2])*sinAng;\n\n    RTM[5]+=(v2[0])*sinAng;\n    RTM[6]+=(v2[1])*sinAng;\n    RTM[7]+=(-v2[0])*sinAng;\n\n    return 0;\n}\n\nint CorrectPoints_cor(float *fPoints,int pointNum,float *gndPos)\n{\n    float RTM[9];\n    float gndHeight=0;\n    float znorm[3]={0,0,1};\n    float tmp[3];\n\n    GetRTMatrix_cor(RTM,gndPos,znorm);\n\n    gndHeight = RTM[2]*gndPos[3]+RTM[5]*gndPos[4]+RTM[8]*gndPos[5];\n\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        tmp[0]=RTM[0]*fPoints[pid*4]+RTM[3]*fPoints[pid*4+1]+RTM[6]*fPoints[pid*4+2];\n        tmp[1]=RTM[1]*fPoints[pid*4]+RTM[4]*fPoints[pid*4+1]+RTM[7]*fPoints[pid*4+2];\n        tmp[2]=RTM[2]*fPoints[pid*4]+RTM[5]*fPoints[pid*4+1]+RTM[8]*fPoints[pid*4+2]-gndHeight;\n\n        fPoints[pid*4]=tmp[0];\n        fPoints[pid*4+1]=tmp[1];\n        fPoints[pid*4+2]=tmp[2];\n    }\n    return 0;\n}\n\n\nint GetGndPos(float *pos, float *fPoints,int pointNum){\n    float *fPoints3=(float*)calloc(60000*4,sizeof(float));//地面点\n    int pnum3 = FilterGndForPos_cor(fPoints3,fPoints,pointNum);\n    float tmpPos[6];\n    if (pnum3 < 3)\n    {\n        std::cout << \"too few ground points!\\n\";\n    }\n    int gndnum = CalGndPos_cor(tmpPos,fPoints3,pnum3,1.0);//用法向量判断，获取到法向量 & 地面搜索点，放到tmppos\n    if(gnd_pos[5]==0){\n        memcpy(gnd_pos,tmpPos,sizeof(tmpPos));\n    }\n    else{\n\n        if(frame_count<frame_lenth_threshold&&tmpPos[5]!=0){\n            if(gndnum>0&&abs(gnd_pos[0]-tmpPos[0])<0.1&&abs(gnd_pos[1]-tmpPos[1])<0.1){//更新法向量            \n                for(int i = 0;i<6;i++){\n                    
gnd_pos[i] = (gnd_pos[i]+tmpPos[i])*0.5;\n                }\n                frame_count = 0;\n            }\n            else{\n                frame_count++;\n            }\n        }\n        else if(tmpPos[5]!=0){\n            memcpy(gnd_pos,tmpPos,sizeof(tmpPos));\n            frame_count = 0;\n        }\n    }\n   \n    memcpy(pos,gnd_pos,sizeof(float)*6);\n\n    free(fPoints3);\n    \n    return 0;\n}\n\n\n\n"
  },
  {
    "path": "LIO-Livox/src/segment/segment.cpp",
    "content": "#include \"segment/segment.hpp\"\n\n#define N_FRAME 5\n\nfloat tmp_gnd_pose[100*6];\nint tem_gnd_num = 0;\n\nPCSeg::PCSeg()\n{\n    this->posFlag=0;\n    this->pVImg=(unsigned char*)calloc(DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY*DN_SAMPLE_IMG_NZ,sizeof(unsigned char));\n    this->corPoints=NULL;\n}\nPCSeg::~PCSeg()\n{\n    if(this->pVImg!=NULL)\n    {\n        free(this->pVImg);\n    }\n    if(this->corPoints!=NULL)\n    {\n        free(this->corPoints);\n    }\n}\n\nint PCSeg::DoSeg(int *pLabel1, float* fPoints1, int pointNum)\n{\n\n    // 1 down sampling\n    float *fPoints2=(float*)calloc(pointNum*4,sizeof(float));\n    int *idtrans1=(int*)calloc(pointNum,sizeof(int));\n    int *idtrans2=(int*)calloc(pointNum,sizeof(int));\n    int pntNum=0;\n    if (this->pVImg == NULL)\n    {\n        this->pVImg=(unsigned char*)calloc(DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY*DN_SAMPLE_IMG_NZ,sizeof(unsigned char));\n    }\n    memset(pVImg,0,sizeof(unsigned char)*DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY*DN_SAMPLE_IMG_NZ);//600*200*30  \n    \n    for(int pid=0;pid<pointNum;pid++)\n    {\n        int ix=(fPoints1[pid*4]+DN_SAMPLE_IMG_OFFX)/DN_SAMPLE_IMG_DX; //0-240m -> -40-190m\n        int iy=(fPoints1[pid*4+1]+DN_SAMPLE_IMG_OFFY)/DN_SAMPLE_IMG_DY; //-40-40m\n        int iz=(fPoints1[pid*4+2]+DN_SAMPLE_IMG_OFFZ)/DN_SAMPLE_IMG_DZ;//认为地面为-1.8？ -2.5~17.5\n\n        idtrans1[pid]=-1;\n        if((ix>=0)&&(ix<DN_SAMPLE_IMG_NX)&&(iy>=0)&&(iy<DN_SAMPLE_IMG_NY)&&(iz>=0)&&(iz<DN_SAMPLE_IMG_NZ)) //DN_SAMPLE_IMG_OFFX = 0 因此只保留前半块\n        {\n            idtrans1[pid]=iz*DN_SAMPLE_IMG_NX*DN_SAMPLE_IMG_NY+iy*DN_SAMPLE_IMG_NX+ix; //记录这个点对应的索引\n            if(pVImg[idtrans1[pid]]==0)//没有访问过，肯定栅格内会有重复的，所以fPoints2只取第一个\n            {\n                fPoints2[pntNum*4]=fPoints1[pid*4];\n                fPoints2[pntNum*4+1]=fPoints1[pid*4+1];\n                fPoints2[pntNum*4+2]=fPoints1[pid*4+2];\n                fPoints2[pntNum*4+3]=fPoints1[pid*4+3];\n\n                
idtrans2[pntNum]=idtrans1[pid];\n\n                pntNum++;\n            }\n            pVImg[idtrans1[pid]]=1;\n        }\n\n    }\n\n    //先进行地面校正\n    float tmpPos[6];\n    tmpPos[0]=-0.15;\n    tmpPos[1]=0;\n    tmpPos[2]=1;\n    tmpPos[3]=0;\n    tmpPos[4]=0;\n    tmpPos[5]=-2.04;\n    GetGndPos(tmpPos,fPoints2,pntNum); //tempPos是更新后的地面搜索点 & 平均法向量 ys\n    memcpy(this->gndPos,tmpPos,6*sizeof(float));\n    \n    this->posFlag=1;//(this->posFlag+1)%SELF_CALI_FRAMES;\n\n    // 3 点云矫正\n    this->CorrectPoints(fPoints2,pntNum,this->gndPos);\n    if(this->corPoints!=NULL)\n        free(this->corPoints);\n    this->corPoints=(float*)calloc(pntNum*4,sizeof(float));\n    this->corNum=pntNum;\n    memcpy(this->corPoints,fPoints2,4*pntNum*sizeof(float));\n\n    // 4 粗略地面分割for地上分割\n    int *pLabelGnd=(int*)calloc(pntNum,sizeof(int));\n    int gnum=GndSeg(pLabelGnd,fPoints2,pntNum,1.0);\n\n    // 5 地上分割\n    int agnum = pntNum-gnum;\n    float *fPoints3=(float*)calloc(agnum*4,sizeof(float));\n    int *idtrans3=(int*)calloc(agnum,sizeof(int));\n    int *pLabel2=(int*)calloc(pntNum,sizeof(int));\n    int agcnt=0;//非地面点数量\n    int gcnt=0;//地面点数量\n    for(int ii=0;ii<pntNum;ii++)\n    {\n        if(pLabelGnd[ii]==0) //上一步地面标记为1，非地面标记为0\n        {\n            fPoints3[agcnt*4]=fPoints2[ii*4];\n            fPoints3[agcnt*4+1]=fPoints2[ii*4+1];\n            fPoints3[agcnt*4+2]=fPoints2[ii*4+2];\n            pLabel2[ii] = 2;\n            idtrans3[agcnt]=ii; //记录fp3在fp2里面的位置\n            agcnt++;\n        }\n        else\n        {\n            pLabel2[ii] = 0; //地面为1\n            gcnt++;\n        }\n    }\n    // std::cout << \"pointNum = \" << pointNum << \" gcnt = \" << gcnt << \" gnum = \" << gnum << std::endl;\n    int *pLabelAg=(int*)calloc(agnum,sizeof(int));\n    if (agnum != 0)\n    {\n        AbvGndSeg(pLabelAg,fPoints3,agnum);  \n    }\n    else\n    {\n        std::cout << \"0 above ground points!\\n\";\n    }\n\n  \n    for(int ii=0;ii<agcnt;ii++)\n    {   \n        
if (pLabelAg[ii] >= 10)//前景为0 背景为1 物体分类后 >=10\n            pLabel2[idtrans3[ii]] = 200;//pLabelAg[ii]; //前景\n        else if (pLabelAg[ii] > 0)\n            pLabel2[idtrans3[ii]] = 1; //背景1 -> 0\n        else\n        {\n            pLabel2[idtrans3[ii]] = -1;\n        }\n        \n    }\n     \n    for(int pid=0;pid<pntNum;pid++)\n    {\n        pVImg[idtrans2[pid]] = pLabel2[pid];\n    }\n\n    for(int pid = 0; pid < pointNum; pid++)\n    {\n        if(idtrans1[pid]>=0)\n        {\n            pLabel1[pid]= pVImg[idtrans1[pid]];\n        }\n        else\n        {\n            pLabel1[pid] = -1;//未分类\n        }\n    }\n\n    free(fPoints2);\n    free(idtrans1);\n    free(idtrans2);\n    free(idtrans3);\n    free(fPoints3);\n    \n    free(pLabelAg);\n    free(pLabelGnd);\n\n\n    free(pLabel2);\n}\n\nint PCSeg::GetMainVectors(float*fPoints, int* pLabel, int pointNum)\n{\n    float *pClusterPoints=(float*)calloc(4*pointNum,sizeof(float));\n    for(int ii=0;ii<256;ii++)\n    {\n        this->pnum[ii]=0;\n\tthis->objClass[ii]=-1;\n    }\n    this->clusterNum=0;\n\n    int clabel=10;\n    for(int ii=10;ii<256;ii++)\n    {\n        int pnum=0;\n        for(int pid=0;pid<pointNum;pid++)\n        {\n            if(pLabel[pid]==ii)\n            {\n                pClusterPoints[pnum*4]=fPoints[4*pid];\n                pClusterPoints[pnum*4+1]=fPoints[4*pid+1];\n                pClusterPoints[pnum*4+2]=fPoints[4*pid+2];\n                pnum++;\n            }\n        }\n\n        if(pnum>0)\n        {\n            SClusterFeature cf = CalOBB(pClusterPoints,pnum);\n\n            if(cf.cls<1)\n            {\n                this->pVectors[clabel*3]=cf.d0[0];\n                this->pVectors[clabel*3+1]=cf.d0[1];\n                this->pVectors[clabel*3+2]=cf.d0[2];//朝向向量\n\n                this->pCenters[clabel*3]=cf.center[0];\n                this->pCenters[clabel*3+1]=cf.center[1];\n                this->pCenters[clabel*3+2]=cf.center[2];\n\n                
this->pnum[clabel]=cf.pnum;\n                for(int jj=0;jj<8;jj++)\n                    this->pOBBs[clabel*8+jj]=cf.obb[jj];\n\n                this->objClass[clabel]=cf.cls;//0 or 1 背景\n\n                this->zs[clabel]=cf.zmax;//-cf.zmin;\n\n                if(clabel!=ii)\n                {\n                    for(int pid=0;pid<pointNum;pid++)\n                    {\n                        if(pLabel[pid]==ii)\n                        {\n                            pLabel[pid]=clabel;\n                        }\n                    }\n                }\n\n                this->clusterNum++;\n                clabel++;\n            }\n            else\n            {\n                for(int pid=0;pid<pointNum;pid++)\n                {\n                    if(pLabel[pid]==ii)\n                    {\n                        pLabel[pid]=1;\n                    }\n                }\n            }\n\n\n        }\n    }\n\n    free(pClusterPoints);\n\n    return 0;\n}\n\nint PCSeg::EncodeFeatures(float *pFeas)\n{\n    int nLen=1+256+256+256*4;\n    // pFeas:(1+256*6)*3\n    // 0        3 ~ 3+356*3 = 257*3        275*3 ~ 257*3+256*3             3+256*6 ~ 3+256*6 + 256*4*3+12+1 \n    // number    center                  每个障碍物的点数、类别、zmax           OBB 12个点\n    // clusterNum\n    pFeas[0]=this->clusterNum;\n\n    memcpy(pFeas+3,this->pCenters,sizeof(float)*3*256);//重心\n\n    for(int ii=0;ii<256;ii++)//最大容纳256个障碍物\n    {\n        pFeas[257*3+ii*3]=this->pnum[ii];\n        pFeas[257*3+ii*3+1]=this->objClass[ii];\n        pFeas[257*3+ii*3+2]=this->zs[ii];\n    }\n\n    for(int ii=0;ii<256;ii++)\n    {\n        for(int jj=0;jj<4;jj++)\n        {\n            pFeas[257*3+256*3+(ii*4+jj)*3]=this->pOBBs[ii*8+jj*2];\n            pFeas[257*3+256*3+(ii*4+jj)*3+1]=this->pOBBs[ii*8+jj*2+1];\n        }\n    }\n\n\n}\nint FilterGndForPos(float* outPoints,float*inPoints,int inNum)\n{\n    int outNum=0;\n    float dx=2;\n    float dy=2;\n    int x_len = 40;\n    int y_len = 10;\n    
int nx=x_len/dx;\n    int ny=2*y_len/dy;\n    float offx=0,offy=-10;\n    float THR=0.2;\n    \n\n    float *imgMinZ=(float*)calloc(nx*ny,sizeof(float));\n    float *imgMaxZ=(float*)calloc(nx*ny,sizeof(float));\n    float *imgSumZ=(float*)calloc(nx*ny,sizeof(float));\n    float *imgMeanZ=(float*)calloc(nx*ny,sizeof(float));\n    int *imgNumZ=(int*)calloc(nx*ny,sizeof(int));\n    int *idtemp = (int*)calloc(inNum,sizeof(int));\n    for(int ii=0;ii<nx*ny;ii++)\n    {\n        imgMinZ[ii]=10;\n        imgMaxZ[ii]=-10;\n        imgSumZ[ii]=0;\n        imgNumZ[ii]=0;\n    }\n\n    for(int pid=0;pid<inNum;pid++)\n    {\n        idtemp[pid] = -1;\n        if((inPoints[pid*4]<x_len)&&(inPoints[pid*4+1]>-y_len)&&(inPoints[pid*4+1]<y_len))\n        {\n            int idx=(inPoints[pid*4]-offx)/dx;\n            int idy=(inPoints[pid*4+1]-offy)/dy;\n            idtemp[pid] = idx+idy*nx;\n            imgSumZ[idx+idy*nx] += inPoints[pid*4+2];\n            imgNumZ[idx+idy*nx] +=1;\n            if(inPoints[pid*4+2]<imgMinZ[idx+idy*nx])//寻找最小点，感觉不对。最小点有误差。\n            {\n                imgMinZ[idx+idy*nx]=inPoints[pid*4+2];\n            }\n            if(inPoints[pid*4+2]>imgMaxZ[idx+idy*nx]){\n                imgMaxZ[idx+idy*nx]=inPoints[pid*4+2];\n            }\n        }\n    }\n    for(int pid=0;pid<inNum;pid++)\n    {\n        if(idtemp[pid]>0){\n            imgMeanZ[idtemp[pid]] = float(imgSumZ[idtemp[pid]]/imgNumZ[idtemp[pid]]);\n            if((imgMaxZ[idtemp[pid]]-imgMinZ[idtemp[pid]])<THR && imgMeanZ[idtemp[pid]]<-1.5&&imgNumZ[idtemp[pid]]>3){\n                outPoints[outNum*4]=inPoints[pid*4];\n                outPoints[outNum*4+1]=inPoints[pid*4+1];\n                outPoints[outNum*4+2]=inPoints[pid*4+2];\n                outPoints[outNum*4+3]=inPoints[pid*4+3];\n                outNum++;\n            }\n        }\n    }\n    free(imgMinZ);\n    free(imgMaxZ);\n    free(imgSumZ);\n    free(imgMeanZ);\n    free(imgNumZ);\n    free(idtemp);\n    return 
outNum;\n}\n\nint CalNomarls(float *nvects, float *fPoints,int pointNum,float fSearchRadius)\n{\n    // 转换点云到pcl的格式\n    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        cloud->points[pid].x=fPoints[pid*4];\n        cloud->points[pid].y=fPoints[pid*4+1];\n        cloud->points[pid].z=fPoints[pid*4+2];\n    }\n\n    //调用pcl的kdtree生成方法\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n\n    //遍历每个点，获得候选地面点\n    int nNum=0;\n    unsigned char* pLabel = (unsigned char*)calloc(pointNum,sizeof(unsigned char));\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if ((nNum<10)&&(pLabel[pid]==0))\n        {\n            SNeiborPCA npca;\n            pcl::PointXYZ searchPoint;\n            searchPoint.x=cloud->points[pid].x;\n            searchPoint.y=cloud->points[pid].y;\n            searchPoint.z=cloud->points[pid].z;\n\n            if(GetNeiborPCA(npca,cloud,kdtree,searchPoint,fSearchRadius)>0)\n            {\n                for(int ii=0;ii<npca.neibors.size();ii++)\n                {\n                    pLabel[npca.neibors[ii]]=1;\n                }\n\n                // 地面向量筛选\n                if((npca.eigenValuesPCA[1]>0.4) && (npca.eigenValuesPCA[0]<0.01))\n                {\n                    if((npca.eigenVectorsPCA(2,0)>0.9)|(npca.eigenVectorsPCA(2,0)<-0.9))\n                    {\n                        if(npca.eigenVectorsPCA(2,0)>0)\n                        {\n                            nvects[nNum*6]=npca.eigenVectorsPCA(0,0);\n                            nvects[nNum*6+1]=npca.eigenVectorsPCA(1,0);\n                            nvects[nNum*6+2]=npca.eigenVectorsPCA(2,0);\n\n                            nvects[nNum*6+3]=searchPoint.x;\n                            nvects[nNum*6+4]=searchPoint.y;\n                          
  nvects[nNum*6+5]=searchPoint.z;\n                        }\n                        else\n                        {\n                            nvects[nNum*6]=-npca.eigenVectorsPCA(0,0);\n                            nvects[nNum*6+1]=-npca.eigenVectorsPCA(1,0);\n                            nvects[nNum*6+2]=-npca.eigenVectorsPCA(2,0);\n\n                            nvects[nNum*6+3]=searchPoint.x;\n                            nvects[nNum*6+4]=searchPoint.y;\n                            nvects[nNum*6+5]=searchPoint.z;\n                        }\n                        nNum++;\n                    }\n                }\n            }\n        }\n    }\n\n    free(pLabel);\n\n\n    return nNum;\n}\nint CalGndPos(float *gnd, float *fPoints,int pointNum,float fSearchRadius)\n{\n    // 初始化gnd\n    //float gnd[6]={0};//(vx,vy,vz,x,y,z)\n    for(int ii=0;ii<6;ii++)\n    {\n        gnd[ii]=0;\n    }\n    // 转换点云到pcl的格式\n    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    //去除异常点\n    float height = 0;\n    for(int cur = 0;cur<pointNum;cur++){\n        height+=fPoints[cur*4+2];\n    }\n    float height_mean = height/(pointNum+0.000000001);\n\n    for(int cur = 0;cur<pointNum;cur++){\n        if(fPoints[cur*4+2]>height_mean+0.5||fPoints[cur*4+2]<height_mean-0.5){\n            pointNum--;\n        }\n    }\n\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        if(fPoints[pid*4+2]<=height_mean+0.5&&fPoints[pid*4+2]>=height_mean-0.5){\n            cloud->points[pid].x=fPoints[pid*4];\n            cloud->points[pid].y=fPoints[pid*4+1];\n            cloud->points[pid].z=fPoints[pid*4+2];\n        }\n    }\n\n    //调用pcl的kdtree生成方法\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n\n    //遍历每个点，获得候选地面点\n    // float *nvects=(float*)calloc(1000*6,sizeof(float));//最多支持1000个点\n    int nNum=0;\n    
unsigned char* pLabel = (unsigned char*)calloc(pointNum,sizeof(unsigned char));\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if ((nNum<1000)&&(pLabel[pid]==0))\n        {\n            SNeiborPCA npca;\n            pcl::PointXYZ searchPoint;\n            searchPoint.x=cloud->points[pid].x;\n            searchPoint.y=cloud->points[pid].y;\n            searchPoint.z=cloud->points[pid].z;\n\n            if(GetNeiborPCA(npca,cloud,kdtree,searchPoint,fSearchRadius)>0)\n            {\n                for(int ii=0;ii<npca.neibors.size();ii++)\n                {\n                    pLabel[npca.neibors[ii]]=1;\n                }\n                // 地面向量筛选\n                if((npca.eigenValuesPCA[1]>0.2) && (npca.eigenValuesPCA[0]<0.1) && searchPoint.x>29.9 && searchPoint.x<40)//&&searchPoint.x>19.9\n                {\n                    if((npca.eigenVectorsPCA(2,0)>0.9)|(npca.eigenVectorsPCA(2,0)<-0.9))\n                    {\n                        if(npca.eigenVectorsPCA(2,0)>0)\n                        {\n                            gnd[0]+=npca.eigenVectorsPCA(0,0);\n                            gnd[1]+=npca.eigenVectorsPCA(1,0);\n                            gnd[2]+=npca.eigenVectorsPCA(2,0);\n\n                            gnd[3]+=searchPoint.x;\n                            gnd[4]+=searchPoint.y;\n                            gnd[5]+=searchPoint.z;\n                        }\n                        else\n                        {\n                            gnd[0]+=-npca.eigenVectorsPCA(0,0);\n                            gnd[1]+=-npca.eigenVectorsPCA(1,0);\n                            gnd[2]+=-npca.eigenVectorsPCA(2,0);\n\n                            gnd[3]+=searchPoint.x;\n                            gnd[4]+=searchPoint.y;\n                            gnd[5]+=searchPoint.z;\n                        }\n                        nNum++;\n                    }\n                }\n            }\n        }\n    }\n\n    free(pLabel);\n\n    // 估计地面的高度和法相量\n    
if(nNum>0)\n    {\n        for(int ii=0;ii<6;ii++)\n        {\n            gnd[ii]/=nNum;\n        }\n        // gnd[0] *=5.0;\n    }\n    return nNum;\n}\n\nint GetNeiborPCA(SNeiborPCA &npca, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> kdtree, pcl::PointXYZ searchPoint, float fSearchRadius)\n{\n    std::vector<float> k_dis;\n    pcl::PointCloud<pcl::PointXYZ>::Ptr subCloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    if(kdtree.radiusSearch(searchPoint,fSearchRadius,npca.neibors,k_dis)>5)\n    {\n        subCloud->width=npca.neibors.size();\n        subCloud->height=1;\n        subCloud->points.resize(subCloud->width*subCloud->height);\n\n        for (int pid=0;pid<subCloud->points.size();pid++)\n        {\n            subCloud->points[pid].x=cloud->points[npca.neibors[pid]].x;\n            subCloud->points[pid].y=cloud->points[npca.neibors[pid]].y;\n            subCloud->points[pid].z=cloud->points[npca.neibors[pid]].z;\n\n        }\n\n        Eigen::Vector4f pcaCentroid;\n    \tpcl::compute3DCentroid(*subCloud, pcaCentroid);\n\t    Eigen::Matrix3f covariance;\n\t    pcl::computeCovarianceMatrixNormalized(*subCloud, pcaCentroid, covariance);\n\t    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> eigen_solver(covariance, Eigen::ComputeEigenvectors);\n\t    npca.eigenVectorsPCA = eigen_solver.eigenvectors();\n\t    npca.eigenValuesPCA = eigen_solver.eigenvalues();\n        float vsum=npca.eigenValuesPCA(0)+npca.eigenValuesPCA(1)+npca.eigenValuesPCA(2);\n        npca.eigenValuesPCA(0)=npca.eigenValuesPCA(0)/(vsum+0.000001);\n        npca.eigenValuesPCA(1)=npca.eigenValuesPCA(1)/(vsum+0.000001);\n        npca.eigenValuesPCA(2)=npca.eigenValuesPCA(2)/(vsum+0.000001);\n    }\n    else\n    {\n        npca.neibors.clear();\n    }\n\n\n    return npca.neibors.size();\n}\n\nint GetRTMatrix(float *RTM, float *v0, float *v1) // v0 gndpos v1:001垂直向上\n{\n    // 归一化\n    float nv0=sqrt(v0[0]*v0[0]+v0[1]*v0[1]+v0[2]*v0[2]);\n    
v0[0]/=(nv0+0.000001);\n    v0[1]/=(nv0+0.000001);\n    v0[2]/=(nv0+0.000001);\n\n    float nv1=sqrt(v1[0]*v1[0]+v1[1]*v1[1]+v1[2]*v1[2]);\n    v1[0]/=(nv1+0.000001);\n    v1[1]/=(nv1+0.000001);\n    v1[2]/=(nv1+0.000001);\n\n    // 叉乘\n    //  ^ v2  ^ v0\n    //  |   /\n    //  |  /\n    //  |-----> v1\n    //\n    float v2[3]; // 叉乘的向量代表了v0旋转到v1的旋转轴，因为叉乘结果垂直于v0、v1构成的平面\n    v2[0]=v0[1]*v1[2]-v0[2]*v1[1];\n    v2[1]=v0[2]*v1[0]-v0[0]*v1[2];\n    v2[2]=v0[0]*v1[1]-v0[1]*v1[0];\n\n    // 正余弦\n    float cosAng=0,sinAng=0;\n    cosAng=v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2]; //a.b = |a||b|cos theta\n    sinAng=sqrt(1-cosAng*cosAng);\n\n    // 计算旋转矩阵\n    RTM[0]=v2[0]*v2[0]*(1-cosAng)+cosAng;\n    RTM[4]=v2[1]*v2[1]*(1-cosAng)+cosAng;\n    RTM[8]=v2[2]*v2[2]*(1-cosAng)+cosAng;\n\n    RTM[1]=RTM[3]=v2[0]*v2[1]*(1-cosAng);\n    RTM[2]=RTM[6]=v2[0]*v2[2]*(1-cosAng);\n    RTM[5]=RTM[7]=v2[1]*v2[2]*(1-cosAng);\n\n    RTM[1]+=(v2[2])*sinAng;\n    RTM[2]+=(-v2[1])*sinAng;\n    RTM[3]+=(-v2[2])*sinAng;\n\n    RTM[5]+=(v2[0])*sinAng;\n    RTM[6]+=(v2[1])*sinAng;\n    RTM[7]+=(-v2[0])*sinAng;\n\n    return 0;\n}\n\nint PCSeg::CorrectPoints(float *fPoints,int pointNum,float *gndPos)\n{\n    float RTM[9];\n    float gndHeight=0;\n    float znorm[3]={0,0,1};\n    float tmp[3];\n\n    GetRTMatrix(RTM,gndPos,znorm); // RTM 由当前地面法向量gnd，将平面转到以(0,0,1)为法向量的平面 sy\n\n    gndHeight = RTM[2]*gndPos[3]+RTM[5]*gndPos[4]+RTM[8]*gndPos[5];\n\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        tmp[0]=RTM[0]*fPoints[pid*4]+RTM[3]*fPoints[pid*4+1]+RTM[6]*fPoints[pid*4+2];\n        tmp[1]=RTM[1]*fPoints[pid*4]+RTM[4]*fPoints[pid*4+1]+RTM[7]*fPoints[pid*4+2];\n        tmp[2]=RTM[2]*fPoints[pid*4]+RTM[5]*fPoints[pid*4+1]+RTM[8]*fPoints[pid*4+2]-gndHeight; //将地面高度变换到0 sy\n\n        fPoints[pid*4]=tmp[0];\n        fPoints[pid*4+1]=tmp[1];\n        fPoints[pid*4+2]=tmp[2];\n    }\n\n    return 0;\n}\n\nint AbvGndSeg(int *pLabel, float *fPoints, int pointNum)\n{\n    // 转换点云到pcl的格式\n    
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        cloud->points[pid].x=fPoints[pid*4];\n        cloud->points[pid].y=fPoints[pid*4+1];\n        cloud->points[pid].z=fPoints[pid*4+2];\n    }\n\n\n    //调用pcl的kdtree生成方法\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n\n    SegBG(pLabel,cloud,kdtree,0.5); //背景label=1 前景0\n\n    SegObjects(pLabel,cloud,kdtree,0.7);\n\n    FreeSeg(fPoints,pLabel,pointNum);\n    return 0;\n}\n\nint SegBG(int *pLabel, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius)\n{\n    //从较高的一些点开始从上往下增长，这样能找到一些类似树木、建筑物这样的高大背景\n    // clock_t t0,t1,tsum=0;\n    int pnum=cloud->points.size();\n    pcl::PointXYZ searchPoint;\n\n    // 初始化种子点\n    std::vector<int> seeds;\n    for (int pid=0;pid<pnum;pid++)\n    {\n        if(cloud->points[pid].z>4)\n        {\n            pLabel[pid]=1;\n            if (cloud->points[pid].z<6)\n            {\n                seeds.push_back(pid);\n            }\n        }\n        else\n        {\n            pLabel[pid]=0;\n        }\n\n    }\n\n    // 区域增长\n    while(seeds.size()>0)\n    {\n        int sid = seeds[seeds.size()-1];\n        seeds.pop_back();\n\n        std::vector<float> k_dis;\n        std::vector<int> k_inds;\n        // t0=clock();\n        if (cloud->points[sid].x<44.8)\n            kdtree.radiusSearch(sid,fSearchRadius,k_inds,k_dis);\n        else\n            kdtree.radiusSearch(sid,1.5*fSearchRadius,k_inds,k_dis);\n\n        for(int ii=0;ii<k_inds.size();ii++)\n        {\n            if(pLabel[k_inds[ii]]==0)\n            {\n                pLabel[k_inds[ii]]=1;\n                if(cloud->points[k_inds[ii]].z>0.2)//地面60cm以下不参与背景分割，以防止错误的地面点导致过度分割\n                {\n                    
seeds.push_back(k_inds[ii]);\n                }\n            }\n        }\n\n    }\n    return 0;\n\n}\n\nSClusterFeature FindACluster(int *pLabel, int seedId, int labelId, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius, float thrHeight)\n{\n    // 初始化种子\n    std::vector<int> seeds;\n    seeds.push_back(seedId);\n    pLabel[seedId]=labelId;\n    // int cnum=1;//当前簇里的点数\n\n    SClusterFeature cf;\n    cf.pnum=1;\n    cf.xmax=-2000;\n    cf.xmin=2000;\n    cf.ymax=-2000;\n    cf.ymin=2000;\n    cf.zmax=-2000;\n    cf.zmin=2000;\n    cf.zmean=0;\n\n    // 区域增长\n    while(seeds.size()>0) //实际上是种子点找了下k近邻，然后k近邻再纳入种子点 sy\n    {\n        int sid=seeds[seeds.size()-1];\n        seeds.pop_back();\n\n        pcl::PointXYZ searchPoint;\n        searchPoint.x=cloud->points[sid].x;\n        searchPoint.y=cloud->points[sid].y;\n        searchPoint.z=cloud->points[sid].z;\n\n        // 特征统计\n        {\n            if(searchPoint.x>cf.xmax) cf.xmax=searchPoint.x;\n            if(searchPoint.x<cf.xmin) cf.xmin=searchPoint.x;\n\n            if(searchPoint.y>cf.ymax) cf.ymax=searchPoint.y;\n            if(searchPoint.y<cf.ymin) cf.ymin=searchPoint.y;\n\n            if(searchPoint.z>cf.zmax) cf.zmax=searchPoint.z;\n            if(searchPoint.z<cf.zmin) cf.zmin=searchPoint.z;\n\n            cf.zmean+=searchPoint.z;\n        }\n\n        std::vector<float> k_dis;\n        std::vector<int> k_inds;\n\n        if(searchPoint.x<44.8)\n            kdtree.radiusSearch(searchPoint,fSearchRadius,k_inds,k_dis);\n        else\n            kdtree.radiusSearch(searchPoint,2*fSearchRadius,k_inds,k_dis);\n\n        for(int ii=0;ii<k_inds.size();ii++)\n        {\n            if(pLabel[k_inds[ii]]==0)\n            {\n                pLabel[k_inds[ii]]=labelId;\n                // cnum++;\n                cf.pnum++;\n                if(cloud->points[k_inds[ii]].z>thrHeight)//地面60cm以下不参与分割\n                {\n                    
seeds.push_back(k_inds[ii]);\n                }\n            }\n        }\n    }\n    cf.zmean/=(cf.pnum+0.000001);\n    return cf;\n}\n\nint SegObjects(int *pLabel, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius)\n{\n    int pnum=cloud->points.size();\n    int labelId=10;//object编号从10开始\n\n    //遍历每个非背景点，若高度大于1则寻找一个簇，并给一个编号(>10)\n    for(int pid=0;pid<pnum;pid++)\n    {\n        if(pLabel[pid]==0)\n        {\n            if(cloud->points[pid].z>0.4)//高度阈值\n            {\n                SClusterFeature cf = FindACluster(pLabel,pid,labelId,cloud,kdtree,fSearchRadius,0);\n                int isBg=0;\n\n                // cluster 分类\n                float dx=cf.xmax-cf.xmin;\n                float dy=cf.ymax-cf.ymin;\n                float dz=cf.zmax-cf.zmin;\n                float cx=10000;\n                for(int ii=0;ii<pnum;ii++)\n                {\n                    if (cx>cloud->points[pid].x)\n                    {\n                        cx=cloud->points[pid].x;\n                    }\n                }\n\n                if((dx>15)||(dy>15)||((dx>10)&&(dy>10)))// 太大\n                {\n                    isBg=2;\n                }\n                else if(((dx>6)||(dy>6))&&(cf.zmean < 1.5))//长而过低\n                {\n                    isBg = 3;//1;//2;\n                }\n                else if(((dx<1.5)&&(dy<1.5))&&(cf.zmax>2.5))//小而过高\n                {\n                    isBg = 4;\n                }\n                else if(cf.pnum<5 || (cf.pnum<10 && cx<50)) //点太少\n                {\n                    isBg=5;\n                }\n                else if((cf.zmean>3)||(cf.zmean<0.3))//太高或太低\n                {\n                    isBg = 6;//1;\n                }\n\n                if(isBg>0)\n                {\n                    for(int ii=0;ii<pnum;ii++)\n                    {\n                        if(pLabel[ii]==labelId)\n                        {\n                            
pLabel[ii]=isBg;\n                        }\n                    }\n                }\n                else\n\t\t        {\n                    labelId++;\n                }\n            }\n        }\n    }\n    return labelId-10;\n}\n\nint CompleteObjects(int *pLabel, pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, pcl::KdTreeFLANN<pcl::PointXYZ> &kdtree, float fSearchRadius)\n{\n    int pnum=cloud->points.size();\n    int *tmpLabel = (int*)calloc(pnum,sizeof(int));\n    for(int pid=0;pid<pnum;pid++)\n    {\n        if(pLabel[pid]!=2)\n        {\n            tmpLabel[pid]=1;\n        }\n    }\n\n    //遍历每个非背景点，若高度大于1则寻找一个簇，并给一个编号(>10)\n    for(int pid=0;pid<pnum;pid++)\n    {\n        if(pLabel[pid]>=10)\n        {\n            FindACluster(tmpLabel,pid,pLabel[pid],cloud,kdtree,fSearchRadius,0);\n        }\n    }\n    for(int pid=0;pid<pnum;pid++)\n    {\n        if((pLabel[pid]==2)&&(tmpLabel[pid]>=10))\n        {\n            pLabel[pid]=tmpLabel[pid];\n        }\n    }\n\n    free(tmpLabel);\n\n    return 0;\n}\nint ExpandObjects(int *pLabel, float* fPoints, int pointNum, float fSearchRadius)\n{\n    int *tmpLabel = (int*)calloc(pointNum,sizeof(int));\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if(pLabel[pid]<10)\n        {\n            tmpLabel[pid]=1;\n        }\n    }\n\n    // 转换点云到pcl的格式\n    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        cloud->points[pid].x=fPoints[pid*4];\n        cloud->points[pid].y=fPoints[pid*4+1];\n        cloud->points[pid].z=fPoints[pid*4+2];\n    }\n\n\n    //调用pcl的kdtree生成方法\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n    std::vector<float> k_dis;\n    std::vector<int> k_inds;\n\n    for(int pid=0;pid<cloud->points.size();pid++)\n    {\n        
if((pLabel[pid]>=10)&&(tmpLabel[pid]==0))\n        {\n            kdtree.radiusSearch(pid,fSearchRadius,k_inds,k_dis);\n\n            for(int ii=0;ii<k_inds.size();ii++)\n            {\n                if((pLabel[k_inds[ii]]<10)&&(cloud->points[k_inds[ii]].z>0.2))\n                {\n                    pLabel[k_inds[ii]]=pLabel[pid];\n                }\n            }\n        }\n    }\n\n    free(tmpLabel);\n\n    return 0;\n}\nint ExpandBG(int *pLabel, float* fPoints, int pointNum, float fSearchRadius)\n{\n    int *tmpLabel = (int*)calloc(pointNum,sizeof(int));\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if((pLabel[pid]>0)&&(pLabel[pid]<10))\n        {\n            tmpLabel[pid]=1;\n        }\n    }\n\n    // 转换点云到pcl的格式\n    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        cloud->points[pid].x=fPoints[pid*4];\n        cloud->points[pid].y=fPoints[pid*4+1];\n        cloud->points[pid].z=fPoints[pid*4+2];\n    }\n\n\n    //调用pcl的kdtree生成方法\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n    std::vector<float> k_dis;\n    std::vector<int> k_inds;\n\n    for(int pid=0;pid<cloud->points.size();pid++)\n    {\n        if((tmpLabel[pid]==1))\n        {\n            kdtree.radiusSearch(pid,fSearchRadius,k_inds,k_dis);\n\n            for(int ii=0;ii<k_inds.size();ii++)\n            {\n                if((pLabel[k_inds[ii]]==0)&&(cloud->points[k_inds[ii]].z>0.4))\n                {\n                    pLabel[k_inds[ii]]=pLabel[pid];\n                }\n            }\n        }\n    }\n\n    free(tmpLabel);\n\n    return 0;\n}\n\nint CalFreeRegion(float *pFreeDis, float *fPoints,int *pLabel,int pointNum)\n{\n    int da=FREE_DELTA_ANG;\n    int thetaId;\n    float dis;\n\n    for(int ii=0;ii<FREE_ANG_NUM;ii++)\n    {\n        
pFreeDis[ii]=20000;\n    }\n\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if((pLabel[pid]==1)&&(fPoints[pid*4+2]<4.5))\n        {\n            dis=fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1];\n            thetaId=((atan2f(fPoints[pid*4+1],fPoints[pid*4])+FREE_PI)/FREE_DELTA_ANG);\n            thetaId=thetaId%FREE_ANG_NUM;\n\n            if(pFreeDis[thetaId]>dis)\n            {\n                pFreeDis[thetaId]=dis;\n            }\n        }\n    }\n\n    return 0;\n}\n\nint FreeSeg(float *fPoints,int *pLabel,int pointNum)\n{\n    float *pFreeDis = (float*)calloc(FREE_ANG_NUM,sizeof(float));\n    int thetaId;\n    float dis;\n\n    CalFreeRegion(pFreeDis,fPoints,pLabel,pointNum);\n\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        //! 需要考虑的是，被前景遮住的背景和被背景点遮住的前景，那个需要处理？\n        {\n            dis=fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1];\n            thetaId=((atan2f(fPoints[pid*4+1],fPoints[pid*4])+FREE_PI)/FREE_DELTA_ANG);\n            thetaId=thetaId%FREE_ANG_NUM;\n\n            if(pFreeDis[thetaId]<dis) //表示在背后\n            {\n                pLabel[pid]=1;\n            }\n        }\n    }\n    if (pFreeDis != NULL)\n        free(pFreeDis);\n}\n\n\nint GndSeg(int* pLabel,float *fPoints,int pointNum,float fSearchRadius)\n{\n    int gnum=0;\n\n    // 根据BV最低点来缩小地面点搜索范围----2\n    float *pGndImg1 = (float*)calloc(GND_IMG_NX1*GND_IMG_NY1,sizeof(float));\n    int *tmpLabel1 = (int*)calloc(pointNum,sizeof(int));\n    \n    for(int ii=0;ii<GND_IMG_NX1*GND_IMG_NY1;ii++)\n    {\n        pGndImg1[ii]=100;\n    }\n    for(int pid=0;pid<pointNum;pid++)//求最低点图像\n    {\n        int ix= (fPoints[pid*4]+GND_IMG_OFFX1)/(GND_IMG_DX1+0.000001);\n        int iy= (fPoints[pid*4+1]+GND_IMG_OFFY1)/(GND_IMG_DY1+0.000001);\n        if(ix<0 || ix>=GND_IMG_NX1 || iy<0 || iy>=GND_IMG_NY1)\n        {\n            tmpLabel1[pid]=-1;\n            continue;\n        }\n\n        int iid=ix+iy*GND_IMG_NX1;\n        tmpLabel1[pid]=iid;\n\n  
      if(pGndImg1[iid]>fPoints[pid*4+2])//找最小高度\n        {\n            pGndImg1[iid]=fPoints[pid*4+2];\n        }\n\n    }\n\n    int pnum=0;\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if(tmpLabel1[pid]>=0)\n        {\n            if(pGndImg1[tmpLabel1[pid]]+0.5>fPoints[pid*4+2])// 相对高度限制 即pid所在点对应的格子里面高度不超过0.5，标记为1\n            {\n                {pLabel[pid]=1;\n                pnum++;}\n            }\n        }\n    }\n\n    free(pGndImg1);\n    free(tmpLabel1);\n\n\n    // 绝对高度限制\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if(pLabel[pid]==1)\n        {\n            if(fPoints[pid*4+2]>1)//for all cases 即使这个点所在格子高度差小于0.5，但绝对高度大于1，那也不行\n            {\n                pLabel[pid]=0;\n            }\n            else if(fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1]<225)// 10m内，强制要求高度小于0.5\n            {\n                if(fPoints[pid*4+2]>0.5)\n                {\n                    pLabel[pid]=0;\n\n                }\n            }\n\n        }\n        else\n        {\n            if(fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1]<400)// 20m内，0.2以下强制设为地面\n            {\n                if(fPoints[pid*4+2]<0.2)\n                {\n                    pLabel[pid]=1;\n                }\n            }\n        }\n\n    }\n\n    // 平面拟合与剔除\n    float zMean=0;\n    gnum=0;\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if(pLabel[pid]==1 && fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1]<400)\n        {\n            zMean+=fPoints[pid*4+2];\n            gnum++;\n        }\n    }\n    zMean/=(gnum+0.0001);\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if(pLabel[pid]==1)\n        {\n            if(fPoints[pid*4]*fPoints[pid*4]+fPoints[pid*4+1]*fPoints[pid*4+1]<400 && fPoints[pid*4+2]>zMean+0.1)\n                pLabel[pid]=0;\n        }\n    }\n\n\n    // 个数统计\n    gnum=0;\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        if(pLabel[pid]==1)\n        {\n            gnum++;\n     
   }\n    }\n\n    return gnum;\n}\n\nSClusterFeature CalBBox(float *fPoints,int pointNum)\n{\n    SClusterFeature cf;\n     // 转换点云到pcl的格式\n    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n    cloud->width=pointNum;\n    cloud->height=1;\n    cloud->points.resize(cloud->width*cloud->height);\n\n    for (int pid=0;pid<cloud->points.size();pid++)\n    {\n        cloud->points[pid].x=fPoints[pid*4];\n        cloud->points[pid].y=fPoints[pid*4+1];\n        cloud->points[pid].z=0;//fPoints[pid*4+2];\n    }\n\n    //调用pcl的kdtree生成方法\n    pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;\n    kdtree.setInputCloud (cloud);\n\n    //计算特征向量和特征值\n    Eigen::Vector4f pcaCentroid;\n    pcl::compute3DCentroid(*cloud, pcaCentroid);\n    Eigen::Matrix3f covariance;\n    pcl::computeCovarianceMatrixNormalized(*cloud, pcaCentroid, covariance);\n    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> eigen_solver(covariance, Eigen::ComputeEigenvectors);\n\n    Eigen::Matrix3f eigenVectorsPCA = eigen_solver.eigenvectors();\n    Eigen::Vector3f eigenValuesPCA = eigen_solver.eigenvalues();\n    float vsum = eigenValuesPCA(0)+eigenValuesPCA(1)+eigenValuesPCA(2);\n    eigenValuesPCA(0) = eigenValuesPCA(0)/(vsum+0.000001);\n    eigenValuesPCA(1) = eigenValuesPCA(1)/(vsum+0.000001);\n    eigenValuesPCA(2) = eigenValuesPCA(2)/(vsum+0.000001);\n\n    cf.d0[0]=eigenVectorsPCA(0,2);\n    cf.d0[1]=eigenVectorsPCA(1,2);\n    cf.d0[2]=eigenVectorsPCA(2,2);\n\n    cf.d1[0]=eigenVectorsPCA(0,1);\n    cf.d1[1]=eigenVectorsPCA(1,1);\n    cf.d1[2]=eigenVectorsPCA(2,1);\n\n    cf.center[0]=pcaCentroid(0);\n    cf.center[1]=pcaCentroid(1);\n    cf.center[2]=pcaCentroid(2);\n\n    cf.pnum=pointNum;\n\n    return cf;\n}\n\nSClusterFeature CalOBB(float *fPoints,int pointNum)\n{\n    SClusterFeature cf;\n    cf.pnum=pointNum;\n\n    float *hoff=(float*)calloc(20000,sizeof(float));\n    int hnum=0;\n\n    float coef_as[180],coef_bs[180];\n    for(int ii=0;ii<180;ii++)\n    {\n        
coef_as[ii]=cos(ii*0.5/180*FREE_PI);\n        coef_bs[ii]=sin(ii*0.5/180*FREE_PI);\n    }\n\n    float *rxy=(float*)calloc(pointNum*2,sizeof(float));\n\n    float area=-1000;//1000000;\n    int ori=-1;\n    int angid=0;\n    for(int ii=0;ii<180;ii++)\n    {\n\n        float a_min=10000,a_max=-10000;\n        float b_min=10000,b_max=-10000;\n        for(int pid=0;pid<pointNum;pid++)\n        {\n            float val=fPoints[pid*4]*coef_as[ii]+fPoints[pid*4+1]*coef_bs[ii];// x*cos + y*sin 这是把每个点旋转一下，然后求旋转后点云的xy最大最小值 sy\n            if(a_min>val)\n                a_min=val;\n            if(a_max<val)\n                a_max=val;\n            rxy[pid*2]=val;\n\n            val=fPoints[pid*4+1]*coef_as[ii]-fPoints[pid*4]*coef_bs[ii];\n            if(b_min>val)\n                b_min=val;\n            if(b_max<val)\n                b_max=val;\n            rxy[pid*2+1]=val;\n        }\n\n        float weights=0;\n\n        hnum=(a_max-a_min)/0.05;\n        for(int ih=0;ih<hnum;ih++)\n        {\n            hoff[ih]=0;\n        }\n        for(int pid=0;pid<pointNum;pid++)\n        {\n            int ix0=(rxy[pid*2]-a_min)*20;\n            hoff[ix0]+=1;\n        }\n\n        int mh=-1;\n        for(int ih=0;ih<hnum;ih++)\n        {\n            if(hoff[ih]>weights)\n            {\n              weights=hoff[ih];\n              mh=ih;\n            }\n        }\n\n        if (mh>0)\n        {\n            if(mh*3<hnum)\n            {\n                a_min=a_min+mh*0.05/2;\n            }\n            else if((hnum-mh)*3<hnum)\n            {\n                a_max=a_max-(hnum-mh)*0.05/2;\n            }\n        }\n\n        // --y\n        float weights1=0;\n        hnum=(b_max-b_min)/0.05;\n        for(int ih=0;ih<hnum;ih++)\n        {\n            hoff[ih]=0;\n        }\n        for(int pid=0;pid<pointNum;pid++)\n        {\n            int iy0=(rxy[pid*2+1]-b_min)*20;\n            hoff[iy0]+=1;\n        }\n        int mh1=-1;\n        for(int ih=0;ih<hnum;ih++)\n        {\n   
         if(hoff[ih]>weights1)\n            {\n                weights1=hoff[ih];\n                mh1=ih;\n            }\n        }\n        if(mh1>0)\n        {\n            if(mh1*3<hnum)\n            {\n                b_min=b_min+mh1*0.05/2;\n            }\n            else if((hnum-mh1)*3<hnum)\n            {\n                b_max=b_max-(hnum-mh1)*0.05/2;\n            }\n        }\n\n        if(weights < weights1)\n        {\n            weights=weights1;\n        }\n\n        if(weights>area)//(b_max-b_min)*(a_max-a_min)<area)\n        {\n            area=weights;//(b_max-b_min)*(a_max-a_min);\n            ori=ii;\n            angid=ii;\n\n            cf.obb[0]=a_max*coef_as[ii]-b_max*coef_bs[ii];\n            cf.obb[1]=a_max*coef_bs[ii]+b_max*coef_as[ii];\n\n            cf.obb[2]=a_max*coef_as[ii]-b_min*coef_bs[ii];\n            cf.obb[3]=a_max*coef_bs[ii]+b_min*coef_as[ii];\n\n            cf.obb[4]=a_min*coef_as[ii]-b_min*coef_bs[ii];\n            cf.obb[5]=a_min*coef_bs[ii]+b_min*coef_as[ii];\n\n            cf.obb[6]=a_min*coef_as[ii]-b_max*coef_bs[ii];\n            cf.obb[7]=a_min*coef_bs[ii]+b_max*coef_as[ii];\n\n            cf.center[0]=(a_max+a_min)/2*coef_as[ii]-(b_max+b_min)/2*coef_bs[ii];\n            cf.center[1]=(a_max+a_min)/2*coef_bs[ii]+(b_max+b_min)/2*coef_as[ii];\n            cf.center[2]=0;\n\n            cf.d0[0]=coef_as[ii]; //相当于朝向角\n            cf.d0[1]=coef_bs[ii];\n            cf.d0[2]=0;\n\n            cf.xmin=a_min;\n            cf.xmax=a_max;\n            cf.ymin=b_min;\n            cf.ymax=b_max;\n        }\n    }\n\n    //center\n    cf.center[0]=0;\n    cf.center[1]=0;\n    cf.center[2]=0;\n    for(int pid=0;pid<pointNum;pid++)\n    {\n        cf.center[0]+=fPoints[pid*4];\n        cf.center[1]+=fPoints[pid*4+1];\n        cf.center[2]+=fPoints[pid*4+2];\n    }\n\n    cf.center[0]/=(pointNum+0.0001);\n    cf.center[1]/=(pointNum+0.0001);\n    cf.center[2]/=(pointNum+0.0001);\n\n    //z\n    float z_min=10000,z_max=-10000;\n    
for(int pid=0;pid<pointNum;pid++)\n    {\n        if(fPoints[pid*4+2]>z_max)\n            z_max=fPoints[pid*4+2];\n        if(fPoints[pid*4+2]<z_min)\n            z_min=fPoints[pid*4+2];\n    }\n\n    cf.zmin=z_min;\n    cf.zmax=z_max;\n\n    // 分类\n    cf.cls=0;\n    float dx=cf.xmax-cf.xmin;\n    float dy=cf.ymax-cf.ymin;\n    float dz=cf.zmax-cf.zmin;\n    if((dx>15)||(dy>15))// 太大\n    {\n        cf.cls=1;//bkg\n    }\n    else if((dx>4)&&(dy>4))//太大\n    {\n        cf.cls=1;\n    }\n    else if((dz<0.5||cf.zmax<1) && (dx>3 || dy>3))//too large\n    {\n        cf.cls=1;\n    }\n    else if(cf.zmax>3 && (dx<0.5 || dy<0.5))//too small\n    {\n        cf.cls=1;\n    }\n    else if(dx<0.5 && dy<0.5)//too small\n    {\n        cf.cls=1;\n    }\n    else if(dz<2&&(dx>6||dy>6||dx/dy>5||dy/dx>5))//too small\n    {\n        //cf.cls=1;\n    }\n    else if((dz<4)&&(dx>3)&&(dy>3))//太大\n    {\n        //cf.cls=1;\n    }\n    \n    else if(cf.center[0]>45 && (dx>=3 || dy>=3 || dx*dy>2.3))//太da\n    {\n        //cf.cls=1;\n    }\n    else if(dx/dy>5||dy/dx>5) //太窄\n    {\n        //cf.cls=0;\n    }\n    else if((dz<2.5)&&((dx/dy>3)||(dy/dx>3)))\n    {\n        //cf.cls=0;\n    }\n    else if((dz<2.5)&&(dx>2.5)&&(dy>2.5))\n    {\n        //cf.cls=1;\n    }\n\n\n    free(rxy);\n    free(hoff);\n    return cf;\n}\n"
  },
  {
    "path": "README.md",
    "content": "# Livox-Mapping\n\nThis repository implements an all-in-one and ready-to-use LiDAR-inertial odometry system for Livox LiDAR. The system is developed based on the open-source odometry framework [**LIO-Livox**](https://github.com/Livox-SDK/LIO-Livox) to get the odometry information in our front-end part. Besides, we have added more constraints and features to enhance the mapping performance. \n\nAt the same time, we provide you one simple lidar-map based localization module in [Livox-Localization](https://github.com/SiyuanHuang95/Livox-Localization).\n\n![Livox_Mapping](images/lio_livox_mapping_down.gif)\n\n## 1. Features\n\nWe only describe the new features added to our baseline framework [**LIO-Livox**](https://github.com/Livox-SDK/LIO-Livox). For the amazing features developed by LIO-Livox, please check their own repository.\n\n### 1.1 Loop Closure Detection\n\nIn the LCD part, we use the [**ScanContext**](https://github.com/irapkaist/scancontext)  to perform the loop closure detection and then add the loop closure constrains to the optimization solver. 
With the LCD module, we are able to close the gap and compensate the accumulated translation error of the scan matching in large environments.\n\n| with Loop Closure              | without Loop Closure                 |\n| ------------------------------ | ------------------------------------ |\n| ![with_sc](images/with_sc.gif) | ![without_sc](images/without_sc.gif) |\n\n### 1.2 Ground Constrain\n\nTo avoid the drifting along the Z-axis, a general problem occurring in LOAM (LiDAR Odometry and Mapping) algorithms, we add the ground constraints ( the plane detection is done by LIO-Livox)  to the optimization solver as  **[hdl_graph_slam](https://github.com/koide3/hdl_graph_slam)**.\n\n| with Ground Constrain                  | without Ground Constrain                          |\n| -------------------------------------- | ------------------------------------------------- |\n| ![with_ground](images/with_ground.gif) | ![wo_ground](images/without_ground_constrins.gif) |\n\n### 1.3 Map Merging from Multiple Rosbags\n\nIn practice, we usually collect environment data in multiple times, thus we need one functionality to fuse the map information from multiple rosbags. In this implementation, we use GPS information (optionally) to convert LiDAR scans collected in different time into one coordinate system and then use ScanContext and ICP to find fine matching result. At the same time, we maintain one lifelong optimization vector in our backend to store the previous key-frames. Through this approach, we  are able to connect scans in different collections.\n\n| ![bags](images/multi_bag.gif) |\n| -------------------------------------------- |\n\n#### 1.4 GPS Factor\n\n We also offer the GPS factor to aid the mapping process. This is only one optional choice. 
Without the GPS information, the whole system could also work.\n\n### 1.5 Compatible with Mapping Editor *interactive_slam*\n\n [**interactive_slam**](https://github.com/SMRT-AIST/interactive_slam) is an open-source 3D LiDAR-based mapping framework. We add one functionality to output the mapping result in the format compatible with *interactive_slam*, by this, you can easily edit your mapping result and get one more accurate LiDAR map. At the same time, if you have one camera alongside the Livox, we also offer one dummy moving objects removal [function](SC-PGO/utils/moving_object_removal/README.md) with aid of [mmdection](https://github.com/open-mmlab/mmdetection) and easy LiDAR points projection.\n\n| Represented in interactive_slam               | Moving Object Removal                                 |\n| --------------------------------------------- | ----------------------------------------------------- |\n| ![interact_slam](images/interactive_slam.png) | ![image-20211223215338497](images/object_removal.png) |\n\n## 2. 
Prerequisites\n\n### 2.1 Dependencies for baseline\n\nOur framework is developed based on the open-source framework [**LIO-Livox**](https://github.com/Livox-SDK/LIO-Livox) for odometry part,  [**ScanContext**](https://github.com/irapkaist/scancontext)  for loop closure part and [**interactive_slam**](https://github.com/SMRT-AIST/interactive_slam) for map-editing part (if you need), so technically, please refer to these repos for detailed information.\n\n### 2.2 General Dependencies  \n\n- [Ubuntu](http://ubuntu.com) (tested on 16.04 and 18.04)\n\n- [ROS](http://wiki.ros.org/ROS/Installation) (tested with Kinetic and Melodic)\n\n- [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page)\n\n- [Ceres Solver](http://ceres-solver.org/installation.html)\n\n- [PCL](http://www.pointclouds.org/downloads/linux.html)\n\n- [livox_ros_driver](https://github.com/Livox-SDK/livox_ros_driver)\n\n- libsuitesparse\n\n### 2.3 Docker\n\nFor one quick and convenient test, we are offering you a Docker Image with DockerHub.\n\nPlease check the DockerHub [Link](https://hub.docker.com/r/siyuanhuang95/livox_slam) for more information.\n\n- Pull the docker image.\n\n```shell\ndocker pull siyuanhuang95/livox_slam:release\n```\n\n- Execute the docker run command. You can use -v to bind the docker to your local machine as you wish.\n\n```shell\ndocker run --gpus all -it -p 3316:22 --device=/dev/dri --group-add video --volume=/tmp/.X11-unix:/tmp/.X11-unix  --env=\"DISPLAY=$DISPLAY\" siyuanhuang95/livox_slam:release /bin/bash\n```\n\n- To allow the RVIZ in Docker to run in the host PC. In the local machine:\n\n```shell\nxhost +\n```\n\n## 3. Build\n\n- Clone the repository and catkin_make\n\n```shell\ncd ~/catkin_ws/src\ngit clone https://github.com/PJLab-ADG/Livox-Mapping.git\ncd ..\ncatkin_make\n```\n\n- Remember to source the livox_ros_driver before build (follow [livox_ros_driver](https://github.com/hku-mars/FAST_LIO#13-livox_ros_driver))\n\n## 4. 
Run\n\n### 4.1 Dataset Requirements\n\n Before running the mapping functionality, please make sure the sensor data are published to the correct rostopic.\n\n- Livox LiDAR: /livox/lidar\n- IMU: /livox/imu\n- GPS: /localization/navstate_info\n\n### 4.2 Run Mapping Function\n\n- In one terminal, launch the **mapping thread**\n\n```shell\n# If you don't have the GPS data\ncd ~/catkin_ws\nsource devel/setup.bash\nroslaunch livox_mapping livox_mapping.launch save_path:=\"PATH_TO_SAVE_SLAM_POSE_RESULT\"\n```\n\n```shell\n# If you have the GPS data\ncd ~/catkin_ws\nsource devel/setup.bash\nroslaunch livox_mapping livox_mapping.launch useRTK:=\"true\" save_path:=\"PATH_TO_SAVE_SLAM_POSE_RESULT\"\n```\n\n- In another terminal, launch the **odometry thread**\n\n```shell\ncd ~/catkin_ws\nsource devel/setup.bash\nroslaunch livox_odometry livox_odometry.launch save_path:=\"PATH_TO_SAVE_ODOM_RESULT_in_txt\"\n```\n\n- In another terminal, play the rosbag\n\n```shell\nrosbag play YOUR_ROSBAG.bag\n```\n\n## 4.3 Multiple Bags Merge\n\nIf you have more rosbags collected in one same area, you can restart odometry thread at the end of the first odometry thread run and play the next package.\n\n- In the terminal for **odometry thread**, use **Ctrl+C** to shut down the **old odometry thread**.\n- In the terminal for **odometry thread**, use the command below to start one new odometry thread\n\n```shell\nroslaunch livox_odometry livox_odometry.launch save_path:=\"PATH_TO_SAVE_ODOM_RESULT_in_txt\"\n```\n\n- Play one new rosbag.\n\n\n\n## 5. Datasets\n\nFor the ease of usage, we are providing several test rosbags collected in one industrial park located in Shanghai. Please be aware that all codes and datasets included in this repository are for academic research purposes only. Other usages are **NOT** encouraged, and it is at your own risk. 
If you have any concerns including privacy, please contact us by sending an e-mail to huangsiyuan@pjlab.org.cn\n\nThe dataset can be downloaded through the Baidu Netdisk with:\n\n```shell\nLink：https://pan.baidu.com/s/17ElBOWiFVr68975FtXY8ZA \nPassword：pjop\n```\n\n\n\n## 6. Acknowledgments\n\nThanks to the authors of **[LIO-Livox](https://github.com/Livox-SDK/LIO-Livox),** [**ScanContext**](https://github.com/irapkaist/scancontext), [**Fast-LIO-SLAM**](https://github.com/gisbi-kim/FAST_LIO_SLAM) and  [**interactive_slam**](https://github.com/SMRT-AIST/interactive_slam).\n\n## Contact\n\n- If you have any questions, please contact us:\n  - Send email to shibotian@pjlab.org.cn [Preferred] or huangsiyuan@pjlab.org.cn.\n  - Report issues on GitHub [Preferred].\n  - For other cooperation possibilities, please contact shibotian@pjlab.org.cn [Preferred].\n"
  },
  {
    "path": "SC-PGO/.gitignore",
    "content": "PCD/"
  },
  {
    "path": "SC-PGO/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.8.3)\nproject(livox_mapping)\n\nset(CMAKE_BUILD_TYPE \"Release\")\n# set(CMAKE_CXX_FLAGS \"-std=c++11\")\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_FLAGS_RELEASE \"-O3 -Wall -g\")\n\nfind_package(catkin REQUIRED COMPONENTS\n  geometry_msgs\n  nav_msgs\n  sensor_msgs\n  roscpp\n  rospy\n  rosbag\n  std_msgs\n  ad_localization_msgs\n  image_transport\n  cv_bridge\n  tf\n  tf_conversions\n)\n\n#find_package(Eigen3 REQUIRED)\nfind_package(PCL REQUIRED)\nfind_package(OpenCV REQUIRED)\nfind_package(Ceres REQUIRED)\nfind_package(Boost REQUIRED COMPONENTS timer)\n\nfind_package(OpenMP REQUIRED)\nfind_package(GTSAM REQUIRED QUIET)\n\ninclude_directories(\n  include\n\t${catkin_INCLUDE_DIRS} \n\t${PCL_INCLUDE_DIRS}\n  ${CERES_INCLUDE_DIRS}\n  ${OpenCV_INCLUDE_DIRS}\n  ${GTSAM_INCLUDE_DIR}\n)\n\ncatkin_package(\n  CATKIN_DEPENDS geometry_msgs nav_msgs roscpp rospy std_msgs ad_localization_msgs\n  DEPENDS EIGEN3 PCL \n  INCLUDE_DIRS include\n)\nadd_executable(livox_mapping \n  src/laserPosegraphOptimization.cpp\n  include/scancontext/Scancontext.cpp\n)\ntarget_compile_options(livox_mapping \n  PRIVATE ${OpenMP_CXX_FLAGS}\n)\ntarget_link_libraries(livox_mapping \n  ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${CERES_LIBRARIES}\n  ${OpenMP_CXX_FLAGS}\n  gtsam\n)\n\n\n\n\n\n"
  },
  {
    "path": "SC-PGO/README.md",
    "content": "# SC-A-LOAM\n\n## What is SC-A-LOAM? \n- A real-time LiDAR SLAM package that integrates A-LOAM and ScanContext. \n    - **A-LOAM** for odometry (i.e., consecutive motion estimation)\n    - **ScanContext** for coarse global localization that can deal with big drifts (i.e., place recognition as kidnapped robot problem without initial pose)\n    - and iSAM2 of GTSAM is used for pose-graph optimization. \n- This package aims to show ScanContext's handy applicability. \n    - The only thing a user should do is to include `Scancontext.h`, call `makeAndSaveScancontextAndKeys` and `detectLoopClosureID`. \n\n## Features \n1.  A strong place recognition and loop closing \n    - We integrated ScanContext as a loop detector into A-LOAM, and ISAM2-based pose-graph optimization is followed. (see https://youtu.be/okML_zNadhY?t=313 to enjoy the drift-closing moment)\n2. A modular implementation \n    - The only difference from A-LOAM is the addition of the `laserPosegraphOptimization.cpp` file. In the new file, we subscribe to the point cloud topic and odometry topic (as a result of A-LOAM, published from `laserMapping.cpp`). That is, our implementation is generic to any front-end odometry methods. Thus, our pose-graph optimization module (i.e., `laserPosegraphOptimization.cpp`) can easily be integrated with any odometry algorithms such as non-LOAM family or even other sensors (e.g., visual odometry).  \n    - <p align=\"center\"><img src=\"picture/anypipe.png\" width=800></p>\n3. (optional) Altitude stabilization using consumer-level GPS  \n    - To make a result more trustworthy, we support GPS (consumer-level price, such as U-Blox EVK-7P)-based altitude stabilization. The LOAM family of methods are known to be susceptible to z-errors outdoors. We used the robust loss for only the altitude term. For the details, see the variable `robustGPSNoise` in the `laserPosegraphOptimization.cpp` file. 
\n\n## Prerequisites (dependencies)\n- We mainly depend on ROS, Ceres (for A-LOAM), and GTSAM (for pose-graph optimization). \n    - For the details to install the prerequisites, please follow the A-LOAM and LIO-SAM repository. \n- The below examples are done under ROS melodic (ubuntu 18) and GTSAM version 4.x. \n\n## How to use? \n- First, install the abovementioned dependencies, and follow the lines below. \n```\n    mkdir -p ~/catkin_scaloam_ws/src\n    cd ~/catkin_scaloam_ws/src\n    git clone https://github.com/gisbi-kim/SC-A-LOAM.git\n    cd ../\n    catkin_make\n    source ~/catkin_scaloam_ws/devel/setup.bash\n    roslaunch aloam_velodyne aloam_mulran.launch # for MulRan dataset setting \n```\n\n## Example Results \n\n### Riverside 01, MulRan dataset \n- The MulRan dataset provides lidar scans (Ouster OS1-64, horizontally mounted, 10Hz) and consumer level gps (U-Blox EVK-7P, 4Hz) data.\n    - About how to use (publishing) the data: see here https://github.com/irapkaist/file_player_mulran\n- example videos on Riverside 01 sequence. \n    1. with consumer level GPS-based altitude stabilization: https://youtu.be/FwAVX5TVm04\n    2. 
without the z stabilization: https://youtu.be/okML_zNadhY \n- example result:\n\n<p align=\"center\"><img src=\"picture/riverside01.png\" width=800></p>\n\n### KITTI 05 \n- For KITTI (HDL-64 sensor), run using the command \n    ```\n    roslaunch aloam_velodyne aloam_velodyne_HDL_64.launch # for KITTI dataset setting\n    ```\n- To publish KITTI scans, you can use mini-kitti publisher, a simple python script: https://github.com/gisbi-kim/mini-kitti-publisher\n- example video (no GPS used here): https://youtu.be/hk3Xx8SKkv4\n- example result: \n\n<p align=\"center\"><img src=\"picture/kitti05.png\" width=800></p>\n\n### Indoor\n- ScanContext also works in indoor environments (use smaller sc_max_radius value).\n- example video: https://youtu.be/Uv6_BRmxJho\n- example result: \n<p align=\"center\"><img src=\"picture/indoor.png\" width=800></p>\n\n### For Livox LiDAR \n- Scan Context also works for Livox LiDAR data\n    - In this example, Scan Context is integrated with FAST-LIO (https://github.com/hku-mars/FAST_LIO).\n    - Note: No additional integration effort is required. A user just runs the FAST-LIO node and SC-A-LOAM's posegraphoptimization.cpp node separately!\n- example video (tutorial and results): https://youtu.be/Fw9S6D6HozA\n- example result: \n    <p align=\"center\"><img src=\"picture/scfastlio.png\" width=600></p>\n\n### For Navtech Radar \n- Scan Context also works for Navtech Radar data!\n- For the details, please see \n    - https://github.com/gisbi-kim/navtech-radar-slam\n        - used the pose-graph optimization node of this repository (SC-A-LOAM)\n    - [example video](https://www.youtube.com/watch?v=avtIQ8fesgU&t=128s)\n\n## Utilities\n\n### Data saver and Map construction \n- Similar to the [SC-LIO-SAM's saver utility](https://github.com/gisbi-kim/SC-LIO-SAM#applications), we support pose and scan saver per keyframes. Using these saved data, the map (within ROI) can be constructed offline. 
See the `utils/python/makeMergedMap.py` and [this tutorial](https://youtu.be/jmR3DH_A4Co). \n- Below is the example results of MulRan dataset KAIST 03's merged map, visualized using CloudCompare ([download the map data here](https://www.dropbox.com/sh/96jrpx3x6hh316j/AACb07kGbocnQWMIpksmU6MQa?dl=0)).  \n\n    <p align=\"center\"><img src=\"picture/kaist-03-merged.png\" width=800></p>\n\n- A user also can remove dynamic points using these saved keyframe poses and scans. See [this tutorial](https://www.youtube.com/watch?v=UiYYrPMcIRU) and our [Removert project](https://github.com/irapkaist/removert).\n\n## Acknowledgements\n- Thanks to LOAM, A-LOAM, and LIO-SAM code authors. The major codes in this repository are borrowed from their efforts.\n\n## Maintainer \n- please contact me through `paulgkim@kaist.ac.kr` \n\n## TODO\n- Delayed RS loop closings \n- SLAM with multi-session localization \n- More examples on other datasets (KITTI, complex urban dataset, etc.)\n"
  },
  {
    "path": "SC-PGO/include/livox_mapping/common.h",
    "content": "// This is an advanced implementation of the algorithm described in the following paper:\n//   J. Zhang and S. Singh. LOAM: Lidar Odometry and Mapping in Real-time.\n//     Robotics: Science and Systems Conference (RSS). Berkeley, CA, July 2014. \n\n// Modifier: Tong Qin               qintonguav@gmail.com\n// \t         Shaozu Cao \t\t    saozu.cao@connect.ust.hk\n\n\n// Copyright 2013, Ji Zhang, Carnegie Mellon University\n// Further contributions copyright (c) 2016, Southwest Research Institute\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice,\n//    this list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n// 3. Neither the name of the copyright holder nor the names of its\n//    contributors may be used to endorse or promote products derived from this\n//    software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n// POSSIBILITY OF SUCH DAMAGE.\n\n#pragma once\n\n#include <cmath>\n\n#include <pcl/point_types.h>\n\ntypedef pcl::PointXYZI PointType;\n\ninline double rad2deg(double radians)\n{\n  return radians * 180.0 / M_PI;\n}\n\ninline double deg2rad(double degrees)\n{\n  return degrees * M_PI / 180.0;\n}\n\nstruct Pose6D {\n  double x;\n  double y;\n  double z;\n  double roll;\n  double pitch;\n  double yaw;\n};"
  },
  {
    "path": "SC-PGO/include/livox_mapping/tic_toc.h",
    "content": "// Author:   Tong Qin               qintonguav@gmail.com\n// \t         Shaozu Cao \t\t    saozu.cao@connect.ust.hk\n\n#pragma once\n\n#include <ctime>\n#include <cstdlib>\n#include <chrono>\n\nclass TicToc\n{\n  public:\n    TicToc()\n    {\n        tic();\n    }\n\n    void tic()\n    {\n        start = std::chrono::system_clock::now();\n    }\n\n    double toc()\n    {\n        end = std::chrono::system_clock::now();\n        std::chrono::duration<double> elapsed_seconds = end - start;\n        return elapsed_seconds.count() * 1000;\n    }\n\n  private:\n    std::chrono::time_point<std::chrono::system_clock> start, end;\n};\n\nclass TicTocV2\n{\npublic:\n    TicTocV2()\n    {\n        tic();\n    }\n\n    TicTocV2( bool _disp )\n    {\n        disp_ = _disp;\n        tic();\n    }\n\n    void tic()\n    {\n        start = std::chrono::system_clock::now();\n    }\n\n    void toc( std::string _about_task )\n    {\n        end = std::chrono::system_clock::now();\n        std::chrono::duration<double> elapsed_seconds = end - start;\n        double elapsed_ms = elapsed_seconds.count() * 1000;\n\n        if( disp_ )\n        {\n          std::cout.precision(3); // 10 for sec, 3 for ms \n          std::cout << _about_task << \": \" << elapsed_ms << \" msec.\" << std::endl;\n        }\n    }\n\nprivate:  \n    std::chrono::time_point<std::chrono::system_clock> start, end;\n    bool disp_ = false;\n};"
  },
  {
    "path": "SC-PGO/include/scancontext/KDTreeVectorOfVectorsAdaptor.h",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2011-16 Jose Luis Blanco (joseluisblancoc@gmail.com).\n *   All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n#pragma once\n\n#include \"scancontext/nanoflann.hpp\"\n\n#include <vector>\n\n// ===== This example shows how to use nanoflann with these types of containers: =======\n//typedef std::vector<std::vector<double> > my_vector_of_vectors_t;\n//typedef std::vector<Eigen::VectorXd> my_vector_of_vectors_t;   // This requires #include <Eigen/Dense>\n// 
=====================================================================================\n\n\n/** A simple vector-of-vectors adaptor for nanoflann, without duplicating the storage.\n  *  The i'th vector represents a point in the state space.\n  *\n  *  \\tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations.\n  *  \\tparam num_t The type of the point coordinates (typically, double or float).\n  *  \\tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.\n  *  \\tparam IndexType The type for indices in the KD-tree index (typically, size_t of int)\n  */\ntemplate <class VectorOfVectorsType, typename num_t = double, int DIM = -1, class Distance = nanoflann::metric_L2, typename IndexType = size_t>\nstruct KDTreeVectorOfVectorsAdaptor\n{\n\ttypedef KDTreeVectorOfVectorsAdaptor<VectorOfVectorsType,num_t,DIM,Distance> self_t;\n\ttypedef typename Distance::template traits<num_t,self_t>::distance_t metric_t;\n\ttypedef nanoflann::KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType>  index_t;\n\n\tindex_t* index; //! 
The kd-tree index for the user to call its methods as usual with any other FLANN index.\n\n\t/// Constructor: takes a const ref to the vector of vectors object with the data points\n\tKDTreeVectorOfVectorsAdaptor(const size_t /* dimensionality */, const VectorOfVectorsType &mat, const int leaf_max_size = 10) : m_data(mat)\n\t{\n\t\tassert(mat.size() != 0 && mat[0].size() != 0);\n\t\tconst size_t dims = mat[0].size();\n\t\tif (DIM>0 && static_cast<int>(dims) != DIM)\n\t\t\tthrow std::runtime_error(\"Data set dimensionality does not match the 'DIM' template argument\");\n\t\tindex = new index_t( static_cast<int>(dims), *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) );\n\t\tindex->buildIndex();\n\t}\n\n\t~KDTreeVectorOfVectorsAdaptor() {\n\t\tdelete index;\n\t}\n\n\tconst VectorOfVectorsType &m_data;\n\n\t/** Query for the \\a num_closest closest points to a given point (entered as query_point[0:dim-1]).\n\t  *  Note that this is a short-cut method for index->findNeighbors().\n\t  *  The user can also call index->... 
methods as desired.\n\t  * \\note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface.\n\t  */\n\tinline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const\n\t{\n\t\tnanoflann::KNNResultSet<num_t,IndexType> resultSet(num_closest);\n\t\tresultSet.init(out_indices, out_distances_sq);\n\t\tindex->findNeighbors(resultSet, query_point, nanoflann::SearchParams());\n\t}\n\n\t/** @name Interface expected by KDTreeSingleIndexAdaptor\n\t  * @{ */\n\n\tconst self_t & derived() const {\n\t\treturn *this;\n\t}\n\tself_t & derived()       {\n\t\treturn *this;\n\t}\n\n\t// Must return the number of data points\n\tinline size_t kdtree_get_point_count() const {\n\t\treturn m_data.size();\n\t}\n\n\t// Returns the dim'th component of the idx'th point in the class:\n\tinline num_t kdtree_get_pt(const size_t idx, const size_t dim) const {\n\t\treturn m_data[idx][dim];\n\t}\n\n\t// Optional bounding-box computation: return false to default to a standard bbox computation loop.\n\t//   Return true if the BBOX was already computed by the class and returned in \"bb\" so it can be avoided to redo it again.\n\t//   Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)\n\ttemplate <class BBOX>\n\tbool kdtree_get_bbox(BBOX & /*bb*/) const {\n\t\treturn false;\n\t}\n\n\t/** @} */\n\n}; // end of KDTreeVectorOfVectorsAdaptor\n"
  },
  {
    "path": "SC-PGO/include/scancontext/Scancontext.cpp",
    "content": "#include \"scancontext/Scancontext.h\"\n\n// namespace SC2\n// {\n\nvoid coreImportTest (void)\n{\n    cout << \"scancontext lib is successfully imported.\" << endl;\n} // coreImportTest\n\n\nfloat rad2deg(float radians)\n{\n    return radians * 180.0 / M_PI;\n}\n\nfloat deg2rad(float degrees)\n{\n    return degrees * M_PI / 180.0;\n}\n\n\nfloat xy2theta( const float & _x, const float & _y )\n{\n    if ( (_x >= 0) & (_y >= 0)) \n        return (180/M_PI) * atan(_y / _x);\n\n    if ( (_x < 0) & (_y >= 0)) \n        return 180 - ( (180/M_PI) * atan(_y / (-_x)) );\n\n    if ( (_x < 0) & (_y < 0)) \n        return 180 + ( (180/M_PI) * atan(_y / _x) );\n\n    if ( (_x >= 0) & (_y < 0))\n        return 360 - ( (180/M_PI) * atan((-_y) / _x) );\n} // xy2theta\n\n\nMatrixXd circshift( MatrixXd &_mat, int _num_shift )\n{\n    // shift columns to right direction \n    assert(_num_shift >= 0);\n\n    if( _num_shift == 0 )\n    {\n        MatrixXd shifted_mat( _mat );\n        return shifted_mat; // Early return \n    }\n\n    MatrixXd shifted_mat = MatrixXd::Zero( _mat.rows(), _mat.cols() );\n    for ( int col_idx = 0; col_idx < _mat.cols(); col_idx++ )\n    {\n        int new_location = (col_idx + _num_shift) % _mat.cols();\n        shifted_mat.col(new_location) = _mat.col(col_idx);\n    }\n\n    return shifted_mat;\n\n} // circshift\n\n\nstd::vector<float> eig2stdvec( MatrixXd _eigmat )\n{\n    std::vector<float> vec( _eigmat.data(), _eigmat.data() + _eigmat.size() );\n    return vec;\n} // eig2stdvec\n\n\ndouble SCManager::distDirectSC ( MatrixXd &_sc1, MatrixXd &_sc2 )\n{\n    int num_eff_cols = 0; // i.e., to exclude all-nonzero sector\n    double sum_sector_similarity = 0;\n    for ( int col_idx = 0; col_idx < _sc1.cols(); col_idx++ )\n    {\n        VectorXd col_sc1 = _sc1.col(col_idx);\n        VectorXd col_sc2 = _sc2.col(col_idx);\n        \n        if( (col_sc1.norm() == 0) | (col_sc2.norm() == 0) )\n            continue; // don't count this sector 
pair. \n\n        double sector_similarity = col_sc1.dot(col_sc2) / (col_sc1.norm() * col_sc2.norm());\n\n        sum_sector_similarity = sum_sector_similarity + sector_similarity;\n        num_eff_cols = num_eff_cols + 1;\n    }\n    \n    double sc_sim = sum_sector_similarity / num_eff_cols;\n    return 1.0 - sc_sim;\n\n} // distDirectSC\n\n\nint SCManager::fastAlignUsingVkey( MatrixXd & _vkey1, MatrixXd & _vkey2)\n{\n    int argmin_vkey_shift = 0;\n    double min_veky_diff_norm = 10000000;\n    for ( int shift_idx = 0; shift_idx < _vkey1.cols(); shift_idx++ )\n    {\n        MatrixXd vkey2_shifted = circshift(_vkey2, shift_idx);\n\n        MatrixXd vkey_diff = _vkey1 - vkey2_shifted;\n\n        double cur_diff_norm = vkey_diff.norm();\n        if( cur_diff_norm < min_veky_diff_norm )\n        {\n            argmin_vkey_shift = shift_idx;\n            min_veky_diff_norm = cur_diff_norm;\n        }\n    }\n\n    return argmin_vkey_shift;\n\n} // fastAlignUsingVkey\n\n\nstd::pair<double, int> SCManager::distanceBtnScanContext( MatrixXd &_sc1, MatrixXd &_sc2 )\n{\n    // 1. fast align using variant key (not in original IROS18)\n    MatrixXd vkey_sc1 = makeSectorkeyFromScancontext( _sc1 );\n    MatrixXd vkey_sc2 = makeSectorkeyFromScancontext( _sc2 );\n    int argmin_vkey_shift = fastAlignUsingVkey( vkey_sc1, vkey_sc2 );\n\n    const int SEARCH_RADIUS = round( 0.5 * SEARCH_RATIO * _sc1.cols() ); // a half of search range \n    std::vector<int> shift_idx_search_space { argmin_vkey_shift };\n    for ( int ii = 1; ii < SEARCH_RADIUS + 1; ii++ )\n    {\n        shift_idx_search_space.push_back( (argmin_vkey_shift + ii + _sc1.cols()) % _sc1.cols() );\n        shift_idx_search_space.push_back( (argmin_vkey_shift - ii + _sc1.cols()) % _sc1.cols() );\n    }\n    std::sort(shift_idx_search_space.begin(), shift_idx_search_space.end());\n\n    // 2. 
fast columnwise diff \n    int argmin_shift = 0;\n    double min_sc_dist = 10000000;\n    for ( int num_shift: shift_idx_search_space )\n    {\n        MatrixXd sc2_shifted = circshift(_sc2, num_shift);\n        double cur_sc_dist = distDirectSC( _sc1, sc2_shifted );\n        if( cur_sc_dist < min_sc_dist )\n        {\n            argmin_shift = num_shift;\n            min_sc_dist = cur_sc_dist;\n        }\n    }\n\n    return make_pair(min_sc_dist, argmin_shift);\n\n} // distanceBtnScanContext\n\n\nMatrixXd SCManager::makeScancontext( pcl::PointCloud<SCPointType> & _scan_down )\n{\n    TicTocV2 t_making_desc;\n\n    int num_pts_scan_down = _scan_down.points.size();\n\n    // main\n    const int NO_POINT = -1000;\n    MatrixXd desc = NO_POINT * MatrixXd::Ones(PC_NUM_RING, PC_NUM_SECTOR);\n\n    SCPointType pt;\n    float azim_angle, azim_range; // wihtin 2d plane\n    int ring_idx, sctor_idx;\n    for (int pt_idx = 0; pt_idx < num_pts_scan_down; pt_idx++)\n    {\n        pt.x = _scan_down.points[pt_idx].x; \n        pt.y = _scan_down.points[pt_idx].y;\n        pt.z = _scan_down.points[pt_idx].z + LIDAR_HEIGHT; // naive adding is ok (all points should be > 0).\n\n        // xyz to ring, sector\n        azim_range = sqrt(pt.x * pt.x + pt.y * pt.y);\n        azim_angle = xy2theta(pt.x, pt.y);\n\n        // if range is out of roi, pass\n        if( azim_range > PC_MAX_RADIUS )\n            continue;\n\n        ring_idx = std::max( std::min( PC_NUM_RING, int(ceil( (azim_range / PC_MAX_RADIUS) * PC_NUM_RING )) ), 1 );\n        sctor_idx = std::max( std::min( PC_NUM_SECTOR, int(ceil( (azim_angle / 360.0) * PC_NUM_SECTOR )) ), 1 );\n\n        // taking maximum z \n        if ( desc(ring_idx-1, sctor_idx-1) < pt.z ) // -1 means cpp starts from 0\n            desc(ring_idx-1, sctor_idx-1) = pt.z; // update for taking maximum value at that bin\n    }\n\n    // reset no points to zero (for cosine dist later)\n    for ( int row_idx = 0; row_idx < desc.rows(); row_idx++ )\n      
  for ( int col_idx = 0; col_idx < desc.cols(); col_idx++ )\n            if( desc(row_idx, col_idx) == NO_POINT )\n                desc(row_idx, col_idx) = 0;\n\n    t_making_desc.toc(\"PolarContext making\");\n\n    return desc;\n} // SCManager::makeScancontext\n\n\nMatrixXd SCManager::makeRingkeyFromScancontext( Eigen::MatrixXd &_desc )\n{\n    /* \n     * summary: rowwise mean vector\n    */\n    Eigen::MatrixXd invariant_key(_desc.rows(), 1);\n    for ( int row_idx = 0; row_idx < _desc.rows(); row_idx++ )\n    {\n        Eigen::MatrixXd curr_row = _desc.row(row_idx);\n        invariant_key(row_idx, 0) = curr_row.mean();\n    }\n\n    return invariant_key;\n} // SCManager::makeRingkeyFromScancontext\n\n\nMatrixXd SCManager::makeSectorkeyFromScancontext( Eigen::MatrixXd &_desc )\n{\n    /* \n     * summary: columnwise mean vector\n    */\n    Eigen::MatrixXd variant_key(1, _desc.cols());\n    for ( int col_idx = 0; col_idx < _desc.cols(); col_idx++ )\n    {\n        Eigen::MatrixXd curr_col = _desc.col(col_idx);\n        variant_key(0, col_idx) = curr_col.mean();\n    }\n\n    return variant_key;\n} // SCManager::makeSectorkeyFromScancontext\n\n\nconst Eigen::MatrixXd& SCManager::getConstRefRecentSCD(void)\n{\n    return polarcontexts_.back();\n}\n\n\nvoid SCManager::saveScancontextAndKeys( Eigen::MatrixXd _scd )\n{\n    Eigen::MatrixXd ringkey = makeRingkeyFromScancontext( _scd );\n    Eigen::MatrixXd sectorkey = makeSectorkeyFromScancontext( _scd );\n    std::vector<float> polarcontext_invkey_vec = eig2stdvec( ringkey );\n\n    polarcontexts_.push_back( _scd ); \n    polarcontext_invkeys_.push_back( ringkey );\n    polarcontext_vkeys_.push_back( sectorkey );\n    polarcontext_invkeys_mat_.push_back( polarcontext_invkey_vec );\n} // SCManager::makeAndSaveScancontextAndKeys\n\n\nvoid SCManager::makeAndSaveScancontextAndKeys( pcl::PointCloud<SCPointType> & _scan_down )\n{\n    Eigen::MatrixXd sc = makeScancontext(_scan_down); // v1 \n    Eigen::MatrixXd ringkey = 
makeRingkeyFromScancontext( sc );\n    Eigen::MatrixXd sectorkey = makeSectorkeyFromScancontext( sc );\n    std::vector<float> polarcontext_invkey_vec = eig2stdvec( ringkey );\n\n    polarcontexts_.push_back( sc ); \n    polarcontext_invkeys_.push_back( ringkey );\n    polarcontext_vkeys_.push_back( sectorkey );\n    polarcontext_invkeys_mat_.push_back( polarcontext_invkey_vec );\n} // SCManager::makeAndSaveScancontextAndKeys\n\nvoid SCManager::setSCdistThres(double _new_thres)\n{\n    SC_DIST_THRES = _new_thres;\n} // SCManager::setThres\n\nvoid SCManager::setMaximumRadius(double _max_r)\n{\n    PC_MAX_RADIUS = _max_r;\n} // SCManager::setMaximumRadius\n\nstd::pair<int, float> SCManager::detectLoopClosureIDBetweenSession (std::vector<float>& _curr_key, Eigen::MatrixXd& _curr_desc)\n{\n    int loop_id { -1 }; // init with -1, -1 means no loop (== LeGO-LOAM's variable \"closestHistoryFrameID\")\n\n    auto& curr_key = _curr_key;\n    auto& curr_desc = _curr_desc; // current observation (query)\n\n    // step 0: if first, construct the tree in batch\n    if( ! 
is_tree_batch_made ) // run only once\n    {\n        polarcontext_invkeys_to_search_.clear();\n        polarcontext_invkeys_to_search_.assign( polarcontext_invkeys_mat_.begin(), polarcontext_invkeys_mat_.end() ) ;\n\n        polarcontext_tree_batch_.reset(); \n        polarcontext_tree_batch_ = std::make_unique<InvKeyTree>(PC_NUM_RING /* dim */, polarcontext_invkeys_to_search_, 10 /* max leaf */ );\n\n        is_tree_batch_made = true; // for running this block only once\n    }\n        \n    double min_dist = 10000000; // init with somthing large\n    int nn_align = 0;\n    int nn_idx = 0;\n\n    // step 1: knn search\n    std::vector<size_t> candidate_indexes( NUM_CANDIDATES_FROM_TREE ); \n    std::vector<float> out_dists_sqr( NUM_CANDIDATES_FROM_TREE );\n\n    nanoflann::KNNResultSet<float> knnsearch_result( NUM_CANDIDATES_FROM_TREE );\n    knnsearch_result.init( &candidate_indexes[0], &out_dists_sqr[0] );\n    polarcontext_tree_batch_->index->findNeighbors( knnsearch_result, &curr_key[0] /* query */, nanoflann::SearchParams(10) ); // error here\n\n    // step 2: pairwise distance (find optimal columnwise best-fit using cosine distance)\n    TicTocV2 t_calc_dist;   \n    for ( int candidate_iter_idx = 0; candidate_iter_idx < NUM_CANDIDATES_FROM_TREE; candidate_iter_idx++ )\n    {\n        MatrixXd polarcontext_candidate = polarcontexts_[ candidate_indexes[candidate_iter_idx] ];\n        std::pair<double, int> sc_dist_result = distanceBtnScanContext( curr_desc, polarcontext_candidate ); \n        \n        double candidate_dist = sc_dist_result.first;\n        int candidate_align = sc_dist_result.second;\n\n        if( candidate_dist < min_dist )\n        {\n            min_dist = candidate_dist;\n            nn_align = candidate_align;\n\n            nn_idx = candidate_indexes[candidate_iter_idx];\n        }\n    }\n    t_calc_dist.toc(\"Distance calc\");\n\n    // step 3: similarity threshold\n    if( min_dist < SC_DIST_THRES )\n        loop_id = nn_idx; \n\n  
  // To do: return also nn_align (i.e., yaw diff)\n    float yaw_diff_rad = deg2rad(nn_align * PC_UNIT_SECTORANGLE);\n    std::pair<int, float> result {loop_id, yaw_diff_rad};\n\n    return result;\n\n} // SCManager::detectLoopClosureIDBetweenSession\n\n\nstd::pair<int, float> SCManager::detectLoopClosureID ( void )\n{\n    int loop_id { -1 }; // init with -1, -1 means no loop (== LeGO-LOAM's variable \"closestHistoryFrameID\")\n\n    auto curr_key = polarcontext_invkeys_mat_.back(); // current observation (query)\n    auto curr_desc = polarcontexts_.back(); // current observation (query)\n\n    /* \n     * step 1: candidates from ringkey tree_\n     */\n    if( (int)polarcontext_invkeys_mat_.size() < NUM_EXCLUDE_RECENT + 1)\n    {\n        std::pair<int, float> result {loop_id, 0.0};\n        return result; // Early return \n    }\n\n    // tree_ reconstruction (not mandatory to make everytime)\n    if( tree_making_period_conter % TREE_MAKING_PERIOD_ == 0) // to save computation cost\n    {\n        TicTocV2 t_tree_construction;\n\n        polarcontext_invkeys_to_search_.clear();\n        polarcontext_invkeys_to_search_.assign( polarcontext_invkeys_mat_.begin(), polarcontext_invkeys_mat_.end() - NUM_EXCLUDE_RECENT ) ;\n\n        polarcontext_tree_.reset(); \n        polarcontext_tree_ = std::make_unique<InvKeyTree>(PC_NUM_RING /* dim */, polarcontext_invkeys_to_search_, 10 /* max leaf */ );\n        // tree_ptr_->index->buildIndex(); // inernally called in the constructor of InvKeyTree (for detail, refer the nanoflann and KDtreeVectorOfVectorsAdaptor)\n        t_tree_construction.toc(\"Tree construction\");\n    }\n    tree_making_period_conter = tree_making_period_conter + 1;\n        \n    double min_dist = 10000000; // init with somthing large\n    int nn_align = 0;\n    int nn_idx = 0;\n\n    // knn search\n    std::vector<size_t> candidate_indexes( NUM_CANDIDATES_FROM_TREE ); \n    std::vector<float> out_dists_sqr( NUM_CANDIDATES_FROM_TREE );\n\n    TicTocV2 
t_tree_search;\n    nanoflann::KNNResultSet<float> knnsearch_result( NUM_CANDIDATES_FROM_TREE );\n    knnsearch_result.init( &candidate_indexes[0], &out_dists_sqr[0] );\n    polarcontext_tree_->index->findNeighbors( knnsearch_result, &curr_key[0] /* query */, nanoflann::SearchParams(10) ); \n    t_tree_search.toc(\"Tree search\");\n\n    /* \n     *  step 2: pairwise distance (find optimal columnwise best-fit using cosine distance)\n     */\n    TicTocV2 t_calc_dist;   \n    for ( int candidate_iter_idx = 0; candidate_iter_idx < NUM_CANDIDATES_FROM_TREE; candidate_iter_idx++ )\n    {\n        MatrixXd polarcontext_candidate = polarcontexts_[ candidate_indexes[candidate_iter_idx] ];\n        std::pair<double, int> sc_dist_result = distanceBtnScanContext( curr_desc, polarcontext_candidate ); \n        \n        double candidate_dist = sc_dist_result.first;\n        int candidate_align = sc_dist_result.second;\n\n        if( candidate_dist < min_dist )\n        {\n            min_dist = candidate_dist;\n            nn_align = candidate_align;\n\n            nn_idx = candidate_indexes[candidate_iter_idx];\n        }\n    }\n    t_calc_dist.toc(\"Distance calc\");\n\n    /* \n     * loop threshold check\n     */\n    if( min_dist < SC_DIST_THRES )\n    {\n        loop_id = nn_idx; \n    \n        // std::cout.precision(3); \n        cout << \"[Loop found] Nearest distance: \" << min_dist << \" btn \" << polarcontexts_.size()-1 << \" and \" << nn_idx << \".\" << endl;\n        // cout << \"[Loop found] yaw diff: \" << nn_align * PC_UNIT_SECTORANGLE << \" deg.\" << endl;\n    }\n    else\n    {\n        std::cout.precision(3); \n        cout << \"[Not loop] Nearest distance: \" << min_dist << \" btn \" << polarcontexts_.size()-1 << \" and \" << nn_idx << \".\" << endl;\n        // cout << \"[Not loop] yaw diff: \" << nn_align * PC_UNIT_SECTORANGLE << \" deg.\" << endl;\n    }\n\n    // To do: return also nn_align (i.e., yaw diff)\n    float yaw_diff_rad = deg2rad(nn_align 
* PC_UNIT_SECTORANGLE);\n    std::pair<int, float> result {loop_id, yaw_diff_rad};\n\n    return result;\n\n} // SCManager::detectLoopClosureID\n\n// } // namespace SC2"
  },
  {
    "path": "SC-PGO/include/scancontext/Scancontext.h",
    "content": "#pragma once\n\n#include <ctime>\n#include <cassert>\n#include <cmath>\n#include <utility>\n#include <vector>\n#include <algorithm> \n#include <cstdlib>\n#include <memory>\n#include <iostream>\n\n#include <Eigen/Dense>\n\n#include <opencv2/opencv.hpp>\n#include <opencv2/core/eigen.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <cv_bridge/cv_bridge.h>\n\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl_conversions/pcl_conversions.h>\n\n#include \"scancontext/nanoflann.hpp\"\n#include \"scancontext/KDTreeVectorOfVectorsAdaptor.h\"\n#include \"livox_mapping/tic_toc.h\"\n\nusing namespace Eigen;\nusing namespace nanoflann;\n\nusing std::cout;\nusing std::endl;\nusing std::make_pair;\n\nusing std::atan2;\nusing std::cos;\nusing std::sin;\n\nusing SCPointType = pcl::PointXYZI; // using xyz only. but a user can exchange the original bin encoding function (i.e., max hegiht) to max intensity (for detail, refer 20 ICRA Intensity Scan Context)\nusing KeyMat = std::vector<std::vector<float> >;\nusing InvKeyTree = KDTreeVectorOfVectorsAdaptor< KeyMat, float >;\n\n\n// namespace SC2\n// {\n\nvoid coreImportTest ( void );\n\n\n// sc param-independent helper functions \nfloat xy2theta( const float & _x, const float & _y );\nMatrixXd circshift( MatrixXd &_mat, int _num_shift );\nstd::vector<float> eig2stdvec( MatrixXd _eigmat );\n\n\nclass SCManager\n{\npublic: \n    SCManager( ) = default; // reserving data space (of std::vector) could be considered. 
but the descriptor is lightweight so don't care.\n\n    Eigen::MatrixXd makeScancontext( pcl::PointCloud<SCPointType> & _scan_down );\n    Eigen::MatrixXd makeRingkeyFromScancontext( Eigen::MatrixXd &_desc );\n    Eigen::MatrixXd makeSectorkeyFromScancontext( Eigen::MatrixXd &_desc );\n\n    int fastAlignUsingVkey ( MatrixXd & _vkey1, MatrixXd & _vkey2 ); \n    double distDirectSC ( MatrixXd &_sc1, MatrixXd &_sc2 ); // \"d\" (eq 5) in the original paper (IROS 18)\n    std::pair<double, int> distanceBtnScanContext ( MatrixXd &_sc1, MatrixXd &_sc2 ); // \"D\" (eq 6) in the original paper (IROS 18)\n\n    // User-side API\n    void makeAndSaveScancontextAndKeys( pcl::PointCloud<SCPointType> & _scan_down );\n    std::pair<int, float> detectLoopClosureID( void ); // int: nearest node index, float: relative yaw  \n\n    // for ltslam \n    // User-side API for multi-session\n    void saveScancontextAndKeys( Eigen::MatrixXd _scd );\n    std::pair<int, float> detectLoopClosureIDBetweenSession ( std::vector<float>& curr_key,  Eigen::MatrixXd& curr_desc);\n\n    const Eigen::MatrixXd& getConstRefRecentSCD(void);\n\npublic:\n    // hyper parameters ()\n    const double LIDAR_HEIGHT = 2.0; // lidar height : add this for simply directly using lidar scan in the lidar local coord (not robot base coord) / if you use robot-coord-transformed lidar scans, just set this as 0.\n\n    const int    PC_NUM_RING = 20; // 20 in the original paper (IROS 18)\n    const int    PC_NUM_SECTOR = 60; // 60 in the original paper (IROS 18)\n    double PC_MAX_RADIUS = 80.0; // 80 meter max in the original paper (IROS 18)\n    const double PC_UNIT_SECTORANGLE = 360.0 / double(PC_NUM_SECTOR);\n    const double PC_UNIT_RINGGAP = PC_MAX_RADIUS / double(PC_NUM_RING);\n\n    // tree\n    const int    NUM_EXCLUDE_RECENT = 30; // simply just keyframe gap (related with loopClosureFrequency in yaml), but node position distance-based exclusion is ok. 
\n    const int    NUM_CANDIDATES_FROM_TREE = 3; // 10 is enough. (refer the IROS 18 paper)\n\n    // loop thres\n    const double SEARCH_RATIO = 0.1; // for fast comparison, no Brute-force, but search 10 % is okay. // not was in the original conf paper, but improved ver.\n    // const double SC_DIST_THRES = 0.13; // empirically 0.1-0.2 is fine (rare false-alarms) for 20x60 polar context (but for 0.15 <, DCS or ICP fit score check (e.g., in LeGO-LOAM) should be required for robustness)\n\n    double SC_DIST_THRES = 0.2; // 0.4-0.6 is good choice for using with robust kernel (e.g., Cauchy, DCS) + icp fitness threshold / if not, recommend 0.1-0.15\n    // const double SC_DIST_THRES = 0.7; // 0.4-0.6 is good choice for using with robust kernel (e.g., Cauchy, DCS) + icp fitness threshold / if not, recommend 0.1-0.15\n\n    // config \n    const int    TREE_MAKING_PERIOD_ = 30; // i.e., remaking tree frequency, to avoid non-mandatory every remaking, to save time cost / in the LeGO-LOAM integration, it is synchronized with the loop detection callback (which is 1Hz) so it means the tree is updated evrey 10 sec. But you can use the smaller value because it is enough fast ~ 5-50ms wrt N.\n    int          tree_making_period_conter = 0;\n\n    // setter\n    void setSCdistThres(double _new_thres);\n    void setMaximumRadius(double _max_r);\n\n    // data \n    std::vector<double> polarcontexts_timestamp_; // optional.\n    std::vector<Eigen::MatrixXd> polarcontexts_;\n    std::vector<Eigen::MatrixXd> polarcontext_invkeys_;\n    std::vector<Eigen::MatrixXd> polarcontext_vkeys_;\n\n    KeyMat polarcontext_invkeys_mat_;\n    KeyMat polarcontext_invkeys_to_search_;\n    std::unique_ptr<InvKeyTree> polarcontext_tree_;\n\n    bool is_tree_batch_made = false;\n    std::unique_ptr<InvKeyTree> polarcontext_tree_batch_;\n\n}; // SCManager\n\n// } // namespace SC2\n"
  },
  {
    "path": "SC-PGO/include/scancontext/nanoflann.hpp",
    "content": "/***********************************************************************\n * Software License Agreement (BSD License)\n *\n * Copyright 2008-2009  Marius Muja (mariusm@cs.ubc.ca). All rights reserved.\n * Copyright 2008-2009  David G. Lowe (lowe@cs.ubc.ca). All rights reserved.\n * Copyright 2011-2016  Jose Luis Blanco (joseluisblancoc@gmail.com).\n *   All rights reserved.\n *\n * THE BSD LICENSE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *************************************************************************/\n\n/** \\mainpage nanoflann C++ API documentation\n *  nanoflann is a C++ header-only library for building KD-Trees, mostly\n *  optimized for 2D or 3D point clouds.\n *\n *  nanoflann does not require compiling or installing, just 
an\n *  #include <nanoflann.hpp> in your code.\n *\n *  See:\n *   - <a href=\"modules.html\" >C++ API organized by modules</a>\n *   - <a href=\"https://github.com/jlblancoc/nanoflann\" >Online README</a>\n *   - <a href=\"http://jlblancoc.github.io/nanoflann/\" >Doxygen\n * documentation</a>\n */\n\n#ifndef NANOFLANN_HPP_\n#define NANOFLANN_HPP_\n\n#include <algorithm>\n#include <array>\n#include <cassert>\n#include <cmath>   // for abs()\n#include <cstdio>  // for fwrite()\n#include <cstdlib> // for abs()\n#include <functional>\n#include <limits> // std::reference_wrapper\n#include <stdexcept>\n#include <vector>\n\n/** Library version: 0xMmP (M=Major,m=minor,P=patch) */\n#define NANOFLANN_VERSION 0x132\n\n// Avoid conflicting declaration of min/max macros in windows headers\n#if !defined(NOMINMAX) &&                                                      \\\n    (defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64))\n#define NOMINMAX\n#ifdef max\n#undef max\n#undef min\n#endif\n#endif\n\nnamespace nanoflann {\n/** @addtogroup nanoflann_grp nanoflann C++ library for ANN\n *  @{ */\n\n/** the PI constant (required to avoid MSVC missing symbols) */\ntemplate <typename T> T pi_const() {\n  return static_cast<T>(3.14159265358979323846);\n}\n\n/**\n * Traits if object is resizable and assignable (typically has a resize | assign\n * method)\n */\ntemplate <typename T, typename = int> struct has_resize : std::false_type {};\n\ntemplate <typename T>\nstruct has_resize<T, decltype((void)std::declval<T>().resize(1), 0)>\n    : std::true_type {};\n\ntemplate <typename T, typename = int> struct has_assign : std::false_type {};\n\ntemplate <typename T>\nstruct has_assign<T, decltype((void)std::declval<T>().assign(1, 0), 0)>\n    : std::true_type {};\n\n/**\n * Free function to resize a resizable object\n */\ntemplate <typename Container>\ninline typename std::enable_if<has_resize<Container>::value, void>::type\nresize(Container &c, const size_t nElements) 
{\n  c.resize(nElements);\n}\n\n/**\n * Free function that has no effects on non resizable containers (e.g.\n * std::array) It raises an exception if the expected size does not match\n */\ntemplate <typename Container>\ninline typename std::enable_if<!has_resize<Container>::value, void>::type\nresize(Container &c, const size_t nElements) {\n  if (nElements != c.size())\n    throw std::logic_error(\"Try to change the size of a std::array.\");\n}\n\n/**\n * Free function to assign to a container\n */\ntemplate <typename Container, typename T>\ninline typename std::enable_if<has_assign<Container>::value, void>::type\nassign(Container &c, const size_t nElements, const T &value) {\n  c.assign(nElements, value);\n}\n\n/**\n * Free function to assign to a std::array\n */\ntemplate <typename Container, typename T>\ninline typename std::enable_if<!has_assign<Container>::value, void>::type\nassign(Container &c, const size_t nElements, const T &value) {\n  for (size_t i = 0; i < nElements; i++)\n    c[i] = value;\n}\n\n/** @addtogroup result_sets_grp Result set classes\n *  @{ */\ntemplate <typename _DistanceType, typename _IndexType = size_t,\n          typename _CountType = size_t>\nclass KNNResultSet {\npublic:\n  typedef _DistanceType DistanceType;\n  typedef _IndexType IndexType;\n  typedef _CountType CountType;\n\nprivate:\n  IndexType *indices;\n  DistanceType *dists;\n  CountType capacity;\n  CountType count;\n\npublic:\n  inline KNNResultSet(CountType capacity_)\n      : indices(0), dists(0), capacity(capacity_), count(0) {}\n\n  inline void init(IndexType *indices_, DistanceType *dists_) {\n    indices = indices_;\n    dists = dists_;\n    count = 0;\n    if (capacity)\n      dists[capacity - 1] = (std::numeric_limits<DistanceType>::max)();\n  }\n\n  inline CountType size() const { return count; }\n\n  inline bool full() const { return count == capacity; }\n\n  /**\n   * Called during search to add an element matching the criteria.\n   * @return true if the search 
should be continued, false if the results are\n   * sufficient\n   */\n  inline bool addPoint(DistanceType dist, IndexType index) {\n    CountType i;\n    for (i = count; i > 0; --i) {\n#ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same\n                             // distance, the one with the lowest-index will be\n                             // returned first.\n      if ((dists[i - 1] > dist) ||\n          ((dist == dists[i - 1]) && (indices[i - 1] > index))) {\n#else\n      if (dists[i - 1] > dist) {\n#endif\n        if (i < capacity) {\n          dists[i] = dists[i - 1];\n          indices[i] = indices[i - 1];\n        }\n      } else\n        break;\n    }\n    if (i < capacity) {\n      dists[i] = dist;\n      indices[i] = index;\n    }\n    if (count < capacity)\n      count++;\n\n    // tell caller that the search shall continue\n    return true;\n  }\n\n  inline DistanceType worstDist() const { return dists[capacity - 1]; }\n};\n\n/** operator \"<\" for std::sort() */\nstruct IndexDist_Sorter {\n  /** PairType will be typically: std::pair<IndexType,DistanceType> */\n  template <typename PairType>\n  inline bool operator()(const PairType &p1, const PairType &p2) const {\n    return p1.second < p2.second;\n  }\n};\n\n/**\n * A result-set class used when performing a radius based search.\n */\ntemplate <typename _DistanceType, typename _IndexType = size_t>\nclass RadiusResultSet {\npublic:\n  typedef _DistanceType DistanceType;\n  typedef _IndexType IndexType;\n\npublic:\n  const DistanceType radius;\n\n  std::vector<std::pair<IndexType, DistanceType>> &m_indices_dists;\n\n  inline RadiusResultSet(\n      DistanceType radius_,\n      std::vector<std::pair<IndexType, DistanceType>> &indices_dists)\n      : radius(radius_), m_indices_dists(indices_dists) {\n    init();\n  }\n\n  inline void init() { clear(); }\n  inline void clear() { m_indices_dists.clear(); }\n\n  inline size_t size() const { return m_indices_dists.size(); }\n\n  inline 
bool full() const { return true; }\n\n  /**\n   * Called during search to add an element matching the criteria.\n   * @return true if the search should be continued, false if the results are\n   * sufficient\n   */\n  inline bool addPoint(DistanceType dist, IndexType index) {\n    if (dist < radius)\n      m_indices_dists.push_back(std::make_pair(index, dist));\n    return true;\n  }\n\n  inline DistanceType worstDist() const { return radius; }\n\n  /**\n   * Find the worst result (furtherest neighbor) without copying or sorting\n   * Pre-conditions: size() > 0\n   */\n  std::pair<IndexType, DistanceType> worst_item() const {\n    if (m_indices_dists.empty())\n      throw std::runtime_error(\"Cannot invoke RadiusResultSet::worst_item() on \"\n                               \"an empty list of results.\");\n    typedef\n        typename std::vector<std::pair<IndexType, DistanceType>>::const_iterator\n            DistIt;\n    DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end(),\n                                 IndexDist_Sorter());\n    return *it;\n  }\n};\n\n/** @} */\n\n/** @addtogroup loadsave_grp Load/save auxiliary functions\n * @{ */\ntemplate <typename T>\nvoid save_value(FILE *stream, const T &value, size_t count = 1) {\n  fwrite(&value, sizeof(value), count, stream);\n}\n\ntemplate <typename T>\nvoid save_value(FILE *stream, const std::vector<T> &value) {\n  size_t size = value.size();\n  fwrite(&size, sizeof(size_t), 1, stream);\n  fwrite(&value[0], sizeof(T), size, stream);\n}\n\ntemplate <typename T>\nvoid load_value(FILE *stream, T &value, size_t count = 1) {\n  size_t read_cnt = fread(&value, sizeof(value), count, stream);\n  if (read_cnt != count) {\n    throw std::runtime_error(\"Cannot read from file\");\n  }\n}\n\ntemplate <typename T> void load_value(FILE *stream, std::vector<T> &value) {\n  size_t size;\n  size_t read_cnt = fread(&size, sizeof(size_t), 1, stream);\n  if (read_cnt != 1) {\n    throw 
std::runtime_error(\"Cannot read from file\");\n  }\n  value.resize(size);\n  read_cnt = fread(&value[0], sizeof(T), size, stream);\n  if (read_cnt != size) {\n    throw std::runtime_error(\"Cannot read from file\");\n  }\n}\n/** @} */\n\n/** @addtogroup metric_grp Metric (distance) classes\n * @{ */\n\nstruct Metric {};\n\n/** Manhattan distance functor (generic version, optimized for\n * high-dimensionality data sets). Corresponding distance traits:\n * nanoflann::metric_L1 \\tparam T Type of the elements (e.g. double, float,\n * uint8_t) \\tparam _DistanceType Type of distance variables (must be signed)\n * (e.g. float, double, int64_t)\n */\ntemplate <class T, class DataSource, typename _DistanceType = T>\nstruct L1_Adaptor {\n  typedef T ElementType;\n  typedef _DistanceType DistanceType;\n\n  const DataSource &data_source;\n\n  L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) {}\n\n  inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size,\n                                 DistanceType worst_dist = -1) const {\n    DistanceType result = DistanceType();\n    const T *last = a + size;\n    const T *lastgroup = last - 3;\n    size_t d = 0;\n\n    /* Process 4 items with each loop for efficiency. */\n    while (a < lastgroup) {\n      const DistanceType diff0 =\n          std::abs(a[0] - data_source.kdtree_get_pt(b_idx, d++));\n      const DistanceType diff1 =\n          std::abs(a[1] - data_source.kdtree_get_pt(b_idx, d++));\n      const DistanceType diff2 =\n          std::abs(a[2] - data_source.kdtree_get_pt(b_idx, d++));\n      const DistanceType diff3 =\n          std::abs(a[3] - data_source.kdtree_get_pt(b_idx, d++));\n      result += diff0 + diff1 + diff2 + diff3;\n      a += 4;\n      if ((worst_dist > 0) && (result > worst_dist)) {\n        return result;\n      }\n    }\n    /* Process last 0-3 components.  Not needed for standard vector lengths. 
*/\n    while (a < last) {\n      result += std::abs(*a++ - data_source.kdtree_get_pt(b_idx, d++));\n    }\n    return result;\n  }\n\n  template <typename U, typename V>\n  inline DistanceType accum_dist(const U a, const V b, const size_t) const {\n    return std::abs(a - b);\n  }\n};\n\n/** Squared Euclidean distance functor (generic version, optimized for\n * high-dimensionality data sets). Corresponding distance traits:\n * nanoflann::metric_L2 \\tparam T Type of the elements (e.g. double, float,\n * uint8_t) \\tparam _DistanceType Type of distance variables (must be signed)\n * (e.g. float, double, int64_t)\n */\ntemplate <class T, class DataSource, typename _DistanceType = T>\nstruct L2_Adaptor {\n  typedef T ElementType;\n  typedef _DistanceType DistanceType;\n\n  const DataSource &data_source;\n\n  L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) {}\n\n  inline DistanceType evalMetric(const T *a, const size_t b_idx, size_t size,\n                                 DistanceType worst_dist = -1) const {\n    DistanceType result = DistanceType();\n    const T *last = a + size;\n    const T *lastgroup = last - 3;\n    size_t d = 0;\n\n    /* Process 4 items with each loop for efficiency. */\n    while (a < lastgroup) {\n      const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx, d++);\n      const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx, d++);\n      const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx, d++);\n      const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx, d++);\n      result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;\n      a += 4;\n      if ((worst_dist > 0) && (result > worst_dist)) {\n        return result;\n      }\n    }\n    /* Process last 0-3 components.  Not needed for standard vector lengths. 
*/\n    while (a < last) {\n      const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx, d++);\n      result += diff0 * diff0;\n    }\n    return result;\n  }\n\n  template <typename U, typename V>\n  inline DistanceType accum_dist(const U a, const V b, const size_t) const {\n    return (a - b) * (a - b);\n  }\n};\n\n/** Squared Euclidean (L2) distance functor (suitable for low-dimensionality\n * datasets, like 2D or 3D point clouds) Corresponding distance traits:\n * nanoflann::metric_L2_Simple \\tparam T Type of the elements (e.g. double,\n * float, uint8_t) \\tparam _DistanceType Type of distance variables (must be\n * signed) (e.g. float, double, int64_t)\n */\ntemplate <class T, class DataSource, typename _DistanceType = T>\nstruct L2_Simple_Adaptor {\n  typedef T ElementType;\n  typedef _DistanceType DistanceType;\n\n  const DataSource &data_source;\n\n  L2_Simple_Adaptor(const DataSource &_data_source)\n      : data_source(_data_source) {}\n\n  inline DistanceType evalMetric(const T *a, const size_t b_idx,\n                                 size_t size) const {\n    DistanceType result = DistanceType();\n    for (size_t i = 0; i < size; ++i) {\n      const DistanceType diff = a[i] - data_source.kdtree_get_pt(b_idx, i);\n      result += diff * diff;\n    }\n    return result;\n  }\n\n  template <typename U, typename V>\n  inline DistanceType accum_dist(const U a, const V b, const size_t) const {\n    return (a - b) * (a - b);\n  }\n};\n\n/** SO2 distance functor\n *  Corresponding distance traits: nanoflann::metric_SO2\n * \\tparam T Type of the elements (e.g. 
double, float)\n * \\tparam _DistanceType Type of distance variables (must be signed) (e.g.\n * float, double) orientation is constrained to be in [-pi, pi]\n */\ntemplate <class T, class DataSource, typename _DistanceType = T>\nstruct SO2_Adaptor {\n  typedef T ElementType;\n  typedef _DistanceType DistanceType;\n\n  const DataSource &data_source;\n\n  SO2_Adaptor(const DataSource &_data_source) : data_source(_data_source) {}\n\n  inline DistanceType evalMetric(const T *a, const size_t b_idx,\n                                 size_t size) const {\n    return accum_dist(a[size - 1], data_source.kdtree_get_pt(b_idx, size - 1),\n                      size - 1);\n  }\n\n  /** Note: this assumes that input angles are already in the range [-pi,pi] */\n  template <typename U, typename V>\n  inline DistanceType accum_dist(const U a, const V b, const size_t) const {\n    DistanceType result = DistanceType();\n    DistanceType PI = pi_const<DistanceType>();\n    result = b - a;\n    if (result > PI)\n      result -= 2 * PI;\n    else if (result < -PI)\n      result += 2 * PI;\n    return result;\n  }\n};\n\n/** SO3 distance functor (Uses L2_Simple)\n *  Corresponding distance traits: nanoflann::metric_SO3\n * \\tparam T Type of the elements (e.g. 
double, float)\n * \\tparam _DistanceType Type of distance variables (must be signed) (e.g.\n * float, double)\n */\ntemplate <class T, class DataSource, typename _DistanceType = T>\nstruct SO3_Adaptor {\n  typedef T ElementType;\n  typedef _DistanceType DistanceType;\n\n  L2_Simple_Adaptor<T, DataSource> distance_L2_Simple;\n\n  SO3_Adaptor(const DataSource &_data_source)\n      : distance_L2_Simple(_data_source) {}\n\n  inline DistanceType evalMetric(const T *a, const size_t b_idx,\n                                 size_t size) const {\n    return distance_L2_Simple.evalMetric(a, b_idx, size);\n  }\n\n  template <typename U, typename V>\n  inline DistanceType accum_dist(const U a, const V b, const size_t idx) const {\n    return distance_L2_Simple.accum_dist(a, b, idx);\n  }\n};\n\n/** Metaprogramming helper traits class for the L1 (Manhattan) metric */\nstruct metric_L1 : public Metric {\n  template <class T, class DataSource> struct traits {\n    typedef L1_Adaptor<T, DataSource> distance_t;\n  };\n};\n/** Metaprogramming helper traits class for the L2 (Euclidean) metric */\nstruct metric_L2 : public Metric {\n  template <class T, class DataSource> struct traits {\n    typedef L2_Adaptor<T, DataSource> distance_t;\n  };\n};\n/** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */\nstruct metric_L2_Simple : public Metric {\n  template <class T, class DataSource> struct traits {\n    typedef L2_Simple_Adaptor<T, DataSource> distance_t;\n  };\n};\n/** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */\nstruct metric_SO2 : public Metric {\n  template <class T, class DataSource> struct traits {\n    typedef SO2_Adaptor<T, DataSource> distance_t;\n  };\n};\n/** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */\nstruct metric_SO3 : public Metric {\n  template <class T, class DataSource> struct traits {\n    typedef SO3_Adaptor<T, DataSource> distance_t;\n  };\n};\n\n/** @} */\n\n/** @addtogroup 
param_grp Parameter structs\n * @{ */\n\n/**  Parameters (see README.md) */\nstruct KDTreeSingleIndexAdaptorParams {\n  KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10)\n      : leaf_max_size(_leaf_max_size) {}\n\n  size_t leaf_max_size;\n};\n\n/** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */\nstruct SearchParams {\n  /** Note: The first argument (checks_IGNORED_) is ignored, but kept for\n   * compatibility with the FLANN interface */\n  SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true)\n      : checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {}\n\n  int checks;  //!< Ignored parameter (Kept for compatibility with the FLANN\n               //!< interface).\n  float eps;   //!< search for eps-approximate neighbours (default: 0)\n  bool sorted; //!< only for radius search, require neighbours sorted by\n               //!< distance (default: true)\n};\n/** @} */\n\n/** @addtogroup memalloc_grp Memory allocation\n * @{ */\n\n/**\n * Allocates (using C's malloc) a generic type T.\n *\n * Params:\n *     count = number of instances to allocate.\n * Returns: pointer (of type T*) to memory buffer\n */\ntemplate <typename T> inline T *allocate(size_t count = 1) {\n  T *mem = static_cast<T *>(::malloc(sizeof(T) * count));\n  return mem;\n}\n\n/**\n * Pooled storage allocator\n *\n * The following routines allow for the efficient allocation of storage in\n * small chunks from a specified pool.  Rather than allowing each structure\n * to be freed individually, an entire pool of storage is freed at once.\n * This method has two advantages over just using malloc() and free().  First,\n * it is far more efficient for allocating small objects, as there is\n * no overhead for remembering all the information needed to free each\n * object or consolidating fragmented memory.  
Second, the decision about\n * how long to keep an object is made at the time of allocation, and there\n * is no need to track down all the objects to free them.\n *\n */\n\nconst size_t WORDSIZE = 16;\nconst size_t BLOCKSIZE = 8192;\n\nclass PooledAllocator {\n  /* We maintain memory alignment to word boundaries by requiring that all\n      allocations be in multiples of the machine wordsize.  */\n  /* Size of machine word in bytes.  Must be power of 2. */\n  /* Minimum number of bytes requested at a time from\tthe system.  Must be\n   * multiple of WORDSIZE. */\n\n  size_t remaining; /* Number of bytes left in current block of storage. */\n  void *base;       /* Pointer to base of current block of storage. */\n  void *loc;        /* Current location in block to next allocate memory. */\n\n  void internal_init() {\n    remaining = 0;\n    base = NULL;\n    usedMemory = 0;\n    wastedMemory = 0;\n  }\n\npublic:\n  size_t usedMemory;\n  size_t wastedMemory;\n\n  /**\n      Default constructor. Initializes a new pool.\n   */\n  PooledAllocator() { internal_init(); }\n\n  /**\n   * Destructor. Frees all the memory allocated in this pool.\n   */\n  ~PooledAllocator() { free_all(); }\n\n  /** Frees all allocated memory chunks */\n  void free_all() {\n    while (base != NULL) {\n      void *prev =\n          *(static_cast<void **>(base)); /* Get pointer to prev block. */\n      ::free(base);\n      base = prev;\n    }\n    internal_init();\n  }\n\n  /**\n   * Returns a pointer to a piece of new memory of the given size in bytes\n   * allocated from the pool.\n   */\n  void *malloc(const size_t req_size) {\n    /* Round size up to a multiple of wordsize.  The following expression\n        only works for WORDSIZE that is a power of 2, by masking last bits of\n        incremented size to zero.\n     */\n    const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);\n\n    /* Check whether a new block must be allocated.  
Note that the first word\n        of a block is reserved for a pointer to the previous block.\n     */\n    if (size > remaining) {\n\n      wastedMemory += remaining;\n\n      /* Allocate new storage. */\n      const size_t blocksize =\n          (size + sizeof(void *) + (WORDSIZE - 1) > BLOCKSIZE)\n              ? size + sizeof(void *) + (WORDSIZE - 1)\n              : BLOCKSIZE;\n\n      // use the standard C malloc to allocate memory\n      void *m = ::malloc(blocksize);\n      if (!m) {\n        fprintf(stderr, \"Failed to allocate memory.\\n\");\n        return NULL;\n      }\n\n      /* Fill first word of new block with pointer to previous block. */\n      static_cast<void **>(m)[0] = base;\n      base = m;\n\n      size_t shift = 0;\n      // int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) &\n      // (WORDSIZE-1))) & (WORDSIZE-1);\n\n      remaining = blocksize - sizeof(void *) - shift;\n      loc = (static_cast<char *>(m) + sizeof(void *) + shift);\n    }\n    void *rloc = loc;\n    loc = static_cast<char *>(loc) + size;\n    remaining -= size;\n\n    usedMemory += size;\n\n    return rloc;\n  }\n\n  /**\n   * Allocates (using this pool) a generic type T.\n   *\n   * Params:\n   *     count = number of instances to allocate.\n   * Returns: pointer (of type T*) to memory buffer\n   */\n  template <typename T> T *allocate(const size_t count = 1) {\n    T *mem = static_cast<T *>(this->malloc(sizeof(T) * count));\n    return mem;\n  }\n};\n/** @} */\n\n/** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff\n * @{ */\n\n/** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors\n * when DIM=-1. 
Fixed size version for a generic DIM:\n */\ntemplate <int DIM, typename T> struct array_or_vector_selector {\n  typedef std::array<T, DIM> container_t;\n};\n/** Dynamic size version */\ntemplate <typename T> struct array_or_vector_selector<-1, T> {\n  typedef std::vector<T> container_t;\n};\n\n/** @} */\n\n/** kd-tree base-class\n *\n * Contains the member functions common to the classes KDTreeSingleIndexAdaptor\n * and KDTreeSingleIndexDynamicAdaptor_.\n *\n * \\tparam Derived The name of the class which inherits this class.\n * \\tparam DatasetAdaptor The user-provided adaptor (see comments above).\n * \\tparam Distance The distance metric to use, these are all classes derived\n * from nanoflann::Metric \\tparam DIM Dimensionality of data points (e.g. 3 for\n * 3D points) \\tparam IndexType Will be typically size_t or int\n */\n\ntemplate <class Derived, typename Distance, class DatasetAdaptor, int DIM = -1,\n          typename IndexType = size_t>\nclass KDTreeBaseClass {\n\npublic:\n  /** Frees the previously-built index. Automatically called within\n   * buildIndex(). 
*/\n  void freeIndex(Derived &obj) {\n    obj.pool.free_all();\n    obj.root_node = NULL;\n    obj.m_size_at_index_build = 0;\n  }\n\n  typedef typename Distance::ElementType ElementType;\n  typedef typename Distance::DistanceType DistanceType;\n\n  /*--------------------- Internal Data Structures --------------------------*/\n  struct Node {\n    /** Union used because a node can be either a LEAF node or a non-leaf node,\n     * so both data fields are never used simultaneously */\n    union {\n      struct leaf {\n        IndexType left, right; //!< Indices of points in leaf node\n      } lr;\n      struct nonleaf {\n        int divfeat;                  //!< Dimension used for subdivision.\n        DistanceType divlow, divhigh; //!< The values used for subdivision.\n      } sub;\n    } node_type;\n    Node *child1, *child2; //!< Child nodes (both=NULL mean its a leaf node)\n  };\n\n  typedef Node *NodePtr;\n\n  struct Interval {\n    ElementType low, high;\n  };\n\n  /**\n   *  Array of indices to vectors in the dataset.\n   */\n  std::vector<IndexType> vind;\n\n  NodePtr root_node;\n\n  size_t m_leaf_max_size;\n\n  size_t m_size;                //!< Number of current points in the dataset\n  size_t m_size_at_index_build; //!< Number of points in the dataset when the\n                                //!< index was built\n  int dim;                      //!< Dimensionality of each data point\n\n  /** Define \"BoundingBox\" as a fixed-size or variable-size container depending\n   * on \"DIM\" */\n  typedef\n      typename array_or_vector_selector<DIM, Interval>::container_t BoundingBox;\n\n  /** Define \"distance_vector_t\" as a fixed-size or variable-size container\n   * depending on \"DIM\" */\n  typedef typename array_or_vector_selector<DIM, DistanceType>::container_t\n      distance_vector_t;\n\n  /** The KD-tree used to find neighbours */\n\n  BoundingBox root_bbox;\n\n  /**\n   * Pooled memory allocator.\n   *\n   * Using a pooled memory allocator is more 
efficient\n   * than allocating memory directly when there is a large\n   * number small of memory allocations.\n   */\n  PooledAllocator pool;\n\n  /** Returns number of points in dataset  */\n  size_t size(const Derived &obj) const { return obj.m_size; }\n\n  /** Returns the length of each point in the dataset */\n  size_t veclen(const Derived &obj) {\n    return static_cast<size_t>(DIM > 0 ? DIM : obj.dim);\n  }\n\n  /// Helper accessor to the dataset points:\n  inline ElementType dataset_get(const Derived &obj, size_t idx,\n                                 int component) const {\n    return obj.dataset.kdtree_get_pt(idx, component);\n  }\n\n  /**\n   * Computes the inde memory usage\n   * Returns: memory used by the index\n   */\n  size_t usedMemory(Derived &obj) {\n    return obj.pool.usedMemory + obj.pool.wastedMemory +\n           obj.dataset.kdtree_get_point_count() *\n               sizeof(IndexType); // pool memory and vind array memory\n  }\n\n  void computeMinMax(const Derived &obj, IndexType *ind, IndexType count,\n                     int element, ElementType &min_elem,\n                     ElementType &max_elem) {\n    min_elem = dataset_get(obj, ind[0], element);\n    max_elem = dataset_get(obj, ind[0], element);\n    for (IndexType i = 1; i < count; ++i) {\n      ElementType val = dataset_get(obj, ind[i], element);\n      if (val < min_elem)\n        min_elem = val;\n      if (val > max_elem)\n        max_elem = val;\n    }\n  }\n\n  /**\n   * Create a tree node that subdivides the list of vecs from vind[first]\n   * to vind[last].  The routine is called recursively on each sublist.\n   *\n   * @param left index of the first vector\n   * @param right index of the last vector\n   */\n  NodePtr divideTree(Derived &obj, const IndexType left, const IndexType right,\n                     BoundingBox &bbox) {\n    NodePtr node = obj.pool.template allocate<Node>(); // allocate memory\n\n    /* If too few exemplars remain, then make this a leaf node. 
*/\n    if ((right - left) <= static_cast<IndexType>(obj.m_leaf_max_size)) {\n      node->child1 = node->child2 = NULL; /* Mark as leaf node. */\n      node->node_type.lr.left = left;\n      node->node_type.lr.right = right;\n\n      // compute bounding-box of leaf points\n      for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {\n        bbox[i].low = dataset_get(obj, obj.vind[left], i);\n        bbox[i].high = dataset_get(obj, obj.vind[left], i);\n      }\n      for (IndexType k = left + 1; k < right; ++k) {\n        for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {\n          if (bbox[i].low > dataset_get(obj, obj.vind[k], i))\n            bbox[i].low = dataset_get(obj, obj.vind[k], i);\n          if (bbox[i].high < dataset_get(obj, obj.vind[k], i))\n            bbox[i].high = dataset_get(obj, obj.vind[k], i);\n        }\n      }\n    } else {\n      IndexType idx;\n      int cutfeat;\n      DistanceType cutval;\n      middleSplit_(obj, &obj.vind[0] + left, right - left, idx, cutfeat, cutval,\n                   bbox);\n\n      node->node_type.sub.divfeat = cutfeat;\n\n      BoundingBox left_bbox(bbox);\n      left_bbox[cutfeat].high = cutval;\n      node->child1 = divideTree(obj, left, left + idx, left_bbox);\n\n      BoundingBox right_bbox(bbox);\n      right_bbox[cutfeat].low = cutval;\n      node->child2 = divideTree(obj, left + idx, right, right_bbox);\n\n      node->node_type.sub.divlow = left_bbox[cutfeat].high;\n      node->node_type.sub.divhigh = right_bbox[cutfeat].low;\n\n      for (int i = 0; i < (DIM > 0 ? 
DIM : obj.dim); ++i) {\n        bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);\n        bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);\n      }\n    }\n\n    return node;\n  }\n\n  void middleSplit_(Derived &obj, IndexType *ind, IndexType count,\n                    IndexType &index, int &cutfeat, DistanceType &cutval,\n                    const BoundingBox &bbox) {\n    const DistanceType EPS = static_cast<DistanceType>(0.00001);\n    ElementType max_span = bbox[0].high - bbox[0].low;\n    for (int i = 1; i < (DIM > 0 ? DIM : obj.dim); ++i) {\n      ElementType span = bbox[i].high - bbox[i].low;\n      if (span > max_span) {\n        max_span = span;\n      }\n    }\n    ElementType max_spread = -1;\n    cutfeat = 0;\n    for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {\n      ElementType span = bbox[i].high - bbox[i].low;\n      if (span > (1 - EPS) * max_span) {\n        ElementType min_elem, max_elem;\n        computeMinMax(obj, ind, count, i, min_elem, max_elem);\n        ElementType spread = max_elem - min_elem;\n        ;\n        if (spread > max_spread) {\n          cutfeat = i;\n          max_spread = spread;\n        }\n      }\n    }\n    // split in the middle\n    DistanceType split_val = (bbox[cutfeat].low + bbox[cutfeat].high) / 2;\n    ElementType min_elem, max_elem;\n    computeMinMax(obj, ind, count, cutfeat, min_elem, max_elem);\n\n    if (split_val < min_elem)\n      cutval = min_elem;\n    else if (split_val > max_elem)\n      cutval = max_elem;\n    else\n      cutval = split_val;\n\n    IndexType lim1, lim2;\n    planeSplit(obj, ind, count, cutfeat, cutval, lim1, lim2);\n\n    if (lim1 > count / 2)\n      index = lim1;\n    else if (lim2 < count / 2)\n      index = lim2;\n    else\n      index = count / 2;\n  }\n\n  /**\n   *  Subdivide the list of points by a plane perpendicular on axe corresponding\n   *  to the 'cutfeat' dimension at 'cutval' position.\n   *\n   *  On return:\n   *  
dataset[ind[0..lim1-1]][cutfeat]<cutval\n   *  dataset[ind[lim1..lim2-1]][cutfeat]==cutval\n   *  dataset[ind[lim2..count]][cutfeat]>cutval\n   */\n  void planeSplit(Derived &obj, IndexType *ind, const IndexType count,\n                  int cutfeat, DistanceType &cutval, IndexType &lim1,\n                  IndexType &lim2) {\n    /* Move vector indices for left subtree to front of list. */\n    IndexType left = 0;\n    IndexType right = count - 1;\n    for (;;) {\n      while (left <= right && dataset_get(obj, ind[left], cutfeat) < cutval)\n        ++left;\n      while (right && left <= right &&\n             dataset_get(obj, ind[right], cutfeat) >= cutval)\n        --right;\n      if (left > right || !right)\n        break; // \"!right\" was added to support unsigned Index types\n      std::swap(ind[left], ind[right]);\n      ++left;\n      --right;\n    }\n    /* If either list is empty, it means that all remaining features\n     * are identical. Split in the middle to maintain a balanced tree.\n     */\n    lim1 = left;\n    right = count - 1;\n    for (;;) {\n      while (left <= right && dataset_get(obj, ind[left], cutfeat) <= cutval)\n        ++left;\n      while (right && left <= right &&\n             dataset_get(obj, ind[right], cutfeat) > cutval)\n        --right;\n      if (left > right || !right)\n        break; // \"!right\" was added to support unsigned Index types\n      std::swap(ind[left], ind[right]);\n      ++left;\n      --right;\n    }\n    lim2 = left;\n  }\n\n  DistanceType computeInitialDistances(const Derived &obj,\n                                       const ElementType *vec,\n                                       distance_vector_t &dists) const {\n    assert(vec);\n    DistanceType distsq = DistanceType();\n\n    for (int i = 0; i < (DIM > 0 ? 
DIM : obj.dim); ++i) {\n      if (vec[i] < obj.root_bbox[i].low) {\n        dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].low, i);\n        distsq += dists[i];\n      }\n      if (vec[i] > obj.root_bbox[i].high) {\n        dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].high, i);\n        distsq += dists[i];\n      }\n    }\n    return distsq;\n  }\n\n  void save_tree(Derived &obj, FILE *stream, NodePtr tree) {\n    save_value(stream, *tree);\n    if (tree->child1 != NULL) {\n      save_tree(obj, stream, tree->child1);\n    }\n    if (tree->child2 != NULL) {\n      save_tree(obj, stream, tree->child2);\n    }\n  }\n\n  void load_tree(Derived &obj, FILE *stream, NodePtr &tree) {\n    tree = obj.pool.template allocate<Node>();\n    load_value(stream, *tree);\n    if (tree->child1 != NULL) {\n      load_tree(obj, stream, tree->child1);\n    }\n    if (tree->child2 != NULL) {\n      load_tree(obj, stream, tree->child2);\n    }\n  }\n\n  /**  Stores the index in a binary file.\n   *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so when\n   * loading the index object it must be constructed associated to the same\n   * source of data points used while building it. See the example:\n   * examples/saveload_example.cpp \\sa loadIndex  */\n  void saveIndex_(Derived &obj, FILE *stream) {\n    save_value(stream, obj.m_size);\n    save_value(stream, obj.dim);\n    save_value(stream, obj.root_bbox);\n    save_value(stream, obj.m_leaf_max_size);\n    save_value(stream, obj.vind);\n    save_tree(obj, stream, obj.root_node);\n  }\n\n  /**  Loads a previous index from a binary file.\n   *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so the\n   * index object must be constructed associated to the same source of data\n   * points used while building the index. 
See the example:\n   * examples/saveload_example.cpp \\sa loadIndex  */\n  void loadIndex_(Derived &obj, FILE *stream) {\n    load_value(stream, obj.m_size);\n    load_value(stream, obj.dim);\n    load_value(stream, obj.root_bbox);\n    load_value(stream, obj.m_leaf_max_size);\n    load_value(stream, obj.vind);\n    load_tree(obj, stream, obj.root_node);\n  }\n};\n\n/** @addtogroup kdtrees_grp KD-tree classes and adaptors\n * @{ */\n\n/** kd-tree static index\n *\n * Contains the k-d trees and other information for indexing a set of points\n * for nearest-neighbor matching.\n *\n *  The class \"DatasetAdaptor\" must provide the following interface (can be\n * non-virtual, inlined methods):\n *\n *  \\code\n *   // Must return the number of data poins\n *   inline size_t kdtree_get_point_count() const { ... }\n *\n *\n *   // Must return the dim'th component of the idx'th point in the class:\n *   inline T kdtree_get_pt(const size_t idx, const size_t dim) const { ... }\n *\n *   // Optional bounding-box computation: return false to default to a standard\n * bbox computation loop.\n *   //   Return true if the BBOX was already computed by the class and returned\n * in \"bb\" so it can be avoided to redo it again.\n *   //   Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3\n * for point clouds) template <class BBOX> bool kdtree_get_bbox(BBOX &bb) const\n *   {\n *      bb[0].low = ...; bb[0].high = ...;  // 0th dimension limits\n *      bb[1].low = ...; bb[1].high = ...;  // 1st dimension limits\n *      ...\n *      return true;\n *   }\n *\n *  \\endcode\n *\n * \\tparam DatasetAdaptor The user-provided adaptor (see comments above).\n * \\tparam Distance The distance metric to use: nanoflann::metric_L1,\n * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \\tparam DIM\n * Dimensionality of data points (e.g. 
3 for 3D points) \\tparam IndexType Will\n * be typically size_t or int\n */\ntemplate <typename Distance, class DatasetAdaptor, int DIM = -1,\n          typename IndexType = size_t>\nclass KDTreeSingleIndexAdaptor\n    : public KDTreeBaseClass<\n          KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>,\n          Distance, DatasetAdaptor, DIM, IndexType> {\npublic:\n  /** Deleted copy constructor*/\n  KDTreeSingleIndexAdaptor(\n      const KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM, IndexType>\n          &) = delete;\n\n  /**\n   * The dataset used by this index\n   */\n  const DatasetAdaptor &dataset; //!< The source of our data\n\n  const KDTreeSingleIndexAdaptorParams index_params;\n\n  Distance distance;\n\n  typedef typename nanoflann::KDTreeBaseClass<\n      nanoflann::KDTreeSingleIndexAdaptor<Distance, DatasetAdaptor, DIM,\n                                          IndexType>,\n      Distance, DatasetAdaptor, DIM, IndexType>\n      BaseClassRef;\n\n  typedef typename BaseClassRef::ElementType ElementType;\n  typedef typename BaseClassRef::DistanceType DistanceType;\n\n  typedef typename BaseClassRef::Node Node;\n  typedef Node *NodePtr;\n\n  typedef typename BaseClassRef::Interval Interval;\n  /** Define \"BoundingBox\" as a fixed-size or variable-size container depending\n   * on \"DIM\" */\n  typedef typename BaseClassRef::BoundingBox BoundingBox;\n\n  /** Define \"distance_vector_t\" as a fixed-size or variable-size container\n   * depending on \"DIM\" */\n  typedef typename BaseClassRef::distance_vector_t distance_vector_t;\n\n  /**\n   * KDTree constructor\n   *\n   * Refer to docs in README.md or online in\n   * https://github.com/jlblancoc/nanoflann\n   *\n   * The KD-Tree point dimension (the length of each point in the datase, e.g. 
3\n   * for 3D points) is determined by means of:\n   *  - The \\a DIM template parameter if >0 (highest priority)\n   *  - Otherwise, the \\a dimensionality parameter of this constructor.\n   *\n   * @param inputData Dataset with the input features\n   * @param params Basically, the maximum leaf node size\n   */\n  KDTreeSingleIndexAdaptor(const int dimensionality,\n                           const DatasetAdaptor &inputData,\n                           const KDTreeSingleIndexAdaptorParams &params =\n                               KDTreeSingleIndexAdaptorParams())\n      : dataset(inputData), index_params(params), distance(inputData) {\n    BaseClassRef::root_node = NULL;\n    BaseClassRef::m_size = dataset.kdtree_get_point_count();\n    BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;\n    BaseClassRef::dim = dimensionality;\n    if (DIM > 0)\n      BaseClassRef::dim = DIM;\n    BaseClassRef::m_leaf_max_size = params.leaf_max_size;\n\n    // Create a permutable array of indices to the input vectors.\n    init_vind();\n  }\n\n  /**\n   * Builds the index\n   */\n  void buildIndex() {\n    BaseClassRef::m_size = dataset.kdtree_get_point_count();\n    BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;\n    init_vind();\n    this->freeIndex(*this);\n    BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;\n    if (BaseClassRef::m_size == 0)\n      return;\n    computeBoundingBox(BaseClassRef::root_bbox);\n    BaseClassRef::root_node =\n        this->divideTree(*this, 0, BaseClassRef::m_size,\n                         BaseClassRef::root_bbox); // construct the tree\n  }\n\n  /** \\name Query methods\n   * @{ */\n\n  /**\n   * Find set of nearest neighbors to vec[0:dim-1]. 
Their indices are stored\n   * inside the result object.\n   *\n   * Params:\n   *     result = the result object in which the indices of the\n   * nearest-neighbors are stored vec = the vector for which to search the\n   * nearest neighbors\n   *\n   * \\tparam RESULTSET Should be any ResultSet<DistanceType>\n   * \\return  True if the requested neighbors could be found.\n   * \\sa knnSearch, radiusSearch\n   */\n  template <typename RESULTSET>\n  bool findNeighbors(RESULTSET &result, const ElementType *vec,\n                     const SearchParams &searchParams) const {\n    assert(vec);\n    if (this->size(*this) == 0)\n      return false;\n    if (!BaseClassRef::root_node)\n      throw std::runtime_error(\n          \"[nanoflann] findNeighbors() called before building the index.\");\n    float epsError = 1 + searchParams.eps;\n\n    distance_vector_t\n        dists; // fixed or variable-sized container (depending on DIM)\n    auto zero = static_cast<decltype(result.worstDist())>(0);\n    assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim),\n           zero); // Fill it with zeros.\n    DistanceType distsq = this->computeInitialDistances(*this, vec, dists);\n    searchLevel(result, vec, BaseClassRef::root_node, distsq, dists,\n                epsError); // \"count_leaf\" parameter removed since was neither\n                           // used nor returned to the user.\n    return result.full();\n  }\n\n  /**\n   * Find the \"num_closest\" nearest neighbors to the \\a query_point[0:dim-1].\n   * Their indices are stored inside the result object. \\sa radiusSearch,\n   * findNeighbors \\note nChecks_IGNORED is ignored but kept for compatibility\n   * with the original FLANN interface. \\return Number `N` of valid points in\n   * the result set. Only the first `N` entries in `out_indices` and\n   * `out_distances_sq` will be valid. 
Return may be less than `num_closest`\n   * only if the number of elements in the tree is less than `num_closest`.\n   */\n  size_t knnSearch(const ElementType *query_point, const size_t num_closest,\n                   IndexType *out_indices, DistanceType *out_distances_sq,\n                   const int /* nChecks_IGNORED */ = 10) const {\n    nanoflann::KNNResultSet<DistanceType, IndexType> resultSet(num_closest);\n    resultSet.init(out_indices, out_distances_sq);\n    this->findNeighbors(resultSet, query_point, nanoflann::SearchParams());\n    return resultSet.size();\n  }\n\n  /**\n   * Find all the neighbors to \\a query_point[0:dim-1] within a maximum radius.\n   *  The output is given as a vector of pairs, of which the first element is a\n   * point index and the second the corresponding distance. Previous contents of\n   * \\a IndicesDists are cleared.\n   *\n   *  If searchParams.sorted==true, the output list is sorted by ascending\n   * distances.\n   *\n   *  For a better performance, it is advisable to do a .reserve() on the vector\n   * if you have any wild guess about the number of expected matches.\n   *\n   *  \\sa knnSearch, findNeighbors, radiusSearchCustomCallback\n   * \\return The number of points within the given radius (i.e. indices.size()\n   * or dists.size() )\n   */\n  size_t\n  radiusSearch(const ElementType *query_point, const DistanceType &radius,\n               std::vector<std::pair<IndexType, DistanceType>> &IndicesDists,\n               const SearchParams &searchParams) const {\n    RadiusResultSet<DistanceType, IndexType> resultSet(radius, IndicesDists);\n    const size_t nFound =\n        radiusSearchCustomCallback(query_point, resultSet, searchParams);\n    if (searchParams.sorted)\n      std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter());\n    return nFound;\n  }\n\n  /**\n   * Just like radiusSearch() but with a custom callback class for each point\n   * found in the radius of the query. 
See the source of RadiusResultSet<> as a\n   * start point for your own classes. \\sa radiusSearch\n   */\n  template <class SEARCH_CALLBACK>\n  size_t radiusSearchCustomCallback(\n      const ElementType *query_point, SEARCH_CALLBACK &resultSet,\n      const SearchParams &searchParams = SearchParams()) const {\n    this->findNeighbors(resultSet, query_point, searchParams);\n    return resultSet.size();\n  }\n\n  /** @} */\n\npublic:\n  /** Make sure the auxiliary list \\a vind has the same size than the current\n   * dataset, and re-generate if size has changed. */\n  void init_vind() {\n    // Create a permutable array of indices to the input vectors.\n    BaseClassRef::m_size = dataset.kdtree_get_point_count();\n    if (BaseClassRef::vind.size() != BaseClassRef::m_size)\n      BaseClassRef::vind.resize(BaseClassRef::m_size);\n    for (size_t i = 0; i < BaseClassRef::m_size; i++)\n      BaseClassRef::vind[i] = i;\n  }\n\n  void computeBoundingBox(BoundingBox &bbox) {\n    resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim));\n    if (dataset.kdtree_get_bbox(bbox)) {\n      // Done! It was implemented in derived class\n    } else {\n      const size_t N = dataset.kdtree_get_point_count();\n      if (!N)\n        throw std::runtime_error(\"[nanoflann] computeBoundingBox() called but \"\n                                 \"no data points found.\");\n      for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {\n        bbox[i].low = bbox[i].high = this->dataset_get(*this, 0, i);\n      }\n      for (size_t k = 1; k < N; ++k) {\n        for (int i = 0; i < (DIM > 0 ? 
DIM : BaseClassRef::dim); ++i) {\n          if (this->dataset_get(*this, k, i) < bbox[i].low)\n            bbox[i].low = this->dataset_get(*this, k, i);\n          if (this->dataset_get(*this, k, i) > bbox[i].high)\n            bbox[i].high = this->dataset_get(*this, k, i);\n        }\n      }\n    }\n  }\n\n  /**\n   * Performs an exact search in the tree starting from a node.\n   * \\tparam RESULTSET Should be any ResultSet<DistanceType>\n   * \\return true if the search should be continued, false if the results are\n   * sufficient\n   */\n  template <class RESULTSET>\n  bool searchLevel(RESULTSET &result_set, const ElementType *vec,\n                   const NodePtr node, DistanceType mindistsq,\n                   distance_vector_t &dists, const float epsError) const {\n    /* If this is a leaf node, then do check and return. */\n    if ((node->child1 == NULL) && (node->child2 == NULL)) {\n      // count_leaf += (node->lr.right-node->lr.left);  // Removed since was\n      // neither used nor returned to the user.\n      DistanceType worst_dist = result_set.worstDist();\n      for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right;\n           ++i) {\n        const IndexType index = BaseClassRef::vind[i]; // reorder... : i;\n        DistanceType dist = distance.evalMetric(\n            vec, index, (DIM > 0 ? DIM : BaseClassRef::dim));\n        if (dist < worst_dist) {\n          if (!result_set.addPoint(dist, BaseClassRef::vind[i])) {\n            // the resultset doesn't want to receive any more points, we're done\n            // searching!\n            return false;\n          }\n        }\n      }\n      return true;\n    }\n\n    /* Which child branch should be taken first? 
*/\n    int idx = node->node_type.sub.divfeat;\n    ElementType val = vec[idx];\n    DistanceType diff1 = val - node->node_type.sub.divlow;\n    DistanceType diff2 = val - node->node_type.sub.divhigh;\n\n    NodePtr bestChild;\n    NodePtr otherChild;\n    DistanceType cut_dist;\n    if ((diff1 + diff2) < 0) {\n      bestChild = node->child1;\n      otherChild = node->child2;\n      cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx);\n    } else {\n      bestChild = node->child2;\n      otherChild = node->child1;\n      cut_dist = distance.accum_dist(val, node->node_type.sub.divlow, idx);\n    }\n\n    /* Call recursively to search next level down. */\n    if (!searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError)) {\n      // the resultset doesn't want to receive any more points, we're done\n      // searching!\n      return false;\n    }\n\n    DistanceType dst = dists[idx];\n    mindistsq = mindistsq + cut_dist - dst;\n    dists[idx] = cut_dist;\n    if (mindistsq * epsError <= result_set.worstDist()) {\n      if (!searchLevel(result_set, vec, otherChild, mindistsq, dists,\n                       epsError)) {\n        // the resultset doesn't want to receive any more points, we're done\n        // searching!\n        return false;\n      }\n    }\n    dists[idx] = dst;\n    return true;\n  }\n\npublic:\n  /**  Stores the index in a binary file.\n   *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so when\n   * loading the index object it must be constructed associated to the same\n   * source of data points used while building it. 
See the example:\n   * examples/saveload_example.cpp \\sa loadIndex  */\n  void saveIndex(FILE *stream) { this->saveIndex_(*this, stream); }\n\n  /**  Loads a previous index from a binary file.\n   *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so the\n   * index object must be constructed associated to the same source of data\n   * points used while building the index. See the example:\n   * examples/saveload_example.cpp \\sa loadIndex  */\n  void loadIndex(FILE *stream) { this->loadIndex_(*this, stream); }\n\n}; // class KDTree\n\n/** kd-tree dynamic index\n *\n * Contains the k-d trees and other information for indexing a set of points\n * for nearest-neighbor matching.\n *\n *  The class \"DatasetAdaptor\" must provide the following interface (can be\n * non-virtual, inlined methods):\n *\n *  \\code\n *   // Must return the number of data poins\n *   inline size_t kdtree_get_point_count() const { ... }\n *\n *   // Must return the dim'th component of the idx'th point in the class:\n *   inline T kdtree_get_pt(const size_t idx, const size_t dim) const { ... }\n *\n *   // Optional bounding-box computation: return false to default to a standard\n * bbox computation loop.\n *   //   Return true if the BBOX was already computed by the class and returned\n * in \"bb\" so it can be avoided to redo it again.\n *   //   Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3\n * for point clouds) template <class BBOX> bool kdtree_get_bbox(BBOX &bb) const\n *   {\n *      bb[0].low = ...; bb[0].high = ...;  // 0th dimension limits\n *      bb[1].low = ...; bb[1].high = ...;  // 1st dimension limits\n *      ...\n *      return true;\n *   }\n *\n *  \\endcode\n *\n * \\tparam DatasetAdaptor The user-provided adaptor (see comments above).\n * \\tparam Distance The distance metric to use: nanoflann::metric_L1,\n * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \\tparam DIM\n * Dimensionality of data points (e.g. 
3 for 3D points) \\tparam IndexType Will\n * be typically size_t or int\n */\ntemplate <typename Distance, class DatasetAdaptor, int DIM = -1,\n          typename IndexType = size_t>\nclass KDTreeSingleIndexDynamicAdaptor_\n    : public KDTreeBaseClass<KDTreeSingleIndexDynamicAdaptor_<\n                                 Distance, DatasetAdaptor, DIM, IndexType>,\n                             Distance, DatasetAdaptor, DIM, IndexType> {\npublic:\n  /**\n   * The dataset used by this index\n   */\n  const DatasetAdaptor &dataset; //!< The source of our data\n\n  KDTreeSingleIndexAdaptorParams index_params;\n\n  std::vector<int> &treeIndex;\n\n  Distance distance;\n\n  typedef typename nanoflann::KDTreeBaseClass<\n      nanoflann::KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM,\n                                                  IndexType>,\n      Distance, DatasetAdaptor, DIM, IndexType>\n      BaseClassRef;\n\n  typedef typename BaseClassRef::ElementType ElementType;\n  typedef typename BaseClassRef::DistanceType DistanceType;\n\n  typedef typename BaseClassRef::Node Node;\n  typedef Node *NodePtr;\n\n  typedef typename BaseClassRef::Interval Interval;\n  /** Define \"BoundingBox\" as a fixed-size or variable-size container depending\n   * on \"DIM\" */\n  typedef typename BaseClassRef::BoundingBox BoundingBox;\n\n  /** Define \"distance_vector_t\" as a fixed-size or variable-size container\n   * depending on \"DIM\" */\n  typedef typename BaseClassRef::distance_vector_t distance_vector_t;\n\n  /**\n   * KDTree constructor\n   *\n   * Refer to docs in README.md or online in\n   * https://github.com/jlblancoc/nanoflann\n   *\n   * The KD-Tree point dimension (the length of each point in the datase, e.g. 
3\n   * for 3D points) is determined by means of:\n   *  - The \\a DIM template parameter if >0 (highest priority)\n   *  - Otherwise, the \\a dimensionality parameter of this constructor.\n   *\n   * @param inputData Dataset with the input features\n   * @param params Basically, the maximum leaf node size\n   */\n  KDTreeSingleIndexDynamicAdaptor_(\n      const int dimensionality, const DatasetAdaptor &inputData,\n      std::vector<int> &treeIndex_,\n      const KDTreeSingleIndexAdaptorParams &params =\n          KDTreeSingleIndexAdaptorParams())\n      : dataset(inputData), index_params(params), treeIndex(treeIndex_),\n        distance(inputData) {\n    BaseClassRef::root_node = NULL;\n    BaseClassRef::m_size = 0;\n    BaseClassRef::m_size_at_index_build = 0;\n    BaseClassRef::dim = dimensionality;\n    if (DIM > 0)\n      BaseClassRef::dim = DIM;\n    BaseClassRef::m_leaf_max_size = params.leaf_max_size;\n  }\n\n  /** Assignment operator definiton */\n  KDTreeSingleIndexDynamicAdaptor_\n  operator=(const KDTreeSingleIndexDynamicAdaptor_ &rhs) {\n    KDTreeSingleIndexDynamicAdaptor_ tmp(rhs);\n    std::swap(BaseClassRef::vind, tmp.BaseClassRef::vind);\n    std::swap(BaseClassRef::m_leaf_max_size, tmp.BaseClassRef::m_leaf_max_size);\n    std::swap(index_params, tmp.index_params);\n    std::swap(treeIndex, tmp.treeIndex);\n    std::swap(BaseClassRef::m_size, tmp.BaseClassRef::m_size);\n    std::swap(BaseClassRef::m_size_at_index_build,\n              tmp.BaseClassRef::m_size_at_index_build);\n    std::swap(BaseClassRef::root_node, tmp.BaseClassRef::root_node);\n    std::swap(BaseClassRef::root_bbox, tmp.BaseClassRef::root_bbox);\n    std::swap(BaseClassRef::pool, tmp.BaseClassRef::pool);\n    return *this;\n  }\n\n  /**\n   * Builds the index\n   */\n  void buildIndex() {\n    BaseClassRef::m_size = BaseClassRef::vind.size();\n    this->freeIndex(*this);\n    BaseClassRef::m_size_at_index_build = BaseClassRef::m_size;\n    if (BaseClassRef::m_size == 0)\n      
return;\n    computeBoundingBox(BaseClassRef::root_bbox);\n    BaseClassRef::root_node =\n        this->divideTree(*this, 0, BaseClassRef::m_size,\n                         BaseClassRef::root_bbox); // construct the tree\n  }\n\n  /** \\name Query methods\n   * @{ */\n\n  /**\n   * Find set of nearest neighbors to vec[0:dim-1]. Their indices are stored\n   * inside the result object.\n   *\n   * Params:\n   *     result = the result object in which the indices of the\n   * nearest-neighbors are stored vec = the vector for which to search the\n   * nearest neighbors\n   *\n   * \\tparam RESULTSET Should be any ResultSet<DistanceType>\n   * \\return  True if the requested neighbors could be found.\n   * \\sa knnSearch, radiusSearch\n   */\n  template <typename RESULTSET>\n  bool findNeighbors(RESULTSET &result, const ElementType *vec,\n                     const SearchParams &searchParams) const {\n    assert(vec);\n    if (this->size(*this) == 0)\n      return false;\n    if (!BaseClassRef::root_node)\n      return false;\n    float epsError = 1 + searchParams.eps;\n\n    // fixed or variable-sized container (depending on DIM)\n    distance_vector_t dists;\n    // Fill it with zeros.\n    assign(dists, (DIM > 0 ? DIM : BaseClassRef::dim),\n           static_cast<typename distance_vector_t::value_type>(0));\n    DistanceType distsq = this->computeInitialDistances(*this, vec, dists);\n    searchLevel(result, vec, BaseClassRef::root_node, distsq, dists,\n                epsError); // \"count_leaf\" parameter removed since was neither\n                           // used nor returned to the user.\n    return result.full();\n  }\n\n  /**\n   * Find the \"num_closest\" nearest neighbors to the \\a query_point[0:dim-1].\n   * Their indices are stored inside the result object. \\sa radiusSearch,\n   * findNeighbors \\note nChecks_IGNORED is ignored but kept for compatibility\n   * with the original FLANN interface. \\return Number `N` of valid points in\n   * the result set. 
Only the first `N` entries in `out_indices` and\n   * `out_distances_sq` will be valid. Return may be less than `num_closest`\n   * only if the number of elements in the tree is less than `num_closest`.\n   */\n  size_t knnSearch(const ElementType *query_point, const size_t num_closest,\n                   IndexType *out_indices, DistanceType *out_distances_sq,\n                   const int /* nChecks_IGNORED */ = 10) const {\n    nanoflann::KNNResultSet<DistanceType, IndexType> resultSet(num_closest);\n    resultSet.init(out_indices, out_distances_sq);\n    this->findNeighbors(resultSet, query_point, nanoflann::SearchParams());\n    return resultSet.size();\n  }\n\n  /**\n   * Find all the neighbors to \\a query_point[0:dim-1] within a maximum radius.\n   *  The output is given as a vector of pairs, of which the first element is a\n   * point index and the second the corresponding distance. Previous contents of\n   * \\a IndicesDists are cleared.\n   *\n   *  If searchParams.sorted==true, the output list is sorted by ascending\n   * distances.\n   *\n   *  For a better performance, it is advisable to do a .reserve() on the vector\n   * if you have any wild guess about the number of expected matches.\n   *\n   *  \\sa knnSearch, findNeighbors, radiusSearchCustomCallback\n   * \\return The number of points within the given radius (i.e. 
indices.size()\n   * or dists.size() )\n   */\n  size_t\n  radiusSearch(const ElementType *query_point, const DistanceType &radius,\n               std::vector<std::pair<IndexType, DistanceType>> &IndicesDists,\n               const SearchParams &searchParams) const {\n    RadiusResultSet<DistanceType, IndexType> resultSet(radius, IndicesDists);\n    const size_t nFound =\n        radiusSearchCustomCallback(query_point, resultSet, searchParams);\n    if (searchParams.sorted)\n      std::sort(IndicesDists.begin(), IndicesDists.end(), IndexDist_Sorter());\n    return nFound;\n  }\n\n  /**\n   * Just like radiusSearch() but with a custom callback class for each point\n   * found in the radius of the query. See the source of RadiusResultSet<> as a\n   * start point for your own classes. \\sa radiusSearch\n   */\n  template <class SEARCH_CALLBACK>\n  size_t radiusSearchCustomCallback(\n      const ElementType *query_point, SEARCH_CALLBACK &resultSet,\n      const SearchParams &searchParams = SearchParams()) const {\n    this->findNeighbors(resultSet, query_point, searchParams);\n    return resultSet.size();\n  }\n\n  /** @} */\n\npublic:\n  void computeBoundingBox(BoundingBox &bbox) {\n    resize(bbox, (DIM > 0 ? DIM : BaseClassRef::dim));\n\n    if (dataset.kdtree_get_bbox(bbox)) {\n      // Done! It was implemented in derived class\n    } else {\n      const size_t N = BaseClassRef::m_size;\n      if (!N)\n        throw std::runtime_error(\"[nanoflann] computeBoundingBox() called but \"\n                                 \"no data points found.\");\n      for (int i = 0; i < (DIM > 0 ? DIM : BaseClassRef::dim); ++i) {\n        bbox[i].low = bbox[i].high =\n            this->dataset_get(*this, BaseClassRef::vind[0], i);\n      }\n      for (size_t k = 1; k < N; ++k) {\n        for (int i = 0; i < (DIM > 0 ? 
DIM : BaseClassRef::dim); ++i) {\n          if (this->dataset_get(*this, BaseClassRef::vind[k], i) < bbox[i].low)\n            bbox[i].low = this->dataset_get(*this, BaseClassRef::vind[k], i);\n          if (this->dataset_get(*this, BaseClassRef::vind[k], i) > bbox[i].high)\n            bbox[i].high = this->dataset_get(*this, BaseClassRef::vind[k], i);\n        }\n      }\n    }\n  }\n\n  /**\n   * Performs an exact search in the tree starting from a node.\n   * \\tparam RESULTSET Should be any ResultSet<DistanceType>\n   */\n  template <class RESULTSET>\n  void searchLevel(RESULTSET &result_set, const ElementType *vec,\n                   const NodePtr node, DistanceType mindistsq,\n                   distance_vector_t &dists, const float epsError) const {\n    /* If this is a leaf node, then do check and return. */\n    if ((node->child1 == NULL) && (node->child2 == NULL)) {\n      // count_leaf += (node->lr.right-node->lr.left);  // Removed since was\n      // neither used nor returned to the user.\n      DistanceType worst_dist = result_set.worstDist();\n      for (IndexType i = node->node_type.lr.left; i < node->node_type.lr.right;\n           ++i) {\n        const IndexType index = BaseClassRef::vind[i]; // reorder... : i;\n        if (treeIndex[index] == -1)\n          continue;\n        DistanceType dist = distance.evalMetric(\n            vec, index, (DIM > 0 ? DIM : BaseClassRef::dim));\n        if (dist < worst_dist) {\n          if (!result_set.addPoint(\n                  static_cast<typename RESULTSET::DistanceType>(dist),\n                  static_cast<typename RESULTSET::IndexType>(\n                      BaseClassRef::vind[i]))) {\n            // the resultset doesn't want to receive any more points, we're done\n            // searching!\n            return; // false;\n          }\n        }\n      }\n      return;\n    }\n\n    /* Which child branch should be taken first? 
*/\n    int idx = node->node_type.sub.divfeat;\n    ElementType val = vec[idx];\n    DistanceType diff1 = val - node->node_type.sub.divlow;\n    DistanceType diff2 = val - node->node_type.sub.divhigh;\n\n    NodePtr bestChild;\n    NodePtr otherChild;\n    DistanceType cut_dist;\n    if ((diff1 + diff2) < 0) {\n      bestChild = node->child1;\n      otherChild = node->child2;\n      cut_dist = distance.accum_dist(val, node->node_type.sub.divhigh, idx);\n    } else {\n      bestChild = node->child2;\n      otherChild = node->child1;\n      cut_dist = distance.accum_dist(val, node->node_type.sub.divlow, idx);\n    }\n\n    /* Call recursively to search next level down. */\n    searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError);\n\n    DistanceType dst = dists[idx];\n    mindistsq = mindistsq + cut_dist - dst;\n    dists[idx] = cut_dist;\n    if (mindistsq * epsError <= result_set.worstDist()) {\n      searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError);\n    }\n    dists[idx] = dst;\n  }\n\npublic:\n  /**  Stores the index in a binary file.\n   *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so when\n   * loading the index object it must be constructed associated to the same\n   * source of data points used while building it. See the example:\n   * examples/saveload_example.cpp \\sa loadIndex  */\n  void saveIndex(FILE *stream) { this->saveIndex_(*this, stream); }\n\n  /**  Loads a previous index from a binary file.\n   *   IMPORTANT NOTE: The set of data points is NOT stored in the file, so the\n   * index object must be constructed associated to the same source of data\n   * points used while building the index. 
See the example:\n   * examples/saveload_example.cpp \\sa loadIndex  */\n  void loadIndex(FILE *stream) { this->loadIndex_(*this, stream); }\n};\n\n/** kd-tree dynaimic index\n *\n * class to create multiple static index and merge their results to behave as\n * single dynamic index as proposed in Logarithmic Approach.\n *\n *  Example of usage:\n *  examples/dynamic_pointcloud_example.cpp\n *\n * \\tparam DatasetAdaptor The user-provided adaptor (see comments above).\n * \\tparam Distance The distance metric to use: nanoflann::metric_L1,\n * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. \\tparam DIM\n * Dimensionality of data points (e.g. 3 for 3D points) \\tparam IndexType Will\n * be typically size_t or int\n */\ntemplate <typename Distance, class DatasetAdaptor, int DIM = -1,\n          typename IndexType = size_t>\nclass KDTreeSingleIndexDynamicAdaptor {\npublic:\n  typedef typename Distance::ElementType ElementType;\n  typedef typename Distance::DistanceType DistanceType;\n\nprotected:\n  size_t m_leaf_max_size;\n  size_t treeCount;\n  size_t pointCount;\n\n  /**\n   * The dataset used by this index\n   */\n  const DatasetAdaptor &dataset; //!< The source of our data\n\n  std::vector<int> treeIndex; //!< treeIndex[idx] is the index of tree in which\n                              //!< point at idx is stored. treeIndex[idx]=-1\n                              //!< means that point has been removed.\n\n  KDTreeSingleIndexAdaptorParams index_params;\n\n  int dim; //!< Dimensionality of each data point\n\n  typedef KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM>\n      index_container_t;\n  std::vector<index_container_t> index;\n\npublic:\n  /** Get a const ref to the internal list of indices; the number of indices is\n   * adapted dynamically as the dataset grows in size. 
*/\n  const std::vector<index_container_t> &getAllIndices() const { return index; }\n\nprivate:\n  /** finds position of least significant unset bit */\n  int First0Bit(IndexType num) {\n    int pos = 0;\n    while (num & 1) {\n      num = num >> 1;\n      pos++;\n    }\n    return pos;\n  }\n\n  /** Creates multiple empty trees to handle dynamic support */\n  void init() {\n    typedef KDTreeSingleIndexDynamicAdaptor_<Distance, DatasetAdaptor, DIM>\n        my_kd_tree_t;\n    std::vector<my_kd_tree_t> index_(\n        treeCount, my_kd_tree_t(dim /*dim*/, dataset, treeIndex, index_params));\n    index = index_;\n  }\n\npublic:\n  Distance distance;\n\n  /**\n   * KDTree constructor\n   *\n   * Refer to docs in README.md or online in\n   * https://github.com/jlblancoc/nanoflann\n   *\n   * The KD-Tree point dimension (the length of each point in the datase, e.g. 3\n   * for 3D points) is determined by means of:\n   *  - The \\a DIM template parameter if >0 (highest priority)\n   *  - Otherwise, the \\a dimensionality parameter of this constructor.\n   *\n   * @param inputData Dataset with the input features\n   * @param params Basically, the maximum leaf node size\n   */\n  KDTreeSingleIndexDynamicAdaptor(const int dimensionality,\n                                  const DatasetAdaptor &inputData,\n                                  const KDTreeSingleIndexAdaptorParams &params =\n                                      KDTreeSingleIndexAdaptorParams(),\n                                  const size_t maximumPointCount = 1000000000U)\n      : dataset(inputData), index_params(params), distance(inputData) {\n    treeCount = static_cast<size_t>(std::log2(maximumPointCount));\n    pointCount = 0U;\n    dim = dimensionality;\n    treeIndex.clear();\n    if (DIM > 0)\n      dim = DIM;\n    m_leaf_max_size = params.leaf_max_size;\n    init();\n    const size_t num_initial_points = dataset.kdtree_get_point_count();\n    if (num_initial_points > 0) {\n      addPoints(0, 
num_initial_points - 1);\n    }\n  }\n\n  /** Deleted copy constructor*/\n  KDTreeSingleIndexDynamicAdaptor(\n      const KDTreeSingleIndexDynamicAdaptor<Distance, DatasetAdaptor, DIM,\n                                            IndexType> &) = delete;\n\n  /** Add points to the set, Inserts all points from [start, end] */\n  void addPoints(IndexType start, IndexType end) {\n    size_t count = end - start + 1;\n    treeIndex.resize(treeIndex.size() + count);\n    for (IndexType idx = start; idx <= end; idx++) {\n      int pos = First0Bit(pointCount);\n      index[pos].vind.clear();\n      treeIndex[pointCount] = pos;\n      for (int i = 0; i < pos; i++) {\n        for (int j = 0; j < static_cast<int>(index[i].vind.size()); j++) {\n          index[pos].vind.push_back(index[i].vind[j]);\n          if (treeIndex[index[i].vind[j]] != -1)\n            treeIndex[index[i].vind[j]] = pos;\n        }\n        index[i].vind.clear();\n        index[i].freeIndex(index[i]);\n      }\n      index[pos].vind.push_back(idx);\n      index[pos].buildIndex();\n      pointCount++;\n    }\n  }\n\n  /** Remove a point from the set (Lazy Deletion) */\n  void removePoint(size_t idx) {\n    if (idx >= pointCount)\n      return;\n    treeIndex[idx] = -1;\n  }\n\n  /**\n   * Find set of nearest neighbors to vec[0:dim-1]. 
Their indices are stored\n   * inside the result object.\n   *\n   * Params:\n   *     result = the result object in which the indices of the\n   * nearest-neighbors are stored vec = the vector for which to search the\n   * nearest neighbors\n   *\n   * \\tparam RESULTSET Should be any ResultSet<DistanceType>\n   * \\return  True if the requested neighbors could be found.\n   * \\sa knnSearch, radiusSearch\n   */\n  template <typename RESULTSET>\n  bool findNeighbors(RESULTSET &result, const ElementType *vec,\n                     const SearchParams &searchParams) const {\n    for (size_t i = 0; i < treeCount; i++) {\n      index[i].findNeighbors(result, &vec[0], searchParams);\n    }\n    return result.full();\n  }\n};\n\n/** An L2-metric KD-tree adaptor for working with data directly stored in an\n * Eigen Matrix, without duplicating the data storage. Each row in the matrix\n * represents a point in the state space.\n *\n *  Example of usage:\n * \\code\n * \tEigen::Matrix<num_t,Dynamic,Dynamic>  mat;\n * \t// Fill out \"mat\"...\n *\n * \ttypedef KDTreeEigenMatrixAdaptor< Eigen::Matrix<num_t,Dynamic,Dynamic> >\n * my_kd_tree_t; const int max_leaf = 10; my_kd_tree_t   mat_index(mat, max_leaf\n * ); mat_index.index->buildIndex(); mat_index.index->... \\endcode\n *\n *  \\tparam DIM If set to >0, it specifies a compile-time fixed dimensionality\n * for the points in the data set, allowing more compiler optimizations. 
\\tparam\n * Distance The distance metric to use: nanoflann::metric_L1,\n * nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.\n */\ntemplate <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2>\nstruct KDTreeEigenMatrixAdaptor {\n  typedef KDTreeEigenMatrixAdaptor<MatrixType, DIM, Distance> self_t;\n  typedef typename MatrixType::Scalar num_t;\n  typedef typename MatrixType::Index IndexType;\n  typedef\n      typename Distance::template traits<num_t, self_t>::distance_t metric_t;\n  typedef KDTreeSingleIndexAdaptor<metric_t, self_t,\n                                   MatrixType::ColsAtCompileTime, IndexType>\n      index_t;\n\n  index_t *index; //! The kd-tree index for the user to call its methods as\n                  //! usual with any other FLANN index.\n\n  /// Constructor: takes a const ref to the matrix object with the data points\n  KDTreeEigenMatrixAdaptor(const size_t dimensionality,\n                           const std::reference_wrapper<const MatrixType> &mat,\n                           const int leaf_max_size = 10)\n      : m_data_matrix(mat) {\n    const auto dims = mat.get().cols();\n    if (size_t(dims) != dimensionality)\n      throw std::runtime_error(\n          \"Error: 'dimensionality' must match column count in data matrix\");\n    if (DIM > 0 && int(dims) != DIM)\n      throw std::runtime_error(\n          \"Data set dimensionality does not match the 'DIM' template argument\");\n    index =\n        new index_t(static_cast<int>(dims), *this /* adaptor */,\n                    nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size));\n    index->buildIndex();\n  }\n\npublic:\n  /** Deleted copy constructor */\n  KDTreeEigenMatrixAdaptor(const self_t &) = delete;\n\n  ~KDTreeEigenMatrixAdaptor() { delete index; }\n\n  const std::reference_wrapper<const MatrixType> m_data_matrix;\n\n  /** Query for the \\a num_closest closest points to a given point (entered as\n   * query_point[0:dim-1]). 
Note that this is a short-cut method for\n   * index->findNeighbors(). The user can also call index->... methods as\n   * desired. \\note nChecks_IGNORED is ignored but kept for compatibility with\n   * the original FLANN interface.\n   */\n  inline void query(const num_t *query_point, const size_t num_closest,\n                    IndexType *out_indices, num_t *out_distances_sq,\n                    const int /* nChecks_IGNORED */ = 10) const {\n    nanoflann::KNNResultSet<num_t, IndexType> resultSet(num_closest);\n    resultSet.init(out_indices, out_distances_sq);\n    index->findNeighbors(resultSet, query_point, nanoflann::SearchParams());\n  }\n\n  /** @name Interface expected by KDTreeSingleIndexAdaptor\n   * @{ */\n\n  const self_t &derived() const { return *this; }\n  self_t &derived() { return *this; }\n\n  // Must return the number of data points\n  inline size_t kdtree_get_point_count() const {\n    return m_data_matrix.get().rows();\n  }\n\n  // Returns the dim'th component of the idx'th point in the class:\n  inline num_t kdtree_get_pt(const IndexType idx, size_t dim) const {\n    return m_data_matrix.get().coeff(idx, IndexType(dim));\n  }\n\n  // Optional bounding-box computation: return false to default to a standard\n  // bbox computation loop.\n  //   Return true if the BBOX was already computed by the class and returned in\n  //   \"bb\" so it can be avoided to redo it again. Look at bb.size() to find out\n  //   the expected dimensionality (e.g. 2 or 3 for point clouds)\n  template <class BBOX> bool kdtree_get_bbox(BBOX & /*bb*/) const {\n    return false;\n  }\n\n  /** @} */\n\n}; // end of KDTreeEigenMatrixAdaptor\n   /** @} */\n\n/** @} */ // end of grouping\n} // namespace nanoflann\n\n#endif /* NANOFLANN_HPP_ */\n"
  },
  {
    "path": "SC-PGO/launch/livox_mapping.launch",
    "content": "<launch>\n\n    <param name=\"mapviz_filter_size\" type=\"double\" value=\"0.05\"/>\n\n    <!-- SC-A-LOAM -->\n    <param name=\"keyframe_meter_gap\" type=\"double\" value=\"0.1\"/>\n    <param name=\"keyframe_deg_gap\" type=\"double\" value=\"5\"/>\n\n    <!-- Scan Context -->\n    <param name=\"sc_dist_thres\" type=\"double\" value=\"0.3\"/> <!-- SC-A-LOAM, if want no outliers, use 0.1-0.15 -->\n    <!-- <param name=\"sc_max_radius\" type=\"double\" value=\"40.0\"/> 20 or 40 for indoor -->\n    <param name=\"sc_max_radius\" type=\"double\" value=\"80.0\"/> <!-- for outdoor -->\n\n    <!-- input from LIO_livox -->\n    <remap from=\"/aft_mapped_to_init\" to=\"/livox_odometry_mapped\"/>\n    <remap from=\"/velodyne_cloud_registered_local\" to=\"/livox_full_cloud_imu\"/>\n\n    <!-- Save path Parameter of odometry -->\n    <arg name=\"save_path\" default=\"lio_livox_slam.txt\"/>\n    <param name=\"save_path\" value=\"$(arg save_path)\"/>\n    <param name=\"save_pcd_path\" value=\"$(find livox_mapping)/PCD/\"/>\n    <!-- If use RTK -->\n    <arg name=\"useRTK\" default=\"false\"/>\n    <param name=\"useRTK\" value=\"$(arg useRTK)\"/>\n\n    <!-- Extrinsic Parameter between Lidar & GNSS -->\n    <rosparam param=\"Extrinsic_T_livox2gnss\"> [0.997573, -0.024254, 0.065274, 0.322019,\n                                               0.023477, 0.999644, 0.012663, 0.043158,\n                                               -0.066402, -0.011080, 0.997733, 0.456606,\n                                               0.0, 0.0, 0.0, 1.0]</rosparam>\n\n    <!-- nodes -->\n    <node pkg=\"livox_mapping\" type=\"livox_mapping\" name=\"livox_mapping\" output=\"screen\" /> <!-- Scan Context-based PGO -->\n\n    <!-- visualization -->\n    <arg name=\"rvizscpgo\" default=\"true\" />\n    <group if=\"$(arg rvizscpgo)\">\n        <node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rvizscpgo\" args=\"-d $(find livox_mapping)/rviz_cfg/livox_mapping.rviz\" />\n    
</group>\n\n</launch>\n"
  },
  {
    "path": "SC-PGO/package.xml",
    "content": "<?xml version=\"1.0\"?>\n<package>\n  <name>livox_mapping</name>\n  <version>0.0.0</version>\n\n  <description>\n    This is an advanced implementation of LOAM.\n    LOAM is described in the following paper:\n    J. Zhang and S. Singh. LOAM: Lidar Odometry and Mapping in Real-time.\n      Robotics: Science and Systems Conference (RSS). Berkeley, CA, July 2014.\n  </description>\n\n  <maintainer email=\"qintonguav@gmail.com\">qintong</maintainer>\n\n  <license>BSD</license>\n\n  <author email=\"zhangji@cmu.edu\">Ji Zhang</author>\n  \n  <buildtool_depend>catkin</buildtool_depend>\n  <build_depend>geometry_msgs</build_depend>\n  <build_depend>nav_msgs</build_depend>\n  <build_depend>roscpp</build_depend>\n  <build_depend>rospy</build_depend>\n  <build_depend>std_msgs</build_depend>  \n  <build_depend>rosbag</build_depend>\n  <build_depend>sensor_msgs</build_depend>\n  <build_depend>tf</build_depend>\n  <build_depend>tf_conversions</build_depend>\n  <build_depend>image_transport</build_depend>\n  <build_depend>ad_localization_msgs</build_depend>\n  <build_depend>cv_bridge</build_depend>\n  \n  <run_depend>geometry_msgs</run_depend>\n  <run_depend>nav_msgs</run_depend>\n  <run_depend>sensor_msgs</run_depend>\n  <run_depend>roscpp</run_depend>\n  <run_depend>rospy</run_depend>\n  <run_depend>std_msgs</run_depend>\n  <run_depend>rosbag</run_depend>\n  <run_depend>tf</run_depend>\n  <run_depend>tf_conversions</run_depend>\n  <run_depend>image_transport</run_depend>\n  <run_depend>ad_localization_msgs</run_depend>\n  <export>\n  </export>\n</package>\n"
  },
  {
    "path": "SC-PGO/rviz_cfg/livox_mapping.rviz",
    "content": "Panels:\n  - Class: rviz/Displays\n    Help Height: 85\n    Name: Displays\n    Property Tree Widget:\n      Expanded:\n        - /Global Options1\n        - /Status1\n        - /odometry1/odomPath1\n        - /pgoOdom1/Shape1\n        - /pgoMap1\n        - /loop scan1\n        - /loop submap1\n      Splitter Ratio: 0.440559446811676\n    Tree Height: 768\n  - Class: rviz/Selection\n    Name: Selection\n  - Class: rviz/Tool Properties\n    Expanded:\n      - /2D Pose Estimate1\n      - /2D Nav Goal1\n      - /Publish Point1\n    Name: Tool Properties\n    Splitter Ratio: 0.5886790156364441\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: Views\n    Splitter Ratio: 0.524163544178009\n  - Class: rviz/Time\n    Experimental: false\n    Name: Time\n    SyncMode: 0\n    SyncSource: pgoMap\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: pgo Views\n    Splitter Ratio: 0.5\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: Views\n    Splitter Ratio: 0.5\nPreferences:\n  PromptSaveOnExit: true\nToolbars:\n  toolButtonStyle: 2\nVisualization Manager:\n  Class: \"\"\n  Displays:\n    - Alpha: 0.5\n      Cell Size: 1\n      Class: rviz/Grid\n      Color: 160; 160; 164\n      Enabled: false\n      Line Style:\n        Line Width: 0.029999999329447746\n        Value: Lines\n      Name: Grid\n      Normal Cell Count: 0\n      Offset:\n        X: 0\n        Y: 0\n        Z: 0\n      Plane: XY\n      Plane Cell Count: 160\n      Reference Frame: <Fixed Frame>\n      Value: false\n    - Class: rviz/Axes\n      Enabled: true\n      Length: 1\n      Name: Axes\n      Radius: 0.10000000149011612\n      Reference Frame: camera_init\n      Value: true\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 255; 170; 0\n          Enabled: false\n          Head Diameter: 0.30000001192092896\n          Head Length: 
0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Lines\n          Line Width: 0.029999999329447746\n          Name: gtPathlc\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /path_gt\n          Unreliable: false\n          Value: false\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 255; 170; 0\n          Enabled: true\n          Head Diameter: 0.30000001192092896\n          Head Length: 0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Lines\n          Line Width: 0.029999999329447746\n          Name: gtPathLidar\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /path_gt_lidar\n          Unreliable: false\n          Value: true\n      Enabled: false\n      Name: GT\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 204; 0; 0\n          Enabled: true\n          Head Diameter: 0.30000001192092896\n          Head Length: 0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Billboards\n          Line Width: 0.20000000298023224\n          Name: odomPath\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /laser_odom_path\n          Unreliable: false\n  
        Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: FlatColor\n          Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Max Intensity: 63.10047912597656\n          Min Color: 0; 0; 0\n          Min Intensity: -0.0067862942814826965\n          Name: PointCloud2\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 3\n          Size (m): 0.05000000074505806\n          Style: Flat Squares\n          Topic: /velodyne_cloud_2\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n      Enabled: true\n      Name: odometry\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 4; 69; 246\n          Enabled: true\n          Head Diameter: 0.30000001192092896\n          Head Length: 0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Billboards\n          Line Width: 0.30000001192092896\n          Name: mappingPath\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /aft_mapped_path\n          Unreliable: false\n          Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          
Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n          Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Max Intensity: 50.14332962036133\n          Min Color: 0; 0; 0\n          Min Intensity: -0.04392780363559723\n          Name: allMapCloud\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 4\n          Size (m): 0.20000000298023224\n          Style: Points\n          Topic: /laser_cloud_map\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: false\n          Value: false\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: FlatColor\n          Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Max Intensity: 15\n          Min Color: 0; 0; 0\n          Min Intensity: 0\n          Name: surround\n          Position Transformer: XYZ\n          Queue Size: 1\n          Selectable: true\n          Size (Pixels): 1\n          Size (m): 0.05000000074505806\n          Style: Squares\n          Topic: /laser_cloud_surround\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n 
         Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Max Intensity: 50.141441345214844\n          Min Color: 0; 0; 0\n          Min Intensity: -0.024373823776841164\n          Name: currPoints\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 1.5\n          Size (m): 0.10000000149011612\n          Style: Squares\n          Topic: /velodyne_cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n        - Angle Tolerance: 0.10000000149011612\n          Class: rviz/Odometry\n          Covariance:\n            Orientation:\n              Alpha: 0.5\n              Color: 255; 255; 127\n              Color Style: Unique\n              Frame: Local\n              Offset: 1\n              Scale: 1\n              Value: true\n            Position:\n              Alpha: 0.30000001192092896\n              Color: 204; 51; 204\n              Scale: 1\n              Value: true\n            Value: true\n          Enabled: false\n          Keep: 1\n          Name: Odometry\n          Position Tolerance: 0.10000000149011612\n          Shape:\n            Alpha: 1\n            Axes Length: 1.5\n            Axes Radius: 0.5\n            Color: 255; 25; 0\n            Head Length: 0.10000000149011612\n            Head Radius: 2\n            Shaft Length: 0.10000000149011612\n            Shaft Radius: 20\n            Value: Axes\n          Topic: /aft_mapped_to_init\n          Unreliable: false\n          Value: false\n        - Class: rviz/TF\n          Enabled: false\n          Frame Timeout: 15\n          Frames:\n            All Enabled: true\n          Marker Scale: 1\n          Name: TF\n          Show Arrows: true\n          Show Axes: true\n          Show Names: true\n          Tree:\n            {}\n          Update Interval: 0\n          Value: false\n      
Enabled: true\n      Name: mapping\n    - Alpha: 1\n      Buffer Length: 1\n      Class: rviz/Path\n      Color: 25; 255; 0\n      Enabled: true\n      Head Diameter: 0.30000001192092896\n      Head Length: 0.20000000298023224\n      Length: 0.30000001192092896\n      Line Style: Billboards\n      Line Width: 0.4000000059604645\n      Name: pgoPath\n      Offset:\n        X: 0\n        Y: 0\n        Z: 0\n      Pose Color: 255; 85; 255\n      Pose Style: Axes\n      Radius: 0.10000000149011612\n      Shaft Diameter: 0.10000000149011612\n      Shaft Length: 0.10000000149011612\n      Topic: /aft_pgo_path\n      Unreliable: false\n      Value: true\n    - Angle Tolerance: 0.10000000149011612\n      Class: rviz/Odometry\n      Covariance:\n        Orientation:\n          Alpha: 0.5\n          Color: 255; 255; 127\n          Color Style: Unique\n          Frame: Local\n          Offset: 1\n          Scale: 1\n          Value: true\n        Position:\n          Alpha: 0.30000001192092896\n          Color: 204; 51; 204\n          Scale: 1\n          Value: true\n        Value: true\n      Enabled: true\n      Keep: 1\n      Name: pgoOdom\n      Position Tolerance: 0.10000000149011612\n      Shape:\n        Alpha: 1\n        Axes Length: 2\n        Axes Radius: 0.699999988079071\n        Color: 255; 25; 0\n        Head Length: 0.30000001192092896\n        Head Radius: 0.10000000149011612\n        Shaft Length: 1\n        Shaft Radius: 0.05000000074505806\n        Value: Axes\n      Topic: /aft_pgo_odom\n      Unreliable: false\n      Value: true\n    - Alpha: 0.10000000149011612\n      Autocompute Intensity Bounds: false\n      Autocompute Value Bounds:\n        Max Value: 10\n        Min Value: -10\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 255; 255; 255\n      Color Transformer: Intensity\n      Decay Time: 0\n      Enabled: true\n      Invert Rainbow: true\n      Max Color: 255; 255; 255\n      Max 
Intensity: 30\n      Min Color: 0; 0; 0\n      Min Intensity: 0\n      Name: pgoMap\n      Position Transformer: XYZ\n      Queue Size: 1\n      Selectable: true\n      Size (Pixels): 2\n      Size (m): 0.4000000059604645\n      Style: Points\n      Topic: /aft_pgo_map\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: true\n    - Alpha: 1\n      Autocompute Intensity Bounds: true\n      Autocompute Value Bounds:\n        Max Value: 10\n        Min Value: -10\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 0; 255; 0\n      Color Transformer: FlatColor\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 255; 255; 255\n      Max Intensity: 20.06364631652832\n      Min Color: 0; 0; 0\n      Min Intensity: 3.003571033477783\n      Name: loop scan\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 6\n      Size (m): 0.5\n      Style: Points\n      Topic: /loop_scan_local\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: false\n    - Alpha: 1\n      Autocompute Intensity Bounds: true\n      Autocompute Value Bounds:\n        Max Value: 32.84956741333008\n        Min Value: -16.756437301635742\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 255; 31; 11\n      Color Transformer: FlatColor\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 255; 255; 127\n      Max Intensity: 172\n      Min Color: 0; 0; 0\n      Min Intensity: 0\n      Name: loop submap\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 5\n      Size (m): 0.20000000298023224\n      Style: Points\n      Topic: /loop_submap_local\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: 
false\n  Enabled: true\n  Global Options:\n    Background Color: 255; 255; 255\n    Default Light: true\n    Fixed Frame: camera_init\n    Frame Rate: 30\n  Name: root\n  Tools:\n    - Class: rviz/Interact\n      Hide Inactive Objects: true\n    - Class: rviz/MoveCamera\n    - Class: rviz/Select\n    - Class: rviz/FocusCamera\n    - Class: rviz/Measure\n    - Class: rviz/SetInitialPose\n      Theta std deviation: 0.2617993950843811\n      Topic: /initialpose\n      X std deviation: 0.5\n      Y std deviation: 0.5\n    - Class: rviz/SetGoal\n      Topic: /move_base_simple/goal\n    - Class: rviz/PublishPoint\n      Single click: true\n      Topic: /clicked_point\n  Value: true\n  Views:\n    Current:\n      Class: rviz/Orbit\n      Distance: 206.20489501953125\n      Enable Stereo Rendering:\n        Stereo Eye Separation: 0.05999999865889549\n        Stereo Focal Distance: 1\n        Swap Stereo Eyes: false\n        Value: false\n      Focal Point:\n        X: 45.543460845947266\n        Y: -22.18519401550293\n        Z: 0.49449437856674194\n      Focal Shape Fixed Size: true\n      Focal Shape Size: 0.05000000074505806\n      Invert Z Axis: false\n      Name: Current View\n      Near Clip Distance: 0.009999999776482582\n      Pitch: 1.0561977624893188\n      Target Frame: camera_init\n      Value: Orbit (rviz)\n      Yaw: 2.9488861560821533\n    Saved: ~\nWindow Geometry:\n  Displays:\n    collapsed: false\n  Height: 1016\n  Hide Left Dock: false\n  Hide Right Dock: false\n  QMainWindow State: 
000000ff00000000fd0000000400000000000001af00000396fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006b00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000004400000396000000eb00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006500000002ef000000c30000000000000000fb0000001200700067006f00200056006900650077007300000002db000000d7000000b900ffffff000000010000010f00000396fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fc0000004400000396000000da01000020fa000000000100000002fb0000000a005600690065007700730100000000ffffffff0000010200fffffffb0000000a0056006900650077007301000006710000010f0000010200fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007530000003efc0100000002fb0000000800540069006d0065000000000000000753000004f900fffffffb0000000800540069006d00650100000000000004500000000000000000000004870000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000\n  Selection:\n    collapsed: false\n  Time:\n    collapsed: false\n  Tool Properties:\n    collapsed: false\n  Views:\n    collapsed: false\n  Width: 1875\n  X: 685\n  Y: 104\n  pgo Views:\n    collapsed: false\n"
  },
  {
    "path": "SC-PGO/src/laserPosegraphOptimization.cpp",
    "content": "#include <fstream>\n#include <math.h>\n#include <vector>\n#include <mutex>\n#include <queue>\n#include <thread>\n#include <iostream>\n#include <string>\n#include <optional>\n\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <pcl/search/impl/search.hpp>\n#include <pcl/range_image/range_image.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/common/common.h>\n#include <pcl/common/transforms.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/registration/icp.h>\n#include <pcl/io/pcd_io.h>\n#include <pcl/filters/filter.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/octree/octree_pointcloud_voxelcentroid.h>\n#include <pcl/filters/crop_box.h> \n#include <pcl_conversions/pcl_conversions.h>\n\n#include <ros/ros.h>\n#include \"csignal\"\n#include <sensor_msgs/Imu.h>\n#include <sensor_msgs/PointCloud2.h>\n#include <sensor_msgs/NavSatFix.h>\n#include <tf/transform_datatypes.h>\n#include <tf/transform_broadcaster.h>\n#include <tf_conversions/tf_eigen.h>\n#include <nav_msgs/Odometry.h>\n#include <nav_msgs/Path.h>\n#include <geometry_msgs/PoseStamped.h>\n\n#include <eigen3/Eigen/Dense>\n\n#include <ceres/ceres.h>\n\n#include <gtsam/inference/Symbol.h>\n#include <gtsam/nonlinear/Values.h>\n#include <gtsam/nonlinear/Marginals.h>\n#include <gtsam/geometry/Rot3.h>\n#include <gtsam/geometry/Pose3.h>\n#include <gtsam/geometry/Rot2.h>\n#include <gtsam/geometry/Pose2.h>\n#include <gtsam/slam/PriorFactor.h>\n#include <gtsam/slam/BetweenFactor.h>\n#include <gtsam/navigation/GPSFactor.h>\n#include <gtsam/nonlinear/NonlinearFactorGraph.h>\n#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>\n#include <gtsam/nonlinear/ISAM2.h>\n#include <ad_localization_msgs/NavStateInfo.h>\n\n#include \"livox_mapping/common.h\"\n#include \"livox_mapping/tic_toc.h\"\n\n#include \"scancontext/Scancontext.h\"\n\nusing namespace gtsam;\n\nusing std::cout;\nusing std::endl;\n\ndouble keyframeMeterGap;\ndouble keyframeDegGap, keyframeRadGap;\ndouble 
translationAccumulated = 1000000.0; // large value means must add the first given frame.\ndouble rotaionAccumulated = 1000000.0; // large value means must add the first given frame.\n\nbool isNowKeyFrame = false; \n\nPose6D odom_pose_prev {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // init \nPose6D odom_pose_curr {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // init pose is zero\nPose6D currRTK {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};\ntf::Transform odom_to_enu; // init rtk pose in enu\ntf::Transform T_livox2gnss;\n\nstd::queue<nav_msgs::Odometry::ConstPtr> odometryBuf;\nstd::queue<sensor_msgs::PointCloud2ConstPtr> fullResBuf;\nstd::queue<sensor_msgs::NavSatFix::ConstPtr> gpsBuf;\nstd::deque<ad_localization_msgs::NavStateInfo::ConstPtr> rtkBuf;\nstd::queue<std::pair<int, int> > scLoopICPBuf;\n\nstd::mutex mBuf;\nstd::mutex mKF;\nstd::mutex rtkBufLock;\nstd::mutex rtkKFLock;\ndouble timeLaserOdometry = 0.0;\ndouble timeLaser = 0.0;\n\npcl::PointCloud<PointType>::Ptr laserCloudFullRes(new pcl::PointCloud<PointType>());\npcl::PointCloud<PointType>::Ptr laserCloudMapAfterPGO(new pcl::PointCloud<PointType>());\n\nstd::vector<pcl::PointCloud<PointType>::Ptr> keyframeLaserClouds; \nstd::vector<Pose6D> keyframePoses;\nstd::vector<Pose6D> keyframePosesUpdated;\nstd::unordered_map<int, Pose6D> keyframePosesRTK;\nstd::vector<double> keyframeTimes;\nstd::vector<ros::Time> keyframeTimesOri;\nint recentIdxUpdated = 0;\n\ngtsam::NonlinearFactorGraph gtSAMgraph;\nbool gtSAMgraphMade = false;\ngtsam::Values initialEstimate;\ngtsam::ISAM2 *isam;\ngtsam::Values isamCurrentEstimate;\n\nnoiseModel::Diagonal::shared_ptr priorNoise;\nnoiseModel::Diagonal::shared_ptr odomNoise;\nnoiseModel::Diagonal::shared_ptr RTKNoise;\nnoiseModel::Base::shared_ptr robustLoopNoise;\nnoiseModel::Base::shared_ptr robustGPSNoise;\n\npcl::VoxelGrid<PointType> downSizeFilterScancontext;\nSCManager scManager;\ndouble scDistThres, scMaximumRadius;\n\npcl::VoxelGrid<PointType> downSizeFilterICP;\nstd::mutex mtxICP;\nstd::mutex 
mtxPosegraph;\nstd::mutex mtxRecentPose;\n\npcl::PointCloud<PointType>::Ptr laserCloudMapPGO(new pcl::PointCloud<PointType>());\npcl::VoxelGrid<PointType> downSizeFilterMapPGO;\nbool laserCloudMapPGORedraw = true;\n\nbool useRTK = false;\nsensor_msgs::NavSatFix::ConstPtr currGPS;\nbool hasGPSforThisKF = false;\nbool gpsOffsetInitialized = false;\nbool hasRTKforThisKF = false;\nbool rtkOffsetInitialized = false;\ndouble gpsAltitudeInitOffset = 0.0;\ndouble recentOptimizedX = 0.0;\ndouble recentOptimizedY = 0.0;\n\nros::Publisher pubMapAftPGO, pubOdomAftPGO, pubPathAftPGO, pubOdomResultForInteractiveSlam, pubMapResultForInteractiveSlam;\nros::Publisher pubLoopScanLocal, pubLoopSubmapLocal;\nros::Publisher pubOdomRepubVerifier;\n\n// save TUM format path\nstd::string save_path;\nstd::string save_pcd_path;\nstd::shared_ptr<std::ofstream> odo;\n\nstd::string padZeros(int val, int num_digits = 6) {\n  std::ostringstream out;\n  out << std::internal << std::setfill('0') << std::setw(num_digits) << val;\n  return out.str();\n}\n\ngtsam::Pose3 Pose6DtoGTSAMPose3(const Pose6D& p)\n{\n    return gtsam::Pose3( gtsam::Rot3::RzRyRx(p.roll, p.pitch, p.yaw), gtsam::Point3(p.x, p.y, p.z) );\n} // Pose6DtoGTSAMPose3\n\ntf::Transform Pose6DtoTransform(const Pose6D& p)\n{\n    tf::Vector3 t(p.x, p.y, p.z);\n    tf::Quaternion q;\n    q.setRPY(p.roll, p.pitch, p.yaw);\n    tf::Transform tran(q, t);\n    return tf::Transform(q, t);\n} // Pose6DtoTransform\nPose6D TransformtoPose6D(const tf::Transform& trans)\n{\n    double roll, pitch, yaw;\n    tf::Matrix3x3(trans.getRotation()).getRPY(roll, pitch, yaw);\n    tf::Vector3 t=trans.getOrigin();\n    return Pose6D{t[0], t[1], t[2], roll, pitch, yaw}; \n} // TransformtoPose6D\nPose6D PoseInterpolation(const Pose6D& p_before, const Pose6D& p_after, double step_length) {\n    tf::Quaternion q_before, q_after;\n    q_before.setRPY(p_before.roll, p_before.pitch, p_before.yaw);\n    q_after.setRPY(p_after.roll, p_after.pitch, p_after.yaw);\n    
tf::Vector3 t_before(p_before.x, p_before.y, p_before.z);\n    tf::Vector3 t_after(p_after.x, p_after.y, p_after.z);\n    tf::Quaternion q_curr = q_before.slerp(q_after, step_length);\n    tf::Vector3 t_curr = t_before.lerp(t_after, step_length);\n    return TransformtoPose6D(tf::Transform(q_curr, t_curr));\n} // PoseInterpolation\n\nvoid laserOdometryHandler(const nav_msgs::Odometry::ConstPtr &_laserOdometry)\n{\n  static double last_odom_time = 0;\n\tmBuf.lock();\n    if (!useRTK && keyframeTimes.size() &&\n        abs(_laserOdometry->header.stamp.toSec() - last_odom_time) > 1) {\n        std::cout << std::endl << std::endl << std::endl << std::endl;\n        std::cout << \"the time gap is \" << _laserOdometry->header.stamp.toSec() - keyframeTimes.back() << std::endl;\n        std::cout << \"receive the next rosbag\" << std::endl;\n        std::cout << std::endl << std::endl << std::endl << std::endl;\n        gtSAMgraphMade = false;\n    }\n\todometryBuf.push(_laserOdometry);\n  last_odom_time = _laserOdometry->header.stamp.toSec();\n\tmBuf.unlock();\n} // laserOdometryHandler\n\nvoid laserCloudFullResHandler(const sensor_msgs::PointCloud2ConstPtr &_laserCloudFullRes)\n{\n\tmBuf.lock();\n\tfullResBuf.push(_laserCloudFullRes);\n\tmBuf.unlock();\n} // laserCloudFullResHandler\n\nvoid rtkHandler(const ad_localization_msgs::NavStateInfo::ConstPtr &_rtk)\n{\n    static double last_rtk_time = 0;\n    rtkBufLock.lock();\n    if (rtkBuf.size() &&\n        abs(_rtk->header.stamp.toSec() - last_rtk_time) > 0.2) {\n        std::cout << std::endl << std::endl << std::endl << std::endl;\n        std::cout << \"the time gap is \" << _rtk->header.stamp.toSec() - last_rtk_time << std::endl;\n        std::cout << \"receive the next rosbag\" << std::endl;\n        std::cout << std::endl << std::endl << std::endl << std::endl;\n        rtkBuf.clear();\n        rtkOffsetInitialized = false;\n        gtSAMgraphMade = false;\n    }\n    rtkBuf.push_back(_rtk);\n    last_rtk_time = 
_rtk->header.stamp.toSec();\n    rtkBufLock.unlock();\n} // gpsHandler\n\nvoid initNoises( void )\n{\n    gtsam::Vector priorNoiseVector6(6);\n    priorNoiseVector6 << 1e-12, 1e-12, 1e-12, 1e-12, 1e-12, 1e-12;\n    priorNoise = noiseModel::Diagonal::Variances(priorNoiseVector6);\n\n    gtsam::Vector odomNoiseVector6(6);\n    // odomNoiseVector6 << 1e-4, 1e-4, 1e-4, 1e-4, 1e-4, 1e-4;\n    odomNoiseVector6 << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4;\n    odomNoise = noiseModel::Diagonal::Variances(odomNoiseVector6);\n\n    gtsam::Vector RTKNoiseVector6(6);\n    RTKNoiseVector6 << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4;\n    RTKNoise = noiseModel::Diagonal::Variances(RTKNoiseVector6);\n\n    double loopNoiseScore = 0.001; // constant is ok...\n    gtsam::Vector robustNoiseVector6(6); // gtsam::Pose3 factor has 6 elements (6D)\n    robustNoiseVector6 << loopNoiseScore, loopNoiseScore, loopNoiseScore, loopNoiseScore, loopNoiseScore, loopNoiseScore;\n    robustLoopNoise = gtsam::noiseModel::Robust::Create(\n                    gtsam::noiseModel::mEstimator::Cauchy::Create(1), // optional: replacing Cauchy by DCS or GemanMcClure is okay but Cauchy is empirically good.\n                    gtsam::noiseModel::Diagonal::Variances(robustNoiseVector6) );\n\n    double bigNoiseTolerentToXY = 1000000000.0; // 1e9\n    double gpsAltitudeNoiseScore = 250.0; // if height is misaligned after loop closing, use this value bigger\n    gtsam::Vector robustNoiseVector3(3); // gps factor has 3 elements (xyz)\n    robustNoiseVector3 << bigNoiseTolerentToXY, bigNoiseTolerentToXY, gpsAltitudeNoiseScore; // means only caring altitude here. 
(because LOAM-like-methods tends to be asymptotically flyging)\n    robustGPSNoise = gtsam::noiseModel::Robust::Create(\n                    gtsam::noiseModel::mEstimator::Cauchy::Create(1), // optional: replacing Cauchy by DCS or GemanMcClure is okay but Cauchy is empirically good.\n                    gtsam::noiseModel::Diagonal::Variances(robustNoiseVector3) );\n\n} // initNoises\n\nPose6D getOdom(nav_msgs::Odometry::ConstPtr _odom)\n{\n    auto tx = _odom->pose.pose.position.x;\n    auto ty = _odom->pose.pose.position.y;\n    auto tz = _odom->pose.pose.position.z;\n\n    double roll, pitch, yaw;\n    geometry_msgs::Quaternion quat = _odom->pose.pose.orientation;\n    tf::Matrix3x3(tf::Quaternion(quat.x, quat.y, quat.z, quat.w)).getRPY(roll, pitch, yaw);\n\n    return Pose6D{tx, ty, tz, roll, pitch, yaw}; \n} // getOdom\n\nPose6D diffTransformation(const Pose6D& _p1, const Pose6D& _p2)\n{\n    Eigen::Affine3f SE3_p1 = pcl::getTransformation(_p1.x, _p1.y, _p1.z, _p1.roll, _p1.pitch, _p1.yaw);\n    Eigen::Affine3f SE3_p2 = pcl::getTransformation(_p2.x, _p2.y, _p2.z, _p2.roll, _p2.pitch, _p2.yaw);\n    Eigen::Matrix4f SE3_delta0 = SE3_p1.matrix().inverse() * SE3_p2.matrix();\n    Eigen::Affine3f SE3_delta; SE3_delta.matrix() = SE3_delta0;\n    float dx, dy, dz, droll, dpitch, dyaw;\n    pcl::getTranslationAndEulerAngles (SE3_delta, dx, dy, dz, droll, dpitch, dyaw);\n    // std::cout << \"delta : \" << dx << \", \" << dy << \", \" << dz << \", \" << droll << \", \" << dpitch << \", \" << dyaw << std::endl;\n\n    return Pose6D{double(abs(dx)), double(abs(dy)), double(abs(dz)), double(abs(droll)), double(abs(dpitch)), double(abs(dyaw))};\n} // SE3Diff\n\nPose6D convertOdomToENU(const Pose6D &pose_curr)\n{\n    tf::Transform T_curr = Pose6DtoTransform(pose_curr);\n    tf::Transform T_curr_enu;\n    T_curr_enu.mult(odom_to_enu, T_curr);\n    return TransformtoPose6D(T_curr_enu);\n} // getOdom\n\npcl::PointCloud<PointType>::Ptr local2global(const 
pcl::PointCloud<PointType>::Ptr &cloudIn, const Pose6D& tf)
{
    pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());

    int cloudSize = cloudIn->size();
    cloudOut->resize(cloudSize);

    // 4x4 rigid transform built from the pose (translation + RPY).
    Eigen::Affine3f transCur = pcl::getTransformation(tf.x, tf.y, tf.z, tf.roll, tf.pitch, tf.yaw);

    int numberOfCores = 16;
    #pragma omp parallel for num_threads(numberOfCores)
    for (int i = 0; i < cloudSize; ++i)
    {
        const auto &pointFrom = cloudIn->points[i];
        // Row-wise application of the affine transform; intensity is copied through.
        cloudOut->points[i].x = transCur(0,0) * pointFrom.x + transCur(0,1) * pointFrom.y + transCur(0,2) * pointFrom.z + transCur(0,3);
        cloudOut->points[i].y = transCur(1,0) * pointFrom.x + transCur(1,1) * pointFrom.y + transCur(1,2) * pointFrom.z + transCur(1,3);
        cloudOut->points[i].z = transCur(2,0) * pointFrom.x + transCur(2,1) * pointFrom.y + transCur(2,2) * pointFrom.z + transCur(2,3);
        cloudOut->points[i].intensity = pointFrom.intensity;
    }

    return cloudOut;
}
// Rebuild and publish the aggregated PGO map from the updated keyframe poses,
// keeping every SKIP_FRAMES-th keyframe and voxel-downsampling the result.
void pubMap(void)
{
    int SKIP_FRAMES = 2; // sparse map visualization to save computations 
    int counter = 0;

    mKF.lock(); 
    laserCloudMapPGO->clear();
    // for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) {
    for (int node_idx=0; node_idx < recentIdxUpdated; node_idx++) {
        if(counter % SKIP_FRAMES == 0) {
            *laserCloudMapPGO += *local2global(keyframeLaserClouds[node_idx], keyframePosesUpdated[node_idx]);
        }
        counter++;
    }
    mKF.unlock(); 

    downSizeFilterMapPGO.setInputCloud(laserCloudMapPGO);
    downSizeFilterMapPGO.filter(*laserCloudMapPGO);

    sensor_msgs::PointCloud2 laserCloudMapPGOMsg;
    pcl::toROSMsg(*laserCloudMapPGO, laserCloudMapPGOMsg);
    laserCloudMapPGOMsg.header.frame_id = "/camera_init";
    pubMapAftPGO.publish(laserCloudMapPGOMsg);
}

// Publish the optimized odometry (last pose only), the whole optimized path,
// and broadcast the camera_init -> aft_pgo TF from the latest optimized pose.
void pubPath( void )
{
    // pub odom and path 
    nav_msgs::Odometry odomAftPGO;
    nav_msgs::Path pathAftPGO;
    pathAftPGO.header.frame_id = "/camera_init";
    mKF.lock(); 
    // for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()) - 1; node_idx++) // -1 is just delayed visualization (because sometimes mutexed while adding(push_back) a new one)
    for (int node_idx=0; node_idx < recentIdxUpdated; node_idx++) // -1 is just delayed visualization (because sometimes mutexed while adding(push_back) a new one)
    {
        const Pose6D& pose_est = keyframePosesUpdated.at(node_idx); // updated poses
        // const gtsam::Pose3& pose_est = isamCurrentEstimate.at<gtsam::Pose3>(node_idx);

        nav_msgs::Odometry odomAftPGOthis;
        odomAftPGOthis.header.frame_id = "/camera_init";
        odomAftPGOthis.child_frame_id = "/aft_pgo";
        odomAftPGOthis.header.stamp = ros::Time().fromSec(keyframeTimes.at(node_idx));
        odomAftPGOthis.pose.pose.position.x = pose_est.x;
        odomAftPGOthis.pose.pose.position.y = pose_est.y;
        odomAftPGOthis.pose.pose.position.z = pose_est.z;
        odomAftPGOthis.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(pose_est.roll, pose_est.pitch, pose_est.yaw);
        odomAftPGO = odomAftPGOthis; // after the loop, holds the most recent pose

        geometry_msgs::PoseStamped poseStampAftPGO;
        poseStampAftPGO.header = odomAftPGOthis.header;
        poseStampAftPGO.pose = odomAftPGOthis.pose.pose;

        pathAftPGO.header.stamp = odomAftPGOthis.header.stamp;
        pathAftPGO.header.frame_id = "/camera_init";
        pathAftPGO.poses.push_back(poseStampAftPGO);
    }
    mKF.unlock(); 
    pubOdomAftPGO.publish(odomAftPGO); // last pose 
    pubPathAftPGO.publish(pathAftPGO); // poses 

    static tf::TransformBroadcaster br;
    tf::Transform transform;
    tf::Quaternion q;
    transform.setOrigin(tf::Vector3(odomAftPGO.pose.pose.position.x, odomAftPGO.pose.pose.position.y, odomAftPGO.pose.pose.position.z));
    q.setW(odomAftPGO.pose.pose.orientation.w);
    q.setX(odomAftPGO.pose.pose.orientation.x);
    q.setY(odomAftPGO.pose.pose.orientation.y);
    q.setZ(odomAftPGO.pose.pose.orientation.z);
    transform.setRotation(q);
    br.sendTransform(tf::StampedTransform(transform, odomAftPGO.header.stamp, "/camera_init", "/aft_pgo"));
} // pubPath

// Copy the latest iSAM2 estimate back into keyframePosesUpdated, cache the
// most recent optimized XY, and record the index up to which poses are valid
// for the publisher threads (recentIdxUpdated).
void updatePoses(void)
{
    mKF.lock(); 
    for (int node_idx=0; node_idx < int(isamCurrentEstimate.size()); node_idx++)
    {
        Pose6D& p =keyframePosesUpdated[node_idx];
        p.x = isamCurrentEstimate.at<gtsam::Pose3>(node_idx).translation().x();
        p.y = isamCurrentEstimate.at<gtsam::Pose3>(node_idx).translation().y();
        p.z = isamCurrentEstimate.at<gtsam::Pose3>(node_idx).translation().z();
        p.roll = isamCurrentEstimate.at<gtsam::Pose3>(node_idx).rotation().roll();
        p.pitch = isamCurrentEstimate.at<gtsam::Pose3>(node_idx).rotation().pitch();
        p.yaw = isamCurrentEstimate.at<gtsam::Pose3>(node_idx).rotation().yaw();
    }
    mKF.unlock();

    mtxRecentPose.lock();
    // NOTE(review): if isamCurrentEstimate is empty this indexes key -1 and
    // throws — callers must only invoke this after the first optimization.
    const gtsam::Pose3& lastOptimizedPose = isamCurrentEstimate.at<gtsam::Pose3>(int(isamCurrentEstimate.size())-1);
    recentOptimizedX = lastOptimizedPose.translation().x();
    recentOptimizedY = lastOptimizedPose.translation().y();

    recentIdxUpdated = int(keyframePosesUpdated.size()) - 1;

    mtxRecentPose.unlock();
} // updatePoses

// Feed the pending factors/values to iSAM2, run one extra relinearization
// pass, clear the staging graph, and refresh the cached pose estimates.
void runISAM2opt(void)
{
    // called when a variable added 
    isam->update(gtSAMgraph, initialEstimate);
    isam->update();
    
    gtSAMgraph.resize(0); // staged factors are now owned by iSAM2
    initialEstimate.clear();

    isamCurrentEstimate = isam->calculateEstimate();
    updatePoses();
}

// Transform a cloud by a gtsam::Pose3 (same math as local2global, different
// pose type).
pcl::PointCloud<PointType>::Ptr transformPointCloud(pcl::PointCloud<PointType>::Ptr cloudIn, gtsam::Pose3 transformIn)
{
    pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());

    // NOTE(review): this single pointer is shared by every thread of the
    // OpenMP loop below (declared outside the parallel region) — a data race;
    // it should be declared inside the loop body as in local2global.
    PointType *pointFrom;

    int cloudSize = cloudIn->size();
    cloudOut->resize(cloudSize);

    
Eigen::Affine3f transCur = pcl::getTransformation(
                                    transformIn.translation().x(), transformIn.translation().y(), transformIn.translation().z(), 
                                    transformIn.rotation().roll(), transformIn.rotation().pitch(), transformIn.rotation().yaw() );
    
    int numberOfCores = 8; // TODO move to yaml 
    #pragma omp parallel for num_threads(numberOfCores)
    for (int i = 0; i < cloudSize; ++i)
    {
        pointFrom = &cloudIn->points[i];
        cloudOut->points[i].x = transCur(0,0) * pointFrom->x + transCur(0,1) * pointFrom->y + transCur(0,2) * pointFrom->z + transCur(0,3);
        cloudOut->points[i].y = transCur(1,0) * pointFrom->x + transCur(1,1) * pointFrom->y + transCur(1,2) * pointFrom->z + transCur(1,3);
        cloudOut->points[i].z = transCur(2,0) * pointFrom->x + transCur(2,1) * pointFrom->y + transCur(2,2) * pointFrom->z + transCur(2,3);
        cloudOut->points[i].intensity = pointFrom->intensity;
    }
    return cloudOut;
} // transformPointCloud

// Stack up to 2*submap_size+1 keyframe clouds around `key`:
//   - nearKeyframes:    expressed in the frame of keyframe `key` (ICP input)
//   - nearKeyframesViz: expressed in the global frame (visualization)
// Both outputs are voxel-downsampled with downSizeFilterICP afterwards.
// NOTE(review): nearKeyframesViz is never cleared here (only nearKeyframes
// is) — callers must pass a fresh cloud or contents will accumulate.
void loopFindNearKeyframesCloud( pcl::PointCloud<PointType>::Ptr& nearKeyframes, pcl::PointCloud<PointType>::Ptr& nearKeyframesViz, const int& key, const int& submap_size)
{
    // extract and stacking near keyframes (in global coord)
    nearKeyframes->clear();
    mKF.lock();
    tf::Transform T_key = Pose6DtoTransform(keyframePosesUpdated[key]);
    for (int i = -submap_size; i <= submap_size; ++i) {
        int keyNear = key + i;
        if (keyNear < 0 || keyNear >= int(keyframeLaserClouds.size()) )
            continue;

        tf::Transform T_keyNear = Pose6DtoTransform(keyframePosesUpdated[keyNear]);
        // pose of keyNear expressed relative to key: T_key^-1 * T_keyNear
        Pose6D pose_relative = TransformtoPose6D(T_key.inverseTimes(T_keyNear));
        *nearKeyframes += *local2global(keyframeLaserClouds[keyNear], pose_relative);
        *nearKeyframesViz += *local2global(keyframeLaserClouds[keyNear], keyframePosesUpdated[keyNear]);
        
    }
    mKF.unlock(); 

    if (nearKeyframes->empty())
        return;

    // downsample near keyframes
    pcl::PointCloud<PointType>::Ptr cloud_temp(new pcl::PointCloud<PointType>());
    downSizeFilterICP.setInputCloud(nearKeyframes);
    downSizeFilterICP.filter(*cloud_temp);
    *nearKeyframes = *cloud_temp;
    downSizeFilterICP.setInputCloud(nearKeyframesViz);
    downSizeFilterICP.filter(*cloud_temp);
    *nearKeyframesViz = *cloud_temp;
} // loopFindNearKeyframesCloud


// Verify a loop-closure candidate by ICP between the current keyframe scan
// and a submap around the loop keyframe. Rejects candidates whose XY gap
// exceeds 5 m or whose ICP fitness is worse than the threshold. On success,
// returns the relative pose (loop -> curr) to be added as a BetweenFactor.
std::optional<gtsam::Pose3> doICPVirtualRelative( int _loop_kf_idx, int _curr_kf_idx )
{
    // Check that the loopGap is correct
    Pose6D pose_loop;
    Pose6D pose_curr;
    if (useRTK) {
      // In RTK mode, only accept loops where both keyframes have an RTK pose.
      rtkKFLock.lock();
      auto it_loop = keyframePosesRTK.find(_loop_kf_idx);
      auto it_curr = keyframePosesRTK.find(_curr_kf_idx);
      if (it_loop != keyframePosesRTK.end() &&
          it_curr != keyframePosesRTK.end()) {
        pose_loop = it_loop->second;
        pose_curr = it_curr->second;
        rtkKFLock.unlock();
      } else {
        rtkKFLock.unlock();
        std::cout << "this loop don't have rtk init pose, so reject" << std::endl;
        return std::nullopt;
      }
    } else {
      // NOTE(review): keyframePosesUpdated is read here without holding mKF,
      // while updatePoses()/process_pg mutate it under that lock — confirm
      // whether this unlocked read is acceptable.
      pose_loop = keyframePosesUpdated[_loop_kf_idx];
      pose_curr = keyframePosesUpdated[_curr_kf_idx];
    }
    double loopGapThreshold = 5.0;
    double dx = pose_loop.x - pose_curr.x, dy = pose_loop.y - pose_curr.y;
    double loopGap = std::sqrt(dx * dx + dy * dy);
    if (loopGap > loopGapThreshold) {
      std::cout << "loopGap is too big " << std::endl;
      std::cout << "loopGap = " << loopGap << std::endl;
      std::cout << "pose_curr = " << pose_curr.x << " " << pose_curr.y << " " << pose_curr.z << std::endl;
      std::cout << "pose_loop = " << pose_loop.x << " " << pose_loop.y << " " << pose_loop.z << std::endl;
      return std::nullopt;
    }

    // parse pointclouds
    int historyKeyframeSearchNum = 25; // enough. ex. [-25, 25] covers submap length of 50x1 = 50m if every kf gap is 1m
    pcl::PointCloud<PointType>::Ptr currKeyframeCloud(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr currKeyframeCloudViz(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr icpKeyframeCloudViz(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr targetKeyframeCloud(new pcl::PointCloud<PointType>());
    pcl::PointCloud<PointType>::Ptr targetKeyframeCloudViz(new pcl::PointCloud<PointType>());
    loopFindNearKeyframesCloud(currKeyframeCloud, currKeyframeCloudViz, _curr_kf_idx, 0); // use same root of loop kf idx 
    loopFindNearKeyframesCloud(targetKeyframeCloud, targetKeyframeCloudViz, _loop_kf_idx, historyKeyframeSearchNum); 

    // ICP Settings
    pcl::IterativeClosestPoint<PointType, PointType> icp;
    icp.setMaxCorrespondenceDistance(150); // giseop , use a value can cover 2*historyKeyframeSearchNum range in meter 
    icp.setMaximumIterations(100);
    icp.setTransformationEpsilon(1e-6);
    icp.setEuclideanFitnessEpsilon(1e-6);
    icp.setRANSACIterations(0);

    // cal the the initial position transform (loop -> curr) used as ICP guess
    Eigen::Isometry3d T_init = Eigen::Isometry3d::Identity();
    tf::Transform T_relative;
    tf::Transform T_loop = Pose6DtoTransform(pose_loop);
    tf::Transform T_curr = Pose6DtoTransform(pose_curr);
    T_relative = T_loop.inverseTimes(T_curr);
    tf::transformTFToEigen (T_relative, T_init);

    // Align pointclouds
    icp.setInputSource(currKeyframeCloud);
    icp.setInputTarget(targetKeyframeCloud);
    pcl::PointCloud<PointType>::Ptr unused_result(new pcl::PointCloud<PointType>());
    icp.align(*unused_result, T_init.matrix().cast<float>());

    float loopFitnessScoreThreshold = 0.3; // user parameter but fixed low value is safe. 
    if (icp.hasConverged() == false || icp.getFitnessScore() > loopFitnessScoreThreshold) {
        std::cout << "[SC loop] ICP fitness test failed (" << icp.getFitnessScore() << " > " << loopFitnessScoreThreshold << "). Reject this SC loop." << std::endl;
        return std::nullopt;
    } else {
        std::cout << "[SC loop] ICP fitness test passed (" << icp.getFitnessScore() << " < " << loopFitnessScoreThreshold << "). Add this SC loop." << std::endl;
    }
    // icp verification: publish the target submap and the aligned scan
    sensor_msgs::PointCloud2 targetKeyframeCloudMsg;
    pcl::toROSMsg(*targetKeyframeCloudViz, targetKeyframeCloudMsg);
    targetKeyframeCloudMsg.header.frame_id = "/camera_init";
    pubLoopSubmapLocal.publish(targetKeyframeCloudMsg);

    *icpKeyframeCloudViz += *local2global(unused_result, pose_loop);
    sensor_msgs::PointCloud2 icpKeyframeCloudMsg;
    pcl::toROSMsg(*icpKeyframeCloudViz, icpKeyframeCloudMsg);
    icpKeyframeCloudMsg.header.frame_id = "/camera_init";
    pubLoopScanLocal.publish(icpKeyframeCloudMsg);

    // Get pose transformation
    float x, y, z, roll, pitch, yaw;
    Eigen::Affine3f correctionLidarFrame;
    correctionLidarFrame = icp.getFinalTransformation();
    pcl::getTranslationAndEulerAngles (correctionLidarFrame, x, y, z, roll, pitch, yaw);
    gtsam::Pose3 poseTo = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z));
    gtsam::Pose3 poseFrom = Pose3(Rot3::RzRyRx(0.0, 0.0, 0.0), Point3(0.0, 0.0, 0.0));
    return poseFrom.between(poseTo);
} // doICPVirtualRelative

// Main pose-graph thread: pairs odometry with registered clouds, selects
// keyframes by accumulated motion, optionally anchors them to RTK, and adds
// prior/odometry factors to the GTSAM graph.
void process_pg()
{
    while(1)
    {
		while ( !odometryBuf.empty() && !fullResBuf.empty() )
        {
            //
            // pop and check keyframe is or not  
            // 
			mBuf.lock();       
            // drop odometry older than the oldest cloud so stamps can match
            while (!odometryBuf.empty() && odometryBuf.front()->header.stamp.toSec() < fullResBuf.front()->header.stamp.toSec())
                odometryBuf.pop();
            if (odometryBuf.empty())
            
{
                mBuf.unlock();
                break;
            }

            // Time equal check
            timeLaserOdometry = odometryBuf.front()->header.stamp.toSec();
            timeLaser = fullResBuf.front()->header.stamp.toSec();
            if (abs(timeLaserOdometry - timeLaser) > 0.01) {
              std::cout << "timeLaserOdometry - timeLaser = " << timeLaserOdometry - timeLaser << std::endl;
              // NOTE(review): this `return` exits the whole process_pg thread
              // permanently, and it does so while mBuf is still LOCKED — the
              // ROS callbacks that take mBuf will deadlock. A `break` after
              // mBuf.unlock() (skip this frame, retry later) looks intended.
              return;
            }
            // TODO

            laserCloudFullRes->clear();
            pcl::PointCloud<PointType>::Ptr thisKeyFrame(new pcl::PointCloud<PointType>());
            pcl::fromROSMsg(*fullResBuf.front(), *thisKeyFrame);
            fullResBuf.pop();

            Pose6D pose_curr = getOdom(odometryBuf.front());
            ros::Time time_stamp = odometryBuf.front()->header.stamp;
            odometryBuf.pop();
            mBuf.unlock(); 

            // check the rtk data
            if (useRTK) {
              rtkBufLock.lock();
              // Require RTK data bracketing the odometry stamp.
              if (rtkBuf.empty() || 
                  rtkBuf.back()->header.stamp.toSec() < timeLaserOdometry ||
                  rtkBuf.front()->header.stamp.toSec() > timeLaserOdometry) {
                  std::cout << "rtk data is abnormal" << std::endl;
                  std::cout << "rtk's front time is " << rtkBuf.front()->header.stamp << std::endl;
                  std::cout << "odo's front time is " << time_stamp << std::endl;
                  std::cout << "rtk's back time is " << rtkBuf.back()->header.stamp << std::endl;
                  std::cout << "rtk's size is " << rtkBuf.size() << std::endl;
                  rtkBufLock.unlock();
                  // NOTE(review): also terminates the thread (see above).
                  return;
              }
              // find the first and nearest rtk
              ad_localization_msgs::NavStateInfo::ConstPtr preRTK;
              while (!rtkBuf.empty() && rtkBuf.front()->header.stamp.toSec() < timeLaserOdometry) {
                preRTK = rtkBuf.front();
                rtkBuf.pop_front();
              }
              rtkBufLock.unlock();
              // NOTE(review): rtkBuf.front() is read below AFTER releasing
              // rtkBufLock — rtkHandler may concurrently clear/push the
              // buffer; these reads should stay inside the locked section.
              double frame_time_interval = rtkBuf.front()->header.stamp.toSec() - preRTK->header.stamp.toSec();
              if (preRTK->header.stamp.toSec() < timeLaserOdometry &&
                  rtkBuf.front()->header.stamp.toSec() > timeLaserOdometry &&
                  abs(frame_time_interval) < 0.1) {
                    // Interpolate the RTK pose at the odometry stamp, then
                    // move it from the GNSS antenna to the lidar frame.
                    Pose6D rtkPoseBefore {preRTK->position_enu.x, preRTK->position_enu.y, preRTK->position_enu.z, 
                                          preRTK->roll, preRTK->pitch, preRTK->yaw};
                    Pose6D rtkPoseAfter {rtkBuf.front()->position_enu.x, rtkBuf.front()->position_enu.y, rtkBuf.front()->position_enu.z, 
                                          rtkBuf.front()->roll, rtkBuf.front()->pitch, rtkBuf.front()->yaw};
                    double step_length = (timeLaserOdometry - preRTK->header.stamp.toSec()) / frame_time_interval;
                    currRTK = PoseInterpolation(rtkPoseBefore, rtkPoseAfter, step_length);
                    currRTK = TransformtoPose6D(Pose6DtoTransform(currRTK) * T_livox2gnss);
                    hasRTKforThisKF = true;
              } else {
                    hasRTKforThisKF = false;
                    std::cout << "rtk data's gap is too long" << std::endl;
                    // NOTE(review): also terminates the thread (see above).
                    return;
              }
            }
            //
            // Early reject by counting local delta movement (for equi-separated kf drop)
            // 
            odom_pose_prev = odom_pose_curr;
            odom_pose_curr = pose_curr;
            Pose6D dtf = diffTransformation(odom_pose_prev, odom_pose_curr); // dtf means delta_transform

            double delta_translation = sqrt(dtf.x*dtf.x + dtf.y*dtf.y + dtf.z*dtf.z); // note: absolute value. 
            translationAccumulated += delta_translation;
            rotaionAccumulated += (dtf.roll + dtf.pitch + dtf.yaw); // sum just naive approach.  

            // Keyframe gate: enough accumulated translation OR rotation.
            if( translationAccumulated > keyframeMeterGap || rotaionAccumulated > keyframeRadGap ) {
                isNowKeyFrame = true;
                translationAccumulated = 0.0; // reset 
                rotaionAccumulated = 0.0; // reset 
            } else {
                isNowKeyFrame = false;
            }

            if( ! isNowKeyFrame ) 
                continue;

            // One-time odom -> ENU alignment from the first RTK-matched
            // keyframe; roll/pitch of the offset are zeroed so only yaw and
            // translation are aligned.
            if (useRTK && !rtkOffsetInitialized) {
              odom_to_enu = Pose6DtoTransform(currRTK) * Pose6DtoTransform(pose_curr).inverse();
              Pose6D odom_to_enu_pose = TransformtoPose6D(odom_to_enu);
              odom_to_enu_pose.roll = 0;
              odom_to_enu_pose.pitch = 0;
              odom_to_enu = Pose6DtoTransform(odom_to_enu_pose);
              std::cout << "enu pose is " << currRTK.x << " " << currRTK.y << " " << currRTK.z << " "
                        << currRTK.roll << " " << currRTK.pitch << " " << currRTK.yaw << " "
                        << std::endl;
              rtkOffsetInitialized = true;
            }
            // convert the current pose to enu
            if (useRTK) {
              if (rtkOffsetInitialized) {
                pose_curr = convertOdomToENU(pose_curr);
              } else {
                std::cout << "rtk's Initialize is abnormal" << time_stamp << std::endl;
                break;
              }
            }
            //
            // Save data and Add consecutive node 
            //
            pcl::PointCloud<PointType>::Ptr thisKeyFrameDS(new pcl::PointCloud<PointType>());
            downSizeFilterScancontext.setInputCloud(thisKeyFrame);
            downSizeFilterScancontext.filter(*thisKeyFrameDS);

            mKF.lock(); 
            keyframeLaserClouds.push_back(thisKeyFrameDS);
            keyframePoses.push_back(pose_curr);
            keyframePosesUpdated.push_back(pose_curr); // init
            keyframeTimes.push_back(timeLaserOdometry);
            keyframeTimesOri.push_back(time_stamp);
            scManager.makeAndSaveScancontextAndKeys(*thisKeyFrameDS);
            laserCloudMapPGORedraw = true;
            mKF.unlock(); 

            rtkKFLock.lock();
            if (hasRTKforThisKF) {
              keyframePosesRTK[keyframePoses.size() - 1] = currRTK;
            }
            rtkKFLock.unlock();
            const int prev_node_idx = keyframePoses.size() - 2; 
            const int curr_node_idx = keyframePoses.size() - 1; // because cpp starts with 0 (actually this index could be any number, but for simple implementation, we follow sequential indexing)
            if( ! gtSAMgraphMade /* prior node */) {
                int init_node_idx = 0;
                // After a detected rosbag restart the prior is re-anchored at
                // the current node with a very loose noise model.
                // NOTE(review): this overwrites the global `priorNoise` set in
                // initNoises() for the rest of the run.
                if (curr_node_idx > 1) {
                    gtsam::Vector priorNoiseVector6(6);
                    // set a big priorNoise
                    priorNoiseVector6 << 1e6, 1e6, 1e6, 1e6, 1e6, 1e6;
                    priorNoise = noiseModel::Diagonal::Variances(priorNoiseVector6);
                    init_node_idx = curr_node_idx;
                }
                gtsam::Pose3 poseOrigin = Pose6DtoGTSAMPose3(keyframePoses.at(init_node_idx));

                mtxPosegraph.lock();
                {
                    // prior factor 
                    gtSAMgraph.add(gtsam::PriorFactor<gtsam::Pose3>(init_node_idx, poseOrigin, priorNoise));
                    initialEstimate.insert(init_node_idx, poseOrigin);
                    // runISAM2opt(); 
                }
                mtxPosegraph.unlock();

                gtSAMgraphMade = true; 

                cout << "posegraph prior node " << init_node_idx << " added" << endl;
            } else /* consecutive node (and odom factor) after the prior added */ { // == keyframePoses.size() > 1 
                gtsam::Pose3 poseFrom = Pose6DtoGTSAMPose3(keyframePoses.at(prev_node_idx));
                gtsam::Pose3 poseTo = Pose6DtoGTSAMPose3(keyframePoses.at(curr_node_idx));

                mtxPosegraph.lock();
                {
                    // odom factor
                    gtSAMgraph.add(gtsam::BetweenFactor<gtsam::Pose3>(prev_node_idx, curr_node_idx, poseFrom.between(poseTo), odomNoise));

                    // // rtk factor 
                    // if(hasRTKforThisKF) {
                    //     gtsam::Pose3 poseRTK = Pose6DtoGTSAMPose3(currRTK);
                    //     gtSAMgraph.add(gtsam::PriorFactor<gtsam::Pose3>(curr_node_idx, poseRTK, RTKNoise));
                    //     cout << "RTK factor added at node " << curr_node_idx << endl;
                    // }
                    initialEstimate.insert(curr_node_idx, poseTo);                
                    // runISAM2opt();
                }
                mtxPosegraph.unlock();

                if(curr_node_idx % 100 == 0)
                    cout << "posegraph odom node " << curr_node_idx << " added." << endl;
            }
            // if want to print the current graph, use gtSAMgraph.print("\nFactor Graph:\n");
        }

        // ps. 
        // scan context detector is running in another thread (in constant Hz, e.g., 1 Hz)
        // pub path and point cloud in another thread

        // wait (must required for running the while loop)
        std::chrono::milliseconds dura(2);
        std::this_thread::sleep_for(dura);
    }
} // process_pg

// Run one Scan Context query against the keyframe database and queue any
// detected (loop, current) index pair for ICP verification in process_icp.
void performSCLoopClosure(void)
{
    if( int(keyframePoses.size()) < scManager.NUM_EXCLUDE_RECENT) // do not try too early 
        return;

    auto detectResult = scManager.detectLoopClosureID(); // first: nn index, second: yaw diff 
    int SCclosestHistoryFrameID = detectResult.first;
    if( SCclosestHistoryFrameID != -1 ) { 
        const int prev_node_idx = SCclosestHistoryFrameID;
        const int curr_node_idx = keyframePoses.size() - 1; // because cpp starts 0 and ends n-1
        cout << "Loop detected! - between " << prev_node_idx << " and " << curr_node_idx << "" << endl;

        mBuf.lock();
        scLoopICPBuf.push(std::pair<int, int>(prev_node_idx, curr_node_idx));
        // adding actual 6D constraints in the other thread, icp_calculation.
        mBuf.unlock();
    }
} // performSCLoopClosure

// Loop-closure detection thread: run Scan Context at a fixed rate.
void process_lcd(void)
{
    float loopClosureFrequency = 1.0; // can change 
    ros::Rate rate(loopClosureFrequency);
    while (ros::ok())
    {
        rate.sleep();
        performSCLoopClosure();
        // performRSLoopClosure(); // TODO
    }
} // process_lcd

// ICP verification thread: drain the loop-candidate queue, verify each pair
// with doICPVirtualRelative, and add accepted loops as robust BetweenFactors.
void process_icp(void)
{
    while(1)
    {
		while ( !scLoopICPBuf.empty() )
        {
            if( scLoopICPBuf.size() > 30 ) {
                ROS_WARN("Too many loop clousre candidates to be ICPed is waiting ... 
Do process_lcd less frequently (adjust loopClosureFrequency)");
            }

            mBuf.lock(); 
            std::pair<int, int> loop_idx_pair = scLoopICPBuf.front();
            scLoopICPBuf.pop();
            mBuf.unlock(); 

            const int prev_node_idx = loop_idx_pair.first;
            const int curr_node_idx = loop_idx_pair.second;
            auto relative_pose_optional = doICPVirtualRelative(prev_node_idx, curr_node_idx);
            if(relative_pose_optional) {
                gtsam::Pose3 relative_pose = relative_pose_optional.value();
                mtxPosegraph.lock();
                gtSAMgraph.add(gtsam::BetweenFactor<gtsam::Pose3>(prev_node_idx, curr_node_idx, relative_pose, robustLoopNoise));
                // runISAM2opt();
                mtxPosegraph.unlock();
            }
        }

        // wait (must required for running the while loop)
        std::chrono::milliseconds dura(2);
        std::this_thread::sleep_for(dura);
    }
} // process_icp

// Path/odometry publisher thread (10 Hz).
void process_viz_path(void)
{
    float hz = 10.0; 
    ros::Rate rate(hz);
    while (ros::ok()) {
        rate.sleep();
        if(recentIdxUpdated > 1) {
            pubPath();
        }
    }
}

// Optimization thread: run iSAM2 once per second once the graph exists.
void process_isam(void)
{
    float hz = 1; 
    ros::Rate rate(hz);
    while (ros::ok()) {
        rate.sleep();
        if( gtSAMgraphMade ) {
            mtxPosegraph.lock();
            runISAM2opt();
            cout << "running isam2 optimization ..." << endl;
            mtxPosegraph.unlock();
        }
    }
}

// Map publisher thread.
void process_viz_map(void)
{
    float vizmapFrequency = 0.5; // 0.1 means run onces every 10s
    ros::Rate rate(vizmapFrequency);
    while (ros::ok()) {
        rate.sleep();
        // if(recentIdxUpdated > 1) {
        pubMap();
        // }
    }
} // pointcloud_viz

// Replay every keyframe (cloud + optimized pose) at 50 Hz for consumption by
// interactive_slam. Called once at shutdown from MySigintHandler.
// NOTE(review): mKF is held for the WHOLE playback, including every
// rate.sleep() — all other threads touching keyframes stall meanwhile.
void process_interactive_slam_result() {
  ros::Rate rate(50);
  mKF.lock();
  for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) {
    // pub Map for interactive slam
    sensor_msgs::PointCloud2 laserCloudMapPGOMsg;
    pcl::toROSMsg(*keyframeLaserClouds[node_idx], laserCloudMapPGOMsg);
    laserCloudMapPGOMsg.header.frame_id = "/camera_init";
    laserCloudMapPGOMsg.header.stamp = keyframeTimesOri.at(node_idx);
    pubMapResultForInteractiveSlam.publish(laserCloudMapPGOMsg);
    // pub Odom for interactive slam
    const Pose6D& pose_est = keyframePosesUpdated.at(node_idx); // updated poses
    nav_msgs::Odometry odomAftPGOthis;
    odomAftPGOthis.header.frame_id = "/camera_init";
    odomAftPGOthis.child_frame_id = "/aft_pgo";
    odomAftPGOthis.header.stamp = laserCloudMapPGOMsg.header.stamp;
    odomAftPGOthis.pose.pose.position.x = pose_est.x;
    odomAftPGOthis.pose.pose.position.y = pose_est.y;
    odomAftPGOthis.pose.pose.position.z = pose_est.z;
    odomAftPGOthis.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(pose_est.roll, pose_est.pitch, pose_est.yaw);
    pubOdomResultForInteractiveSlam.publish(odomAftPGOthis); // last pose 
    rate.sleep();
  }
  mKF.unlock();
} // process_interactive_slam_result

// Dump the optimized trajectory to the `odo` output stream, one keyframe per
// line: "stamp x y z roll pitch yaw".
// NOTE(review): positions are taken from pose_rtk (pose shifted to the GNSS
// frame via T_livox2gnss^-1) while the angles written are pose_est's — verify
// this frame mix is intended. Also, `const Pose6D&` binds to a temporary here
// (lifetime-extended, so legal, but a plain value would be clearer).
void save_odo() {
  std::cout << "save_path = " << save_path << std::endl;
  mKF.lock();
  for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) {
    const Pose6D& pose_est = keyframePosesUpdated.at(node_idx); // updated poses
    const Pose6D& pose_rtk = TransformtoPose6D(Pose6DtoTransform(pose_est) * T_livox2gnss.inverse());
    std::stringstream ss_x, ss_y, ss_z;
    ss_x.precision(7);
    ss_x.setf(std::ios::fixed);
    ss_y.precision(7);
    ss_y.setf(std::ios::fixed);
    ss_z.precision(7);
    ss_z.setf(std::ios::fixed);
    ss_x << pose_rtk.x;
    ss_y << pose_rtk.y;
    ss_z << pose_rtk.z;
    *odo //<< std::setw(20) 
        // << node_idx << " "
        << keyframeTimesOri.at(node_idx) << " " 
        << ss_x.str() << " " << ss_y.str() << " " << ss_z.str() << " "
        << pose_est.roll << " " << pose_est.pitch << " " << pose_est.yaw << " "
        // << 0 << " " << 0 << " " << 0 << " " << 1
        << std::endl;
  }
  mKF.unlock();
} // save_odo

// Assemble the full optimized map (no keyframe skipping, no downsampling)
// and write it to <save_pcd_path>/scans_lc.pcd.
void save_map() {
  std::string all_points_dir(save_pcd_path + "scans_lc.pcd");
  std::cout << "all_points_dir = " << all_points_dir << std::endl;
  mKF.lock();
  laserCloudMapPGO->clear();
  for (int node_idx=0; node_idx < int(keyframePosesUpdated.size()); node_idx++) {
    *laserCloudMapPGO += *local2global(keyframeLaserClouds[node_idx], keyframePosesUpdated[node_idx]);
  }
  pcl::io::savePCDFile(all_points_dir, *laserCloudMapPGO);
  mKF.unlock();
} // save_map

// SIGINT handler: flush trajectory, map, and interactive_slam data before
// shutting ROS down.
// NOTE(review): this does heavy non-async-signal-safe work (file I/O, mutex
// locks, ROS publishing) — acceptable only if it is installed as a deferred
// ROS-style shutdown hook rather than a raw signal(2) handler; confirm how it
// is registered.
void MySigintHandler(int sig)
{
  // Before exit the main thread, the odom result and the map data will be saved and sent to interactive_slam
	ROS_INFO("shutting down!");
  save_odo();
  save_map();
  process_interactive_slam_result();
	ros::shutdown();
}


int main(int argc, char **argv)
{
	ros::init(argc, argv, "laserPGO");
	ros::NodeHandle nh;

  nh.param<std::string>("save_path", save_path, "/");
  // NOTE(review): the label below says "save_pcd_path" but prints save_path —
  // the log message is misleading (should read "save_path").
  std::cout << "save_pcd_path = " << save_path << std::endl;
  nh.param<std::string>("save_pcd_path", save_pcd_path, "/");
  std::cout << "save_pcd_path = " << save_pcd_path << std::endl;
  odo.reset(new std::ofstream(save_path));

	nh.param<double>("keyframe_meter_gap", keyframeMeterGap, 2.0); // pose assignment every k m move 
	nh.param<double>("keyframe_deg_gap", keyframeDegGap, 10.0); // pose assignment every k deg rot 
  keyframeRadGap = deg2rad(keyframeDegGap);
  std:: cout << "keyframeMeterGap = " << keyframeMeterGap << std:: endl;
  std:: cout << "keyframeDegGap = " << keyframeDegGap << std:: endl;
  std:: cout << "keyframeRadGap = " << keyframeRadGap << std:: endl;
	nh.param<double>("sc_dist_thres", scDistThres, 0.2);
	nh.param<double>("sc_max_radius", scMaximumRadius, 80.0); // 80 is recommended for outdoor, and lower (ex, 20, 40) values are recommended for indoor 

  nh.param<bool>("useRTK", useRTK, false);
  std:: cout << "useRTK = " << useRTK << std:: endl;

  std::vector<double> vecTlivox2gnss;
	ros::param::get("Extrinsic_T_livox2gnss", vecTlivox2gnss);

  ISAM2Params parameters;
  parameters.relinearizeThreshold = 0.01;
  parameters.relinearizeSkip = 1;
  isam = new ISAM2(parameters);
  initNoises();

  scManager.setSCdistThres(scDistThres);
  scManager.setMaximumRadius(scMaximumRadius);

  float filter_size = 0.4; 
  downSizeFilterScancontext.setLeafSize(filter_size, filter_size, filter_size);
  downSizeFilterICP.setLeafSize(filter_size, filter_size, filter_size);

  double mapVizFilterSize;
	nh.param<double>("mapviz_filter_size", mapVizFilterSize, 0.4); // pose assignment every k frames 
  downSizeFilterMapPGO.setLeafSize(mapVizFilterSize, mapVizFilterSize, mapVizFilterSize);

	ros::Subscriber subLaserCloudFullRes = nh.subscribe<sensor_msgs::PointCloud2>("/velodyne_cloud_registered_local", 100, laserCloudFullResHandler);
	ros::Subscriber subLaserOdometry = nh.subscribe<nav_msgs::Odometry>("/aft_mapped_to_init", 100, laserOdometryHandler);
  ros::Subscriber subRTK = nh.subscribe<ad_localization_msgs::NavStateInfo>("/localization/navstate_info", 100, rtkHandler);

	pubOdomAftPGO = nh.advertise<nav_msgs::Odometry>("/aft_pgo_odom", 100);
	pubOdomRepubVerifier = nh.advertise<nav_msgs::Odometry>("/repub_odom", 100);
	pubPathAftPGO = nh.advertise<nav_msgs::Path>("/aft_pgo_path", 100);
	pubMapAftPGO = nh.advertise<sensor_msgs::PointCloud2>("/aft_pgo_map", 100);
	pubLoopScanLocal = nh.advertise<sensor_msgs::PointCloud2>("/loop_scan_local", 100);
	pubLoopSubmapLocal = nh.advertise<sensor_msgs::PointCloud2>("/loop_submap_local", 100);

  // save interactive_slam_pose
  pubOdomResultForInteractiveSlam = nh.advertise<nav_msgs::Odometry>("/interactive_slam_odom", 1000);
 
 pubMapResultForInteractiveSlam = nh.advertise<sensor_msgs::PointCloud2>(\"/interactive_slam_map\", 1000);\n  signal(SIGINT, MySigintHandler);\n  Eigen::Isometry3d T_livox2gnss_temp;\n  T_livox2gnss_temp.matrix() << vecTlivox2gnss[0], vecTlivox2gnss[1], vecTlivox2gnss[2], vecTlivox2gnss[3],\n\t                              vecTlivox2gnss[4], vecTlivox2gnss[5], vecTlivox2gnss[6], vecTlivox2gnss[7],\n\t                              vecTlivox2gnss[8], vecTlivox2gnss[9], vecTlivox2gnss[10], vecTlivox2gnss[11],\n                                vecTlivox2gnss[12], vecTlivox2gnss[13], vecTlivox2gnss[14], vecTlivox2gnss[15];\n  tf::transformEigenToTF(T_livox2gnss_temp, T_livox2gnss);\n\tstd::thread posegraph_slam {process_pg}; // pose graph construction\n\tstd::thread lc_detection {process_lcd}; // loop closure detection \n\tstd::thread icp_calculation {process_icp}; // loop constraint calculation via icp \n\tstd::thread isam_update {process_isam}; // if you want to call less isam2 run (for saving redundant computations and no real-time visualization is required), uncomment this and comment all the above runisam2opt when node is added. \n\n\tstd::thread viz_map {process_viz_map}; // visualization - map (low frequency because it is heavy)\n\tstd::thread viz_path {process_viz_path}; // visualization - path (high frequency)\n \tros::spin();\n  \n\treturn 0;\n}"
  },
  {
    "path": "SC-PGO/utils/moving_object_removal/README.md",
    "content": "# Moving Object Removal\n\nPlease refer to [mmdetection](https://github.com/open-mmlab/mmdetection) for the installation process.\n\nWe just use the simplest projection function to remove the LiDAR points that fall into bounding boxes labeled as movable objects.\n\n| Detection with mmDetection         | Remove with Projection            |\n| ---------------------------------- | --------------------------------- |\n| ![detection](images/detection.png) | ![removal](images/projection.png) |\n"
  },
  {
    "path": "SC-PGO/utils/moving_object_removal/move_object_removal_livox.py",
    "content": "import pcl\nfrom loguru import logger\n\nimport os\nimport sys\nimport time\nimport numpy as np\n\nimport cv2\nimport matplotlib.pyplot as plt\nfrom numba import njit\nfrom numba.cuda import jit\nfrom decimal import Decimal\n\nfrom mmdet.apis import inference_detector\nfrom matplotlib.patches import Polygon\n\nfrom shapely.geometry import Point as shapePoint\nfrom shapely.geometry.polygon import Polygon as shapePolygon\n\nnp.set_printoptions(precision=10)\n\nCLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n           'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n           'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n           'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n           'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n           'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n           'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n           'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n           'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',\n           'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n           'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',\n           'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')\n\nMOVABLE_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'bird', 'cat', 'dog',\n                   'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',)\n\n\ndef set_log_level(level: str = \"TRACE\", save_log: bool = False, log_file_name: str = None) -> None:\n    logger.remove()\n    logger.add(sys.stdout, level=level)\n\n    if save_log:\n        if log_file_name is None:\n            curr_time = time.ctime(time.time())\n        else:\n            
curr_time = log_file_name\n        logger.add(f\"/data/test/{curr_time}.log\", level=level)\n\n\ndef set_calib_parameters():\n    extrinsic = np.array([[0.0265714, -0.99964, 0.00379047, 0.0606143], [-0.00218415, -0.00384985, -0.99999, 0.288651],\n                          [0.999644, 0.0265626, -0.00228567, 0.258449], [0, 0, 0, 1]])\n    intrinsic = np.array([[1023.37, 0, 922.516], [0, 994.267, 556.527], [0, 0, 1]])\n\n    distortion = np.array([[-0.396302, 0.1187, -0.00558465, 0.00159696]])\n\n    return extrinsic, intrinsic, distortion\n\n\ndef init_model(config=None, check_point=None):\n    from mmdet.apis import (init_detector)\n    if config is None:\n        config = '../configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py'\n    if check_point is None:\n        check_point = '../checkpoints/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth'\n\n    model = init_detector(config, check_point, device='cuda:0')\n\n    return model\n\n\n# align the pc_list with image_list\ndef align_file_lists(img_list, pc_list):\n    # image files are full, need to select images according to the odom files\n\n    pc_timestamps = [float(f.split(\"/\")[-1].split(\".pcd\")[0]) for f in pc_list]\n    img_timestamps = [float(s.split(\"/\")[-1].split(\".png\")[0]) for s in img_list]\n\n    selected_img_list = []\n\n    img_idx = 0\n    for i in range(len(pc_list)):\n        cur_img_time = img_timestamps[img_idx]\n        cur_pc_time = pc_timestamps[i]\n\n        while np.abs(cur_img_time - cur_pc_time) > 0.1:\n            if img_idx > len(img_timestamps) - 1:\n                break\n            img_idx += 1\n            cur_img_time = img_timestamps[img_idx]\n\n        selected_img_list.append(img_list[img_idx])\n\n    if len(selected_img_list) != len(pc_list):\n        logger.error(\"The file size is not same!\")\n\n    return selected_img_list, pc_list\n\n\ndef files_saving_path(base_path=None):\n    if base_path is None:\n        base_path = 
\"../demo/inference_result\"\n\n    proj_file_path = os.path.join(base_path, \"proj\")\n    if not os.path.exists(proj_file_path):\n        os.makedirs(proj_file_path)\n\n    detection_img_path = os.path.join(base_path, \"detection\")\n    if not os.path.exists(detection_img_path):\n        os.makedirs(detection_img_path)\n\n    npy_path = os.path.join(base_path, \"npy\")\n    if not os.path.exists(npy_path):\n        os.makedirs(npy_path)\n\n    pcd_path = os.path.join(base_path, \"pcd\")\n    if not os.path.exists(pcd_path):\n        os.makedirs(pcd_path)\n\n    return proj_file_path, detection_img_path, npy_path, pcd_path\n\n\ndef fetch_dataset(data_base, livox_pc_path=None, img_path=None):\n    if livox_pc_path is None:\n        livox_pc_path = os.path.join(data_base, \"pcd\")\n    if img_path is None:\n        img_path = os.path.join(data_base, \"img\")\n\n    if not os.path.exists(livox_pc_path):\n        raise RuntimeError(f\"PCD path {livox_pc_path} does not exist\")\n\n    if not os.path.exists(img_path):\n        raise RuntimeError(f\"Image path {img_path} does not exist\")\n\n    image_list, pc_list = [], []\n    image_list = [os.path.join(img_path, f) for f in os.listdir(img_path) if \".png\" in f]\n    image_list.sort(key=lambda s: float(s.split(\"/\")[-1].split(\".png\")[0]))\n\n    odom_flag = True\n    if odom_flag:\n        pc_list = [os.path.join(livox_pc_path, f) for f in os.listdir(livox_pc_path) if \".pcd\" in f]\n        pc_list.sort(key=lambda s: float(\n            float(s.split(\"/\")[-1].split(\".pcd\")[0].split(\"_\")[0])))\n\n        image_list, pc_list = align_file_lists(image_list, pc_list)\n    else:\n        pc_list = [os.path.join(livox_pc_path, f) for f in os.listdir(livox_pc_path) if \".npy\" in f]\n        pc_list.sort(key=lambda s: float(\n            float(s.split(\"/\")[-1].split(\".npy\")[0])\n        ))\n\n    return pc_list, image_list\n\n\ndef img_undistort(img_ori, intrinsic_param, distort_param):\n    intrinsic_param = 
intrinsic_param[:, :3]\n    iden = np.identity(3)\n    w, h = img_ori.shape[1], img_ori.shape[0]\n\n    mapx, mapy = cv2.initUndistortRectifyMap(intrinsic_param, distort_param, iden, intrinsic_param, (w, h),\n                                             cv2.CV_32FC1)\n    img_dst = cv2.remap(img_ori, mapx, mapy, cv2.INTER_LINEAR)\n\n    return img_dst\n\n\ndef convert_rot_trans(extrinsic):\n    rot_matrix = extrinsic[0:3, 0:3]\n    rot_vec, _ = cv2.Rodrigues(rot_matrix)\n    trans_vec = extrinsic[0:3, 3]\n    return rot_vec, trans_vec\n\n\ndef project_pts_to_img(pts, image, extrinsic, intrinsic, cam_dist, polygons):\n    img_width = image.shape[1]\n    img_height = image.shape[0]\n\n    if len(pts) < 10:\n        logger.debug(\"too few points to project to the image\")\n        return\n\n    if pts.shape[1] < 4:\n        pts_ = np.ones((len(pts), 4))\n        pts_[:, 0:3] = pts\n    else:\n        pts_ = pts\n\n    ration = 0.2\n    min_w = -ration * img_width\n    max_w = (1 + ration) * img_width\n    min_h = - ration * img_height\n    max_h = (1 + ration) * img_height\n    pt_img_ = []\n    pts_filtered = []\n    for pt in pts_:\n        pt_geo = np.copy(pt)\n        pt_geo[3] = 1.0\n        if np.any(np.isnan(pt)):\n            continue\n\n        pt_z = extrinsic[2, 0] * pt_geo[0] + extrinsic[2, 1] * pt_geo[1] + extrinsic[2, 2] * pt_geo[2] + extrinsic[\n            2, 3]\n        if pt_z < 1.0:\n            continue\n        pt_cam = np.dot(extrinsic, pt_geo)\n        pt_img = np.dot(intrinsic, pt_cam[0:3]\n                        / pt_cam[2])\n        if pt_img[0] < min_w or pt_img[0] > max_w:\n            continue\n        if pt_img[1] < min_h or pt_img[1] > max_h:\n            continue\n        pt_img_.append(pt_geo[0:3])\n        pts_filtered.append(pt)\n    if len(pt_img_) < 1:\n        logger.warning(\"no point correspondence found between lidar-image\")\n        return pt_img_, pts_filtered\n\n    rvec, tvec = convert_rot_trans(extrinsic)\n    dis_cam = 
np.asarray(cam_dist)\n    proj_pts, _ = cv2.projectPoints(np.asarray(pt_img_), rvec, tvec,\n                                    np.asarray(intrinsic), dis_cam)\n    proj_pts = proj_pts.reshape(-1, 2)\n\n    inds = np.logical_and(0 < proj_pts[:, 0], proj_pts[:, 0] < img_width)\n    proj_pts = proj_pts[inds, :]\n    pts_filtered = np.asarray(pts_filtered)[inds, :]\n\n    inds = np.logical_and(0 < proj_pts[:, 1], proj_pts[:, 1] < img_height)\n    proj_pts = proj_pts[inds, :]\n    pts_filtered = pts_filtered[inds, :]\n\n    flags = np.ones(len(proj_pts), dtype=bool)\n\n    pts_inside = []\n    pts_lidar_outside = []\n    pts_image_outside = []\n    for poly in polygons:\n        shape_poly = shapePolygon(poly.get_xy())\n        for idx, (pt_lidar, pt_img) in enumerate(zip(pts_filtered, proj_pts)):\n            if not flags[idx]:\n                continue\n            if shape_poly.contains(shapePoint(pt_img[0], pt_img[1])):\n                pts_inside.append(pt_img)\n                flags[idx] = False\n\n    pts_image_outside = proj_pts[flags]\n    pts_lidar_outside = pts_filtered[flags]\n\n    pt_img = np.asarray(pts_image_outside)\n    pt_lidar = np.asarray(pts_lidar_outside)\n    return pt_img, pt_lidar\n\n\ndef img_process(img, model, intrinsic_param, distort_param, visual_flag=False):\n    img = img_undistort(img, intrinsic_param, distort_param)\n    inference_result = inference_detector(model, img)\n\n    return img, inference_result\n\n\ndef projection_visualization(img, pts, save_pth=None):\n    ax = plt.subplot(111)\n    plt.title(\"Projection\")\n    plt.imshow(img)\n    img_width = img.shape[1]\n    img_height = img.shape[0]\n\n    counter = 0\n    counter_pt = 0\n\n    if pts is not None and len(pts) > 0:\n        for pt in pts:\n            if pt[0] < 0 or pt[0] > img_width:\n                continue\n            if pt[1] < 0 or pt[1] > img_height:\n                continue\n            counter += 1\n            if len(pts) > 5000:\n                
skip_rate = 20\n            else:\n                skip_rate = 10\n            if counter % skip_rate == 0:\n                ax.plot(pt[0], pt[1], \"x\", c='b')\n                counter_pt += 1\n            else:\n                continue\n    if save_pth is None:\n        logger.trace(\" number of lidar pts found in the image: %d\" % counter)\n        plt.pause(0.1)\n        plt.show()\n        plt.clf()\n    else:\n        plt.savefig(save_pth, dpi=200)\n        plt.clf()\n\n\ndef enlarge_bbox(bbox_init, img_size, enlarge_step=10):\n    bbox_init[0] = max(bbox_init[0] - enlarge_step, 0)\n    bbox_init[1] = max(bbox_init[1] - enlarge_step, 0)\n    bbox_init[2] = min(bbox_init[2] + enlarge_step, img_size[0])\n    bbox_init[3] = min(bbox_init[3] + enlarge_step, img_size[1])\n    return bbox_init\n\n\ndef extract_inference_result(model, img, result, score_thr=0.6):\n    if hasattr(model, 'module'):\n        model = model.module\n    if isinstance(result, tuple):\n        bbox_result, segm_result = result\n        if isinstance(segm_result, tuple):\n            segm_result = segm_result[0]  # ms rcnn\n    else:\n        bbox_result, segm_result = result, None\n    bboxes = np.vstack(bbox_result)\n    labels = [\n        np.full(bbox.shape[0], i, dtype=np.int32)\n        for i, bbox in enumerate(bbox_result)\n    ]\n    labels = np.concatenate(labels)\n\n    if score_thr > 0:\n        assert bboxes.shape[1] == 5\n        scores = bboxes[:, -1]\n        inds = scores > score_thr\n        bboxes = bboxes[inds, :]\n        labels = labels[inds]\n\n    polygons = []\n    labels_text = []\n    w, h = img.shape[1], img.shape[0]\n    for i, (bbox, label) in enumerate(zip(bboxes, labels)):\n        label_text = CLASSES[label]\n        if label_text not in MOVABLE_CLASSES:\n            continue\n        bbox_int = bbox.astype(np.int32)\n        bbox_int = enlarge_bbox(bbox_int, [w, h], enlarge_step=20)\n        poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],\n   
             [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]\n        np_poly = np.array(poly).reshape((4, 2))\n        polygons.append(Polygon(np_poly))\n        labels_text.append(label_text)\n\n    return polygons, labels_text\n\n\ndef inference_and_remove(pc_list, img_list, inference_model, save_path=None):\n    extrinsic, intrinsic, distortion = set_calib_parameters()\n    proj_folder, det_folder, npy_folder, pcd_folder = files_saving_path(save_path)\n\n    for pc_, img_ in zip(pc_list, img_list):\n        logger.trace(\"The processed pc:  %s \" % pc_)\n        logger.trace(\"The processed img:  %s \" % img_)\n\n        img = cv2.imread(img_)\n        if \".npy\" in pc_:\n            pc = np.load(pc_)\n        else:\n            pc = pcl.load_XYZI(pc_).to_array()\n\n        img_undis, inference_result = img_process(img, inference_model, intrinsic, distortion)\n        polygons, labels_text = extract_inference_result(inference_model, img, inference_result)\n\n        pt_img, pt_lidar = project_pts_to_img(pc, img, extrinsic, intrinsic, distortion, polygons)\n\n        img_name = img_[img_.rfind(\"/\") + 1:]\n        pc_name = pc_[pc_.rfind(\"/\") + 1: pc_.rfind(\".\")] + \".npy\"\n\n        proj_file_pth = os.path.join(proj_folder, img_name)\n        save_img = os.path.join(det_folder, img_name)\n        pcd_path = os.path.join(npy_folder, pc_name)\n        inference_model.show_result(img, inference_result, score_thr=0.6, out_file=save_img)\n        logger.trace(\"Save trimmed NPY to: %s \" % pcd_path)\n        np.save(pcd_path, pt_lidar)\n        projection_visualization(img, pt_img, proj_file_pth)\n\n\ndef euler_to_rotMat(yaw, pitch, roll):\n    Rz_yaw = np.array([\n        [np.cos(yaw), -np.sin(yaw), 0],\n        [np.sin(yaw), np.cos(yaw), 0],\n        [0, 0, 1]])\n    Ry_pitch = np.array([\n        [np.cos(pitch), 0, np.sin(pitch)],\n        [0, 1, 0],\n        [-np.sin(pitch), 0, np.cos(pitch)]])\n    Rx_roll = np.array([\n        [1, 0, 0],\n       
 [0, np.cos(roll), -np.sin(roll)],\n        [0, np.sin(roll), np.cos(roll)]])\n    # R = RzRyRx\n    rotMat = np.dot(Rz_yaw, np.dot(Ry_pitch, Rx_roll))\n    return rotMat\n\ndef main(inference=False):\n    if inference:\n        inference_model = init_model()\n\n    data_base = \"DEMO\"\n    pc_path = os.path.join(data_base, \"pcd_correction\")\n    img_path = os.path.join(data_base, \"img\")\n    save_result_path = os.path.join(data_base, \"result\")\n\n    if inference:\n        pc_list, img_list = fetch_dataset(data_base, livox_pc_path=pc_path, img_path=img_path)\n        inference_and_remove(pc_list, img_list, inference_model, save_result_path)\n\n\nif __name__ == \"__main__\":\n    set_log_level()\n    main()\n"
  },
  {
    "path": "SC-PGO/utils/python/makeMergedMap.py",
    "content": "import os \nimport sys\nimport time \nimport copy \nfrom io import StringIO\n\nimport pypcd # for the install, use this command: python3.x (use your python ver) -m pip install --user git+https://github.com/DanielPollithy/pypcd.git\nfrom pypcd import pypcd\n\nimport numpy as np\nfrom numpy import linalg as LA\n\nimport open3d as o3d\n\nfrom pypcdMyUtils import * \n\njet_table = np.load('jet_table.npy')\nbone_table = np.load('bone_table.npy')\n\ncolor_table = jet_table\ncolor_table_len = color_table.shape[0]\n\n\n##########################\n# User only consider this block\n##########################\n\ndata_dir = \"/home/user/Documents/catkin2021/catkin_fastlio2/data/\" # should end with / \nscan_idx_range_to_stack = [0, 200] # if you want a whole map, use [0, len(scan_files)]\nnode_skip = 1\n\nnum_points_in_a_scan = 150000 # for reservation (save faster) // e.g., use 150000 for 128 ray lidars, 100000 for 64 ray lidars, 30000 for 16 ray lidars, if error occured, use the larger value.\n\nis_live_vis = False # recommend to use false \nis_o3d_vis = True\nintensity_color_max = 200\n\nis_near_removal = True\nthres_near_removal = 2 # meter (to remove platform-myself structure ghost points)\n\n##########################\n\n\n#\nscan_dir = data_dir + \"Scans\"\nscan_files = os.listdir(scan_dir) \nscan_files.sort()\n\nposes = []\nf = open(data_dir+\"optimized_poses.txt\", 'r')\nwhile True:\n    line = f.readline()\n    if not line: break\n    pose_SE3 = np.asarray([float(i) for i in line.split()])\n    pose_SE3 = np.vstack( (np.reshape(pose_SE3, (3, 4)), np.asarray([0,0,0,1])) )\n    poses.append(pose_SE3)\nf.close()\n\n\n#\nassert (scan_idx_range_to_stack[1] > scan_idx_range_to_stack[0])\nprint(\"Merging scans from\", scan_idx_range_to_stack[0], \"to\", scan_idx_range_to_stack[1])\n\n\n#\nif(is_live_vis):\n    vis = o3d.visualization.Visualizer() \n    vis.create_window('Map', visible = True) \n\nnodes_count = 0\npcd_combined_for_vis = 
o3d.geometry.PointCloud()\npcd_combined_for_save = None\n\n# The scans from 000000.pcd should be prepared if it is not used (because below code indexing is designed in a naive way)\n\n# manually reserve memory for fast write  \nnum_all_points_expected = int(num_points_in_a_scan * np.round((scan_idx_range_to_stack[1] - scan_idx_range_to_stack[0])/node_skip))\n\nnp_xyz_all = np.empty([num_all_points_expected, 3])\nnp_intensity_all = np.empty([num_all_points_expected, 1])\ncurr_count = 0\n\nfor node_idx in range(len(scan_files)):\n    if(node_idx < scan_idx_range_to_stack[0] or node_idx >= scan_idx_range_to_stack[1]):\n        continue\n\n    nodes_count = nodes_count + 1\n    if( nodes_count % node_skip is not 0): \n        if(node_idx is not scan_idx_range_to_stack[0]): # to ensure the vis init \n            continue\n\n    print(\"read keyframe scan idx\", node_idx)\n\n    scan_pose = poses[node_idx]\n\n    scan_path = os.path.join(scan_dir, scan_files[node_idx])\n    scan_pcd = o3d.io.read_point_cloud(scan_path)\n    scan_xyz_local = copy.deepcopy(np.asarray(scan_pcd.points))\n\n    scan_pypcd_with_intensity = pypcd.PointCloud.from_path(scan_path)\n    scan_intensity = scan_pypcd_with_intensity.pc_data['intensity']\n    scan_intensity_colors_idx = np.round( (color_table_len-1) * np.minimum( 1, np.maximum(0, scan_intensity / intensity_color_max) ) )\n    scan_intensity_colors = color_table[scan_intensity_colors_idx.astype(int)]\n\n    scan_pcd_global = scan_pcd.transform(scan_pose) # global coord, note that this is not deepcopy\n    scan_pcd_global.colors = o3d.utility.Vector3dVector(scan_intensity_colors)\n    scan_xyz = np.asarray(scan_pcd_global.points)\n\n    scan_intensity = np.expand_dims(scan_intensity, axis=1) \n    scan_ranges = LA.norm(scan_xyz_local, axis=1)\n\n    if(is_near_removal):\n        eff_idxes = np.where (scan_ranges > thres_near_removal)\n        scan_xyz = scan_xyz[eff_idxes[0], :]\n        scan_intensity = scan_intensity[eff_idxes[0], 
:]\n\n        scan_pcd_global = scan_pcd_global.select_by_index(eff_idxes[0])\n\n    if(is_o3d_vis):\n        pcd_combined_for_vis += scan_pcd_global # open3d pointcloud class append is fast \n\n    if is_live_vis:\n        if(node_idx is scan_idx_range_to_stack[0]): # to ensure the vis init \n            vis.add_geometry(pcd_combined_for_vis) \n\n        vis.update_geometry(pcd_combined_for_vis)\n        vis.poll_events()\n        vis.update_renderer()\n\n    # save \n    np_xyz_all[curr_count:curr_count + scan_xyz.shape[0], :] = scan_xyz\n    np_intensity_all[curr_count:curr_count + scan_xyz.shape[0], :] = scan_intensity\n\n    curr_count = curr_count + scan_xyz.shape[0]\n    print(curr_count)\n \n#\nif(is_o3d_vis):\n    print(\"draw the merged map.\")\n    o3d.visualization.draw_geometries([pcd_combined_for_vis])\n\n\n# save ply having intensity\nnp_xyz_all = np_xyz_all[0:curr_count, :]\nnp_intensity_all = np_intensity_all[0:curr_count, :]\n\nnp_xyzi_all = np.hstack( (np_xyz_all, np_intensity_all) )\nxyzi = make_xyzi_point_cloud(np_xyzi_all)\n\nmap_name = data_dir + \"map_\" + str(scan_idx_range_to_stack[0]) + \"_to_\" + str(scan_idx_range_to_stack[1]) + \"_with_intensity.pcd\"\nxyzi.save_pcd(map_name, compression='binary_compressed')\nprint(\"intensity map is save (path:\", map_name, \")\")\n\n# save rgb colored points \n# map_name = data_dir + \"map_\" + str(scan_idx_range_to_stack[0]) + \"_to_\" + str(scan_idx_range_to_stack[1]) + \".pcd\"\n# o3d.io.write_point_cloud(map_name, pcd_combined_for_vis)\n# print(\"the map is save (path:\", map_name, \")\")\n\n\n"
  },
  {
    "path": "SC-PGO/utils/python/pypcdMyUtils.py",
    "content": "import numpy as np\nimport pypcd # for the install, use this command: python3.x (use your python ver) -m pip install --user git+https://github.com/DanielPollithy/pypcd.git\nfrom pypcd import pypcd\n\ndef make_xyzi_point_cloud(xyzl, label_type='f'):\n    \"\"\" Make XYZL point cloud from numpy array.\n    TODO i labels?\n    \"\"\"\n    md = {'version': .7,\n          'fields': ['x', 'y', 'z', 'intensity'],\n          'count': [1, 1, 1, 1],\n          'width': len(xyzl),\n          'height': 1,\n          'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],\n          'points': len(xyzl),\n          'data': 'ASCII'}\n    if label_type.lower() == 'f':\n        md['size'] = [4, 4, 4, 4]\n        md['type'] = ['F', 'F', 'F', 'F']\n    elif label_type.lower() == 'u':\n        md['size'] = [4, 4, 4, 1]\n        md['type'] = ['F', 'F', 'F', 'U']\n    else:\n        raise ValueError('label type must be F or U')\n    # TODO use .view()\n    xyzl = xyzl.astype(np.float32)\n    dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32),\n                   ('intensity', np.float32)])\n    pc_data = np.rec.fromarrays([xyzl[:, 0], xyzl[:, 1], xyzl[:, 2],\n                                 xyzl[:, 3]], dtype=dt)\n    pc = pypcd.PointCloud(md, pc_data)\n    return pc\n"
  },
  {
    "path": "ad_localization_msgs/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.0.2)\nproject(ad_localization_msgs)\n\n## Compile as C++11, supported in ROS Kinetic and newer\n# add_compile_options(-std=c++11)\n\nfind_package(catkin REQUIRED COMPONENTS roscpp rospy message_generation std_msgs geometry_msgs)\n\n## Generate messages in the 'msg' folder\nadd_message_files(\n  FILES\n  NavStateInfo.msg\n)\n\n## Generate added messages and services with any dependencies listed here\ngenerate_messages(\n  DEPENDENCIES\n  std_msgs  # Or other packages containing msgs\n  geometry_msgs\n)\n\ncatkin_package(\n    CATKIN_DEPENDS message_runtime\n)"
  },
  {
    "path": "ad_localization_msgs/msg/NavStateInfo.msg",
    "content": "# Header includes seq, timestamp(last node pub time), and frame_id(sensor model)\nstd_msgs/Header header\n\n# The time of pose measurement, in nanoseconds.\nuint64 measurement_time_ns\n\n# Report localization status\n# when INVALID or CONVERGING, localization result is not reliable and should not be used.\nstring status_info\n\n# For debug\nstring debug_info\n\n# Position of the Vehicle Reference Point(VRP) in the Map reference frame(East/North/Up).\n# The VRP is the car center(Forward/Left/Up).\n# x for East, y for North, z for Up Height, in meters.\ngeometry_msgs/Point position_enu\n# Position uncertainty(standard deviation in meters)\ngeometry_msgs/Point position_enu_std_dev\n\n# Position of the Vehicle Reference Point(VRP) in the WGS84 reference ellipsoid coordinate system.\n# longitude in degrees, ranging from -180 to 180.\n# latitude in degrees, ranging from -90 to 90.\n# ellipsoid height in meters.\nfloat64 longitude\nfloat64 latitude\nfloat64 altitude\n\n# Attitude in euler angle form to describe the orientation of a VRP frame with respect to a Map reference frame(ENU).\n# The roll, in (-pi/2, pi/2), corresponds to a rotation around the Vehicle Forward-axis.\n# The pitch, in [-pi, pi), corresponds to a rotation around the Vehicle Left-axis.\n# The yaw, in [-pi, pi), corresponds to a rotation around the Vehicle Up-axis.\n# The yaw is the angle of the Vehicle head w.r.t the ENU reference frame, East = 0, North = pi/2, anti-clockwise in radians.\n# The direction of rotation follows the right-hand rule.\nfloat64 roll\nfloat64 pitch\nfloat64 yaw\n\n# Attitude uncertainty(standard deviation in radians)\ngeometry_msgs/Point attitude_std_dev\n\n# Linear velocity of the VRP in the Vehicle reference frame\n# x for Forward, y for Left, z for Up, in meters per second\ngeometry_msgs/Vector3 linear_velocity\n\n# Linear velocity of the VRP in the ENU reference frame\n# x for East, y for North, z for Up, in meters per second\ngeometry_msgs/Vector3 
linear_velocity_global\n\n# Linear acceleration of the VRP in the Vehicle reference frame\n# x for Forward, y for Left, z for Up, in meters per second squared\ngeometry_msgs/Vector3 linear_acceleration\n\n# Linear acceleration of the VRP in the ENU reference frame\n# x for East, y for North, z for Up, in meters per second squared\ngeometry_msgs/Vector3 linear_acceleration_global\n\n# Angular velocity of the VRP in the Vehicle reference frame\n# x across Forward axis, y across Left axis,\n# z across Up axis, in radians per second\ngeometry_msgs/Point angular_velocity\n"
  },
  {
    "path": "ad_localization_msgs/package.xml",
    "content": "<?xml version=\"1.0\"?>\n<package>\n  <name>ad_localization_msgs</name>\n  <version>0.0.0</version>\n  <description>The ad_localization_msgs package</description>\n\n  <maintainer email=\"root@todo.todo\">root</maintainer>\n\n  <license>TODO</license>\n\n  <buildtool_depend>catkin</buildtool_depend>\n  <build_depend>message_generation</build_depend>\n  <build_depend>geometry_msgs</build_depend>\n  <build_depend>roscpp</build_depend>\n  <build_depend>rospy</build_depend>\n  <build_depend>std_msgs</build_depend>\n\n  <run_depend>message_runtime</run_depend>\n  <run_depend>geometry_msgs</run_depend>\n  <run_depend>roscpp</run_depend>\n  <run_depend>rospy</run_depend>\n  <run_depend>std_msgs</run_depend>\n\n  <!-- The export tag contains other, unspecified, tags -->\n  <export>\n    <!-- Other tools can request additional information be placed here -->\n\n  </export>\n</package>\n"
  }
]