[
  {
    "path": ".clang-format",
    "content": "---\nLanguage:        Cpp\n# BasedOnStyle:  Google\nAccessModifierOffset: -1\nAlignAfterOpenBracket: Align\nAlignConsecutiveAssignments: true\nAlignConsecutiveDeclarations: false\nAlignEscapedNewlines: Left\nAlignOperands:   true\nAlignTrailingComments: true\nAllowAllParametersOfDeclarationOnNextLine: true\nAllowShortBlocksOnASingleLine: false\nAllowShortCaseLabelsOnASingleLine: false\nAllowShortFunctionsOnASingleLine: All\nAllowShortIfStatementsOnASingleLine: true\nAllowShortLoopsOnASingleLine: true\nAlwaysBreakAfterDefinitionReturnType: None\nAlwaysBreakAfterReturnType: None\nAlwaysBreakBeforeMultilineStrings: true\nAlwaysBreakTemplateDeclarations: true\nBinPackArguments: true\nBinPackParameters: true\nBraceWrapping:   \n  AfterClass:      false\n  AfterControlStatement: false\n  AfterEnum:       false\n  AfterFunction:   false\n  AfterNamespace:  false\n  AfterObjCDeclaration: false\n  AfterStruct:     false\n  AfterUnion:      false\n  AfterExternBlock: false\n  BeforeCatch:     false\n  BeforeElse:      false\n  IndentBraces:    false\n  SplitEmptyFunction: true\n  SplitEmptyRecord: true\n  SplitEmptyNamespace: true\nBreakBeforeBinaryOperators: None\nBreakBeforeBraces: Attach\nBreakBeforeInheritanceComma: false\nBreakBeforeTernaryOperators: true\nBreakConstructorInitializersBeforeComma: false\nBreakConstructorInitializers: BeforeColon\nBreakAfterJavaFieldAnnotations: false\nBreakStringLiterals: true\nColumnLimit:     0\nCommentPragmas:  '^ IWYU pragma:'\nCompactNamespaces: false\nConstructorInitializerAllOnOneLineOrOnePerLine: true\nConstructorInitializerIndentWidth: 4\nContinuationIndentWidth: 4\nCpp11BracedListStyle: true\nDerivePointerAlignment: true\nDisableFormat:   false\nExperimentalAutoDetectBinPacking: false\nFixNamespaceComments: true\nForEachMacros:   \n  - foreach\n  - Q_FOREACH\n  - BOOST_FOREACH\nIncludeBlocks:   Preserve\nIncludeCategories: \n  - Regex:           '^<ext/.*\\.h>'\n    Priority:        2\n  - Regex:           
'^<.*\\.h>'\n    Priority:        1\n  - Regex:           '^<.*'\n    Priority:        2\n  - Regex:           '.*'\n    Priority:        3\nIncludeIsMainRegex: '([-_](test|unittest))?$'\nIndentCaseLabels: true\nIndentPPDirectives: None\nIndentWidth:     2\nIndentWrappedFunctionNames: false\nJavaScriptQuotes: Leave\nJavaScriptWrapImports: true\nKeepEmptyLinesAtTheStartOfBlocks: false\nMacroBlockBegin: ''\nMacroBlockEnd:   ''\nMaxEmptyLinesToKeep: 1\nNamespaceIndentation: None\nObjCBlockIndentWidth: 2\nObjCSpaceAfterProperty: false\nObjCSpaceBeforeProtocolList: false\nPenaltyBreakAssignment: 2\nPenaltyBreakBeforeFirstCallParameter: 1\nPenaltyBreakComment: 300\nPenaltyBreakFirstLessLess: 120\nPenaltyBreakString: 1000\nPenaltyExcessCharacter: 1000000\nPenaltyReturnTypeOnItsOwnLine: 200\nPointerAlignment: Left\nRawStringFormats: \n  - Delimiters:       [pb]\n    Language:        TextProto\n    BasedOnStyle:    google\nReflowComments:  true\nSortIncludes:    true\nSortUsingDeclarations: true\nSpaceAfterCStyleCast: false\nSpaceAfterTemplateKeyword: true\nSpaceBeforeAssignmentOperators: true\nSpaceBeforeParens: ControlStatements\nSpaceInEmptyParentheses: false\nSpacesBeforeTrailingComments: 2\nSpacesInAngles:  false\nSpacesInContainerLiterals: true\nSpacesInCStyleCastParentheses: false\nSpacesInParentheses: false\nSpacesInSquareBrackets: false\nStandard:        Auto\nTabWidth:        8\nUseTab:          Never\n...\n\n"
  },
  {
    "path": ".github/stale.yml",
    "content": "# Number of days of inactivity before an issue becomes stale\ndaysUntilStale: 14\n# Number of days of inactivity before a stale issue is closed\ndaysUntilClose: 1\n# Issues with these labels will never be considered stale\nexemptLabels:\n  - pinned\n  - security\n# Label to use when marking an issue as stale\nstaleLabel: stale\n# Comment to post when marking an issue as stale. Set to `false` to disable\nmarkComment: >\n  This issue has been automatically marked as stale because it has not had\n  recent activity. It will be closed if no further activity occurs. Thank you\n  for your contributions.\n# Comment to post when closing a stale issue. Set to `false` to disable\ncloseComment: false\n"
  },
  {
    "path": ".gitignore",
    "content": "devel/\nlogs/\nbuild/\nbin/\nlib/\nmsg_gen/\nsrv_gen/\nmsg/*Action.msg\nmsg/*ActionFeedback.msg\nmsg/*ActionGoal.msg\nmsg/*ActionResult.msg\nmsg/*Feedback.msg\nmsg/*Goal.msg\nmsg/*Result.msg\nmsg/_*.py\nbuild_isolated/\ndevel_isolated/\n\n# Generated by dynamic reconfigure\n*.cfgc\n/cfg/cpp/\n/cfg/*.py\n\n# Ignore generated docs\n*.dox\n*.wikidoc\n\n# eclipse stuff\n.project\n.cproject\n\n# qcreator stuff\nCMakeLists.txt.user\n\nsrv/_*.py\n*.pcd\n*.pyc\nqtcreator-*\n*.user\n\n/planning/cfg\n/planning/docs\n/planning/src\n\n*~\n\n# Emacs\n.#*\n\n# Catkin custom files\nCATKIN_IGNORE\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version 
control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n\n# VSCode\n.vscode"
  },
  {
    "path": "CITATION.bib",
    "content": "@inproceedings{lin2023lio-segmot,\n  title={Asynchronous State Estimation of Simultaneous Ego-motion Estimation and Multiple Object Tracking for LiDAR-Inertial Odometry},\n  author={Lin, Yu-Kai and Lin, Wen-Chieh and Wang, Chieh-Chih},\n  booktitle = {2023 International Conference on Robotics and Automation, {ICRA} 2023,\n               London, UK, May 2023},\n  pages     = {10616--10622},\n  year      = {2023},\n}"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.8.3)\nproject(lio_segmot)\n\nset(CMAKE_BUILD_TYPE \"Release\")\nset(CMAKE_CXX_FLAGS \"-std=c++11\")\nset(CMAKE_CXX_FLAGS_RELEASE \"-O3 -Wall -g0 -pthread\")\n\nfind_package(catkin REQUIRED COMPONENTS\n  tf\n  roscpp\n  rospy\n  cv_bridge\n  rosbag\n  # pcl library\n  pcl_conversions\n  # msgs\n  std_msgs\n  sensor_msgs\n  geometry_msgs\n  nav_msgs\n  message_generation\n  visualization_msgs\n  jsk_recognition_msgs\n  jsk_topic_tools\n)\n\nfind_package(OpenMP REQUIRED)\nfind_package(PCL REQUIRED QUIET)\nfind_package(OpenCV REQUIRED QUIET)\nfind_package(GTSAM REQUIRED QUIET)\n\nadd_message_files(\n  DIRECTORY msg\n  FILES\n  cloud_info.msg\n  ObjectState.msg\n  ObjectStateArray.msg\n  Diagnosis.msg\n  flags.msg\n)\n\nadd_service_files(\n  DIRECTORY srv\n  FILES\n  save_map.srv\n  detection.srv\n  save_estimation_result.srv\n)\n\ngenerate_messages(\n  DEPENDENCIES\n  geometry_msgs\n  std_msgs\n  nav_msgs\n  sensor_msgs\n  jsk_recognition_msgs\n)\n\ncatkin_package(\n  INCLUDE_DIRS include\n  DEPENDS PCL GTSAM\n\n  CATKIN_DEPENDS\n  std_msgs\n  nav_msgs\n  geometry_msgs\n  sensor_msgs\n  message_runtime\n  message_generation\n  visualization_msgs\n  jsk_recognition_msgs\n  jsk_topic_tools\n)\n\n# include directories\ninclude_directories(\n\tinclude\n\t${catkin_INCLUDE_DIRS}\n\t${PCL_INCLUDE_DIRS}\n  ${OpenCV_INCLUDE_DIRS}\n\t${GTSAM_INCLUDE_DIR}\n)\n\n# link directories\nlink_directories(\n\tinclude\n\t${PCL_LIBRARY_DIRS}\n  ${OpenCV_LIBRARY_DIRS}\n  ${GTSAM_LIBRARY_DIRS}\n)\n\n###########\n## Build ##\n###########\n\n# Range Image Projection\nadd_executable(${PROJECT_NAME}_imageProjection src/imageProjection.cpp)\nadd_dependencies(${PROJECT_NAME}_imageProjection ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp)\ntarget_link_libraries(${PROJECT_NAME}_imageProjection ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES})\n\n# Feature Association\nadd_executable(${PROJECT_NAME}_featureExtraction 
src/featureExtraction.cpp)\nadd_dependencies(${PROJECT_NAME}_featureExtraction ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp)\ntarget_link_libraries(${PROJECT_NAME}_featureExtraction ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES})\n\n# Mapping Optimization\nadd_executable(${PROJECT_NAME}_mapOptimization src/mapOptimization.cpp src/factor.cpp src/solver.cpp)\nadd_dependencies(${PROJECT_NAME}_mapOptimization ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp)\ntarget_compile_options(${PROJECT_NAME}_mapOptimization PRIVATE ${OpenMP_CXX_FLAGS})\ntarget_link_libraries(${PROJECT_NAME}_mapOptimization ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ${OpenMP_CXX_FLAGS} gtsam)\n\n# IMU Preintegration\nadd_executable(${PROJECT_NAME}_imuPreintegration src/imuPreintegration.cpp)\ntarget_link_libraries(${PROJECT_NAME}_imuPreintegration ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} gtsam)\n\n# Offline ROSBag Player\nadd_executable(${PROJECT_NAME}_offlineBagPlayer src/offlineBagPlayer.cpp)\ntarget_link_libraries(${PROJECT_NAME}_offlineBagPlayer ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} gtsam)\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2020, Tixiao Shan\nCopyright (c) 2022-2023, Yu-Kai Lin\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n- Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n- Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "# LIO-SEGMOT\n\nThe official implementation of LIO-SEGMOT (**L**iDAR-**I**nertial **O**dometry\nvia **S**imultaneous **Eg**o-motion Estimation and **M**ultiple **O**bject\n**T**racking), an optimization-based odometry approach targeted for dynamic\nenvironments. LIO-SEGMOT can provide continuous object tracking results while\npreserving the keyframe selection mechanism in the odometry system. This work is\naccepted for publication in [ICRA 2023](https://www.icra2023.org/).\n\n![TEASER_GIF](./docs/_static/images/LIO-SEGMOT_KITTI_teaser.GIF)\n\nYou can check out [our video](https://youtu.be/5HtnDFPerVo) to understand the\nmain idea of LIO-SEGMOT. For more, please refer to our paper:\n\n- Yu-Kai Lin, Wen-Chieh Lin, Chieh-Chih Wang, **Asynchronous State Estimation of Simultaneous Ego-motion Estimation and Multiple Object Tracking for LiDAR-Inertial Odometry**. _2023 International Conference on Robotics and Automation (ICRA)_, pp. 10616--10622, May 2023. ([paper](https://doi.org/10.1109/ICRA48891.2023.10161269)) ([preprint](https://gpl.cs.nctu.edu.tw/Steve-Lin/LIO-SEGMOT/preprint.pdf)) ([code](https://github.com/StephLin/LIO-SEGMOT)) ([video](https://youtu.be/5HtnDFPerVo))\n\nIf you use this project in your research, please cite:\n\n```bibtex\n@article{lin2023lio-segmot,\n  title={Asynchronous State Estimation of Simultaneous Ego-motion Estimation and Multiple Object Tracking for LiDAR-Inertial Odometry},\n  author={Lin, Yu-Kai and Lin, Wen-Chieh and Wang, Chieh-Chih},\n  booktitle = {2023 International Conference on Robotics and Automation, {ICRA} 2023,\n               London, UK, May 2023},\n  pages     = {10616--10622},\n  year      = {2023},\n}\n```\n\n- [:newspaper: Poster](#newspaper-poster)\n- [:gear: Installation](#gear-installation)\n  - [Step 1. Preparing the Dependencies](#step-1-preparing-the-dependencies)\n  - [Step 2. Building the LIO-SEGMOT Project](#step-2-building-the-lio-segmot-project)\n  - [Step 3. 
Preparing Object Detection Services](#step-3-preparing-object-detection-services)\n- [:card_file_box: Sample Datasets](#card_file_box-sample-datasets)\n- [:running_man: Run](#running_man-run)\n- [:wheelchair: Services of LIO-SEGMOT](#wheelchair-services-of-lio-segmot)\n  - [`/lio_segmot/save_map`](#lio_segmotsave_map)\n  - [`/lio_segmot/save_estimation_result`](#lio_segmotsave_estimation_result)\n- [:memo: Remarks](#memo-remarks)\n  - [Hyperparameters](#hyperparameters)\n    - [Hierarchical Criterion](#hierarchical-criterion)\n    - [Factor Graph Optimization](#factor-graph-optimization)\n    - [High-speed Moving Object Supports in Early Steps](#high-speed-moving-object-supports-in-early-steps)\n    - [Lifecycle Management of Tracking Objects](#lifecycle-management-of-tracking-objects)\n  - [Limitations of LIO-SEGMOT](#limitations-of-lio-segmot)\n  - [Possible Future Research Directions](#possible-future-research-directions)\n- [:gift: Acknowledgement](#gift-acknowledgement)\n\n## :newspaper: Poster\n\n![Poster](./docs/_static/images/poster.png)\n\n## :gear: Installation\n\nThe project is originally developed in **Ubuntu 18.04**, and the following\ninstruction supposes that you are using Ubuntu 18.04 as well. I am not sure if\nit also works with other Ubuntu versions or other Linux distributions, but maybe\nyou can give it a try :+1:\n\nAlso, please feel free to open an issue if you encounter any problems of the\nfollowing instruction.\n\n### Step 1. Preparing the Dependencies\n\nPlease prepare the following packages or libraries used in LIO-SEGMOT:\n\n1. 
[ROS Melodic](http://wiki.ros.org/melodic/Installation/Ubuntu) (w/ Desktop-Full\n   Install) and the following dependencies:\n   ```bash\n   #!/bin/bash\n   sudo apt update\n   sudo apt install -y \"ros-${ROS_DISTRO}-navigation\" \\\n                       \"ros-${ROS_DISTRO}-robot-localization\" \\\n                       \"ros-${ROS_DISTRO}-robot-state-publisher\" \\\n                       \"ros-${ROS_DISTRO}-jsk-recognition-msgs\" \\\n                       \"ros-${ROS_DISTRO}-jsk-rviz-plugins\"\n   ```\n2. [gtsam 4.0.3](https://github.com/borglab/gtsam/tree/4.0.3)\n   ```bash\n   #!/bin/bash\n   cd ~\n   git clone -b 4.0.3 https://github.com/borglab/gtsam && cd gtsam\n   mkdir build && cd build\n   cmake ..\n   sudo make install\n   ```\n\n### Step 2. Building the LIO-SEGMOT Project\n\nYou can use the following command to build the project:\n\n```bash\n#!/bin/bash\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nmkdir -p ~/catkin_ws/src\ncd ~/catkin_ws/src\ngit clone git@github.com:StephLin/LIO-SEGMOT.git\ncd ..\ncatkin_make\n```\n\n### Step 3. 
Preparing Object Detection Services\n\nWe provide two object detection services for LIO-SEGMOT:\n\n- [StephLin/SE-SSD-ROS (Apache-2.0 license)](https://github.com/StephLin/SE-SSD-ROS)\n  (based on [Vegeta2020/SE-SSD](https://github.com/Vegeta2020/SE-SSD))\n- [StephLin/livox_detection_lio_segmot (GPL-3.0 license)](https://github.com/StephLin/livox_detection_lio_segmot)\n  (based on [Livox-SDK/livox_detection](https://github.com/Livox-SDK/livox_detection))\n\nPlease refer to their installation instructions accordingly.\n\n## :card_file_box: Sample Datasets\n\nWe provide the following pre-built bag files for KITTI raw sequences and the Hsinchu\ndataset (GuangfuRoad sequence):\n\n| Dataset | Sequence    | Bag File                                             | Ground Truth Trajectory                              |\n| ------- | ----------- | ---------------------------------------------------- | ---------------------------------------------------- |\n| KITTI   | 0926-0009   | [bag](http://140.113.150.180:5000/sharing/BrHtqyElq) | [tum](http://140.113.150.180:5000/sharing/HGebyDSCR) |\n| KITTI   | 0926-0013   | [bag](http://140.113.150.180:5000/sharing/AQQa3kMSH) | [tum](http://140.113.150.180:5000/sharing/gREpr4xdI) |\n| KITTI   | 0926-0014   | [bag](http://140.113.150.180:5000/sharing/HOgV5T79H) | [tum](http://140.113.150.180:5000/sharing/POuFRJBQI) |\n| KITTI   | 0926-0015   | [bag](http://140.113.150.180:5000/sharing/XnoNLKSUQ) | [tum](http://140.113.150.180:5000/sharing/RBw1BeftU) |\n| KITTI   | 0926-0032   | [bag](http://140.113.150.180:5000/sharing/ikbtkpWve) | [tum](http://140.113.150.180:5000/sharing/aQdaEnVjc) |\n| KITTI   | 0926-0051   | [bag](http://140.113.150.180:5000/sharing/N1o9NcgU4) | [tum](http://140.113.150.180:5000/sharing/Wzu8QWoEC) |\n| KITTI   | 0926-0101   | [bag](http://140.113.150.180:5000/sharing/lhhohwfJT) | [tum](http://140.113.150.180:5000/sharing/JCOaJHw04) |\n| Hsinchu | GuangfuRoad | [bag](http://gofile.me/56KxB/mz6HOYOMG)              | 
[tum](http://gofile.me/56KxB/t5smaOAZk)              |\n\nIf you cannot download sample datasets from above links, please refer to\n[this alternative link (Google Drive)](https://drive.google.com/drive/folders/17QwfTMCv-2XdgOLxGdrnCwybcSYQhs7S?usp=drive_link).\n\nGround truth robot trajectories (based on GPS data provided by KITTI) are stored\nas [the TUM format](https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats#ground-truth_trajectories).\nEach row has 8 components containing timestamps (sec), xyz-position (meter), and\nxyzw-orientation (quaternion):\n\n```\ntimestamp x y z qx qy qz qw\n```\n\n## :running_man: Run\n\nPlease follow the steps to execute LIO-SEGMOT properly:\n\n1. (Optional) Launch the ROS core:\n\n   ```bash\n   roscore\n   ```\n\n2. Launch the core LIO-SEGMOT service:\n\n   ```bash\n   #!/bin/bash\n   # Please select one of the following configs to launch the service properly:\n   # 1. With KITTI configuration\n   roslaunch lio_segmot run_kitti.launch\n\n   # 2. With Hsinchu configuration\n   roslaunch lio_segmot run_hsinchu.launch\n\n   # 3. Undefined (same as KITTI configuration)\n   roslaunch lio_segmot run.launch\n   ```\n\n3. Launch the selected object detection service:\n\n   ```bash\n   #!/bin/bash\n   # SE-SSD-ROS & livox_detection_lio_segmot\n   # Please check their documentation to see how they are launched\n   ```\n\n4. Start the customized ROS bag player:\n\n   ```bash\n   #!/bin/bash\n   rosrun lio_segmot lio_segmot_offlineBagPlayer _bag_filename:=\"path/to/your/sequence.bag\"\n   ```\n\n   The default registered LiDAR and IMU topics are `/points_raw` and `/imu_raw`,\n   respectively. If you want to register other LiDAR/IMU topics, please add\n   additional options `_lidar_topic` and `_imu_topic`. 
For example, if you are\n   using the GuangfuRoad sequence (`/velodyne_points` and `/imu/data` for LiDAR\n   and IMU topics, respectively):\n\n   ```bash\n   rosrun lio_segmot lio_segmot_offlineBagPlayer _bag_filename:=\"GuangfuRoad-06-13.bag\" \\\n                                                 _lidar_topic:=\"/velodyne_points\" \\\n                                                 _imu_topic:=\"/imu/data\"\n   ```\n\n## :wheelchair: Services of LIO-SEGMOT\n\n### `/lio_segmot/save_map`\n\n```txt\nUsage: rosservice call /lio_segmot/save_map [RESOLUTION] [OUTPUT_DIR]\nExample: rosservice call /lio_segmot/save_map 0.2 /path/to/a/directory/\n```\n\nThis service saves LiDAR map to the local machine.\n\n### `/lio_segmot/save_estimation_result`\n\n```txt\nUsage: rosservice call /lio_segmot/save_estimation_result\n```\n\nThis service outputs current estimation results including\n\n- `nav_msgs::Path robotTrajectory`: The robot trajectory\n- $\\color{gray}\\textsf{(INTERNAL USE)}$ `nav_msgs::Path[] objectTrajectories`: Trajectories for each object (indexed by the factor graph)\n- $\\color{gray}\\textsf{(INTERNAL USE)}$ `nav_msgs::Path[] objectVelocities`: Linear and angular velocities for each object (indexed by the factor graph)\n- `nav_msgs::Path[] trackingObjectTrajectories`: Trajectories for each object (indexed by LIO-SEGMOT)\n- `nav_msgs::Path[] trackingObjectVelocities`: Linear and angular velocities for each object (indexed by LIO-SEGMOT)\n- `lio_segmot::ObjectStateArray[] trackingObjectStates`: States for each object during its lifetime (indexed by LIO-SEGMOT)\n- $\\color{gray}\\textsf{(INTERNAL USE)}$ `lio_segmot::flags[] objectFlags`: Flags for each object during its lifetime (indexed by the factor graph)\n- `lio_segmot::flags[] trackingObjectFlags`: Flags for each object during its lifetime (indexed by LIO-SEGMOT)\n\nin which custom types `lio_segmot::ObjectStateArray` (underlying `lio_segmot::ObjectState`) and `lio_segmot::flags` are given by\n\n- 
`lio_segmot::ObjectStateArray`\n\n  ```cpp\n  Header header\n  lio_segmot::ObjectState[] objects\n  ```\n\n- `lio_segmot::ObjectState`\n\n  ```cpp\n  Header header\n\n  // The corresponding detection (measurement)\n  jsk_recognition_msgs::BoundingBox detection\n\n  // States of object pose and velocity in the factor graph\n  geometry_msgs::Pose pose\n  geometry_msgs::Pose velocity\n\n  // Residual and innovation of the tightly-coupled detection factor\n  bool hasTightlyCoupledDetectionError\n  float64 tightlyCoupledDetectionError         // Residual\n  float64 initialTightlyCoupledDetectionError  // Innovation\n\n  // Residual and innovation of the loosely-coupled detection factor\n  bool hasLooselyCoupledDetectionError\n  float64 looselyCoupledDetectionError         // Residual\n  float64 initialLooselyCoupledDetectionError  // Innovation\n\n  // Residual and innovation of the smooth movement factor\n  bool hasMotionError\n  float64 motionError         // Residual\n  float64 initialMotionError  // Innovation\n\n  int32 index            // Object index\n  int32 lostCount        // Counter of losing detections\n  float64 confidence     // Detection's confidence score (given by detection methods)\n  bool isTightlyCoupled  // Is the object tightly-coupled at this moment?\n  bool isFirst           // Is the object just initialized at this moment?\n  ```\n\n- `lio_segmot/flags`\n\n  ```cpp\n  // Flags of the object in its lifetime\n  int32[] flags  // 1: the object is tightly-coupled\n                 // 0: the object is loosely-coupled\n  ```\n\n## :memo: Remarks\n\n### Hyperparameters\n\nThere are various covariance matrices designed for the hierarchical criterion\n(innovation filtering) and factor graph optimization (increments of LIO-SEGMOT\nw.r.t. LIO-SAM) in LIO-SEGMOT. 
This section is going to explain them.\n\n> All covariance matrices in settings are expressed as diagonal vectors.\n\nTaking [the KITTI configuration](./config/params_kitti.yaml) for example, we have the following settings:\n\n#### Hierarchical Criterion\n\nThis section collects settings for the hierarchical criterion. It can be viewed\nas a kind of innovation filtering. In brief, the criterion is designed to\nprogressively make the following decisions when a new detection\n$\\boldsymbol{z}\\in SE(3)$ is coming into the system:\n\n| ID       | Description                                                                 |\n| -------- | --------------------------------------------------------------------------- |\n| **(Q1)** | Does the detection belong to any existing object $\\boldsymbol{x}_{t,i}$?    |\n| **(Q2)** | If Q1 holds, does $\\boldsymbol{z}$ follow the $i$-th object's motion?       |\n| **(Q3)** | If Q1 and Q2 hold, should the tightly-coupled detection factor be applied?  |\n\nThe first two questions **(Q1)** and **(Q2)** are determined by using the\nMahalanobis distance of the error vector,\n\n$$\n\\Big\\Vert\\text{ detection error of }\\boldsymbol{z}\\text{ and the }i\\text{-th object } \\boldsymbol{x} _{t,i}\\text{ }\\Big\\Vert _{\\Sigma}\n\\leq \\varepsilon,\n$$\n\nwith given covariance matrices\n$\\Sigma\\in\\{\\Sigma_ {\\text{Q}_ 1},\\Sigma_ {\\text{Q}_ 2}\\}\\subsetneq\\mathbb{R}^{6\\times 6}$\nand a threshold $\\varepsilon>0$. 
We assume that\n$\\Sigma_ {\\text{Q}_ 2}-\\Sigma_ {\\text{Q}_ 1}$\nis positive semidefinite (PSD), i.e.,\n$\\Sigma_ {\\text{Q}_ 2}-\\Sigma_{\\text{Q}_ 1} \\succeq 0$,\nto prevent ambiguity of the hierarchical criterion that **(Q2)** holds but\n**(Q1)** does not hold.\n\nTwo spatial information-based tests are conducted to determine **(Q3)**, which\nare the detection constraint and the velocity constraint:\n\n- **(Detection Constraint)** The above equation holds with another given\n  covariance matrix\n  $\\Sigma_ {\\text{Q}_ {3,1}}$\n  that satisfies\n  $\\Sigma_ {\\text{Q}_ {3,1}}-\\Sigma_ {\\text{Q}_ {2}} \\succeq 0$.\n\n- **(Velocity constraint)** The variance of velocities in previous steps is\n  small enough. That is,\n\n  $$\\frac{1}{N}\\sum_ {s=1}^{N} \\Big\\Vert \\text{Log}(\\boldsymbol{v}_ {t-s,i}) - \\text{Log}(\\bar{\\boldsymbol{v}}_ {t,i}) \\Big\\Vert_ {\\Sigma_{Q_ {3,2}}}^2 \\leq \\varepsilon$$\n\n  with a given covariance matrix $\\Sigma_{Q_{3,2}}$, where $N$ is the fixed\n  number of previous velocities of object states and\n  $\\bar{\\boldsymbol{v}}_{t,i}\\in SE(3)$ is the mean of the $N$ previous\n  velocities.\n\nIf **(Q1)** holds for the detection $\\boldsymbol{z}$ and the corresponding\n$i$-th object, the new state of the $i$-th object along with a loosely-coupled\ndetection factor would be added to the factor graph.\n\nFurthermore, if **(Q2)**\nholds, a constant velocity factor and a smooth movement factor would be also\nadded to the factor graph.\n\nFinally, if **(Q3)** holds, the loosely-coupled\ndetection factor would be replaced with a tightly-coupled detection factor. 
It\nmeans that the $i$-th object are regarded as a reliable object that are suitable\nto refine the odometry.\n\n| Notation                           | Setting                                             | Description                                                                                                                                                                                                                                   | Default Value                                      |\n| ---------------------------------- | --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- |\n| $\\varepsilon$                      | `detectionMatchThreshold`                           | The threshold to classify all Mahalanobis distances in the hirarchical criterion (except for the tightly-coupled detection factor).                                                                                                           | 19.5                                               |\n| $\\Sigma_{\\text{Q}_1}$              | `dataAssociationVarianceVector`                     | The covariance matrix to determine if a detection belongs to a given object. This covariance is used to maintain tracking ID.                                                                                                                 | `[3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 3.0e-2, 3.0e-2]` |\n| $\\Sigma_{\\text{Q}_2}$              | `looselyCoupledMatchingVarianceVector`              | The covariance matrix to determine if a detection follows the object's motion.                                                                                                                                                                
| `[1.0e-4, 1.0e-4, 1.0e-4, 2.0e-3, 2.0e-3, 2.0e-3]` |\n| $\\varepsilon^\\prime$               | `tightCouplingDetectionErrorThreshold`              | The threshold to classify the Mahalanobis distance in the detection constraint of the tightly-coupled detection factor checks.                                                                                                                | 26.0                                               |\n| $\\Sigma_{\\text{Q}_{3,1}}^\\prime$   | `tightlyCoupledMatchingVarianceVector`              | The covariance to determine if a detection satisfies the detection constraint in the tightly-coupled detection factor checks.                                                                                                                 | `[8.0e-6, 8.0e-6, 8.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]` |\n| $N$                                | `numberOfVelocityConsistencySteps`                  | The number of samples used in velocity constraint of the tightly-coupled detection checks.                                                                                                                                                    | 4                                                  |\n| $N^\\prime$                         | `numberOfPreLooseCouplingSteps`                     | The number of steps that objects should only use loosely-coupled detection factors (due to velocity constraint of the tightly-coupled detection checks, see [below](#high-speed-moving-object-supports-in-early-steps) for more information). | 6                                                  |\n| $\\sigma_{\\text{Q}_{3,2}}^\\text{A}$ | `objectAngularVelocityConsistencyVarianceThreshold` | The angular part of the covariance matrix to determine if the object satisfies the velocity constraint in the tightly-coupled detection factor checks.                                                                                        
| 1.0e-5                                             |\n| $\\sigma_{\\text{Q}_{3,2}}^\\text{L}$ | `objectLinearVelocityConsistencyVarianceThreshold`  | The linear part of the covariance matrix to determine if the object satisfies the velocity constraint in the tightly-coupled detection factor checks.                                                                                         | 1.0e-2                                             |\n\nFor engineering purposes, we use two thresholds $\\varepsilon$ and\n$\\varepsilon^\\prime$ in the implementation. In addition, we decouple the angular\npart and the linear part of $\\Sigma_{\\text{Q}_{3,2}}$. The following equations\nare shown to coincide with the expression used in our paper:\n\n$$\n\\begin{aligned}\n\\displaystyle\\Sigma _{\\text{Q} _{3,1}} &= \\left(\\frac{\\varepsilon^\\prime}{\\varepsilon}\\right)^2 \\cdot \\displaystyle\\Sigma _{\\text{Q} _{3,1}}^\\prime, \\\\\n\\displaystyle\\Sigma _{\\text{Q} _{3,2}}^\\prime &= \\begin{bmatrix}\\sigma _{\\text{Q} _{3,2}}^\\text{A} \\\\\n& \\sigma _{\\text{Q} _{3,2}}^\\text{A} \\\\\n&& \\sigma _{\\text{Q} _{3,2}}^\\text{A} \\\\\n&&& \\sigma _{\\text{Q} _{3,2}}^\\text{L} \\\\\n&&&& \\sigma _{\\text{Q} _{3,2}}^\\text{L} \\\\\n&&&&& \\sigma _{\\text{Q} _{3,2}}^\\text{L}\n\\end{bmatrix}, \\\\\n\\displaystyle\\Sigma _{\\text{Q} _{3,2}} &= \\frac{1}{\\varepsilon^2} \\cdot \\displaystyle\\Sigma _{\\text{Q} _{3,2}}^\\prime.\n\\end{aligned}\n$$\n\n#### Factor Graph Optimization\n\nCovariance matrices used in factor graph optimization. Different from the above\nsection, they are essential to \"balance\" different types of measurements in a\nunified factor graph. 
Those matrices rarely need to be modified.\n\n| Notation             | Setting                                  | Description                                                                                   | Default Value                                      |\n| -------------------- | ---------------------------------------- | --------------------------------------------------------------------------------------------- | -------------------------------------------------- |\n| $\Sigma_{\text{C}}$  | `constantVelocityDiagonalVarianceVector` | The covariance matrix of constant velocity factors used in factor graph optimization.         | `[2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]` |\n| $\Sigma_{\text{M}}$  | `motionDiagonalVarianceVector`           | The covariance matrix of smooth movement factors used in factor graph optimization.           | `[2.0e-4, 2.0e-4, 1.0e-3, 1.0e-1, 1.0e-2, 1.0e-2]` |\n| $\Sigma_{\text{LC}}$ | `looselyCoupledDetectionVarianceVector`  | The covariance matrix of loosely-coupled detection factors used in factor graph optimization. | `[2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]` |\n| $\Sigma_{\text{TC}}$ | `tightlyCoupledDetectionVarianceVector`  | The covariance matrix of tightly-coupled detection factors used in factor graph optimization. | `[2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]` |\n\n#### High-speed Moving Object Supports in Early Steps\n\nAs all tracking objects' velocities are initialized with zero-speed, it may be\nhard to associate detections in different moments for high-speed moving objects\nin the early stage. To mitigate this issue, a larger covariance matrix for\n**(Q2)** in the hierarchical criterion is used in the first few steps to\naccommodate objects that are moving fast.\n\nSince those steps are used to figure out the initial speed of an object, we do\nnot count them toward the velocity constraint in the tightly-coupled detection\nfactor checks. 
Therefore, we have $N^\\prime = N + N^\\text{E}$.\n\n| Notation                       | Setting                                       | Description                                                                                   | Default Value                                      |\n| ------------------------------ | --------------------------------------------- | --------------------------------------------------------------------------------------------- | -------------------------------------------------- |\n| $N^\\text{E}$                   | `numberOfEarlySteps`                          | Number of the first steps to accommodate high-speed moving objects.                           | 2                                                  |\n| $\\Sigma_{\\text{Q}_2}^\\text{E}$ | `earlyLooselyCoupledMatchingVarianceVector`   | The covariance matrix to determine if a detection follows the object's motion.                | `[3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 5.0e-3, 5.0e-3]` |\n| $\\Sigma_{\\text{C}}^\\text{E}$   | `earlyConstantVelocityDiagonalVarianceVector` | The covariance matrix of loosely-coupled detection factors used in factor graph optimization. | `[2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]` |\n\n#### Lifecycle Management of Tracking Objects\n\nIn real world applications, it's likely to lose detections of tracking objects\ndue to occlusion or other complicate environment circumstances. 
To mitigate\nfrequent ID-switching in multiple object tracking due to the above issue, we\nstill track each object for a little while, even though they do not have any\ncorresponding detections.\n\n| Notation     | Setting                      | Description                                                                                  | Default Value |\n| ------------ | ---------------------------- | -------------------------------------------------------------------------------------------- | ------------- |\n| $N^\text{L}$ | `trackingStepsForLostObject` | Number of steps that LIO-SEGMOT still keeps an object with missing detections in the system. | 3             |\n\n### Limitations of LIO-SEGMOT\n\nCurrently, most state-of-the-art object detection approaches (e.g.,\n[SE-SSD](https://arxiv.org/abs/2104.09804),\n[PointPillars](https://arxiv.org/abs/1812.05784),\n[PointRCNN](https://arxiv.org/abs/1812.04244),\n[PV-RCNN](https://arxiv.org/abs/1912.13192),\n[SPG](https://arxiv.org/abs/2108.06709), and\n[ST3D](https://arxiv.org/abs/2103.05346)) are constructed under machine\nlearning-based neural network architectures, while the issue of domain\nadaptation for data-driven object detection approaches still remains a\nchallenging open problem targeted by recent research (e.g.,\n[SPG](https://arxiv.org/abs/2108.06709) and\n[ST3D](https://arxiv.org/abs/2103.05346)). That is, the performance of object\ndetection models varies under different geographic appearances or\nweather conditions.\n\n[The model weight of SE-SSD](https://github.com/Vegeta2020/SE-SSD) used in\nLIO-SEGMOT is trained under a subset of the KITTI dataset, and thus it might\nwork well in other subsets of the KITTI dataset. However, we can observe that\nthere are numerous false positive detections in the Hsinchu dataset. 
Therefore,\nit forces us to choose different detection models (i.e.,\n[PointPillars w/ Livox's model weights](https://github.com/Livox-SDK/livox_detection))\nwhen experimenting with LIO-SEGMOT in different real world datasets. In\naddition, since the model still cannot produce detection results in the Hsinchu\ndataset as good as those SE-SSD produces in the KITTI dataset, we need to use a\nstricter criterion for the detection constraint in the tightly-coupled detection\nfactor checks (by decreasing $\varepsilon^\prime$ from 26.0 to 19.0). This\npoints out the first limitation of LIO-SEGMOT. That is, covariance matrices and\nthresholds related to object detections (mainly hyperparameters in the\nhierarchical criterion) are required to be adjusted according to the stability\nof object detections. Although it affects the generalization capability of the\nproposed method, we believe that the problem can be mitigated with the\nbreakthrough of the domain adaptation for 3-D object detection.\n\nThe second limitation of LIO-SEGMOT is related to the motion model of tracking\nobjects. If an object does not move at constant velocity, LIO-SEGMOT may\nmiscalculate the object velocity, leading to an inaccurate predicted object pose\nin the subsequent state. The main reason is that objects' velocities are assumed\nto be steady and constant in LIO-SEGMOT. This issue could be resolved by\nintroducing multiple motion models to LIO-SEGMOT and adaptively selecting proper\nmodels during the factor graph optimization, in which the concept is similar to\nan interacting multiple model (IMM) in filtering-based object tracking\napproaches.\n\n### Possible Future Research Directions\n\nThere are two possible future research directions of LIO-SEGMOT:\n\n- The optimization problem of LIO-SEGMOT produces a multi-robot architecture\n  that may break the efficiency of maintaining single root Bayes trees in\n  iSAM2. 
It\n  causes a non-negligible computational cost, especially when there are lots of\n  dynamic objects coupled in the system. In future research, we would\n  like to overcome the bottleneck by introducing the\n  [multi-robot iSAM2 (MR-iSAM2)](http://doi.org/10.1109/iros51168.2021.9636687)\n  algorithm.\n- In addition, rule-based coupling conditions make the proposed method lack the\n  ability to explore global optimality when considering combinatorial ambiguity\n  as unknown integer variables. It raises a complicated mixed-integer\n  programming (MIP) problem, whereas a recent work called\n  [multi-hypothesis iSAM (MH-iSAM2)](https://www.cs.cmu.edu/~kaess/pub/Hsiao19icra.pdf)\n  still motivates us to investigate the challenging problem in the future.\n\n## :gift: Acknowledgement\n\nThe project is mainly developed based on [Tixiao\nShan](https://github.com/TixiaoShan)'s excellent work\n[LIO-SAM](https://github.com/TixiaoShan/LIO-SAM), which helped me a lot in\nconstructing LIO-SEGMOT. I would like to express my sincere thanks first. In\naddition, I would like to thank [Wu Zheng](https://github.com/Vegeta2020) and\n[Livox](https://github.com/Livox-SDK) for developing and releasing their awesome\nobject detection modules [SE-SSD](https://github.com/Vegeta2020/SE-SSD) and\n[Livox Detection](https://github.com/Livox-SDK/livox_detection) respectively.\n"
  },
  {
    "path": "config/doc/kitti2bag/README.md",
    "content": "# kitti2bag\n\n## How to run it?\n\n```bash\nwget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/2011_09_26_drive_0084/2011_09_26_drive_0084_sync.zip\nwget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/2011_09_26_drive_0084/2011_09_26_drive_0084_extract.zip\nwget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/2011_09_26_calib.zip\nunzip 2011_09_26_drive_0084_sync.zip\nunzip 2011_09_26_drive_0084_extract.zip\nunzip 2011_09_26_calib.zip\npython kitti2bag.py -t 2011_09_26 -r 0084 raw_synced .\n```\n\nThat's it. You have a bag that contains your data.\n\nOther source files can be found at [KITTI raw data](http://www.cvlibs.net/datasets/kitti/raw_data.php) page."
  },
  {
    "path": "config/doc/kitti2bag/kitti2bag.py",
    "content": "#!env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\ntry:\n    import pykitti\nexcept ImportError as e:\n    print('Could not load module \\'pykitti\\'. Please run `pip install pykitti`')\n    sys.exit(1)\n\nimport argparse\nimport os\nfrom datetime import datetime\n\nimport cv2\nimport numpy as np\nimport rosbag\nimport rospy\nimport sensor_msgs.point_cloud2 as pcl2\nimport tf\nfrom cv_bridge import CvBridge\nfrom geometry_msgs.msg import Transform, TransformStamped, TwistStamped\nfrom sensor_msgs.msg import CameraInfo, Imu, NavSatFix, PointField\nfrom std_msgs.msg import Header\nfrom tf2_msgs.msg import TFMessage\nfrom tqdm import tqdm\n\n\ndef save_imu_data(bag, kitti, imu_frame_id, topic):\n    print(\"Exporting IMU\")\n    for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):\n        q = tf.transformations.quaternion_from_euler(oxts.packet.roll, oxts.packet.pitch, oxts.packet.yaw)\n        imu = Imu()\n        imu.header.frame_id = imu_frame_id\n        imu.header.stamp = rospy.Time.from_sec(float(timestamp.strftime(\"%s.%f\")))\n        imu.orientation.x = q[0]\n        imu.orientation.y = q[1]\n        imu.orientation.z = q[2]\n        imu.orientation.w = q[3]\n        imu.linear_acceleration.x = oxts.packet.af\n        imu.linear_acceleration.y = oxts.packet.al\n        imu.linear_acceleration.z = oxts.packet.au\n        imu.angular_velocity.x = oxts.packet.wf\n        imu.angular_velocity.y = oxts.packet.wl\n        imu.angular_velocity.z = oxts.packet.wu\n        bag.write(topic, imu, t=imu.header.stamp)\n\ndef save_imu_data_raw(bag, kitti, imu_frame_id, topic):\n    print(\"Exporting IMU Raw\")\n    synced_path = kitti.data_path\n    unsynced_path = synced_path.replace('sync', 'extract')\n    imu_path = os.path.join(unsynced_path, 'oxts')\n\n    # read time stamp (convert to ros seconds format)\n    ambiguous_index = None\n    with open(os.path.join(imu_path, 'timestamps.txt')) as f:\n        lines = f.readlines()\n        
imu_datetimes = []\n        imu_status = []\n        for line in lines:\n            if len(line) == 1:\n                continue\n            timestamp = datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n            imu_datetime = float(timestamp.strftime('%s.%f'))\n\n            if len(imu_datetimes) == 0:\n                pass\n            elif imu_datetimes[-1] > imu_datetime:\n                if ambiguous_index is None:\n                    ambiguous_index = np.argwhere(np.array(imu_datetimes) < imu_datetime).flatten()[-1] + 1\n\n                for i in range(ambiguous_index, len(imu_datetimes)):\n                    imu_status[i] = False\n\n            if len(imu_datetimes) > 0 and imu_datetime - imu_datetimes[-1] >= 0.02:\n                ambiguous_index = len(imu_datetimes)\n\n            imu_datetimes.append(imu_datetime)\n            imu_status.append(True)\n    \n    # # fix imu time using a linear model (may not be ideal, ^_^)\n    # imu_index = np.asarray(range(len(imu_datetimes)), dtype=np.float64)\n    # z = np.polyfit(imu_index, imu_datetimes, 1)\n    # imu_datetimes_new = z[0] * imu_index + z[1]\n    # imu_datetimes = imu_datetimes_new.tolist()\n    # assert((np.diff(imu_datetimes) > 0).all())\n\n    # get all imu data\n    imu_data_dir = os.path.join(imu_path, 'data')\n    imu_filenames = sorted(os.listdir(imu_data_dir))\n    imu_data = [None] * len(imu_filenames)\n    for i, imu_file in enumerate(imu_filenames):\n        imu_data_file = open(os.path.join(imu_data_dir, imu_file), \"r\")\n        for line in imu_data_file:\n            if len(line) == 1:\n                continue\n            stripped_line = line.strip()\n            line_list = stripped_line.split()\n            imu_data[i] = line_list\n\n    if len(imu_data) > len(imu_datetimes):\n        imu_data = imu_data[:len(imu_datetimes)]\n\n    assert len(imu_datetimes) == len(imu_data)\n    print(\"Max IMU time interval: \" + 
str(np.diff(np.array(imu_datetimes)[np.argwhere(imu_status).flatten()]).max()))\n\n    for timestamp, data, status in zip(imu_datetimes, imu_data, imu_status):\n        if not status:\n            continue\n        roll, pitch, yaw = float(data[3]), float(data[4]), float(data[5]), \n        q = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n        imu = Imu()\n        imu.header.frame_id = imu_frame_id\n        imu.header.stamp = rospy.Time.from_sec(timestamp)\n        imu.orientation.x = q[0]\n        imu.orientation.y = q[1]\n        imu.orientation.z = q[2]\n        imu.orientation.w = q[3]\n        imu.linear_acceleration.x = float(data[11])\n        imu.linear_acceleration.y = float(data[12])\n        imu.linear_acceleration.z = float(data[13])\n        imu.angular_velocity.x = float(data[17])\n        imu.angular_velocity.y = float(data[18])\n        imu.angular_velocity.z = float(data[19])\n        bag.write(topic, imu, t=imu.header.stamp)\n\n        imu.header.frame_id = 'imu_enu_link'\n        bag.write('/imu_correct', imu, t=imu.header.stamp) # for LIO-SAM GPS\n\ndef save_dynamic_tf(bag, kitti, kitti_type, initial_time):\n    print(\"Exporting time dependent transformations\")\n    if kitti_type.find(\"raw\") != -1:\n        for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):\n            tf_oxts_msg = TFMessage()\n            tf_oxts_transform = TransformStamped()\n            tf_oxts_transform.header.stamp = rospy.Time.from_sec(float(timestamp.strftime(\"%s.%f\")))\n            tf_oxts_transform.header.frame_id = 'world'\n            tf_oxts_transform.child_frame_id = 'base_link'\n\n            transform = (oxts.T_w_imu)\n            t = transform[0:3, 3]\n            q = tf.transformations.quaternion_from_matrix(transform)\n            oxts_tf = Transform()\n\n            oxts_tf.translation.x = t[0]\n            oxts_tf.translation.y = t[1]\n            oxts_tf.translation.z = t[2]\n\n            oxts_tf.rotation.x = q[0]\n         
   oxts_tf.rotation.y = q[1]\n            oxts_tf.rotation.z = q[2]\n            oxts_tf.rotation.w = q[3]\n\n            tf_oxts_transform.transform = oxts_tf\n            tf_oxts_msg.transforms.append(tf_oxts_transform)\n\n            bag.write('/tf', tf_oxts_msg, tf_oxts_msg.transforms[0].header.stamp)\n\n    elif kitti_type.find(\"odom\") != -1:\n        timestamps = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)\n        for timestamp, tf_matrix in zip(timestamps, kitti.T_w_cam0):\n            tf_msg = TFMessage()\n            tf_stamped = TransformStamped()\n            tf_stamped.header.stamp = rospy.Time.from_sec(timestamp)\n            tf_stamped.header.frame_id = 'world'\n            tf_stamped.child_frame_id = 'camera_left'\n            \n            t = tf_matrix[0:3, 3]\n            q = tf.transformations.quaternion_from_matrix(tf_matrix)\n            transform = Transform()\n\n            transform.translation.x = t[0]\n            transform.translation.y = t[1]\n            transform.translation.z = t[2]\n\n            transform.rotation.x = q[0]\n            transform.rotation.y = q[1]\n            transform.rotation.z = q[2]\n            transform.rotation.w = q[3]\n\n            tf_stamped.transform = transform\n            tf_msg.transforms.append(tf_stamped)\n\n            bag.write('/tf', tf_msg, tf_msg.transforms[0].header.stamp)\n\ndef save_dynamic_tf_as_tum_file(filename, kitti, kitti_type, initial_time):\n    print(\"Exporting time dependent transformations as the tum format\")\n    with open(filename, 'w') as file:\n        if kitti_type.find(\"raw\") != -1:\n            for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):\n                tf_oxts_msg = TFMessage()\n                tf_oxts_transform = TransformStamped()\n                tf_oxts_transform.header.stamp = rospy.Time.from_sec(float(timestamp.strftime(\"%s.%f\")))\n                tf_oxts_transform.header.frame_id = 'world'\n                
tf_oxts_transform.child_frame_id = 'base_link'\n\n                transform = (oxts.T_w_imu)\n                t = transform[0:3, 3]\n                q = tf.transformations.quaternion_from_matrix(transform)\n                oxts_tf = Transform()\n\n                oxts_tf.translation.x = t[0]\n                oxts_tf.translation.y = t[1]\n                oxts_tf.translation.z = t[2]\n\n                oxts_tf.rotation.x = q[0]\n                oxts_tf.rotation.y = q[1]\n                oxts_tf.rotation.z = q[2]\n                oxts_tf.rotation.w = q[3]\n\n                tum_stamp = '%s %f %f %f %f %f %f %f\\n'\n                tum_stamp = tum_stamp % (timestamp.strftime('%s.%f'), t[0], t[1], t[2], q[0], q[1], q[2], q[3])\n                file.write(tum_stamp)\n\n        elif kitti_type.find(\"odom\") != -1:\n            timestamps = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)\n            for timestamp, tf_matrix in zip(timestamps, kitti.T_w_cam0):\n                tf_msg = TFMessage()\n                tf_stamped = TransformStamped()\n                tf_stamped.header.stamp = rospy.Time.from_sec(timestamp)\n                tf_stamped.header.frame_id = 'world'\n                tf_stamped.child_frame_id = 'camera_left'\n                \n                t = tf_matrix[0:3, 3]\n                q = tf.transformations.quaternion_from_matrix(tf_matrix)\n                transform = Transform()\n\n                transform.translation.x = t[0]\n                transform.translation.y = t[1]\n                transform.translation.z = t[2]\n\n                transform.rotation.x = q[0]\n                transform.rotation.y = q[1]\n                transform.rotation.z = q[2]\n                transform.rotation.w = q[3]\n                \n                tum_stamp = '%s %f %f %f %f %f %f %f\\n'\n                tum_stamp = tum_stamp % (timestamp.strftime('%s.%f'), t[0], t[1], t[2], q[0], q[1], q[2], q[3])\n                file.write(tum_stamp)\n\ndef 
save_camera_data(bag, kitti_type, kitti, util, bridge, camera, camera_frame_id, topic, initial_time):\n    print(\"Exporting camera {}\".format(camera))\n    if kitti_type.find(\"raw\") != -1:\n        camera_pad = '{0:02d}'.format(camera)\n        image_dir = os.path.join(kitti.data_path, 'image_{}'.format(camera_pad))\n        image_path = os.path.join(image_dir, 'data')\n        image_filenames = sorted(os.listdir(image_path))\n        with open(os.path.join(image_dir, 'timestamps.txt')) as f:\n            image_datetimes = map(lambda x: datetime.strptime(x[:-4], '%Y-%m-%d %H:%M:%S.%f'), f.readlines())\n        \n        calib = CameraInfo()\n        calib.header.frame_id = camera_frame_id\n        calib.width, calib.height = tuple(util['S_rect_{}'.format(camera_pad)].tolist())\n        calib.distortion_model = 'plumb_bob'\n        calib.K = util['K_{}'.format(camera_pad)]\n        calib.R = util['R_rect_{}'.format(camera_pad)]\n        calib.D = util['D_{}'.format(camera_pad)]\n        calib.P = util['P_rect_{}'.format(camera_pad)]\n            \n    elif kitti_type.find(\"odom\") != -1:\n        camera_pad = '{0:01d}'.format(camera)\n        image_path = os.path.join(kitti.sequence_path, 'image_{}'.format(camera_pad))\n        image_filenames = sorted(os.listdir(image_path))\n        image_datetimes = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)\n        \n        calib = CameraInfo()\n        calib.header.frame_id = camera_frame_id\n        calib.P = util['P{}'.format(camera_pad)]\n    \n    iterable = zip(image_datetimes, image_filenames)\n    for dt, filename in tqdm(iterable, total=len(image_filenames)):\n        image_filename = os.path.join(image_path, filename)\n        cv_image = cv2.imread(image_filename)\n        calib.height, calib.width = cv_image.shape[:2]\n        if camera in (0, 1):\n            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)\n        encoding = \"mono8\" if camera in (0, 1) else \"bgr8\"\n        
image_message = bridge.cv2_to_imgmsg(cv_image, encoding=encoding)\n        image_message.header.frame_id = camera_frame_id\n        if kitti_type.find(\"raw\") != -1:\n            image_message.header.stamp = rospy.Time.from_sec(float(datetime.strftime(dt, \"%s.%f\")))\n            topic_ext = \"/image_raw\"\n        elif kitti_type.find(\"odom\") != -1:\n            image_message.header.stamp = rospy.Time.from_sec(dt)\n            topic_ext = \"/image_rect\"\n        calib.header.stamp = image_message.header.stamp\n        bag.write(topic + topic_ext, image_message, t = image_message.header.stamp)\n        bag.write(topic + '/camera_info', calib, t = calib.header.stamp) \n        \ndef save_velo_data(bag, kitti, velo_frame_id, topic):\n    print(\"Exporting velodyne data\")\n    velo_path = os.path.join(kitti.data_path, 'velodyne_points')\n    velo_data_dir = os.path.join(velo_path, 'data')\n    velo_filenames = sorted(os.listdir(velo_data_dir))\n    with open(os.path.join(velo_path, 'timestamps.txt')) as f:\n        lines = f.readlines()\n        velo_datetimes = []\n        for line in lines:\n            if len(line) == 1:\n                continue\n            dt = datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n            velo_datetimes.append(dt)\n\n    iterable = zip(velo_datetimes, velo_filenames)\n\n    count = 0\n\n    for dt, filename in tqdm(iterable, total=len(velo_filenames)):\n        if dt is None:\n            continue\n\n        velo_filename = os.path.join(velo_data_dir, filename)\n\n        # read binary data\n        scan = (np.fromfile(velo_filename, dtype=np.float32)).reshape(-1, 4)\n\n        # get ring channel\n        depth = np.linalg.norm(scan, 2, axis=1)\n        pitch = np.arcsin(scan[:, 2] / depth) # arcsin(z, depth)\n        fov_down = -24.8 / 180.0 * np.pi\n        fov = (abs(-24.8) + abs(2.0)) / 180.0 * np.pi\n        proj_y = (pitch + abs(fov_down)) / fov  # in [0.0, 1.0]\n        proj_y *= 64  # in [0.0, H]\n        
proj_y = np.floor(proj_y)\n        proj_y = np.minimum(64 - 1, proj_y)\n        proj_y = np.maximum(0, proj_y).astype(np.int32)  # in [0,H-1]\n        proj_y = proj_y.reshape(-1, 1)\n        scan = np.concatenate((scan,proj_y), axis=1)\n        scan = scan.tolist()\n        for i in range(len(scan)):\n            scan[i][-1] = int(scan[i][-1])\n\n        # create header\n        header = Header()\n        header.frame_id = velo_frame_id\n        header.stamp = rospy.Time.from_sec(float(datetime.strftime(dt, \"%s.%f\")))\n\n        # fill pcl msg\n        fields = [PointField('x', 0, PointField.FLOAT32, 1),\n                  PointField('y', 4, PointField.FLOAT32, 1),\n                  PointField('z', 8, PointField.FLOAT32, 1),\n                  PointField('intensity', 12, PointField.FLOAT32, 1),\n                  PointField('ring', 16, PointField.UINT16, 1)]\n        pcl_msg = pcl2.create_cloud(header, fields, scan)\n        pcl_msg.is_dense = True\n        # print(pcl_msg)\n\n        bag.write(topic, pcl_msg, t=pcl_msg.header.stamp)\n\n        # count += 1\n        # if count > 200:\n        #     break\n\ndef get_static_transform(from_frame_id, to_frame_id, transform):\n    t = transform[0:3, 3]\n    q = tf.transformations.quaternion_from_matrix(transform)\n    tf_msg = TransformStamped()\n    tf_msg.header.frame_id = from_frame_id\n    tf_msg.child_frame_id = to_frame_id\n    tf_msg.transform.translation.x = float(t[0])\n    tf_msg.transform.translation.y = float(t[1])\n    tf_msg.transform.translation.z = float(t[2])\n    tf_msg.transform.rotation.x = float(q[0])\n    tf_msg.transform.rotation.y = float(q[1])\n    tf_msg.transform.rotation.z = float(q[2])\n    tf_msg.transform.rotation.w = float(q[3])\n    return tf_msg\n\n\ndef inv(transform):\n    \"Invert rigid body transformation matrix\"\n    R = transform[0:3, 0:3]\n    t = transform[0:3, 3]\n    t_inv = -1 * R.T.dot(t)\n    transform_inv = np.eye(4)\n    transform_inv[0:3, 0:3] = R.T\n    
transform_inv[0:3, 3] = t_inv\n    return transform_inv\n\n\ndef save_static_transforms(bag, transforms, timestamps):\n    print(\"Exporting static transformations\")\n    tfm = TFMessage()\n    for transform in transforms:\n        t = get_static_transform(from_frame_id=transform[0], to_frame_id=transform[1], transform=transform[2])\n        tfm.transforms.append(t)\n    for timestamp in timestamps:\n        time = rospy.Time.from_sec(float(timestamp.strftime(\"%s.%f\")))\n        for i in range(len(tfm.transforms)):\n            tfm.transforms[i].header.stamp = time\n        bag.write('/tf_static', tfm, t=time)\n\n\ndef save_gps_fix_data(bag, kitti, gps_frame_id, topic):\n    for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):\n        navsatfix_msg = NavSatFix()\n        navsatfix_msg.header.frame_id = gps_frame_id\n        navsatfix_msg.header.stamp = rospy.Time.from_sec(float(timestamp.strftime(\"%s.%f\")))\n        navsatfix_msg.latitude = oxts.packet.lat\n        navsatfix_msg.longitude = oxts.packet.lon\n        navsatfix_msg.altitude = oxts.packet.alt\n        navsatfix_msg.status.service = 1\n        bag.write(topic, navsatfix_msg, t=navsatfix_msg.header.stamp)\n\n\ndef save_gps_vel_data(bag, kitti, gps_frame_id, topic):\n    for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):\n        twist_msg = TwistStamped()\n        twist_msg.header.frame_id = gps_frame_id\n        twist_msg.header.stamp = rospy.Time.from_sec(float(timestamp.strftime(\"%s.%f\")))\n        twist_msg.twist.linear.x = oxts.packet.vf\n        twist_msg.twist.linear.y = oxts.packet.vl\n        twist_msg.twist.linear.z = oxts.packet.vu\n        twist_msg.twist.angular.x = oxts.packet.wf\n        twist_msg.twist.angular.y = oxts.packet.wl\n        twist_msg.twist.angular.z = oxts.packet.wu\n        bag.write(topic, twist_msg, t=twist_msg.header.stamp)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description = \"Convert KITTI dataset to ROS bag file the 
easy way!\")\n    # Accepted argument values\n    kitti_types = [\"raw_synced\", \"odom_color\", \"odom_gray\"]\n    odometry_sequences = []\n    for s in range(22):\n        odometry_sequences.append(str(s).zfill(2))\n    \n    parser.add_argument(\"kitti_type\", choices = kitti_types, help = \"KITTI dataset type\")\n    parser.add_argument(\"dir\", nargs = \"?\", default = os.getcwd(), help = \"base directory of the dataset, if no directory passed the deafult is current working directory\")\n    parser.add_argument(\"-t\", \"--date\", help = \"date of the raw dataset (i.e. 2011_09_26), option is only for RAW datasets.\")\n    parser.add_argument(\"-r\", \"--drive\", help = \"drive number of the raw dataset (i.e. 0001), option is only for RAW datasets.\")\n    parser.add_argument(\"-s\", \"--sequence\", choices = odometry_sequences,help = \"sequence of the odometry dataset (between 00 - 21), option is only for ODOMETRY datasets.\")\n    args = parser.parse_args()\n\n    bridge = CvBridge()\n    compression = rosbag.Compression.NONE\n    # compression = rosbag.Compression.BZ2\n    # compression = rosbag.Compression.LZ4\n    \n    # CAMERAS\n    cameras = [\n        (0, 'camera_gray_left', '/kitti/camera_gray_left'),\n        (1, 'camera_gray_right', '/kitti/camera_gray_right'),\n        (2, 'camera_color_left', '/kitti/camera_color_left'),\n        (3, 'camera_color_right', '/kitti/camera_color_right')\n    ]\n\n    if args.kitti_type.find(\"raw\") != -1:\n    \n        if args.date == None:\n            print(\"Date option is not given. It is mandatory for raw dataset.\")\n            print(\"Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>\")\n            sys.exit(1)\n        elif args.drive == None:\n            print(\"Drive option is not given. 
It is mandatory for raw dataset.\")\n            print(\"Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>\")\n            sys.exit(1)\n        \n        bag = rosbag.Bag(\"kitti_{}_drive_{}_{}.bag\".format(args.date, args.drive, args.kitti_type[4:]), 'w', compression=compression)\n        tum_filename = \"kitti_{}_drive_{}_{}.tum\".format(args.date, args.drive, args.kitti_type[4:])\n        kitti = pykitti.raw(args.dir, args.date, args.drive)\n        if not os.path.exists(kitti.data_path):\n            print('Path {} does not exists. Exiting.'.format(kitti.data_path))\n            sys.exit(1)\n\n        if len(kitti.timestamps) == 0:\n            print('Dataset is empty? Exiting.')\n            sys.exit(1)\n\n        try:\n            # IMU\n            imu_frame_id = 'imu_link'\n            imu_topic = '/kitti/oxts/imu'\n            imu_raw_topic = '/imu_raw'\n            gps_fix_topic = '/gps/fix'\n            gps_vel_topic = '/gps/vel'\n            velo_frame_id = 'velodyne'\n            velo_topic = '/points_raw'\n\n            T_base_link_to_imu = np.eye(4, 4)\n            T_base_link_to_imu[0:3, 3] = [-2.71/2.0-0.05, 0.32, 0.93]\n\n            # tf_static\n            transforms = [\n                ('base_link', imu_frame_id, T_base_link_to_imu),\n                (imu_frame_id, velo_frame_id, inv(kitti.calib.T_velo_imu)),\n                (imu_frame_id, cameras[0][1], inv(kitti.calib.T_cam0_imu)),\n                (imu_frame_id, cameras[1][1], inv(kitti.calib.T_cam1_imu)),\n                (imu_frame_id, cameras[2][1], inv(kitti.calib.T_cam2_imu)),\n                (imu_frame_id, cameras[3][1], inv(kitti.calib.T_cam3_imu))\n            ]\n\n            util = pykitti.utils.read_calib_file(os.path.join(kitti.calib_path, 'calib_cam_to_cam.txt'))\n\n            # Export\n            save_static_transforms(bag, transforms, kitti.timestamps)\n            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=None)\n            
save_dynamic_tf_as_tum_file(tum_filename, kitti, args.kitti_type, initial_time=None)\n            # save_imu_data(bag, kitti, imu_frame_id, imu_topic)\n            save_imu_data_raw(bag, kitti, imu_frame_id, imu_raw_topic)\n            save_gps_fix_data(bag, kitti, imu_frame_id, gps_fix_topic)\n            save_gps_vel_data(bag, kitti, imu_frame_id, gps_vel_topic)\n            for camera in cameras:\n                save_camera_data(bag, args.kitti_type, kitti, util, bridge, camera=camera[0], camera_frame_id=camera[1], topic=camera[2], initial_time=None)\n                break\n            save_velo_data(bag, kitti, velo_frame_id, velo_topic)\n\n        finally:\n            print(\"## OVERVIEW ##\")\n            print(bag)\n            bag.close()\n            \n    elif args.kitti_type.find(\"odom\") != -1:\n        \n        if args.sequence == None:\n            print(\"Sequence option is not given. It is mandatory for odometry dataset.\")\n            print(\"Usage for odometry dataset: kitti2bag {odom_color, odom_gray} [dir] -s <sequence>\")\n            sys.exit(1)\n            \n        bag = rosbag.Bag(\"kitti_data_odometry_{}_sequence_{}.bag\".format(args.kitti_type[5:],args.sequence), 'w', compression=compression)\n        \n        kitti = pykitti.odometry(args.dir, args.sequence)\n        if not os.path.exists(kitti.sequence_path):\n            print('Path {} does not exists. Exiting.'.format(kitti.sequence_path))\n            sys.exit(1)\n\n        kitti.load_calib()         \n        kitti.load_timestamps() \n             \n        if len(kitti.timestamps) == 0:\n            print('Dataset is empty? 
Exiting.')\n            sys.exit(1)\n            \n        if args.sequence in odometry_sequences[:11]:\n            print(\"Odometry dataset sequence {} has ground truth information (poses).\".format(args.sequence))\n            kitti.load_poses()\n\n        try:\n            util = pykitti.utils.read_calib_file(os.path.join(args.dir,'sequences',args.sequence, 'calib.txt'))\n            current_epoch = (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()\n            # Export\n            if args.kitti_type.find(\"gray\") != -1:\n                used_cameras = cameras[:2]\n            elif args.kitti_type.find(\"color\") != -1:\n                used_cameras = cameras[-2:]\n\n            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=current_epoch)\n            for camera in used_cameras:\n                save_camera_data(bag, args.kitti_type, kitti, util, bridge, camera=camera[0], camera_frame_id=camera[1], topic=camera[2], initial_time=current_epoch)\n\n        finally:\n            print(\"## OVERVIEW ##\")\n            print(bag)\n            bag.close()\n\n"
  },
  {
    "path": "config/params.yaml",
    "content": "lio_segmot:\n\n  # Topics\n  pointCloudTopic: \"points_raw\"               # Point cloud data\n  imuTopic: \"imu_raw\"                         # IMU data\n  odomTopic: \"odometry/imu\"                   # IMU pre-integration odometry, same frequency as IMU\n  gpsTopic: \"odometry/gpsz\"                   # GPS odometry topic from navsat, see module_navsat.launch file\n\n  # Frames\n  lidarFrame: \"base_link\"\n  baselinkFrame: \"base_link\"\n  odometryFrame: \"odom\"\n  mapFrame: \"map\"\n\n  # GPS Settings\n  useImuHeadingInitialization: false          # if using GPS data, set to \"true\"\n  useGpsElevation: false                      # if GPS elevation is bad, set to \"false\"\n  gpsCovThreshold: 2.0                        # m^2, threshold for using GPS data\n  poseCovThreshold: 25.0                      # m^2, threshold for using GPS data\n  \n  # Export settings\n  savePCD: false                              # https://github.com/TixiaoShan/LIO-SAM/issues/3\n  savePCDDirectory: \"/Downloads/LOAM/\"        # in your home folder, starts and ends with \"/\". Warning: the code deletes \"LOAM\" folder then recreates it. See \"mapOptimization\" for implementation\n\n  # Sensor Settings\n  sensor: velodyne                            # lidar sensor type, either 'velodyne' or 'ouster'\n  N_SCAN: 64                                  # number of lidar channel (i.e., 16, 32, 64, 128)\n  Horizon_SCAN: 1800                          # lidar horizontal resolution (Velodyne:1800, Ouster:512,1024,2048)\n  downsampleRate: 1                           # default: 1. Downsample your data if too many points. 
i.e., 16 = 64 / 4, 16 = 16 / 1\n  lidarMinRange: 1.0                          # default: 1.0, minimum lidar range to be used\n  lidarMaxRange: 1000.0                       # default: 1000.0, maximum lidar range to be used\n\n  # IMU Settings\n  imuAccNoise: 3.9939570888238808e-03\n  imuGyrNoise: 1.5636343949698187e-03\n  imuAccBiasN: 6.4356659353532566e-05\n  imuGyrBiasN: 3.5640318696367613e-05\n  imuGravity: 9.80511\n  imuRPYWeight: 0.01\n\n  # Extrinsics (lidar -> IMU, KITTI)\n  extrinsicTrans: [-8.086759e-01, 3.195559e-01, -7.997231e-01]\n  extrinsicRot: [ 9.999976e-01, 7.553071e-04, -2.035826e-03,\n                 -7.854027e-04, 9.998898e-01, -1.482298e-02,\n                  2.024406e-03, 1.482454e-02,  9.998881e-01]\n  extrinsicRPY: [ 9.999976e-01, 7.553071e-04, -2.035826e-03,\n                 -7.854027e-04, 9.998898e-01, -1.482298e-02,\n                  2.024406e-03, 1.482454e-02,  9.998881e-01]\n  # Extrinsics (lidar -> IMU, ITRI)\n  # extrinsicTrans: [0, 0, 0]\n  # extrinsicRot: [1, 0, 0,\n  #                 0, 1, 0,\n  #                 0, 0, 1]\n  # extrinsicRPY: [1, 0, 0,\n  #                 0, 1, 0,\n  #                 0, 0, 1]\n  # Extrinsics (lidar -> IMU, UrbanLoco)\n  # extrinsicTrans: [0, 0, -0.0762]\n  # extrinsicRot: [ 0, 1, 0,\n  #                -1, 0, 0,\n  #                 0, 0, 1]\n  # extrinsicRPY: [ 0, 1, 0,\n  #                -1, 0, 0,\n  #                 0, 0, 1]\n\n  # LOAM feature threshold\n  edgeThreshold: 1.0\n  surfThreshold: 0.1\n  edgeFeatureMinValidNum: 10\n  surfFeatureMinValidNum: 100\n\n  # voxel filter params\n  odometrySurfLeafSize: 0.4                     # default: 0.4 - outdoor, 0.2 - indoor\n  mappingCornerLeafSize: 0.2                    # default: 0.2 - outdoor, 0.1 - indoor\n  mappingSurfLeafSize: 0.4                      # default: 0.4 - outdoor, 0.2 - indoor\n\n  # robot motion constraint (in case you are using a 2D robot)\n  z_tollerance: 1000                            # meters\n  rotation_tollerance: 
1000                     # radians\n\n  # CPU Params\n  numberOfCores: 8                              # number of cores for mapping optimization\n  mappingProcessInterval: 0.09                  # seconds, regulate mapping frequency\n\n  # Surrounding map\n  surroundingkeyframeAddingDistThreshold: 1.0   # meters, regulate keyframe adding threshold\n  surroundingkeyframeAddingAngleThreshold: 0.2  # radians, regulate keyframe adding threshold\n  surroundingKeyframeDensity: 2.0               # meters, downsample surrounding keyframe poses   \n  surroundingKeyframeSearchRadius: 50.0         # meters, within n meters scan-to-map optimization (when loop closure disabled)\n\n  # Loop closure\n  loopClosureEnableFlag: true\n  loopClosureFrequency: 1.0                     # Hz, regulate loop closure constraint add frequency\n  surroundingKeyframeSize: 50                   # submap size (when loop closure enabled)\n  historyKeyframeSearchRadius: 15.0             # meters, key frame that is within n meters from current pose will be considered for loop closure\n  historyKeyframeSearchTimeDiff: 30.0           # seconds, key frame that is n seconds older will be considered for loop closure\n  historyKeyframeSearchNum: 25                  # number of history key frames will be fused into a submap for loop closure\n  historyKeyframeFitnessScore: 0.3              # icp threshold, the smaller the better alignment\n\n  # Visualization\n  globalMapVisualizationSearchRadius: 1000.0    # meters, global map visualization radius\n  globalMapVisualizationPoseDensity: 10.0       # meters, global map visualization keyframe density\n  globalMapVisualizationLeafSize: 1.0           # meters, global map visualization cloud density\n\n  # Dynamic object data association\n  detectionMatchThreshold: 19.5\n  dataAssociationVarianceVector: [3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 3.0e-2, 3.0e-2]\n  earlyLooselyCoupledMatchingVarianceVector: [3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 5.0e-3, 5.0e-3]\n  
looselyCoupledMatchingVarianceVector: [1.0e-4, 1.0e-4, 1.0e-4, 2.0e-3, 2.0e-3, 2.0e-3]\n  tightlyCoupledMatchingVarianceVector: [8.0e-6, 8.0e-6, 8.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]\n  # tightlyCoupledMatchingVarianceVector: [2.0e-7, 2.0e-7, 2.0e-7, 1.5e-5, 1.5e-5, 1.5e-5]\n\n  # Factor covariance matrices (as diagonal vectors)\n  # priorOdometryDiagonalVarianceVector: [1.0e-2, 1.0e-2, M_PI*M_PI, 1.0e+8, 1.0e+8, 1.0e+8]\n  # odometryDiagonalVarianceVector: [1.0e-6, 1.0e-6, 1.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]\n  earlyConstantVelocityDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]\n  constantVelocityDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]\n  motionDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 1.0e-1, 1.0e-2, 1.0e-2]\n  looselyCoupledDetectionVarianceVector: [2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]\n  tightlyCoupledDetectionVarianceVector: [2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]\n\n  numberOfEarlySteps: 2\n  numberOfPreLooseCouplingSteps: 6\n  numberOfVelocityConsistencySteps: 4\n  numberOfInterLooseCouplingSteps: 0\n  tightCouplingDetectionErrorThreshold: 26.0\n\n  objectAngularVelocityConsistencyVarianceThreshold: 1.0e-5  # rad^2\n  objectLinearVelocityConsistencyVarianceThreshold: 1.0e-2  # m^2\n\n  # Tracking\n  trackingStepsForLostObject: 3\n\n\n# Navsat (convert GPS coordinates to Cartesian)\nnavsat:\n  frequency: 50\n  wait_for_datum: false\n  delay: 0.0\n  magnetic_declination_radians: 0\n  yaw_offset: 0\n  zero_altitude: true\n  broadcast_utm_transform: false\n  broadcast_utm_transform_as_parent_frame: false\n  publish_filtered_gps: false\n\n# EKF for Navsat\nekf_gps:\n  publish_tf: false\n  map_frame: map\n  odom_frame: odom\n  base_link_frame: base_link\n  world_frame: odom\n\n  frequency: 50\n  two_d_mode: false\n  sensor_timeout: 0.01\n  # -------------------------------------\n  # External IMU:\n  # -------------------------------------\n  imu0: imu_correct\n  # make sure the input is 
aligned with ROS REP105. \"imu_correct\" is manually transformed by myself. EKF can also transform the data using tf between your imu and base_link\n  imu0_config: [false, false, false,\n                true,  true,  true,\n                false, false, false,\n                false, false, true,\n                true,  true,  true]\n  imu0_differential: false\n  imu0_queue_size: 50 \n  imu0_remove_gravitational_acceleration: true\n  # -------------------------------------\n  # Odometry (From Navsat):\n  # -------------------------------------\n  odom0: odometry/gps\n  odom0_config: [true,  true,  true,\n                 false, false, false,\n                 false, false, false,\n                 false, false, false,\n                 false, false, false]\n  odom0_differential: false\n  odom0_queue_size: 10\n\n  #                            x     y     z     r     p     y   x_dot  y_dot  z_dot  r_dot p_dot y_dot x_ddot y_ddot z_ddot\n  process_noise_covariance: [  1.0,  0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    1.0,  0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    10.0, 0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0.03, 0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0.03, 0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0.1,  0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0.25,  0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0.25,  0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,  
  0,    0,     0,     0.04,  0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0.01, 0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0.01, 0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0.5,  0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0.01, 0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0.01,   0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0.015]\n"
  },
  {
    "path": "config/params_hsinchu.yaml",
    "content": "lio_segmot:\n\n  # Topics\n  pointCloudTopic: \"points_raw\"               # Point cloud data\n  imuTopic: \"imu_raw\"                         # IMU data\n  odomTopic: \"odometry/imu\"                   # IMU pre-preintegration odometry, same frequency as IMU\n  gpsTopic: \"odometry/gpsz\"                   # GPS odometry topic from navsat, see module_navsat.launch file\n\n  # Frames\n  lidarFrame: \"base_link\"\n  baselinkFrame: \"base_link\"\n  odometryFrame: \"odom\"\n  mapFrame: \"map\"\n\n  # GPS Settings\n  useImuHeadingInitialization: false          # if using GPS data, set to \"true\"\n  useGpsElevation: false                      # if GPS elevation is bad, set to \"false\"\n  gpsCovThreshold: 2.0                        # m^2, threshold for using GPS data\n  poseCovThreshold: 25.0                      # m^2, threshold for using GPS data\n  \n  # Export settings\n  savePCD: false                              # https://github.com/TixiaoShan/LIO-SAM/issues/3\n  savePCDDirectory: \"/Downloads/LOAM/\"        # in your home folder, starts and ends with \"/\". Warning: the code deletes \"LOAM\" folder then recreates it. See \"mapOptimization\" for implementation\n\n  # Sensor Settings\n  sensor: velodyne                            # lidar sensor type, either 'velodyne' or 'ouster'\n  N_SCAN: 64                                  # number of lidar channel (i.e., 16, 32, 64, 128)\n  Horizon_SCAN: 1800                          # lidar horizontal resolution (Velodyne:1800, Ouster:512,1024,2048)\n  downsampleRate: 1                           # default: 1. Downsample your data if too many points. 
i.e., 16 = 64 / 4, 16 = 16 / 1\n  lidarMinRange: 1.0                          # default: 1.0, minimum lidar range to be used\n  lidarMaxRange: 1000.0                       # default: 1000.0, maximum lidar range to be used\n\n  # IMU Settings\n  imuAccNoise: 3.9939570888238808e-03\n  imuGyrNoise: 1.5636343949698187e-03\n  imuAccBiasN: 6.4356659353532566e-05\n  imuGyrBiasN: 3.5640318696367613e-05\n  imuGravity: 9.80511\n  imuRPYWeight: 0.01\n\n  # Extrinsics (lidar -> IMU, KITTI)\n  # extrinsicTrans: [-8.086759e-01, 3.195559e-01, -7.997231e-01]\n  # extrinsicRot: [ 9.999976e-01, 7.553071e-04, -2.035826e-03,\n  #                -7.854027e-04, 9.998898e-01, -1.482298e-02,\n  #                 2.024406e-03, 1.482454e-02,  9.998881e-01]\n  # extrinsicRPY: [ 9.999976e-01, 7.553071e-04, -2.035826e-03,\n  #                -7.854027e-04, 9.998898e-01, -1.482298e-02,\n  #                 2.024406e-03, 1.482454e-02,  9.998881e-01]\n  # Extrinsics (lidar -> IMU, ITRI)\n  extrinsicTrans: [0, 0, 0]\n  extrinsicRot: [1, 0, 0,\n                  0, 1, 0,\n                  0, 0, 1]\n  extrinsicRPY: [1, 0, 0,\n                  0, 1, 0,\n                  0, 0, 1]\n  # Extrinsics (lidar -> IMU, UrbanLoco)\n  # extrinsicTrans: [0, 0, -0.0762]\n  # extrinsicRot: [ 0, 1, 0,\n  #                -1, 0, 0,\n  #                 0, 0, 1]\n  # extrinsicRPY: [ 0, 1, 0,\n  #                -1, 0, 0,\n  #                 0, 0, 1]\n\n  # LOAM feature threshold\n  edgeThreshold: 1.0\n  surfThreshold: 0.1\n  edgeFeatureMinValidNum: 10\n  surfFeatureMinValidNum: 100\n\n  # voxel filter paprams\n  odometrySurfLeafSize: 0.4                     # default: 0.4 - outdoor, 0.2 - indoor\n  mappingCornerLeafSize: 0.2                    # default: 0.2 - outdoor, 0.1 - indoor\n  mappingSurfLeafSize: 0.4                      # default: 0.4 - outdoor, 0.2 - indoor\n\n  # robot motion constraint (in case you are using a 2D robot)\n  z_tollerance: 1000                            # meters\n  rotation_tollerance: 
1000                     # radians\n\n  # CPU Params\n  numberOfCores: 8                              # number of cores for mapping optimization\n  mappingProcessInterval: 0.09                  # seconds, regulate mapping frequency\n\n  # Surrounding map\n  surroundingkeyframeAddingDistThreshold: 1.0   # meters, regulate keyframe adding threshold\n  surroundingkeyframeAddingAngleThreshold: 0.2  # radians, regulate keyframe adding threshold\n  surroundingKeyframeDensity: 2.0               # meters, downsample surrounding keyframe poses   \n  surroundingKeyframeSearchRadius: 50.0         # meters, within n meters scan-to-map optimization (when loop closure disabled)\n\n  # Loop closure\n  loopClosureEnableFlag: true\n  loopClosureFrequency: 1.0                     # Hz, regulate loop closure constraint add frequency\n  surroundingKeyframeSize: 50                   # submap size (when loop closure enabled)\n  historyKeyframeSearchRadius: 15.0             # meters, key frame that is within n meters from current pose will be considerd for loop closure\n  historyKeyframeSearchTimeDiff: 30.0           # seconds, key frame that is n seconds older will be considered for loop closure\n  historyKeyframeSearchNum: 25                  # number of hostory key frames will be fused into a submap for loop closure\n  historyKeyframeFitnessScore: 0.3              # icp threshold, the smaller the better alignment\n\n  # Visualization\n  globalMapVisualizationSearchRadius: 1000.0    # meters, global map visualization radius\n  globalMapVisualizationPoseDensity: 10.0       # meters, global map visualization keyframe density\n  globalMapVisualizationLeafSize: 1.0           # meters, global map visualization cloud density\n\n  # Dynamic object data association\n  detectionMatchThreshold: 19.5\n  dataAssociationVarianceVector: [3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 3.0e-2, 3.0e-2]\n  earlyLooselyCoupledMatchingVarianceVector: [3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 5.0e-3, 5.0e-3]\n  
looselyCoupledMatchingVarianceVector: [1.0e-4, 1.0e-4, 1.0e-4, 2.0e-3, 2.0e-3, 2.0e-3]\n  tightlyCoupledMatchingVarianceVector: [8.0e-6, 8.0e-6, 8.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]\n  # tightlyCoupledMatchingVarianceVector: [2.0e-7, 2.0e-7, 2.0e-7, 1.5e-5, 1.5e-5, 1.5e-5]\n\n  # Factor covariance matrices (as diagonal vectors)\n  # priorOdometryDiagonalVarianceVector: [1.0e-2, 1.0e-2, M_PI*M_PI, 1.0e+8, 1.0e+8, 1.0e+8]\n  # odometryDiagonalVarianceVector: [1.0e-6, 1.0e-6, 1.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]\n  earlyConstantVelocityDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]\n  constantVelocityDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]\n  motionDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 1.0e-1, 1.0e-2, 1.0e-2]\n  looselyCoupledDetectionVarianceVector: [2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]\n  tightlyCoupledDetectionVarianceVector: [2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]\n\n  numberOfEarlySteps: 2\n  numberOfPreLooseCouplingSteps: 6\n  numberOfVelocityConsistencySteps: 4\n  numberOfInterLooseCouplingSteps: 0\n  tightCouplingDetectionErrorThreshold: 19.5\n\n  objectAngularVelocityConsistencyVarianceThreshold: 1.0e-5  # rad^2\n  objectLinearVelocityConsistencyVarianceThreshold: 1.0e-2  # m^2\n\n  # Tracking\n  trackingStepsForLostObject: 3\n\n\n# Navsat (convert GPS coordinates to Cartesian)\nnavsat:\n  frequency: 50\n  wait_for_datum: false\n  delay: 0.0\n  magnetic_declination_radians: 0\n  yaw_offset: 0\n  zero_altitude: true\n  broadcast_utm_transform: false\n  broadcast_utm_transform_as_parent_frame: false\n  publish_filtered_gps: false\n\n# EKF for Navsat\nekf_gps:\n  publish_tf: false\n  map_frame: map\n  odom_frame: odom\n  base_link_frame: base_link\n  world_frame: odom\n\n  frequency: 50\n  two_d_mode: false\n  sensor_timeout: 0.01\n  # -------------------------------------\n  # External IMU:\n  # -------------------------------------\n  imu0: imu_correct\n  # make sure the input is 
aligned with ROS REP105. \"imu_correct\" is manually transformed by myself. EKF can also transform the data using tf between your imu and base_link\n  imu0_config: [false, false, false,\n                true,  true,  true,\n                false, false, false,\n                false, false, true,\n                true,  true,  true]\n  imu0_differential: false\n  imu0_queue_size: 50 \n  imu0_remove_gravitational_acceleration: true\n  # -------------------------------------\n  # Odometry (From Navsat):\n  # -------------------------------------\n  odom0: odometry/gps\n  odom0_config: [true,  true,  true,\n                 false, false, false,\n                 false, false, false,\n                 false, false, false,\n                 false, false, false]\n  odom0_differential: false\n  odom0_queue_size: 10\n\n  #                            x     y     z     r     p     y   x_dot  y_dot  z_dot  r_dot p_dot y_dot x_ddot y_ddot z_ddot\n  process_noise_covariance: [  1.0,  0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    1.0,  0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    10.0, 0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0.03, 0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0.03, 0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0.1,  0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0.25,  0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0.25,  0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,  
  0,    0,     0,     0.04,  0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0.01, 0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0.01, 0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0.5,  0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0.01, 0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0.01,   0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0.015]\n"
  },
  {
    "path": "config/params_kitti.yaml",
    "content": "lio_segmot:\n\n  # Topics\n  pointCloudTopic: \"points_raw\"               # Point cloud data\n  imuTopic: \"imu_raw\"                         # IMU data\n  odomTopic: \"odometry/imu\"                   # IMU pre-preintegration odometry, same frequency as IMU\n  gpsTopic: \"odometry/gpsz\"                   # GPS odometry topic from navsat, see module_navsat.launch file\n\n  # Frames\n  lidarFrame: \"base_link\"\n  baselinkFrame: \"base_link\"\n  odometryFrame: \"odom\"\n  mapFrame: \"map\"\n\n  # GPS Settings\n  useImuHeadingInitialization: false          # if using GPS data, set to \"true\"\n  useGpsElevation: false                      # if GPS elevation is bad, set to \"false\"\n  gpsCovThreshold: 2.0                        # m^2, threshold for using GPS data\n  poseCovThreshold: 25.0                      # m^2, threshold for using GPS data\n  \n  # Export settings\n  savePCD: false                              # https://github.com/TixiaoShan/LIO-SAM/issues/3\n  savePCDDirectory: \"/Downloads/LOAM/\"        # in your home folder, starts and ends with \"/\". Warning: the code deletes \"LOAM\" folder then recreates it. See \"mapOptimization\" for implementation\n\n  # Sensor Settings\n  sensor: velodyne                            # lidar sensor type, either 'velodyne' or 'ouster'\n  N_SCAN: 64                                  # number of lidar channel (i.e., 16, 32, 64, 128)\n  Horizon_SCAN: 1800                          # lidar horizontal resolution (Velodyne:1800, Ouster:512,1024,2048)\n  downsampleRate: 1                           # default: 1. Downsample your data if too many points. 
i.e., 16 = 64 / 4, 16 = 16 / 1\n  lidarMinRange: 1.0                          # default: 1.0, minimum lidar range to be used\n  lidarMaxRange: 1000.0                       # default: 1000.0, maximum lidar range to be used\n\n  # IMU Settings\n  imuAccNoise: 3.9939570888238808e-03\n  imuGyrNoise: 1.5636343949698187e-03\n  imuAccBiasN: 6.4356659353532566e-05\n  imuGyrBiasN: 3.5640318696367613e-05\n  imuGravity: 9.80511\n  imuRPYWeight: 0.01\n\n  # Extrinsics (lidar -> IMU, KITTI)\n  extrinsicTrans: [-8.086759e-01, 3.195559e-01, -7.997231e-01]\n  extrinsicRot: [ 9.999976e-01, 7.553071e-04, -2.035826e-03,\n                 -7.854027e-04, 9.998898e-01, -1.482298e-02,\n                  2.024406e-03, 1.482454e-02,  9.998881e-01]\n  extrinsicRPY: [ 9.999976e-01, 7.553071e-04, -2.035826e-03,\n                 -7.854027e-04, 9.998898e-01, -1.482298e-02,\n                  2.024406e-03, 1.482454e-02,  9.998881e-01]\n  # Extrinsics (lidar -> IMU, ITRI)\n  # extrinsicTrans: [0, 0, 0]\n  # extrinsicRot: [1, 0, 0,\n  #                 0, 1, 0,\n  #                 0, 0, 1]\n  # extrinsicRPY: [1, 0, 0,\n  #                 0, 1, 0,\n  #                 0, 0, 1]\n  # Extrinsics (lidar -> IMU, UrbanLoco)\n  # extrinsicTrans: [0, 0, -0.0762]\n  # extrinsicRot: [ 0, 1, 0,\n  #                -1, 0, 0,\n  #                 0, 0, 1]\n  # extrinsicRPY: [ 0, 1, 0,\n  #                -1, 0, 0,\n  #                 0, 0, 1]\n\n  # LOAM feature threshold\n  edgeThreshold: 1.0\n  surfThreshold: 0.1\n  edgeFeatureMinValidNum: 10\n  surfFeatureMinValidNum: 100\n\n  # voxel filter paprams\n  odometrySurfLeafSize: 0.4                     # default: 0.4 - outdoor, 0.2 - indoor\n  mappingCornerLeafSize: 0.2                    # default: 0.2 - outdoor, 0.1 - indoor\n  mappingSurfLeafSize: 0.4                      # default: 0.4 - outdoor, 0.2 - indoor\n\n  # robot motion constraint (in case you are using a 2D robot)\n  z_tollerance: 1000                            # meters\n  rotation_tollerance: 
1000                     # radians\n\n  # CPU Params\n  numberOfCores: 8                              # number of cores for mapping optimization\n  mappingProcessInterval: 0.09                  # seconds, regulate mapping frequency\n\n  # Surrounding map\n  surroundingkeyframeAddingDistThreshold: 1.0   # meters, regulate keyframe adding threshold\n  surroundingkeyframeAddingAngleThreshold: 0.2  # radians, regulate keyframe adding threshold\n  surroundingKeyframeDensity: 2.0               # meters, downsample surrounding keyframe poses   \n  surroundingKeyframeSearchRadius: 50.0         # meters, within n meters scan-to-map optimization (when loop closure disabled)\n\n  # Loop closure\n  loopClosureEnableFlag: true\n  loopClosureFrequency: 1.0                     # Hz, regulate loop closure constraint add frequency\n  surroundingKeyframeSize: 50                   # submap size (when loop closure enabled)\n  historyKeyframeSearchRadius: 15.0             # meters, key frame that is within n meters from current pose will be considerd for loop closure\n  historyKeyframeSearchTimeDiff: 30.0           # seconds, key frame that is n seconds older will be considered for loop closure\n  historyKeyframeSearchNum: 25                  # number of hostory key frames will be fused into a submap for loop closure\n  historyKeyframeFitnessScore: 0.3              # icp threshold, the smaller the better alignment\n\n  # Visualization\n  globalMapVisualizationSearchRadius: 1000.0    # meters, global map visualization radius\n  globalMapVisualizationPoseDensity: 10.0       # meters, global map visualization keyframe density\n  globalMapVisualizationLeafSize: 1.0           # meters, global map visualization cloud density\n\n  # Dynamic object data association\n  detectionMatchThreshold: 19.5\n  dataAssociationVarianceVector: [3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 3.0e-2, 3.0e-2]\n  earlyLooselyCoupledMatchingVarianceVector: [3.0e-4, 3.0e-4, 3.0e-4, 5.0e-2, 5.0e-3, 5.0e-3]\n  
looselyCoupledMatchingVarianceVector: [1.0e-4, 1.0e-4, 1.0e-4, 2.0e-3, 2.0e-3, 2.0e-3]\n  tightlyCoupledMatchingVarianceVector: [8.0e-6, 8.0e-6, 8.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]\n  # tightlyCoupledMatchingVarianceVector: [2.0e-7, 2.0e-7, 2.0e-7, 1.5e-5, 1.5e-5, 1.5e-5]\n\n  # Factor covariance matrices (as diagonal vectors)\n  # priorOdometryDiagonalVarianceVector: [1.0e-2, 1.0e-2, M_PI*M_PI, 1.0e+8, 1.0e+8, 1.0e+8]\n  # odometryDiagonalVarianceVector: [1.0e-6, 1.0e-6, 1.0e-6, 1.0e-4, 1.0e-4, 1.0e-4]\n  earlyConstantVelocityDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]\n  constantVelocityDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 2.0e-1, 1.0e-1, 1.0e-1]\n  motionDiagonalVarianceVector: [2.0e-4, 2.0e-4, 1.0e-3, 1.0e-1, 1.0e-2, 1.0e-2]\n  looselyCoupledDetectionVarianceVector: [2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]\n  tightlyCoupledDetectionVarianceVector: [2.0e-4, 2.0e-4, 2.0e-4, 1.5e-3, 1.5e-3, 1.5e-3]\n\n  numberOfEarlySteps: 2\n  numberOfPreLooseCouplingSteps: 6\n  numberOfVelocityConsistencySteps: 4\n  numberOfInterLooseCouplingSteps: 0\n  tightCouplingDetectionErrorThreshold: 26.0\n\n  objectAngularVelocityConsistencyVarianceThreshold: 1.0e-5  # rad^2\n  objectLinearVelocityConsistencyVarianceThreshold: 1.0e-2  # m^2\n\n  # Tracking\n  trackingStepsForLostObject: 3\n\n\n# Navsat (convert GPS coordinates to Cartesian)\nnavsat:\n  frequency: 50\n  wait_for_datum: false\n  delay: 0.0\n  magnetic_declination_radians: 0\n  yaw_offset: 0\n  zero_altitude: true\n  broadcast_utm_transform: false\n  broadcast_utm_transform_as_parent_frame: false\n  publish_filtered_gps: false\n\n# EKF for Navsat\nekf_gps:\n  publish_tf: false\n  map_frame: map\n  odom_frame: odom\n  base_link_frame: base_link\n  world_frame: odom\n\n  frequency: 50\n  two_d_mode: false\n  sensor_timeout: 0.01\n  # -------------------------------------\n  # External IMU:\n  # -------------------------------------\n  imu0: imu_correct\n  # make sure the input is 
aligned with ROS REP105. \"imu_correct\" is manually transformed by myself. EKF can also transform the data using tf between your imu and base_link\n  imu0_config: [false, false, false,\n                true,  true,  true,\n                false, false, false,\n                false, false, true,\n                true,  true,  true]\n  imu0_differential: false\n  imu0_queue_size: 50 \n  imu0_remove_gravitational_acceleration: true\n  # -------------------------------------\n  # Odometry (From Navsat):\n  # -------------------------------------\n  odom0: odometry/gps\n  odom0_config: [true,  true,  true,\n                 false, false, false,\n                 false, false, false,\n                 false, false, false,\n                 false, false, false]\n  odom0_differential: false\n  odom0_queue_size: 10\n\n  #                            x     y     z     r     p     y   x_dot  y_dot  z_dot  r_dot p_dot y_dot x_ddot y_ddot z_ddot\n  process_noise_covariance: [  1.0,  0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    1.0,  0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    10.0, 0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0.03, 0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0.03, 0,    0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0.1,  0,     0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0.25,  0,     0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0.25,  0,     0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,  
  0,    0,     0,     0.04,  0,    0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0.01, 0,    0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0.01, 0,    0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0.5,  0,    0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0.01, 0,      0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0.01,   0,\n                               0,    0,    0,    0,    0,    0,    0,     0,     0,     0,    0,    0,    0,    0,      0.015]\n"
  },
  {
    "path": "include/factor.h",
    "content": "// Copyright 2021 Yu-Kai Lin. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n#pragma once\n\n#include <stdexcept>\n#include <tuple>\n#include <vector>\n\n#include <gtsam/geometry/Point3.h>\n#include <gtsam/geometry/Pose3.h>\n#include <gtsam/linear/NoiseModel.h>\n#include <gtsam/nonlinear/NonlinearFactor.h>\n#include <gtsam/slam/BetweenFactor.h>\n#include <jsk_recognition_msgs/BoundingBox.h>\n#include <Eigen/Dense>\n\nclass MaxMixturePoint3 : public gtsam::Point3 {};\n\nclass Detection {\n public:\n  using BoundingBox = jsk_recognition_msgs::BoundingBox;\n\n protected:\n  BoundingBox box;\n  gtsam::Pose3 pose;\n  gtsam::Vector6 variances;\n\n public:\n  Detection(BoundingBox box, gtsam::Vector6 variances);\n\n  const BoundingBox getBoundingBox() const { return this->box; }\n  const gtsam::Pose3 getPose() const { return this->pose; };\n  gtsam::noiseModel::Diagonal::shared_ptr getDiagonal() const { return gtsam::noiseModel::Diagonal::Variances(this->variances); }\n\n  const double error(const gtsam::Pose3 x) const;\n};\n\nstd::tuple<size_t, double>\ngetDetectionIndexAndError(const gtsam::Pose3 &d,\n                          std::vector<Detection> detections);\n\nclass TightlyCoupledDetectionFactor : public gtsam::NoiseModelFactor2<gtsam::Pose3,\n                                                                      gtsam::Pose3> {\n private:\n  using This = TightlyCoupledDetectionFactor;\n  using Base = gtsam::NoiseModelFactor2<gtsam::Pose3, gtsam::Pose3>;\n\n protected:\n  std::vector<Detection> detections;\n  std::vector<gtsam::noiseModel::Diagonal::shared_ptr> noiseModels;\n\n  mutable int cachedDetectionIndex = -1;\n\n public:\n  /** Constructor */\n  TightlyCoupledDetectionFactor(gtsam::Key robotPoseKey,\n                                gtsam::Key objectPoseKey,\n                                std::vector<Detection> detections)\n      : 
Base(boost::shared_ptr<gtsam::noiseModel::Diagonal>(),\n             robotPoseKey,\n             objectPoseKey),\n        detections(detections) {\n    for (const auto &detection : detections) {\n      this->noiseModels.push_back(detection.getDiagonal());\n    }\n  }\n\n  /** Contructor with cached detection index */\n  TightlyCoupledDetectionFactor(gtsam::Key robotPoseKey,\n                                gtsam::Key objectPoseKey,\n                                std::vector<Detection> detections,\n                                int cachedDetectionIndex)\n      : Base(boost::shared_ptr<gtsam::noiseModel::Diagonal>(),\n             robotPoseKey,\n             objectPoseKey),\n        detections(detections),\n        cachedDetectionIndex(cachedDetectionIndex) {\n    for (const auto &detection : detections) {\n      this->noiseModels.push_back(detection.getDiagonal());\n    }\n  }\n\n  /** print */\n  virtual void\n  print(const std::string &s,\n        const gtsam::KeyFormatter &keyFormatter = gtsam::DefaultKeyFormatter) const {\n    std::cout << s << \"TightlyCoupledDetectionFactor(\"\n              << keyFormatter(this->robotPoseKey()) << \", \"\n              << keyFormatter(this->objectPoseKey()) << \")\\n\";\n  }\n\n  /** equals */\n  virtual bool equals(const NonlinearFactor &expected, double tol = 1e-9) const {\n    const This *e = dynamic_cast<const This *>(&expected);\n    return e != NULL && Base::equals(*e, tol);\n  }\n\n  gtsam::Key robotPoseKey() const { return key1(); }\n  gtsam::Key objectPoseKey() const { return key2(); }\n  const std::vector<Detection> &getDetections() const { return this->detections; }\n  const int getCachedDetectionIndex() const { return this->cachedDetectionIndex; }\n\n  virtual gtsam::Vector\n  evaluateError(const gtsam::Pose3 &robotPose,\n                const gtsam::Pose3 &objectPose,\n                const gtsam::Pose3 &measured,\n                boost::optional<gtsam::Matrix &> H1 = boost::none,\n                
boost::optional<gtsam::Matrix &> H2 = boost::none) const;\n\n  virtual gtsam::Vector\n  evaluateError(const gtsam::Pose3 &robotPose,\n                const gtsam::Pose3 &objectPose,\n                boost::optional<gtsam::Matrix &> H1 = boost::none,\n                boost::optional<gtsam::Matrix &> H2 = boost::none) const override {\n    throw std::runtime_error(\"This evaluateError function is revoked.\");\n  }\n\n  virtual gtsam::Vector unwhitenedError(const gtsam::Pose3 &measured,\n                                        const gtsam::Values &x,\n                                        boost::optional<std::vector<gtsam::Matrix> &> H = boost::none) const;\n\n  virtual gtsam::Vector unwhitenedError(const gtsam::Values &x,\n                                        boost::optional<std::vector<gtsam::Matrix> &> H = boost::none) const override {\n    throw std::runtime_error(\"This unwhitenedError function is revoked.\");\n  };\n\n  virtual boost::shared_ptr<gtsam::GaussianFactor>\n  linearize(const gtsam::Values &c) const override;\n\n  virtual double error(const gtsam::Values &c) const override;\n\n  std::size_t size() const {\n    return 2;\n  }\n\n  virtual size_t dim() const override { return 6; }\n};\n\nclass LooselyCoupledDetectionFactor : public gtsam::NoiseModelFactor1<gtsam::Pose3> {\n private:\n  using This = LooselyCoupledDetectionFactor;\n  using Base = gtsam::NoiseModelFactor1<gtsam::Pose3>;\n\n protected:\n  std::vector<Detection> detections;\n  std::vector<gtsam::noiseModel::Diagonal::shared_ptr> noiseModels;\n\n  gtsam::Key robotPoseKey_;\n\n  mutable int cachedDetectionIndex = -1;\n\n public:\n  /** Constructor */\n  LooselyCoupledDetectionFactor(gtsam::Key robotPoseKey,\n                                gtsam::Key objectPoseKey,\n                                std::vector<Detection> detections)\n      : Base(boost::shared_ptr<gtsam::noiseModel::Diagonal>(), objectPoseKey),\n        robotPoseKey_(robotPoseKey),\n        detections(detections) {\n    
for (const auto &detection : detections) {\n      this->noiseModels.push_back(detection.getDiagonal());\n    }\n  }\n\n  /** Contructor with cached detection index */\n  LooselyCoupledDetectionFactor(gtsam::Key robotPoseKey,\n                                gtsam::Key objectPoseKey,\n                                std::vector<Detection> detections,\n                                int cachedDetectionIndex)\n      : Base(boost::shared_ptr<gtsam::noiseModel::Diagonal>(), objectPoseKey),\n        robotPoseKey_(robotPoseKey),\n        detections(detections),\n        cachedDetectionIndex(cachedDetectionIndex) {\n    for (const auto &detection : detections) {\n      this->noiseModels.push_back(detection.getDiagonal());\n    }\n  }\n\n  /** print */\n  virtual void\n  print(const std::string &s,\n        const gtsam::KeyFormatter &keyFormatter = gtsam::DefaultKeyFormatter) const {\n    std::cout << s << \"LooselyCoupledDetectionFactor(\"\n              << keyFormatter(this->robotPoseKey()) << \",\"\n              << keyFormatter(this->objectPoseKey()) << \")\\n\";\n  }\n\n  /** equals */\n  virtual bool equals(const NonlinearFactor &expected, double tol = 1e-9) const {\n    const This *e = dynamic_cast<const This *>(&expected);\n    return e != NULL && Base::equals(*e, tol);\n  }\n\n  gtsam::Key robotPoseKey() const { return this->robotPoseKey_; }\n  gtsam::Key objectPoseKey() const { return this->key(); }\n  const std::vector<Detection> &getDetections() const { return this->detections; }\n  const int getCachedDetectionIndex() const { return this->cachedDetectionIndex; }\n\n  virtual gtsam::Vector\n  evaluateError(const gtsam::Pose3 &robotPose,\n                const gtsam::Pose3 &objectPose,\n                const gtsam::Pose3 &measured,\n                boost::optional<gtsam::Matrix &> H1 = boost::none) const;\n\n  virtual gtsam::Vector\n  evaluateError(const gtsam::Pose3 &x, boost::optional<gtsam::Matrix &> H = boost::none) const override {\n    throw 
std::runtime_error(\"This evaluateError function is revoked.\");\n  }\n\n  virtual gtsam::Vector unwhitenedError(const gtsam::Pose3 &measured,\n                                        const gtsam::Values &x,\n                                        boost::optional<std::vector<gtsam::Matrix> &> H = boost::none) const;\n\n  virtual gtsam::Vector unwhitenedError(const gtsam::Values &x,\n                                        boost::optional<std::vector<gtsam::Matrix> &> H = boost::none) const override {\n    throw std::runtime_error(\"This unwhitenedError function is revoked.\");\n  };\n\n  virtual boost::shared_ptr<gtsam::GaussianFactor>\n  linearize(const gtsam::Values &c) const override;\n\n  virtual double error(const gtsam::Values &c) const override;\n\n  std::size_t size() const {\n    return 1;\n  }\n\n  virtual size_t dim() const override { return 6; }\n};\n\nclass ConstantVelocityFactor : public gtsam::BetweenFactor<gtsam::Pose3> {\n private:\n  using This = ConstantVelocityFactor;\n  using Base = gtsam::BetweenFactor<gtsam::Pose3>;\n\n public:\n  ConstantVelocityFactor(gtsam::Key key1,\n                         gtsam::Key key2,\n                         const gtsam::SharedNoiseModel &model = nullptr)\n      : Base(key1, key2, gtsam::Pose3::identity(), model) {\n  }\n\n  virtual ~ConstantVelocityFactor() {}\n\n  ///@name Testable\n  ///@{\n\n  /** print */\n  virtual void\n  print(const std::string &s,\n        const gtsam::KeyFormatter &keyFormatter = gtsam::DefaultKeyFormatter) const {\n    std::cout << s << \"ConstantVelocityFactor(\"\n              << keyFormatter(this->key1()) << \",\"\n              << keyFormatter(this->key2()) << \")\\n\";\n    this->noiseModel_->print(\"  noise model: \");\n  }\n\n  /** equals */\n  virtual bool equals(const NonlinearFactor &expected, double tol = 1e-9) const {\n    const This *e = dynamic_cast<const This *>(&expected);\n    return e != NULL && Base::equals(*e, tol);\n  }\n};\n\nclass StablePoseFactor : public 
gtsam::NoiseModelFactor3<gtsam::Pose3,\n                                                         gtsam::Pose3,\n                                                         gtsam::Pose3> {\n private:\n  using This = StablePoseFactor;\n  using Base = gtsam::NoiseModelFactor3<gtsam::Pose3,\n                                        gtsam::Pose3,\n                                        gtsam::Pose3>;\n\n protected:\n  double deltaTime;\n\n public:\n  /** Constructor */\n  StablePoseFactor(gtsam::Key previousPoseKey,\n                   gtsam::Key velocityKey,\n                   gtsam::Key nextPoseKey,\n                   double deltaTime,\n                   const gtsam::SharedNoiseModel &model = nullptr)\n      : Base(model, previousPoseKey, velocityKey, nextPoseKey), deltaTime(deltaTime) {\n  }\n\n  /** print */\n  virtual void\n  print(const std::string &s,\n        const gtsam::KeyFormatter &keyFormatter = gtsam::DefaultKeyFormatter) const {\n    std::cout << s << \"StablePoseFactor(\"\n              << keyFormatter(this->key1()) << \",\"\n              << keyFormatter(this->key2()) << \",\"\n              << keyFormatter(this->key3()) << \")\"\n              << \"  dt = \" << this->deltaTime << '\\n';\n    this->noiseModel_->print(\"  noise model: \");\n  }\n\n  /** equals */\n  virtual bool equals(const NonlinearFactor &expected, double tol = 1e-9) const {\n    const This *e = dynamic_cast<const This *>(&expected);\n    return e != NULL && Base::equals(*e, tol);\n  }\n\n  inline gtsam::Key previousPoseKey() const { return key1(); }\n  inline gtsam::Key velocityKey() const { return key2(); }\n  inline gtsam::Key nextPoseKey() const { return key3(); }\n\n  virtual gtsam::Vector\n  evaluateError(const gtsam::Pose3 &previousPose,\n                const gtsam::Pose3 &velocity,\n                const gtsam::Pose3 &nextPose,\n                boost::optional<gtsam::Matrix &> H1 = boost::none,\n                boost::optional<gtsam::Matrix &> H2 = boost::none,\n              
  boost::optional<gtsam::Matrix &> H3 = boost::none) const override;\n};"
  },
  {
    "path": "include/solver.h",
    "content": "// Copyright 2022 Yu-Kai Lin. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n#pragma once\n\n#include <gtsam/nonlinear/ISAM2.h>\n\n#include \"factor.h\"\n\nclass MaxMixtureISAM2 : public gtsam::ISAM2 {\n public:\n  explicit MaxMixtureISAM2(const gtsam::ISAM2Params& params) : gtsam::ISAM2(params){};\n\n  MaxMixtureISAM2() : gtsam::ISAM2(){};\n\n  virtual gtsam::ISAM2Result update(\n      const gtsam::NonlinearFactorGraph& newFactors                            = gtsam::NonlinearFactorGraph(),\n      const gtsam::Values& newTheta                                            = gtsam::Values(),\n      const gtsam::FactorIndices& removeFactorIndices                          = gtsam::FactorIndices(),\n      const boost::optional<gtsam::FastMap<gtsam::Key, int> >& constrainedKeys = boost::none,\n      const boost::optional<gtsam::FastList<gtsam::Key> >& noRelinKeys         = boost::none,\n      const boost::optional<gtsam::FastList<gtsam::Key> >& extraReelimKeys     = boost::none,\n      bool force_relinearize                                                   = false) override;\n\n  /**\n   * Add variable, update, and relinearize if need; almost everything is same as\n   * the original ISAM2 solver, except for the relinearization condition for\n   * max-mixture models.\n   */\n  virtual gtsam::ISAM2Result\n  update(const gtsam::NonlinearFactorGraph& newFactors,\n         const gtsam::Values& newTheta,\n         const gtsam::ISAM2UpdateParams& updateParams) override;\n};"
  },
  {
    "path": "include/utility.h",
    "content": "#pragma once\n#ifndef _UTILITY_LIDAR_ODOMETRY_H_\n#define _UTILITY_LIDAR_ODOMETRY_H_\n\n#include <ros/ros.h>\n\n#include <nav_msgs/Odometry.h>\n#include <nav_msgs/Path.h>\n#include <sensor_msgs/Imu.h>\n#include <sensor_msgs/NavSatFix.h>\n#include <sensor_msgs/PointCloud2.h>\n#include <std_msgs/Float64MultiArray.h>\n#include <std_msgs/Header.h>\n#include <visualization_msgs/Marker.h>\n#include <visualization_msgs/MarkerArray.h>\n\n#include <opencv/cv.h>\n\n#include <pcl/common/common.h>\n#include <pcl/common/transforms.h>\n#include <pcl/filters/crop_box.h>\n#include <pcl/filters/filter.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/io/pcd_io.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <pcl/range_image/range_image.h>\n#include <pcl/registration/icp.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <pcl/search/impl/search.hpp>\n\n#include <tf/LinearMath/Quaternion.h>\n#include <tf/transform_broadcaster.h>\n#include <tf/transform_datatypes.h>\n#include <tf/transform_listener.h>\n\n#include <algorithm>\n#include <array>\n#include <cfloat>\n#include <cmath>\n#include <ctime>\n#include <deque>\n#include <fstream>\n#include <iomanip>\n#include <iostream>\n#include <iterator>\n#include <limits>\n#include <mutex>\n#include <queue>\n#include <sstream>\n#include <string>\n#include <thread>\n#include <vector>\n\nusing namespace std;\n\ntypedef pcl::PointXYZI PointType;\n\nenum class SensorType { VELODYNE,\n                        OUSTER };\n\nclass ParamServer {\n public:\n  ros::NodeHandle nh;\n\n  std::string robot_id;\n\n  // Topics\n  string pointCloudTopic;\n  string imuTopic;\n  string odomTopic;\n  string gpsTopic;\n\n  // Frames\n  string lidarFrame;\n  string baselinkFrame;\n  string odometryFrame;\n  string mapFrame;\n\n  // GPS Settings\n  bool useImuHeadingInitialization;\n  bool useGpsElevation;\n  float gpsCovThreshold;\n  float poseCovThreshold;\n\n  // Save pcd\n  
bool savePCD;\n  string savePCDDirectory;\n\n  // Lidar Sensor Configuration\n  SensorType sensor;\n  int N_SCAN;\n  int Horizon_SCAN;\n  int downsampleRate;\n  float lidarMinRange;\n  float lidarMaxRange;\n\n  // IMU\n  float imuAccNoise;\n  float imuGyrNoise;\n  float imuAccBiasN;\n  float imuGyrBiasN;\n  float imuGravity;\n  float imuRPYWeight;\n  vector<double> extRotV;\n  vector<double> extRPYV;\n  vector<double> extTransV;\n  Eigen::Matrix3d extRot;\n  Eigen::Matrix3d extRPY;\n  Eigen::Vector3d extTrans;\n  Eigen::Quaterniond extQRPY;\n\n  // LOAM\n  float edgeThreshold;\n  float surfThreshold;\n  int edgeFeatureMinValidNum;\n  int surfFeatureMinValidNum;\n\n  // voxel filter paprams\n  float odometrySurfLeafSize;\n  float mappingCornerLeafSize;\n  float mappingSurfLeafSize;\n\n  float z_tollerance;\n  float rotation_tollerance;\n\n  // CPU Params\n  int numberOfCores;\n  double mappingProcessInterval;\n\n  // Surrounding map\n  float surroundingkeyframeAddingDistThreshold;\n  float surroundingkeyframeAddingAngleThreshold;\n  float surroundingKeyframeDensity;\n  float surroundingKeyframeSearchRadius;\n\n  // Loop closure\n  bool loopClosureEnableFlag;\n  float loopClosureFrequency;\n  int surroundingKeyframeSize;\n  float historyKeyframeSearchRadius;\n  float historyKeyframeSearchTimeDiff;\n  int historyKeyframeSearchNum;\n  float historyKeyframeFitnessScore;\n\n  // global map visualization radius\n  float globalMapVisualizationSearchRadius;\n  float globalMapVisualizationPoseDensity;\n  float globalMapVisualizationLeafSize;\n\n  // Dynamic object data association\n  float detectionMatchThreshold;\n  vector<double> dataAssociationVarianceVector;\n  vector<double> earlyLooselyCoupledMatchingVarianceVector;\n  vector<double> looselyCoupledMatchingVarianceVector;\n  vector<double> tightlyCoupledMatchingVarianceVector;\n\n  Eigen::Matrix<double, 6, 1> dataAssociationVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> 
earlyLooselyCoupledMatchingVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> looselyCoupledMatchingVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> tightlyCoupledMatchingVarianceEigenVector;\n\n  // Factor covariance matrices (presented as diagonal vectors)\n  vector<double> priorOdometryDiagonalVarianceVector;\n  vector<double> odometryDiagonalVarianceVector;\n  vector<double> earlyConstantVelocityDiagonalVarianceVector;\n  vector<double> constantVelocityDiagonalVarianceVector;\n  vector<double> motionDiagonalVarianceVector;\n  vector<double> looselyCoupledDetectionVarianceVector;\n  vector<double> tightlyCoupledDetectionVarianceVector;\n\n  Eigen::Matrix<double, 6, 1> priorOdometryDiagonalVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> odometryDiagonalVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> earlyConstantVelocityDiagonalVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> constantVelocityDiagonalVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> motionDiagonalVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> looselyCoupledDetectionVarianceEigenVector;\n  Eigen::Matrix<double, 6, 1> tightlyCoupledDetectionVarianceEigenVector;\n\n  // Gentle coupling options\n  int numberOfEarlySteps;\n  int numberOfPreLooseCouplingSteps;\n  int numberOfVelocityConsistencySteps;\n  int numberOfInterLooseCouplingSteps;\n  float tightCouplingDetectionErrorThreshold;\n\n  float objectAngularVelocityConsistencyVarianceThreshold;\n  float objectLinearVelocityConsistencyVarianceThreshold;\n\n  // Tracking\n  int trackingStepsForLostObject;\n\n  ParamServer() {\n    nh.param<std::string>(\"/robot_id\", robot_id, \"roboat\");\n\n    nh.param<std::string>(\"lio_segmot/pointCloudTopic\", pointCloudTopic, \"points_raw\");\n    nh.param<std::string>(\"lio_segmot/imuTopic\", imuTopic, \"imu_correct\");\n    nh.param<std::string>(\"lio_segmot/odomTopic\", odomTopic, \"odometry/imu\");\n    nh.param<std::string>(\"lio_segmot/gpsTopic\", gpsTopic, \"odometry/gps\");\n\n    
nh.param<std::string>(\"lio_segmot/lidarFrame\", lidarFrame, \"base_link\");\n    nh.param<std::string>(\"lio_segmot/baselinkFrame\", baselinkFrame, \"base_link\");\n    nh.param<std::string>(\"lio_segmot/odometryFrame\", odometryFrame, \"odom\");\n    nh.param<std::string>(\"lio_segmot/mapFrame\", mapFrame, \"map\");\n\n    nh.param<bool>(\"lio_segmot/useImuHeadingInitialization\", useImuHeadingInitialization, false);\n    nh.param<bool>(\"lio_segmot/useGpsElevation\", useGpsElevation, false);\n    nh.param<float>(\"lio_segmot/gpsCovThreshold\", gpsCovThreshold, 2.0);\n    nh.param<float>(\"lio_segmot/poseCovThreshold\", poseCovThreshold, 25.0);\n\n    nh.param<bool>(\"lio_segmot/savePCD\", savePCD, false);\n    nh.param<std::string>(\"lio_segmot/savePCDDirectory\", savePCDDirectory, \"/Downloads/LOAM/\");\n\n    std::string sensorStr;\n    nh.param<std::string>(\"lio_segmot/sensor\", sensorStr, \"\");\n    if (sensorStr == \"velodyne\") {\n      sensor = SensorType::VELODYNE;\n    } else if (sensorStr == \"ouster\") {\n      sensor = SensorType::OUSTER;\n    } else {\n      ROS_ERROR_STREAM(\n          \"Invalid sensor type (must be either 'velodyne' or 'ouster'): \" << sensorStr);\n      ros::shutdown();\n    }\n\n    nh.param<int>(\"lio_segmot/N_SCAN\", N_SCAN, 16);\n    nh.param<int>(\"lio_segmot/Horizon_SCAN\", Horizon_SCAN, 1800);\n    nh.param<int>(\"lio_segmot/downsampleRate\", downsampleRate, 1);\n    nh.param<float>(\"lio_segmot/lidarMinRange\", lidarMinRange, 1.0);\n    nh.param<float>(\"lio_segmot/lidarMaxRange\", lidarMaxRange, 1000.0);\n\n    nh.param<float>(\"lio_segmot/imuAccNoise\", imuAccNoise, 0.01);\n    nh.param<float>(\"lio_segmot/imuGyrNoise\", imuGyrNoise, 0.001);\n    nh.param<float>(\"lio_segmot/imuAccBiasN\", imuAccBiasN, 0.0002);\n    nh.param<float>(\"lio_segmot/imuGyrBiasN\", imuGyrBiasN, 0.00003);\n    nh.param<float>(\"lio_segmot/imuGravity\", imuGravity, 9.80511);\n    nh.param<float>(\"lio_segmot/imuRPYWeight\", imuRPYWeight, 
0.01);\n    nh.param<vector<double>>(\"lio_segmot/extrinsicRot\", extRotV, vector<double>());\n    nh.param<vector<double>>(\"lio_segmot/extrinsicRPY\", extRPYV, vector<double>());\n    nh.param<vector<double>>(\"lio_segmot/extrinsicTrans\", extTransV, vector<double>());\n    extRot   = Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(extRotV.data(), 3, 3);\n    extRPY   = Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(extRPYV.data(), 3, 3);\n    extTrans = Eigen::Map<const Eigen::Matrix<double, -1, -1, Eigen::RowMajor>>(extTransV.data(), 3, 1);\n    extQRPY  = Eigen::Quaterniond(extRPY);\n\n    nh.param<float>(\"lio_segmot/edgeThreshold\", edgeThreshold, 0.1);\n    nh.param<float>(\"lio_segmot/surfThreshold\", surfThreshold, 0.1);\n    nh.param<int>(\"lio_segmot/edgeFeatureMinValidNum\", edgeFeatureMinValidNum, 10);\n    nh.param<int>(\"lio_segmot/surfFeatureMinValidNum\", surfFeatureMinValidNum, 100);\n\n    nh.param<float>(\"lio_segmot/odometrySurfLeafSize\", odometrySurfLeafSize, 0.2);\n    nh.param<float>(\"lio_segmot/mappingCornerLeafSize\", mappingCornerLeafSize, 0.2);\n    nh.param<float>(\"lio_segmot/mappingSurfLeafSize\", mappingSurfLeafSize, 0.2);\n\n    nh.param<float>(\"lio_segmot/z_tollerance\", z_tollerance, FLT_MAX);\n    nh.param<float>(\"lio_segmot/rotation_tollerance\", rotation_tollerance, FLT_MAX);\n\n    nh.param<int>(\"lio_segmot/numberOfCores\", numberOfCores, 2);\n    nh.param<double>(\"lio_segmot/mappingProcessInterval\", mappingProcessInterval, 0.15);\n\n    nh.param<float>(\"lio_segmot/surroundingkeyframeAddingDistThreshold\", surroundingkeyframeAddingDistThreshold, 1.0);\n    nh.param<float>(\"lio_segmot/surroundingkeyframeAddingAngleThreshold\", surroundingkeyframeAddingAngleThreshold, 0.2);\n    nh.param<float>(\"lio_segmot/surroundingKeyframeDensity\", surroundingKeyframeDensity, 1.0);\n    nh.param<float>(\"lio_segmot/surroundingKeyframeSearchRadius\", surroundingKeyframeSearchRadius, 50.0);\n\n    
nh.param<bool>(\"lio_segmot/loopClosureEnableFlag\", loopClosureEnableFlag, false);\n    nh.param<float>(\"lio_segmot/loopClosureFrequency\", loopClosureFrequency, 1.0);\n    nh.param<int>(\"lio_segmot/surroundingKeyframeSize\", surroundingKeyframeSize, 50);\n    nh.param<float>(\"lio_segmot/historyKeyframeSearchRadius\", historyKeyframeSearchRadius, 10.0);\n    nh.param<float>(\"lio_segmot/historyKeyframeSearchTimeDiff\", historyKeyframeSearchTimeDiff, 30.0);\n    nh.param<int>(\"lio_segmot/historyKeyframeSearchNum\", historyKeyframeSearchNum, 25);\n    nh.param<float>(\"lio_segmot/historyKeyframeFitnessScore\", historyKeyframeFitnessScore, 0.3);\n\n    nh.param<float>(\"lio_segmot/globalMapVisualizationSearchRadius\", globalMapVisualizationSearchRadius, 1e3);\n    nh.param<float>(\"lio_segmot/globalMapVisualizationPoseDensity\", globalMapVisualizationPoseDensity, 10.0);\n    nh.param<float>(\"lio_segmot/globalMapVisualizationLeafSize\", globalMapVisualizationLeafSize, 1.0);\n\n    nh.param<float>(\"lio_segmot/detectionMatchThreshold\", detectionMatchThreshold, 19.5);\n    nh.param<vector<double>>(\"lio_segmot/dataAssociationVarianceVector\", dataAssociationVarianceVector, {1e-4, 1e-4, 1e-4, 1e-2, 2e-3, 2e-3});\n    nh.param<vector<double>>(\"lio_segmot/earlyLooselyCoupledMatchingVarianceVector\", earlyLooselyCoupledMatchingVarianceVector, {1e-4, 1e-4, 1e-4, 1e-2, 2e-3, 2e-3});\n    nh.param<vector<double>>(\"lio_segmot/looselyCoupledMatchingVarianceVector\", looselyCoupledMatchingVarianceVector, {1e-4, 1e-4, 1e-4, 1e-2, 2e-3, 2e-3});\n    nh.param<vector<double>>(\"lio_segmot/tightlyCoupledMatchingVarianceVector\", tightlyCoupledMatchingVarianceVector, {1e-4, 1e-4, 1e-4, 1e-2, 2e-3, 2e-3});\n\n    nh.param<vector<double>>(\"lio_segmot/priorOdometryDiagonalVarianceVector\", priorOdometryDiagonalVarianceVector, {1e-2, 1e-2, M_PI * M_PI, 1e8, 1e8, 1e8});\n    nh.param<vector<double>>(\"lio_segmot/odometryDiagonalVarianceVector\", odometryDiagonalVarianceVector, 
{1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4});\n    nh.param<vector<double>>(\"lio_segmot/earlyConstantVelocityDiagonalVarianceVector\", earlyConstantVelocityDiagonalVarianceVector, {1e-3, 1e-3, 1e-3, 2e-1, 1e-1, 1e-1});\n    nh.param<vector<double>>(\"lio_segmot/constantVelocityDiagonalVarianceVector\", constantVelocityDiagonalVarianceVector, {1e-3, 1e-3, 1e-3, 2e-1, 1e-1, 1e-1});\n    nh.param<vector<double>>(\"lio_segmot/motionDiagonalVarianceVector\", motionDiagonalVarianceVector, {1e-4, 1e-4, 1e-2, 1e-1, 1e-2, 1e-2});\n    nh.param<vector<double>>(\"lio_segmot/looselyCoupledDetectionVarianceVector\", looselyCoupledDetectionVarianceVector, {1e-4, 1e-4, 1e-4, 1e-2, 2e-3, 2e-3});\n    nh.param<vector<double>>(\"lio_segmot/tightlyCoupledDetectionVarianceVector\", tightlyCoupledDetectionVarianceVector, {1e-4, 1e-4, 1e-4, 1e-2, 2e-3, 2e-3});\n\n    dataAssociationVarianceEigenVector             = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(dataAssociationVarianceVector.data());\n    earlyLooselyCoupledMatchingVarianceEigenVector = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(earlyLooselyCoupledMatchingVarianceVector.data());\n    looselyCoupledMatchingVarianceEigenVector      = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(looselyCoupledMatchingVarianceVector.data());\n    tightlyCoupledMatchingVarianceEigenVector      = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(tightlyCoupledMatchingVarianceVector.data());\n\n    priorOdometryDiagonalVarianceEigenVector         = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(priorOdometryDiagonalVarianceVector.data());\n    odometryDiagonalVarianceEigenVector              = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(odometryDiagonalVarianceVector.data());\n    earlyConstantVelocityDiagonalVarianceEigenVector = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(earlyConstantVelocityDiagonalVarianceVector.data());\n    constantVelocityDiagonalVarianceEigenVector      = Eigen::Map<const Eigen::Matrix<double, 6, 
1>>(constantVelocityDiagonalVarianceVector.data());\n    motionDiagonalVarianceEigenVector                = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(motionDiagonalVarianceVector.data());\n    looselyCoupledDetectionVarianceEigenVector       = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(looselyCoupledDetectionVarianceVector.data());\n    tightlyCoupledDetectionVarianceEigenVector       = Eigen::Map<const Eigen::Matrix<double, 6, 1>>(tightlyCoupledDetectionVarianceVector.data());\n\n    nh.param<int>(\"lio_segmot/numberOfEarlySteps\", numberOfEarlySteps, 1);\n    nh.param<int>(\"lio_segmot/numberOfPreLooseCouplingSteps\", numberOfPreLooseCouplingSteps, 10);\n    nh.param<int>(\"lio_segmot/numberOfVelocityConsistencySteps\", numberOfVelocityConsistencySteps, 4);\n    nh.param<int>(\"lio_segmot/numberOfInterLooseCouplingSteps\", numberOfInterLooseCouplingSteps, 0);\n    nh.param<float>(\"lio_segmot/tightCouplingDetectionErrorThreshold\", tightCouplingDetectionErrorThreshold, 500.0);\n\n    nh.param<float>(\"lio_segmot/objectAngularVelocityConsistencyVarianceThreshold\", objectAngularVelocityConsistencyVarianceThreshold, 1e-5);\n    nh.param<float>(\"lio_segmot/objectLinearVelocityConsistencyVarianceThreshold\", objectLinearVelocityConsistencyVarianceThreshold, 1e-2);\n\n    nh.param<int>(\"lio_segmot/trackingStepsForLostObject\", trackingStepsForLostObject, 3);\n\n    usleep(100);\n  }\n\n  sensor_msgs::Imu imuConverter(const sensor_msgs::Imu &imu_in) {\n    sensor_msgs::Imu imu_out = imu_in;\n    // rotate acceleration\n    Eigen::Vector3d acc(imu_in.linear_acceleration.x, imu_in.linear_acceleration.y, imu_in.linear_acceleration.z);\n    acc                           = extRot * acc;\n    imu_out.linear_acceleration.x = acc.x();\n    imu_out.linear_acceleration.y = acc.y();\n    imu_out.linear_acceleration.z = acc.z();\n    // rotate gyroscope\n    Eigen::Vector3d gyr(imu_in.angular_velocity.x, imu_in.angular_velocity.y, imu_in.angular_velocity.z);\n    gyr      
                  = extRot * gyr;\n    imu_out.angular_velocity.x = gyr.x();\n    imu_out.angular_velocity.y = gyr.y();\n    imu_out.angular_velocity.z = gyr.z();\n    // rotate roll pitch yaw\n    Eigen::Quaterniond q_from(imu_in.orientation.w, imu_in.orientation.x, imu_in.orientation.y, imu_in.orientation.z);\n    Eigen::Quaterniond q_final = q_from * extQRPY;\n    imu_out.orientation.x      = q_final.x();\n    imu_out.orientation.y      = q_final.y();\n    imu_out.orientation.z      = q_final.z();\n    imu_out.orientation.w      = q_final.w();\n\n    if (sqrt(q_final.x() * q_final.x() + q_final.y() * q_final.y() + q_final.z() * q_final.z() + q_final.w() * q_final.w()) < 0.1) {\n      ROS_ERROR(\"Invalid quaternion, please use a 9-axis IMU!\");\n      ros::shutdown();\n    }\n\n    return imu_out;\n  }\n};\n\nsensor_msgs::PointCloud2 publishCloud(ros::Publisher *thisPub, pcl::PointCloud<PointType>::Ptr thisCloud, ros::Time thisStamp, std::string thisFrame) {\n  sensor_msgs::PointCloud2 tempCloud;\n  pcl::toROSMsg(*thisCloud, tempCloud);\n  tempCloud.header.stamp    = thisStamp;\n  tempCloud.header.frame_id = thisFrame;\n  if (thisPub->getNumSubscribers() != 0)\n    thisPub->publish(tempCloud);\n  return tempCloud;\n}\n\ntemplate <typename T>\ndouble ROS_TIME(T msg) {\n  return msg->header.stamp.toSec();\n}\n\ntemplate <typename T>\nvoid imuAngular2rosAngular(sensor_msgs::Imu *thisImuMsg, T *angular_x, T *angular_y, T *angular_z) {\n  *angular_x = thisImuMsg->angular_velocity.x;\n  *angular_y = thisImuMsg->angular_velocity.y;\n  *angular_z = thisImuMsg->angular_velocity.z;\n}\n\ntemplate <typename T>\nvoid imuAccel2rosAccel(sensor_msgs::Imu *thisImuMsg, T *acc_x, T *acc_y, T *acc_z) {\n  *acc_x = thisImuMsg->linear_acceleration.x;\n  *acc_y = thisImuMsg->linear_acceleration.y;\n  *acc_z = thisImuMsg->linear_acceleration.z;\n}\n\ntemplate <typename T>\nvoid imuRPY2rosRPY(sensor_msgs::Imu *thisImuMsg, T *rosRoll, T *rosPitch, T *rosYaw) {\n  double imuRoll, 
imuPitch, imuYaw;\n  tf::Quaternion orientation;\n  tf::quaternionMsgToTF(thisImuMsg->orientation, orientation);\n  tf::Matrix3x3(orientation).getRPY(imuRoll, imuPitch, imuYaw);\n\n  *rosRoll  = imuRoll;\n  *rosPitch = imuPitch;\n  *rosYaw   = imuYaw;\n}\n\nfloat pointDistance(PointType p) {\n  return sqrt(p.x * p.x + p.y * p.y + p.z * p.z);\n}\n\nfloat pointDistance(PointType p1, PointType p2) {\n  return sqrt((p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y) + (p1.z - p2.z) * (p1.z - p2.z));\n}\n\n#endif\n"
  },
  {
    "path": "launch/debug.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <!-- Parameters -->\n    <rosparam file=\"$(find lio_segmot)/config/params.yaml\" command=\"load\" />\n\n    <!--- LOAM -->\n    <include file=\"$(find lio_segmot)/launch/include/module_loam_debug.launch\" />\n\n    <!--- Robot State TF -->\n    <include file=\"$(find lio_segmot)/launch/include/module_robot_state_publisher.launch\" />\n\n    <!--- Run Navsat -->\n    <include file=\"$(find lio_segmot)/launch/include/module_navsat.launch\" />\n\n    <!--- Run Rviz-->\n    <include file=\"$(find lio_segmot)/launch/include/module_rviz.launch\" />\n\n</launch>\n"
  },
  {
    "path": "launch/gdbserver_debug.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <!-- Parameters -->\n    <rosparam file=\"$(find lio_segmot)/config/params.yaml\" command=\"load\" />\n\n    <!--- LOAM -->\n    <include file=\"$(find lio_segmot)/launch/include/module_loam_gdbserver_debug.launch\" />\n\n    <!--- Robot State TF -->\n    <include file=\"$(find lio_segmot)/launch/include/module_robot_state_publisher.launch\" />\n\n    <!--- Run Navsat -->\n    <include file=\"$(find lio_segmot)/launch/include/module_navsat.launch\" />\n\n    <!--- Run Rviz-->\n    <include file=\"$(find lio_segmot)/launch/include/module_rviz.launch\" />\n\n</launch>\n"
  },
  {
    "path": "launch/include/config/robot.urdf.xacro",
    "content": "<?xml version=\"1.0\"?>\n<robot name=\"lio\" xmlns:xacro=\"http://tixiaoshan.github.io/\">\n  <xacro:property name=\"PI\" value=\"3.1415926535897931\" />\n\n  <link name=\"chassis_link\"></link>\n\n  <link name=\"base_link\"></link>\n  <joint name=\"base_link_joint\" type=\"fixed\">\n    <parent link=\"base_link\"/>\n    <child link=\"chassis_link\" />\n    <origin xyz=\"0 0 0\" rpy=\"0 0 0\" />\n  </joint>\n\n  <link name=\"imu_link\"> </link>\n  <joint name=\"imu_joint\" type=\"fixed\">\n    <parent link=\"chassis_link\" />\n    <child link=\"imu_link\" />\n    <origin xyz=\"0 0 0\" rpy=\"0 0 0\" />\n  </joint>\n\n  <link name=\"velodyne\"> </link>\n  <joint name=\"velodyne_joint\" type=\"fixed\">\n    <parent link=\"chassis_link\" />\n    <child link=\"velodyne\" />\n    <origin xyz=\"0 0 0\" rpy=\"0 0 0\" />\n  </joint>\n\n  <link name=\"navsat_link\"> </link>\n  <joint name=\"navsat_joint\" type=\"fixed\">\n    <parent link=\"chassis_link\" />\n    <child link=\"navsat_link\" />\n    <origin xyz=\"0 0 0\" rpy=\"0 0 0\" />\n  </joint>\n\n</robot>\n"
  },
  {
    "path": "launch/include/config/rviz.rviz",
    "content": "Panels:\n  - Class: rviz/Displays\n    Help Height: 191\n    Name: Displays\n    Property Tree Widget:\n      Expanded:\n        - /Global Options1\n        - /Status1\n        - /links1\n        - /Odometry1\n        - /Point Cloud1\n        - /Feature Cloud1\n        - /Mapping1\n        - /Mapping1/Map (global)1\n        - /Tracking1\n        - /Tracking1/Dynamic Objects1\n      Splitter Ratio: 0.5940054655075073\n    Tree Height: 686\n  - Class: rviz/Selection\n    Name: Selection\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: Views\n    Splitter Ratio: 0.5\n  - Class: rviz/Tool Properties\n    Expanded: ~\n    Name: Tool Properties\n    Splitter Ratio: 0.5\nPreferences:\n  PromptSaveOnExit: true\nToolbars:\n  toolButtonStyle: 2\nVisualization Manager:\n  Class: \"\"\n  Displays:\n    - Class: rviz/Group\n      Displays:\n        - Class: rviz/TF\n          Enabled: false\n          Frame Timeout: 999\n          Frames:\n            All Enabled: false\n          Marker Scale: 3\n          Name: TF\n          Show Arrows: true\n          Show Axes: true\n          Show Names: true\n          Tree:\n            {}\n          Update Interval: 0\n          Value: false\n        - Class: rviz/Axes\n          Enabled: true\n          Length: 2\n          Name: map\n          Radius: 0.5\n          Reference Frame: map\n          Value: true\n        - Class: rviz/Axes\n          Enabled: true\n          Length: 1\n          Name: base_link\n          Radius: 0.30000001192092896\n          Reference Frame: base_link\n          Value: true\n        - Class: rviz/Axes\n          Enabled: true\n          Length: 1\n          Name: lidar_link\n          Radius: 0.10000000149011612\n          Reference Frame: lidar_link\n          Value: true\n      Enabled: true\n      Name: links\n    - Class: rviz/Group\n      Displays:\n        - Angle Tolerance: 0.10000000149011612\n          Class: rviz/Odometry\n          Covariance:\n       
     Orientation:\n              Alpha: 0.5\n              Color: 255; 255; 127\n              Color Style: Unique\n              Frame: Local\n              Offset: 1\n              Scale: 1\n              Value: true\n            Position:\n              Alpha: 0.30000001192092896\n              Color: 204; 51; 204\n              Scale: 1\n              Value: true\n            Value: false\n          Enabled: true\n          Keep: 300\n          Name: Odom IMU\n          Position Tolerance: 0.10000000149011612\n          Shape:\n            Alpha: 1\n            Axes Length: 0.5\n            Axes Radius: 0.10000000149011612\n            Color: 255; 25; 0\n            Head Length: 0.30000001192092896\n            Head Radius: 0.10000000149011612\n            Shaft Length: 1\n            Shaft Radius: 0.05000000074505806\n            Value: Axes\n          Topic: /odometry/imu\n          Unreliable: false\n          Value: true\n        - Angle Tolerance: 0.10000000149011612\n          Class: rviz/Odometry\n          Covariance:\n            Orientation:\n              Alpha: 0.5\n              Color: 255; 255; 127\n              Color Style: Unique\n              Frame: Local\n              Offset: 1\n              Scale: 1\n              Value: true\n            Position:\n              Alpha: 0.30000001192092896\n              Color: 204; 51; 204\n              Scale: 1\n              Value: true\n            Value: false\n          Enabled: true\n          Keep: 300\n          Name: Odom GPS\n          Position Tolerance: 0.10000000149011612\n          Shape:\n            Alpha: 1\n            Axes Length: 1\n            Axes Radius: 0.30000001192092896\n            Color: 255; 25; 0\n            Head Length: 0.30000001192092896\n            Head Radius: 0.10000000149011612\n            Shaft Length: 1\n            Shaft Radius: 0.05000000074505806\n            Value: Axes\n          Topic: /odometry/gps\n          Unreliable: false\n          Value: true\n    
    - Angle Tolerance: 0.10000000149011612\n          Class: rviz/Odometry\n          Covariance:\n            Orientation:\n              Alpha: 0.5\n              Color: 255; 255; 127\n              Color Style: Unique\n              Frame: Local\n              Offset: 1\n              Scale: 1\n              Value: true\n            Position:\n              Alpha: 0.30000001192092896\n              Color: 204; 51; 204\n              Scale: 1\n              Value: true\n            Value: false\n          Enabled: false\n          Keep: 300\n          Name: Odom lidar\n          Position Tolerance: 0.10000000149011612\n          Shape:\n            Alpha: 1\n            Axes Length: 0.25\n            Axes Radius: 0.10000000149011612\n            Color: 255; 25; 0\n            Head Length: 0.30000001192092896\n            Head Radius: 0.10000000149011612\n            Shaft Length: 1\n            Shaft Radius: 0.05000000074505806\n            Value: Axes\n          Topic: /lio_segmot/mapping/odometry\n          Unreliable: false\n          Value: false\n      Enabled: false\n      Name: Odometry\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Autocompute Intensity Bounds: false\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n          Decay Time: 0\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 191; 64; 64\n          Max Intensity: 0.9900000095367432\n          Min Color: 191; 64; 64\n          Min Intensity: 0\n          Name: Velodyne\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: false\n          Size (Pixels): 2\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /lio_segmot/mapping/cloud_deskewed\n         
 Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: false\n          Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n          Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: Reg Cloud\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 3\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /lio_segmot/mapping/cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n        - Class: jsk_rviz_plugin/BoundingBoxArray\n          Enabled: true\n          Name: Detections\n          Topic: /lio_segmot/mapping/detections\n          Unreliable: false\n          Value: true\n          alpha: 0.6000000238418579\n          color: 171; 255; 152\n          coloring: Flat color\n          line width: 0.15000000596046448\n          only edge: false\n          show coords: false\n      Enabled: true\n      Name: Point Cloud\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 0; 255; 0\n          Color Transformer: FlatColor\n          Decay Time: 0\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: Edge Feature\n   
       Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 5\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /lio_segmot/feature/cloud_corner\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 85; 255\n          Color Transformer: FlatColor\n          Decay Time: 0\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: Plannar Feature\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 3\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /lio_segmot/feature/cloud_surface\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n      Enabled: false\n      Name: Feature Cloud\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 0.30000001192092896\n          Autocompute Intensity Bounds: false\n          Autocompute Value Bounds:\n            Max Value: 7.4858574867248535\n            Min Value: -1.2309398651123047\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n          Decay Time: 300\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 46; 52; 54\n          Max Intensity: 0.9900000095367432\n          Min Color: 46; 52; 54\n          Min Intensity: 0\n          Name: Map (cloud)\n          Position Transformer: 
XYZ\n          Queue Size: 10\n          Selectable: false\n          Size (Pixels): 2\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /lio_segmot/mapping/cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: false\n          Value: false\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 36.61034393310547\n            Min Value: -2.3476977348327637\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: AxisColor\n          Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: Map (local)\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 2\n          Size (m): 0.10000000149011612\n          Style: Flat Squares\n          Topic: /lio_segmot/mapping/map_local\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n          Decay Time: 0\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 75; 75; 75\n          Min Color: 75; 75; 75\n          Name: Map (global)\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 2\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /lio_segmot/mapping/map_global\n        
  Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: false\n          Value: true\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 85; 255; 255\n          Enabled: true\n          Head Diameter: 0.30000001192092896\n          Head Length: 0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Billboards\n          Line Width: 0.10000000149011612\n          Name: Path (global)\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /lio_segmot/mapping/path\n          Unreliable: false\n          Value: true\n        - Alpha: 1\n          Buffer Length: 1\n          Class: rviz/Path\n          Color: 255; 85; 255\n          Enabled: true\n          Head Diameter: 0.30000001192092896\n          Head Length: 0.20000000298023224\n          Length: 0.30000001192092896\n          Line Style: Billboards\n          Line Width: 0.10000000149011612\n          Name: Path (local)\n          Offset:\n            X: 0\n            Y: 0\n            Z: 0\n          Pose Color: 255; 85; 255\n          Pose Style: None\n          Radius: 0.029999999329447746\n          Shaft Diameter: 0.10000000149011612\n          Shaft Length: 0.10000000149011612\n          Topic: /lio_segmot/imu/path\n          Unreliable: false\n          Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 20.802837371826172\n            Min Value: -0.03934507071971893\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: AxisColor\n          Decay Time: 0\n          Enabled: false\n    
      Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: Loop closure\n          Position Transformer: XYZ\n          Queue Size: 10\n          Selectable: true\n          Size (Pixels): 2\n          Size (m): 0.30000001192092896\n          Style: Flat Squares\n          Topic: /lio_segmot/mapping/icp_loop_closure_corrected_cloud\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n        - Class: rviz/MarkerArray\n          Enabled: true\n          Marker Topic: /lio_segmot/mapping/loop_closure_constraints\n          Name: Loop constraint\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: true\n        - Class: jsk_rviz_plugin/BoundingBoxArray\n          Enabled: false\n          Name: Dynamic Objects\n          Topic: /lio_segmot/mapping/objects\n          Unreliable: false\n          Value: false\n          alpha: 0.800000011920929\n          color: 25; 255; 0\n          coloring: Label\n          line width: 0.15000000596046448\n          only edge: true\n          show coords: true\n        - Class: rviz/MarkerArray\n          Enabled: false\n          Marker Topic: /lio_segmot/mapping/object_paths\n          Name: Dynamic Object Paths\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: false\n        - Class: rviz/MarkerArray\n          Enabled: false\n          Marker Topic: /lio_segmot/mapping/object_labels\n          Name: Dynamic Object Labels\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: false\n        - Class: rviz/MarkerArray\n          Enabled: false\n          Marker Topic: /lio_segmot/mapping/object_velocities\n          Name: Dynamic Object Predicted Path\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: false\n        - Class: rviz/Marker\n          Enabled: true\n          Marker Topic: 
/lio_segmot/mapping/tightly_coupled_object_points\n          Name: Tightly-coupled nodes\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: true\n      Enabled: true\n      Name: Mapping\n    - Class: rviz/Group\n      Displays:\n        - Class: jsk_rviz_plugin/BoundingBoxArray\n          Enabled: true\n          Name: Dynamic Objects\n          Topic: /lio_segmot/tracking/objects\n          Unreliable: false\n          Value: true\n          alpha: 0.800000011920929\n          color: 25; 255; 0\n          coloring: Label\n          line width: 0.15000000596046448\n          only edge: true\n          show coords: false\n        - Class: rviz/MarkerArray\n          Enabled: true\n          Marker Topic: /lio_segmot/tracking/object_paths\n          Name: Dynamic Object Paths\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: true\n        - Class: rviz/MarkerArray\n          Enabled: false\n          Marker Topic: /lio_segmot/tracking/object_labels\n          Name: Dynamic Object Labels\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: false\n        - Class: rviz/MarkerArray\n          Enabled: true\n          Marker Topic: /lio_segmot/tracking/object_velocities\n          Name: Dynamic Object Predicted Path\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: true\n        - Class: rviz/MarkerArray\n          Enabled: true\n          Marker Topic: /lio_segmot/tracking/object_velocity_arrows\n          Name: Dynamic Object Predicted Path Arrow\n          Namespaces:\n            {}\n          Queue Size: 100\n          Value: true\n      Enabled: true\n      Name: Tracking\n  Enabled: true\n  Global Options:\n    Background Color: 0; 0; 0\n    Default Light: true\n    Fixed Frame: map\n    Frame Rate: 30\n  Name: root\n  Tools:\n    - Class: rviz/Interact\n      Hide Inactive Objects: true\n    - Class: rviz/FocusCamera\n    - Class: 
rviz/Measure\n  Value: true\n  Views:\n    Current:\n      Class: rviz/Orbit\n      Distance: 182.35630798339844\n      Enable Stereo Rendering:\n        Stereo Eye Separation: 0.05999999865889549\n        Stereo Focal Distance: 1\n        Swap Stereo Eyes: false\n        Value: false\n      Focal Point:\n        X: 68.5766372680664\n        Y: 25.248287200927734\n        Z: -78.56790161132812\n      Focal Shape Fixed Size: false\n      Focal Shape Size: 0.05000000074505806\n      Invert Z Axis: false\n      Name: Current View\n      Near Clip Distance: 0.009999999776482582\n      Pitch: 0.8203978538513184\n      Target Frame: base_link\n      Value: Orbit (rviz)\n      Yaw: 3.4916627407073975\n    Saved: ~\nWindow Geometry:\n  Displays:\n    collapsed: false\n  Height: 1022\n  Hide Left Dock: false\n  Hide Right Dock: true\n  QMainWindow State: 000000ff00000000fd000000040000000000000171000003a8fc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb0000000a0049006d006100670065000000002e000001340000000000000000fb000000100044006900730070006c006100790073010000003b000003a8000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000286000000a70000005c00ffffff000000010000011100000435fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000002e00000435000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9
fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b000000040fc0100000002fb0000000800540069006d00650000000000000004b00000000000000000fb0000000800540069006d00650100000000000004500000000000000000000005c2000003a800000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000\n  Selection:\n    collapsed: false\n  Tool Properties:\n    collapsed: false\n  Views:\n    collapsed: true\n  Width: 1849\n  X: 71\n  Y: 28\n"
  },
  {
    "path": "launch/include/module_loam.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <node pkg=\"$(arg project)\" type=\"$(arg project)_imuPreintegration\"   name=\"$(arg project)_imuPreintegration\"    output=\"screen\" \trespawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_imageProjection\"     name=\"$(arg project)_imageProjection\"      output=\"screen\"     respawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_featureExtraction\"   name=\"$(arg project)_featureExtraction\"    output=\"screen\"     respawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_mapOptimization\"      name=\"$(arg project)_mapOptimization\"       output=\"screen\"     respawn=\"false\"/>\n    \n</launch>"
  },
  {
    "path": "launch/include/module_loam_debug.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <node pkg=\"$(arg project)\" type=\"$(arg project)_imuPreintegration\"   name=\"$(arg project)_imuPreintegration\"    output=\"screen\" \trespawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_imageProjection\"     name=\"$(arg project)_imageProjection\"      output=\"screen\"     respawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_featureExtraction\"   name=\"$(arg project)_featureExtraction\"    output=\"screen\"     respawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_mapOptimization\"\n    name=\"$(arg project)_mapOptimization\"       output=\"screen\"\n    respawn=\"false\" launch-prefix=\"gdb -ex run --args\"/>\n    \n</launch>"
  },
  {
    "path": "launch/include/module_loam_gdbserver_debug.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <node pkg=\"$(arg project)\" type=\"$(arg project)_imuPreintegration\"   name=\"$(arg project)_imuPreintegration\"    output=\"screen\" \trespawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_imageProjection\"     name=\"$(arg project)_imageProjection\"      output=\"screen\"     respawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_featureExtraction\"   name=\"$(arg project)_featureExtraction\"    output=\"screen\"     respawn=\"true\"/>\n    <node pkg=\"$(arg project)\" type=\"$(arg project)_mapOptimization\"\n    name=\"$(arg project)_mapOptimization\"       output=\"screen\"\n    respawn=\"false\" launch-prefix=\"gdbserver 127.0.0.1:12222\"/>\n    \n</launch>"
  },
  {
    "path": "launch/include/module_navsat.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n\n    <env name=\"ROSCONSOLE_CONFIG_FILE\" value=\"$(find lio_segmot)/launch/include/rosconsole/rosconsole_error.conf\"/>\n    \n    <!-- EKF GPS-->\n    <node pkg=\"robot_localization\" type=\"ekf_localization_node\" name=\"ekf_gps\" respawn=\"true\">\n        <remap from=\"odometry/filtered\" to=\"odometry/navsat\" />\n    </node>\n\n    <!-- Navsat -->\n    <node pkg=\"robot_localization\" type=\"navsat_transform_node\" name=\"navsat\" respawn=\"true\">\n        <!-- <rosparam param=\"datum\">[42.35893211, -71.09345588, 0.0, world, base_link]</rosparam> -->\n        <remap from=\"imu/data\" to=\"imu_correct\" />\n        <remap from=\"gps/fix\" to=\"gps/fix\" />\n        <remap from=\"odometry/filtered\" to=\"odometry/navsat\" />\n    </node>\n\n</launch>"
  },
  {
    "path": "launch/include/module_robot_state_publisher.launch",
    "content": "<launch>\n\n\t<arg name=\"project\" default=\"lio_segmot\"/>\n\n    <param name=\"robot_description\" command=\"$(find xacro)/xacro $(find lio_segmot)/launch/include/config/robot.urdf.xacro --inorder\" />\n\n    <node name=\"robot_state_publisher\" pkg=\"robot_state_publisher\" type=\"robot_state_publisher\" respawn=\"true\">\n        <!-- <param name=\"tf_prefix\" value=\"$(env ROS_HOSTNAME)\"/> -->\n    </node>\n  \n</launch>"
  },
  {
    "path": "launch/include/module_rviz.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n\n    <!--- Run Rviz-->\n    <node pkg=\"rviz\" type=\"rviz\" name=\"$(arg project)_rviz\" args=\"-d $(find lio_segmot)/launch/include/config/rviz.rviz\" />\n\n</launch>\n"
  },
  {
    "path": "launch/include/rosconsole/rosconsole_error.conf",
    "content": "# Set the default ros output to error and higher\nlog4j.logger.ros = ERROR"
  },
  {
    "path": "launch/include/rosconsole/rosconsole_info.conf",
    "content": "# Set the default ros output to info and higher\nlog4j.logger.ros = INFO"
  },
  {
    "path": "launch/include/rosconsole/rosconsole_warn.conf",
    "content": "# Set the default ros output to warning and higher\nlog4j.logger.ros = WARN"
  },
  {
    "path": "launch/run.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <!-- Parameters -->\n    <rosparam file=\"$(find lio_segmot)/config/params.yaml\" command=\"load\" />\n\n    <!--- LOAM -->\n    <include file=\"$(find lio_segmot)/launch/include/module_loam.launch\" />\n\n    <!--- Robot State TF -->\n    <include file=\"$(find lio_segmot)/launch/include/module_robot_state_publisher.launch\" />\n\n    <!--- Run Navsat -->\n    <include file=\"$(find lio_segmot)/launch/include/module_navsat.launch\" />\n\n    <!--- Run Rviz-->\n    <include file=\"$(find lio_segmot)/launch/include/module_rviz.launch\" />\n\n</launch>\n"
  },
  {
    "path": "launch/run_hsinchu.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <!-- Parameters -->\n    <rosparam file=\"$(find lio_segmot)/config/params_hsinchu.yaml\" command=\"load\" />\n\n    <!--- LOAM -->\n    <include file=\"$(find lio_segmot)/launch/include/module_loam.launch\" />\n\n    <!--- Robot State TF -->\n    <include file=\"$(find lio_segmot)/launch/include/module_robot_state_publisher.launch\" />\n\n    <!--- Run Navsat -->\n    <include file=\"$(find lio_segmot)/launch/include/module_navsat.launch\" />\n\n    <!--- Run Rviz-->\n    <include file=\"$(find lio_segmot)/launch/include/module_rviz.launch\" />\n\n</launch>\n"
  },
  {
    "path": "launch/run_kitti.launch",
    "content": "<launch>\n\n    <arg name=\"project\" default=\"lio_segmot\"/>\n    \n    <!-- Parameters -->\n    <rosparam file=\"$(find lio_segmot)/config/params_kitti.yaml\" command=\"load\" />\n\n    <!--- LOAM -->\n    <include file=\"$(find lio_segmot)/launch/include/module_loam.launch\" />\n\n    <!--- Robot State TF -->\n    <include file=\"$(find lio_segmot)/launch/include/module_robot_state_publisher.launch\" />\n\n    <!--- Run Navsat -->\n    <include file=\"$(find lio_segmot)/launch/include/module_navsat.launch\" />\n\n    <!--- Run Rviz-->\n    <include file=\"$(find lio_segmot)/launch/include/module_rviz.launch\" />\n\n</launch>\n"
  },
  {
    "path": "msg/Diagnosis.msg",
    "content": "Header header\n\nfloat64 computationalTime\nint32 numberOfDetections\nint32 numberOfTightlyCoupledObjects"
  },
  {
    "path": "msg/ObjectState.msg",
    "content": "Header header\n\njsk_recognition_msgs/BoundingBox detection\n\ngeometry_msgs/Pose pose\ngeometry_msgs/Pose velocity\n\nbool hasTightlyCoupledDetectionError\nfloat64 tightlyCoupledDetectionError\nfloat64 initialTightlyCoupledDetectionError\n\nbool hasLooselyCoupledDetectionError\nfloat64 looselyCoupledDetectionError\nfloat64 initialLooselyCoupledDetectionError\n\nbool hasMotionError\nfloat64 motionError\nfloat64 initialMotionError\n\nint32 index\nint32 lostCount\nfloat64 confidence\nbool isTightlyCoupled\nbool isFirst"
  },
  {
    "path": "msg/ObjectStateArray.msg",
    "content": "Header header\n\nlio_segmot/ObjectState[] objects"
  },
  {
    "path": "msg/cloud_info.msg",
    "content": "# Cloud Info\nHeader header \n\nint32[] startRingIndex\nint32[] endRingIndex\n\nint32[]  pointColInd # point column index in range image\nfloat32[] pointRange # point range \n\nint64 imuAvailable\nint64 odomAvailable\n\n# Attitude for LOAM initialization\nfloat32 imuRollInit\nfloat32 imuPitchInit\nfloat32 imuYawInit\n\n# Initial guess from imu pre-integration\nfloat32 initialGuessX\nfloat32 initialGuessY\nfloat32 initialGuessZ\nfloat32 initialGuessRoll\nfloat32 initialGuessPitch\nfloat32 initialGuessYaw\n\n# Point cloud messages\nsensor_msgs/PointCloud2 cloud_raw       # original cloud without any processing\nsensor_msgs/PointCloud2 cloud_deskewed  # original cloud deskewed\nsensor_msgs/PointCloud2 cloud_corner    # extracted corner feature\nsensor_msgs/PointCloud2 cloud_surface   # extracted surface feature"
  },
  {
    "path": "msg/flags.msg",
    "content": "int32[] flags"
  },
  {
    "path": "package.xml",
    "content": "<?xml version=\"1.0\"?>\n<package>\n  <name>lio_segmot</name>\n  <version>1.0.0</version>\n  <description>Lidar Odometry</description>\n\n  <maintainer email=\"shant@mit.edu\">Tixiao Shan</maintainer>\n\n  <license>TODO</license>\n\n  <author>Tixiao Shan</author>\n\n  <buildtool_depend>catkin</buildtool_depend>\n\n  <build_depend>roscpp</build_depend>\n  <run_depend>roscpp</run_depend>\n  <build_depend>rospy</build_depend>\n  <run_depend>rospy</run_depend>\n  <build_depend>rosbag</build_depend>\n  <run_depend>rosbag</run_depend>\n\n  <build_depend>tf</build_depend>\n  <run_depend>tf</run_depend>\n\n  <build_depend>cv_bridge</build_depend>\n  <run_depend>cv_bridge</run_depend>\n\n  <build_depend>pcl_conversions</build_depend>\n  <run_depend>pcl_conversions</run_depend>\n\n  <build_depend>std_msgs</build_depend>\n  <run_depend>std_msgs</run_depend>\n  <build_depend>sensor_msgs</build_depend>\n  <run_depend>sensor_msgs</run_depend>\n  <build_depend>geometry_msgs</build_depend>\n  <run_depend>geometry_msgs</run_depend>\n  <build_depend>nav_msgs</build_depend>\n  <run_depend>nav_msgs</run_depend>\n  <build_depend>visualization_msgs</build_depend>\n  <run_depend>visualization_msgs</run_depend>\n\n  <build_depend>message_generation</build_depend>\n  <run_depend>message_generation</run_depend>\n  <build_depend>message_runtime</build_depend>\n  <run_depend>message_runtime</run_depend>\n\n  <build_depend>GTSAM</build_depend>\n  <run_depend>GTSAM</run_depend>\n\n  <build_depend>jsk_recognition_msgs</build_depend>\n  <run_depend>jsk_recognition_msgs</run_depend>\n\n  <build_depend>jsk_topic_tools</build_depend>\n  <run_depend>jsk_topic_tools</run_depend>\n\n</package>\n"
  },
  {
    "path": "scripts/diagnose.py",
    "content": "import copy\nimport multiprocessing as mp\nfrom typing import Dict, List, Tuple\n\nimport dash\nimport dash_bootstrap_components as dbc\nimport numpy as np\nimport plotly.graph_objects as go\nimport rospy\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output\nfrom lio_segmot.msg import ObjectState, ObjectStateArray\nfrom std_msgs.msg import Header\n\n# ------------------------------------------------------------------------------------------------ #\n#                                      Environmental Variables                                     #\n# ------------------------------------------------------------------------------------------------ #\n\nRECENT_HISTORY_LENGTH = 15\n\n\n# ------------------------------------------------------------------------------------------------ #\n#                                               Utils                                              #\n# ------------------------------------------------------------------------------------------------ #\n\n\ndef get_color(index):\n    colormap = [\n        (0.121569, 0.466667, 0.705882),\n        (0.682353, 0.780392, 0.909804),\n        (1.000000, 0.498039, 0.054902),\n        (1.000000, 0.733333, 0.470588),\n        (0.172549, 0.627451, 0.172549),\n        (0.596078, 0.874510, 0.541176),\n        (0.839216, 0.152941, 0.156863),\n        (1.000000, 0.596078, 0.588235),\n        (0.580392, 0.403922, 0.741176),\n        (0.772549, 0.690196, 0.835294),\n        (0.549020, 0.337255, 0.294118),\n        (0.768627, 0.611765, 0.580392),\n        (0.890196, 0.466667, 0.760784),\n        (0.968627, 0.713725, 0.823529),\n        (0.498039, 0.498039, 0.498039),\n        (0.780392, 0.780392, 0.780392),\n        (0.737255, 0.741176, 0.133333),\n        (0.858824, 0.858824, 0.552941),\n        (0.090196, 0.745098, 0.811765),\n        (0.619608, 0.854902, 0.898039),\n    ]\n    colormap = [(int(i * 255), int(j * 255), int(k * 255)) for (i, j, k) in colormap]\n    
color = colormap[index % 20]\n    return \"rgba({}, {}, {}, 1)\".format(color[0], color[1], color[2])\n\n\n# ------------------------------------------------------------------------------------------------ #\n#                                               Data                                               #\n# ------------------------------------------------------------------------------------------------ #\n\n\nclass DynamicObject:\n    def __init__(self) -> None:\n        self.states: Dict[int, ObjectState] = {}  # {stamp index}: {object state}\n        self.index: int = -1\n        self.name: str = \"\"\n        self.color: str = \"rgba(0, 0, 0, 1)\"\n\n    def get_confidence_score(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        return self.states[index].detection.value\n\n    def get_initial_tightly_coupled_detection_error(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        return self.states[index].initialTightlyCoupledDetectionError\n\n    def get_initial_loosely_coupled_detection_error(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        return self.states[index].initialLooselyCoupledDetectionError\n\n    def get_tightly_coupled_detection_error(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        return self.states[index].tightlyCoupledDetectionError\n\n    def get_loosely_coupled_detection_error(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        return self.states[index].looselyCoupledDetectionError\n\n    def get_initial_motion_error(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        
return self.states[index].initialMotionError\n\n    def get_motion_error(self, index: int) -> float:\n        if index not in self.states.keys():\n            raise ValueError(\"Invalid index\")\n        return self.states[index].motionError\n\n    def recent_indices(self, start_index: int, value_type: str = \"\") -> np.ndarray:\n        indices = np.array(list(self.states.keys()))\n        indices = indices[np.where(indices >= start_index)[0]]\n        if value_type in [\"\", \"confidence_score\"]:\n            return indices\n        elif value_type in [\n            \"initial_tightly_coupled_detection_error\",\n            \"tightly_coupled_detection_error\",\n        ]:\n            return indices[\n                np.where(\n                    np.array(\n                        [\n                            1 if self.states[idx].hasTightlyCoupledDetectionError else 0\n                            for idx in indices\n                        ]\n                    )\n                )[0]\n            ]\n        elif value_type in [\n            \"initial_loosely_coupled_detection_error\",\n            \"loosely_coupled_detection_error\",\n        ]:\n            return indices[\n                np.where(\n                    np.array(\n                        [\n                            1 if self.states[idx].hasLooselyCoupledDetectionError else 0\n                            for idx in indices\n                        ]\n                    )\n                )[0]\n            ]\n        elif value_type in [\"initial_motion_error\", \"motion_error\"]:\n            return indices[\n                np.where(\n                    np.array(\n                        [1 if self.states[idx].hasMotionError else 0 for idx in indices]\n                    )\n                )[0]\n            ]\n        else:\n            raise ValueError(\"Invalid value type\")\n\n    def has_recent_data(self, start_index: int) -> bool:\n        return 
len(self.recent_indices(start_index)) > 0\n\n    def get_recent_values_plot_data(\n        self, value_type: str, start_index: int, stamps: np.ndarray\n    ) -> go.Scatter:\n        indices = self.recent_indices(start_index, value_type)\n        if len(indices) == 0:\n            return go.Scatter(x=[], y=[], name=self.name)\n\n        x = stamps[indices]\n        y = None\n        if value_type == \"confidence_score\":\n            y = [self.states[i].detection.value for i in indices]\n        elif value_type == \"initial_loosely_coupled_detection_error\":\n            y = [self.states[i].initialLooselyCoupledDetectionError for i in indices]\n        elif value_type == \"initial_tightly_coupled_detection_error\":\n            y = [self.states[i].initialTightlyCoupledDetectionError for i in indices]\n        elif value_type == \"loosely_coupled_detection_error\":\n            y = [self.states[i].looselyCoupledDetectionError for i in indices]\n        elif value_type == \"tightly_coupled_detection_error\":\n            y = [self.states[i].tightlyCoupledDetectionError for i in indices]\n        elif value_type == \"initial_motion_error\":\n            y = [self.states[i].initialMotionError for i in indices]\n        elif value_type == \"motion_error\":\n            y = [self.states[i].motionError for i in indices]\n        else:\n            raise ValueError(\"Invalid value type\")\n\n        return go.Scatter(x=x, y=y, name=self.name, marker=dict(color=self.color))\n\n\nclass Data:\n    def __init__(self) -> None:\n        self.objects: Dict[int, DynamicObject] = {}\n        self.tightly_coupled_objects: Dict[int, DynamicObject] = {}\n        self.start_time = -1\n        self.stamps: List[float] = []\n        self.stamps_hash: List[str] = []\n\n    def add_new_object_state(self, object_state: ObjectState) -> None:\n        stamp_index = self.update_time_stamp(object_state.header)\n\n        if object_state.index not in self.objects.keys():\n            
self.objects[object_state.index] = DynamicObject()\n            self.objects[object_state.index].index = object_state.index\n            self.objects[object_state.index].name = \"Object {}\".format(\n                object_state.index\n            )\n            self.objects[object_state.index].color = get_color(object_state.index)\n\n        self.objects[object_state.index].states[stamp_index] = copy.deepcopy(\n            object_state\n        )\n\n        if object_state.isTightlyCoupled:\n            if object_state.index not in self.tightly_coupled_objects.keys():\n                self.tightly_coupled_objects[object_state.index] = DynamicObject()\n                self.tightly_coupled_objects[\n                    object_state.index\n                ].index = object_state.index\n                self.tightly_coupled_objects[\n                    object_state.index\n                ].name = \"Object {}\".format(object_state.index)\n                self.tightly_coupled_objects[object_state.index].color = get_color(\n                    object_state.index\n                )\n\n            self.tightly_coupled_objects[object_state.index].states[\n                stamp_index\n            ] = copy.deepcopy(object_state)\n\n    def update_time_stamp(self, header: Header) -> int:\n        stamp = header.stamp.to_sec()\n\n        if self.start_time < 0:\n            self.start_time = stamp\n\n        stamp = stamp - self.start_time\n        stamp_hash = \"{}.{}\".format(header.stamp.secs, header.stamp.nsecs)\n\n        # TODO: Check ordering of time stamps\n        if stamp_hash not in self.stamps_hash:\n            self.stamps.append(stamp)\n            self.stamps_hash.append(stamp_hash)\n            return len(self.stamps) - 1\n        else:\n            length = len(self.stamps)\n            return next(\n                (\n                    length - i - 1\n                    for i, e in enumerate(self.stamps_hash[::-1])\n                    if e == stamp_hash\n   
             )\n            )\n\n    @property\n    def number_of_stamps(self):\n        return len(self.stamps)\n\n    def get_recent_values_plot_data(\n        self, value_type: str, length: int, only_tightly_coupled: bool = False\n    ) -> List[go.Scatter]:\n        start_index = max(self.number_of_stamps - length, 0)\n        np_stamps = np.array(self.stamps)\n        available_object_states = (\n            self.tightly_coupled_objects.values()\n            if only_tightly_coupled\n            else self.objects.values()\n        )\n        return [\n            d.get_recent_values_plot_data(value_type, start_index, np_stamps)\n            for d in available_object_states\n            if d.has_recent_data(start_index)\n        ]\n\n    def get_recent_plot_xaxis_range(self, length: int) -> Tuple[float, float]:\n        start_index = max(self.number_of_stamps - length, 0)\n        end_stamp = max(self.stamps[-1], self.stamps[start_index] + length * 0.1) + 0.2\n        return self.stamps[start_index], end_stamp\n\n\ndata = Data()\nqueue = mp.Queue(maxsize=5)\n\nconfidence_score_min_y = 0.0\nconfidence_score_max_y = 0.55\n\ninitial_detection_error_min_y = 0.0\ninitial_detection_error_max_y = 200\ndetection_error_min_y = 0.0\ndetection_error_max_y = 20\n\ninitial_motion_error_min_y = 0.0\ninitial_motion_error_max_y = 5.0\nmotion_error_min_y = 0.0\nmotion_error_max_y = 20\n\n# ------------------------------------------------------------------------------------------------ #\n#                                                ROS                                               #\n# ------------------------------------------------------------------------------------------------ #\n\n\nclass Callback:\n    def __init__(self, queue: mp.Queue) -> None:\n        self.queue: mp.Queue = queue\n\n    def __call__(self, msg: ObjectStateArray) -> None:\n        self.queue.put(msg)\n\n\ndef ros_main():\n    callback = Callback(queue)\n    rospy.init_node(\"lio_segmot_diagnose\", 
anonymous=True)\n    rospy.Subscriber(\"lio_segmot/mapping/object_states\", ObjectStateArray, callback)\n    rospy.spin()\n\n\n# ------------------------------------------------------------------------------------------------ #\n#                                               Dash                                               #\n# ------------------------------------------------------------------------------------------------ #\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])\napp.layout = html.Div(\n    html.Div(\n        [\n            dbc.NavbarSimple(\n                children=[],\n                brand=\"Coupling LIO-SAM and MOT\",\n                brand_href=\"#\",\n                color=\"primary\",\n                dark=True,\n            ),\n            dbc.Toast(\n                \"Data is reset!\",\n                id=\"data-reset-toast\",\n                header=\"Notification\",\n                is_open=False,\n                dismissable=True,\n                icon=\"info\",\n                # top: 66 positions the toast below the navbar\n                style={\n                    \"position\": \"fixed\",\n                    \"top\": 66,\n                    \"right\": 10,\n                    \"width\": 350,\n                    \"z-index\": \"100\",\n                },\n            ),\n            html.Div(\n                [\n                    dbc.Row(\n                        [\n                            dbc.Col(\n                                [html.H3(\"SE-SSD's Confidence Score\")],\n                                class_name=\"col-6\",\n                            ),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"min(y)\"),\n                                            dbc.Input(\n                                                
type=\"number\",\n                                                id=\"confidence-score-min-y\",\n                                                placeholder=\"min(y)\",\n                                                value=confidence_score_min_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"max(y)\"),\n                                            dbc.Input(\n                                                type=\"number\",\n                                                id=\"confidence-score-max-y\",\n                                                placeholder=\"max(y)\",\n                                                value=confidence_score_max_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                        ],\n                        style={\"margin-top\": \"30px\"},\n                    ),\n                    dbc.Row(\n                        [\n                            dbc.Col(\n                                dcc.Graph(id=\"detection-score-all\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                            dbc.Col(\n                                dcc.Graph(id=\"detection-score-tightly-coupled\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                        ]\n                    
),\n                    html.Hr(),\n                    dbc.Row(\n                        [\n                            dbc.Col(\n                                [html.H3(\"Initial Detection Error\")], class_name=\"col-6\"\n                            ),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"min(y)\"),\n                                            dbc.Input(\n                                                type=\"number\",\n                                                id=\"initial-detection-error-min-y\",\n                                                placeholder=\"min(y)\",\n                                                value=initial_detection_error_min_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"max(y)\"),\n                                            dbc.Input(\n                                                type=\"number\",\n                                                id=\"initial-detection-error-max-y\",\n                                                placeholder=\"max(y)\",\n                                                value=initial_detection_error_max_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n       
                 ],\n                        style={\"margin-top\": \"30px\"},\n                    ),\n                    dbc.Row(\n                        [\n                            dbc.Col(\n                                dcc.Graph(id=\"initial-loosely-coupled-detection-error\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                            dbc.Col(\n                                dcc.Graph(id=\"initial-tightly-coupled-detection-error\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                        ]\n                    ),\n                    html.Hr(),\n                    dbc.Row(\n                        [\n                            dbc.Col([html.H3(\"Detection Error\")], class_name=\"col-6\"),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"min(y)\"),\n                                            dbc.Input(\n                                                type=\"number\",\n                                                id=\"detection-error-min-y\",\n                                                placeholder=\"min(y)\",\n                                                value=detection_error_min_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"max(y)\"),\n                                            
dbc.Input(\n                                                type=\"number\",\n                                                id=\"detection-error-max-y\",\n                                                placeholder=\"max(y)\",\n                                                value=detection_error_max_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                        ],\n                        style={\"margin-top\": \"30px\"},\n                    ),\n                    dbc.Row(\n                        [\n                            dbc.Col(\n                                dcc.Graph(id=\"loosely-coupled-detection-error\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                            dbc.Col(\n                                dcc.Graph(id=\"tightly-coupled-detection-error\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                        ]\n                    ),\n                    # html.Hr(),\n                    # dbc.Row(\n                    #     [\n                    #         dbc.Col(\n                    #             [html.H3(\"Initial Motion Error\")], class_name=\"col-6\"\n                    #         ),\n                    #         dbc.Col(\n                    #             [\n                    #                 dbc.InputGroup(\n                    #                     [\n                    #                         dbc.InputGroupText(\"min(y)\"),\n                    #                         dbc.Input(\n                    #                             type=\"number\",\n                    #                             id=\"initial-motion-error-min-y\",\n                    #                             
placeholder=\"min(y)\",\n                    #                             value=initial_motion_error_min_y,\n                    #                         ),\n                    #                     ]\n                    #                 ),\n                    #             ],\n                    #             class_name=\"col-3\",\n                    #         ),\n                    #         dbc.Col(\n                    #             [\n                    #                 dbc.InputGroup(\n                    #                     [\n                    #                         dbc.InputGroupText(\"max(y)\"),\n                    #                         dbc.Input(\n                    #                             type=\"number\",\n                    #                             id=\"initial-motion-error-max-y\",\n                    #                             placeholder=\"max(y)\",\n                    #                             value=initial_motion_error_max_y,\n                    #                         ),\n                    #                     ]\n                    #                 ),\n                    #             ],\n                    #             class_name=\"col-3\",\n                    #         ),\n                    #     ],\n                    #     style={\"margin-top\": \"30px\"},\n                    # ),\n                    # dbc.Row(\n                    #     [\n                    #         dbc.Col(\n                    #             dcc.Graph(id=\"initial-motion-error-all\"),\n                    #             class_name=\"col-lg-6 col-12\",\n                    #         ),\n                    #         dbc.Col(\n                    #             dcc.Graph(id=\"initial-motion-error-tightly-coupled\"),\n                    #             class_name=\"col-lg-6 col-12\",\n                    #         ),\n                    #     ]\n                    # ),\n                    html.Hr(),\n             
       dbc.Row(\n                        [\n                            dbc.Col([html.H3(\"Motion Error\")], class_name=\"col-6\"),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"min(y)\"),\n                                            dbc.Input(\n                                                type=\"number\",\n                                                id=\"motion-error-min-y\",\n                                                placeholder=\"min(y)\",\n                                                value=motion_error_min_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                            dbc.Col(\n                                [\n                                    dbc.InputGroup(\n                                        [\n                                            dbc.InputGroupText(\"max(y)\"),\n                                            dbc.Input(\n                                                type=\"number\",\n                                                id=\"motion-error-max-y\",\n                                                placeholder=\"max(y)\",\n                                                value=motion_error_max_y,\n                                            ),\n                                        ]\n                                    ),\n                                ],\n                                class_name=\"col-3\",\n                            ),\n                        ],\n                        style={\"margin-top\": \"30px\"},\n                    ),\n                    dbc.Row(\n                        [\n       
                     dbc.Col(\n                                dcc.Graph(id=\"motion-error-all\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                            dbc.Col(\n                                dcc.Graph(id=\"motion-error-tightly-coupled\"),\n                                class_name=\"col-lg-6 col-12\",\n                            ),\n                        ]\n                    ),\n                    html.Hr(),\n                    html.Div(\n                        [\n                            dbc.Button(\n                                \"Reset\",\n                                id=\"reset-button\",\n                                color=\"danger\",\n                                n_clicks=0,\n                                className=\"me-1\",\n                            ),\n                        ]\n                    ),\n                ],\n                className=\"container\",\n            ),\n            dcc.Interval(\n                id=\"interval-component\",\n                interval=1 * 200,  # in milliseconds\n                n_intervals=0,\n            ),\n        ]\n    )\n)\n\n\n@app.callback(\n    Output(\"data-reset-toast\", \"is_open\"),\n    Input(\"reset-button\", \"n_clicks\"),\n)\ndef reset(n):\n    global data\n    data = Data()\n    return True\n\n\n@app.callback(\n    Output(\"detection-score-all\", \"figure\"),\n    Output(\"detection-score-tightly-coupled\", \"figure\"),\n    Output(\"initial-loosely-coupled-detection-error\", \"figure\"),\n    Output(\"initial-tightly-coupled-detection-error\", \"figure\"),\n    Output(\"loosely-coupled-detection-error\", \"figure\"),\n    Output(\"tightly-coupled-detection-error\", \"figure\"),\n    # Output(\"initial-motion-error-all\", \"figure\"),\n    # Output(\"initial-motion-error-tightly-coupled\", \"figure\"),\n    Output(\"motion-error-all\", \"figure\"),\n    Output(\"motion-error-tightly-coupled\", 
\"figure\"),\n    Input(\"interval-component\", \"n_intervals\"),\n    Input(\"confidence-score-min-y\", \"value\"),\n    Input(\"confidence-score-max-y\", \"value\"),\n    Input(\"initial-detection-error-min-y\", \"value\"),\n    Input(\"initial-detection-error-max-y\", \"value\"),\n    Input(\"detection-error-min-y\", \"value\"),\n    Input(\"detection-error-max-y\", \"value\"),\n    # Input(\"initial-motion-error-min-y\", \"value\"),\n    # Input(\"initial-motion-error-max-y\", \"value\"),\n    Input(\"motion-error-min-y\", \"value\"),\n    Input(\"motion-error-max-y\", \"value\"),\n)\ndef update(\n    n,\n    c_min_y,\n    c_max_y,\n    i_d_min_y,\n    i_d_max_y,\n    d_min_y,\n    d_max_y,\n    # i_m_min_y,\n    # i_m_max_y,\n    m_min_y,\n    m_max_y,\n):\n    global confidence_score_min_y\n    global confidence_score_max_y\n    global initial_detection_error_min_y\n    global initial_detection_error_max_y\n    global detection_error_min_y\n    global detection_error_max_y\n    global initial_motion_error_min_y\n    global initial_motion_error_max_y\n    global motion_error_min_y\n    global motion_error_max_y\n\n    changed = False\n\n    if confidence_score_min_y != c_min_y:\n        changed = True\n        confidence_score_min_y = c_min_y\n    if confidence_score_max_y != c_max_y:\n        changed = True\n        confidence_score_max_y = c_max_y\n    if initial_detection_error_min_y != i_d_min_y:\n        changed = True\n        initial_detection_error_min_y = i_d_min_y\n    if initial_detection_error_max_y != i_d_max_y:\n        changed = True\n        initial_detection_error_max_y = i_d_max_y\n    if detection_error_min_y != d_min_y:\n        changed = True\n        detection_error_min_y = d_min_y\n    if detection_error_max_y != d_max_y:\n        changed = True\n        detection_error_max_y = d_max_y\n    # if initial_motion_error_min_y != i_m_min_y:\n    #     changed = True\n    #     initial_motion_error_min_y = i_m_min_y\n    # if 
initial_motion_error_max_y != i_m_max_y:\n    #     changed = True\n    #     initial_motion_error_max_y = i_m_max_y\n    if motion_error_min_y != m_min_y:\n        changed = True\n        motion_error_min_y = m_min_y\n    if motion_error_max_y != m_max_y:\n        changed = True\n        motion_error_max_y = m_max_y\n\n    if queue.empty() and not changed:\n        return [dash.no_update] * 8\n    elif changed and len(data.objects) == 0:\n        return [dash.no_update] * 8\n\n    while not queue.empty():\n        msg: ObjectStateArray = queue.get()\n        for object_state in msg.objects:\n            data.add_new_object_state(object_state)\n\n    confidence_score_all = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[confidence_score_min_y, confidence_score_max_y],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"confidence_score\", RECENT_HISTORY_LENGTH, False\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[0, 0.55],\n    )\n    confidence_score_all.update_layout(\n        title=\"All Objects\",\n        xaxis_title=\"Time Stamp\",\n        yaxis_title=\"Confidence Score\",\n        showlegend=True,\n    )\n    confidence_score_tightly_coupled = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[confidence_score_min_y - 1, confidence_score_max_y + 1],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"confidence_score\", RECENT_HISTORY_LENGTH, True\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[confidence_score_min_y, confidence_score_max_y],\n    )\n    confidence_score_tightly_coupled.update_layout(\n        
title=\"Tightly-coupled Objects\",\n        xaxis_title=\"Time Stamp\",\n        yaxis_title=\"Confidence Score\",\n        showlegend=True,\n    )\n\n    initial_loosely_coupled_detection_error = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[\n                    initial_detection_error_min_y - 1,\n                    initial_detection_error_max_y + 1,\n                ],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"initial_loosely_coupled_detection_error\", RECENT_HISTORY_LENGTH, False\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[\n            initial_detection_error_min_y,\n            initial_detection_error_max_y,\n        ],\n    )\n    initial_loosely_coupled_detection_error.update_layout(\n        title=\"Loosely-coupled Objects\",\n        xaxis_title=\"Time Stamp\",\n        yaxis_title=\"Loosely-coupled Detection Error\",\n        showlegend=True,\n    )\n    initial_tightly_coupled_detection_error = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[\n                    initial_detection_error_min_y - 1,\n                    initial_detection_error_max_y + 1,\n                ],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"initial_tightly_coupled_detection_error\", RECENT_HISTORY_LENGTH, False\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[\n            initial_detection_error_min_y,\n            initial_detection_error_max_y,\n        ],\n    )\n    initial_tightly_coupled_detection_error.update_layout(\n        title=\"Tightly-coupled Objects\",\n        xaxis_title=\"Time Stamp\",\n        
yaxis_title=\"Tightly-coupled Detection Error\",\n        showlegend=True,\n    )\n\n    loosely_coupled_detection_error = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[detection_error_min_y - 1, detection_error_max_y + 1],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"loosely_coupled_detection_error\", RECENT_HISTORY_LENGTH, False\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[detection_error_min_y, detection_error_max_y],\n    )\n    loosely_coupled_detection_error.update_layout(\n        title=\"Loosely-coupled Objects\",\n        xaxis_title=\"Time Stamp\",\n        yaxis_title=\"Loosely-coupled Detection Error\",\n        showlegend=True,\n    )\n    tightly_coupled_detection_error = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[detection_error_min_y - 1, detection_error_max_y + 1],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"tightly_coupled_detection_error\", RECENT_HISTORY_LENGTH, False\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[detection_error_min_y, detection_error_max_y],\n    )\n    tightly_coupled_detection_error.update_layout(\n        title=\"Tightly-coupled Objects\",\n        xaxis_title=\"Time Stamp\",\n        yaxis_title=\"Tightly-coupled Detection Error\",\n        showlegend=True,\n    )\n\n    # initial_motion_error_all = go.Figure(\n    #     data=[\n    #         go.Scatter(\n    #             x=[data.stamps[-1]] * 2,\n    #             y=[initial_motion_error_min_y - 1, initial_motion_error_max_y + 1],\n    #             marker={\"color\": \"red\"},\n    #         )\n    #     
]\n    #     + data.get_recent_values_plot_data(\n    #         \"initial_motion_error\", RECENT_HISTORY_LENGTH, False\n    #     ),\n    #     layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n    #     layout_yaxis_range=[initial_motion_error_min_y, initial_motion_error_max_y],\n    # )\n    # initial_motion_error_all.update_layout(\n    #     title=\"All Objects\",\n    #     xaxis_title=\"Time Stamp\",\n    #     yaxis_title=\"Motion Error\",\n    #     showlegend=True,\n    # )\n    # initial_motion_error_tightly_coupled = go.Figure(\n    #     data=[\n    #         go.Scatter(\n    #             x=[data.stamps[-1]] * 2,\n    #             y=[initial_motion_error_min_y - 1, initial_motion_error_max_y + 1],\n    #             marker={\"color\": \"red\"},\n    #         )\n    #     ]\n    #     + data.get_recent_values_plot_data(\n    #         \"initial_motion_error\", RECENT_HISTORY_LENGTH, True\n    #     ),\n    #     layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n    #     layout_yaxis_range=[initial_motion_error_min_y, initial_motion_error_max_y],\n    # )\n    # initial_motion_error_tightly_coupled.update_layout(\n    #     title=\"Tightly-coupled Objects\",\n    #     xaxis_title=\"Time Stamp\",\n    #     yaxis_title=\"Motion Error\",\n    #     showlegend=True,\n    # )\n\n    motion_error_all = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[motion_error_min_y - 1, motion_error_max_y + 1],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\n            \"motion_error\", RECENT_HISTORY_LENGTH, False\n        ),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[motion_error_min_y, motion_error_max_y],\n    )\n    motion_error_all.update_layout(\n        title=\"All Objects\",\n        xaxis_title=\"Time 
Stamp\",\n        yaxis_title=\"Motion Error\",\n        showlegend=True,\n    )\n    motion_error_tightly_coupled = go.Figure(\n        data=[\n            go.Scatter(\n                x=[data.stamps[-1]] * 2,\n                y=[motion_error_min_y - 1, motion_error_max_y + 1],\n                marker={\"color\": \"red\"},\n            )\n        ]\n        + data.get_recent_values_plot_data(\"motion_error\", RECENT_HISTORY_LENGTH, True),\n        layout_xaxis_range=data.get_recent_plot_xaxis_range(RECENT_HISTORY_LENGTH),\n        layout_yaxis_range=[motion_error_min_y, motion_error_max_y],\n    )\n    motion_error_tightly_coupled.update_layout(\n        title=\"Tightly-coupled Objects\",\n        xaxis_title=\"Time Stamp\",\n        yaxis_title=\"Motion Error\",\n        showlegend=True,\n    )\n\n    return (\n        confidence_score_all,\n        confidence_score_tightly_coupled,\n        initial_loosely_coupled_detection_error,\n        initial_tightly_coupled_detection_error,\n        loosely_coupled_detection_error,\n        tightly_coupled_detection_error,\n        # initial_motion_error_all,\n        # initial_motion_error_tightly_coupled,\n        motion_error_all,\n        motion_error_tightly_coupled,\n    )\n\n\nif __name__ == \"__main__\":\n    ros_main_process = mp.Process(target=ros_main)\n    ros_main_process.start()\n    app.run_server(debug=True)\n    ros_main_process.join()\n"
  },
  {
    "path": "src/factor.cpp",
    "content": "#include \"factor.h\"\n\n#include <boost/format.hpp>\n\n#include <gtsam/linear/JacobianFactor.h>\n#include <gtsam/slam/PriorFactor.h>\n\n#include <cmath>\n#include <iostream>\n#include <limits>\n#include <stdexcept>\n\n/* -------------------------------------------------------------------------- */\n/*                                  Detection                                 */\n/* -------------------------------------------------------------------------- */\n\nDetection::Detection(Detection::BoundingBox box,\n                     gtsam::Vector6 variances)\n    : box(box), variances(variances) {\n  auto p     = box.pose.position;\n  auto q     = box.pose.orientation;\n  this->pose = gtsam::Pose3(gtsam::Rot3(q.w, q.x, q.y, q.z), gtsam::Point3(p.x, p.y, p.z));\n}\n\n/* -------------------------------------------------------------------------- */\n\nconst double Detection::error(const gtsam::Pose3 x) const {\n  gtsam::Vector errorVector = gtsam::traits<gtsam::Pose3>::Local(this->pose, x);\n  return sqrt(this->getDiagonal()->Mahalanobis(errorVector));\n}\n\n/* -------------------------------------------------------------------------- */\n\nstd::tuple<size_t, double>\ngetDetectionIndexAndError(const gtsam::Pose3 &d, std::vector<Detection> detections) {\n  if (detections.size() == 0) {\n    throw std::runtime_error(\"Does not receive any detection.\");\n  }\n\n  size_t idx   = 0;\n  double error = detections[0].error(d);\n\n  // Figure out the optimal detection index\n  for (size_t i = 1; i < detections.size(); ++i) {\n    double error_i = detections[i].error(d);\n    if (error_i < error) {\n      idx   = i;\n      error = error_i;\n    }\n  }\n\n  return std::make_tuple(idx, error);\n}\n\n/* -------------------------------------------------------------------------- */\n/*                      Tightly-Coupled Detection Factor                      */\n/* -------------------------------------------------------------------------- 
*/\n\ngtsam::Vector\nTightlyCoupledDetectionFactor::evaluateError(const gtsam::Pose3 &robotPose,\n                                             const gtsam::Pose3 &objectPose,\n                                             const gtsam::Pose3 &measured,\n                                             boost::optional<gtsam::Matrix &> H1,\n                                             boost::optional<gtsam::Matrix &> H2) const {\n  gtsam::Pose3 hx = gtsam::traits<gtsam::Pose3>::Between(robotPose,\n                                                         objectPose,\n                                                         H1,\n                                                         H2);\n  return gtsam::traits<gtsam::Pose3>::Local(measured, hx);\n}\n\n/* -------------------------------------------------------------------------- */\n\ngtsam::Vector\nTightlyCoupledDetectionFactor::unwhitenedError(const gtsam::Pose3 &measured,\n                                               const gtsam::Values &x,\n                                               boost::optional<std::vector<gtsam::Matrix> &> H) const {\n  if (this->active(x)) {\n    const gtsam::Pose3 &x1 = x.at<gtsam::Pose3>(robotPoseKey());\n    const gtsam::Pose3 &x2 = x.at<gtsam::Pose3>(objectPoseKey());\n    if (H) {\n      return evaluateError(x1, x2, measured, (*H)[0], (*H)[1]);\n    } else {\n      return evaluateError(x1, x2, measured);\n    }\n  } else {\n    return gtsam::Vector::Zero(this->dim());\n  }\n}\n\n/* -------------------------------------------------------------------------- */\n\nboost::shared_ptr<gtsam::GaussianFactor>\nTightlyCoupledDetectionFactor::linearize(const gtsam::Values &c) const {\n  // Only linearize if the factor is active\n  if (!active(c))\n    return boost::shared_ptr<gtsam::JacobianFactor>();\n\n  // Determine which detection is used to generate the factor function, a.k.a.\n  // the Max-Mixture model\n  double error;\n  const auto robotPose                  = 
c.at<gtsam::Pose3>(this->robotPoseKey());\n  const auto objectPose                 = c.at<gtsam::Pose3>(this->objectPoseKey());\n  std::tie(cachedDetectionIndex, error) = getDetectionIndexAndError(robotPose.inverse() * objectPose, this->detections);\n\n  auto measured   = this->detections[cachedDetectionIndex].getPose();\n  auto noiseModel = this->noiseModels[cachedDetectionIndex];\n\n  // Call evaluate error to get Jacobians and RHS vector b\n  std::vector<gtsam::Matrix> A(size());\n  gtsam::Vector b = -unwhitenedError(measured, c, A);\n  if (noiseModel && b.size() != noiseModel->dim())\n    throw std::invalid_argument(\n        boost::str(\n            boost::format(\n                \"NoiseModelFactor: NoiseModel has dimension %1% instead of %2%.\") %\n            noiseModel->dim() % b.size()));\n\n  // Whiten the corresponding system now\n  if (noiseModel)\n    noiseModel->WhitenSystem(A, b);\n\n  // Fill in terms, needed to create JacobianFactor below\n  std::vector<std::pair<gtsam::Key, gtsam::Matrix> > terms(size());\n  for (size_t j = 0; j < size(); ++j) {\n    terms[j].first = keys()[j];\n    terms[j].second.swap(A[j]);\n  }\n\n  // TODO pass unwhitened + noise model to Gaussian factor\n  using gtsam::noiseModel::Constrained;\n  if (noiseModel && noiseModel->isConstrained())\n    return gtsam::GaussianFactor::shared_ptr(\n        new gtsam::JacobianFactor(terms, b,\n                                  boost::static_pointer_cast<Constrained>(noiseModel)->unit()));\n  else\n    return gtsam::GaussianFactor::shared_ptr(new gtsam::JacobianFactor(terms, b));\n}\n\n/* -------------------------------------------------------------------------- */\n\ndouble TightlyCoupledDetectionFactor::error(const gtsam::Values &c) const {\n  if (active(c)) {\n    // Determine which detection is used to generate the factor function, a.k.a.\n    // the Max-Mixture model\n    size_t index;\n    double error;\n    const auto robotPose   = c.at<gtsam::Pose3>(this->robotPoseKey());\n    
const auto objectPose  = c.at<gtsam::Pose3>(this->objectPoseKey());\n    std::tie(index, error) = getDetectionIndexAndError(robotPose.inverse() * objectPose, this->detections);\n\n    auto measured   = this->detections[index].getPose();\n    auto noiseModel = this->noiseModels[index];\n\n    const gtsam::Vector b = unwhitenedError(measured, c);\n    if (noiseModel && b.size() != noiseModel->dim())\n      throw std::invalid_argument(\n          boost::str(\n              boost::format(\n                  \"NoiseModelFactor: NoiseModel has dimension %1% instead of %2%.\") %\n              noiseModel->dim() % b.size()));\n    return 0.5 * noiseModel->distance(b);\n  } else {\n    return 0.0;\n  }\n}\n\n/* -------------------------------------------------------------------------- */\n/*                      Loosely-Coupled Detection Factor                      */\n/* -------------------------------------------------------------------------- */\n\ngtsam::Vector\nLooselyCoupledDetectionFactor::evaluateError(const gtsam::Pose3 &robotPose,\n                                             const gtsam::Pose3 &objectPose,\n                                             const gtsam::Pose3 &measured,\n                                             boost::optional<gtsam::Matrix &> H1) const {\n  gtsam::Pose3 hx = gtsam::traits<gtsam::Pose3>::Between(robotPose,\n                                                         objectPose,\n                                                         boost::none,\n                                                         H1);\n  return gtsam::traits<gtsam::Pose3>::Local(measured, hx);\n}\n\n/* -------------------------------------------------------------------------- */\n\ngtsam::Vector\nLooselyCoupledDetectionFactor::unwhitenedError(const gtsam::Pose3 &measured,\n                                               const gtsam::Values &x,\n                                               boost::optional<std::vector<gtsam::Matrix> &> H) const {\n  if 
(this->active(x)) {\n    const gtsam::Pose3 &x1 = x.at<gtsam::Pose3>(this->robotPoseKey());\n    const gtsam::Pose3 &x2 = x.at<gtsam::Pose3>(this->objectPoseKey());\n    if (H) {\n      return evaluateError(x1, x2, measured, (*H)[0]);\n    } else {\n      return evaluateError(x1, x2, measured);\n    }\n  } else {\n    return gtsam::Vector::Zero(this->dim());\n  }\n}\n\n/* -------------------------------------------------------------------------- */\n\nboost::shared_ptr<gtsam::GaussianFactor>\nLooselyCoupledDetectionFactor::linearize(const gtsam::Values &c) const {\n  // Only linearize if the factor is active\n  if (!active(c))\n    return boost::shared_ptr<gtsam::JacobianFactor>();\n\n  // Determine which detection is used to generate the factor function, a.k.a.\n  // the Max-Mixture model\n  double error;\n  const auto robotPose                  = c.at<gtsam::Pose3>(this->robotPoseKey());\n  const auto objectPose                 = c.at<gtsam::Pose3>(this->objectPoseKey());\n  std::tie(cachedDetectionIndex, error) = getDetectionIndexAndError(robotPose.inverse() * objectPose, this->detections);\n\n  auto measured   = this->detections[cachedDetectionIndex].getPose();\n  auto noiseModel = this->noiseModels[cachedDetectionIndex];\n\n  // Call evaluate error to get Jacobians and RHS vector b\n  std::vector<gtsam::Matrix> A(size());\n  gtsam::Vector b = -unwhitenedError(measured, c, A);\n  if (noiseModel && b.size() != noiseModel->dim())\n    throw std::invalid_argument(\n        boost::str(\n            boost::format(\n                \"NoiseModelFactor: NoiseModel has dimension %1% instead of %2%.\") %\n            noiseModel->dim() % b.size()));\n\n  // Whiten the corresponding system now\n  if (noiseModel)\n    noiseModel->WhitenSystem(A, b);\n\n  // Fill in terms, needed to create JacobianFactor below\n  std::vector<std::pair<gtsam::Key, gtsam::Matrix> > terms(size());\n  for (size_t j = 0; j < size(); ++j) {\n    terms[j].first = keys()[j];\n    
terms[j].second.swap(A[j]);\n  }\n\n  // TODO pass unwhitened + noise model to Gaussian factor\n  using gtsam::noiseModel::Constrained;\n  if (noiseModel && noiseModel->isConstrained())\n    return gtsam::GaussianFactor::shared_ptr(\n        new gtsam::JacobianFactor(terms, b,\n                                  boost::static_pointer_cast<Constrained>(noiseModel)->unit()));\n  else\n    return gtsam::GaussianFactor::shared_ptr(new gtsam::JacobianFactor(terms, b));\n}\n\n/* -------------------------------------------------------------------------- */\n\ndouble LooselyCoupledDetectionFactor::error(const gtsam::Values &c) const {\n  if (active(c)) {\n    // Determine which detection is used to generate the factor function, a.k.a.\n    // the Max-Mixture model\n    size_t index;\n    double error;\n    const auto robotPose   = c.at<gtsam::Pose3>(this->robotPoseKey());\n    const auto objectPose  = c.at<gtsam::Pose3>(this->objectPoseKey());\n    std::tie(index, error) = getDetectionIndexAndError(robotPose.inverse() * objectPose, this->detections);\n\n    auto measured   = this->detections[index].getPose();\n    auto noiseModel = this->noiseModels[index];\n\n    const gtsam::Vector b = unwhitenedError(measured, c);\n    if (noiseModel && b.size() != noiseModel->dim())\n      throw std::invalid_argument(\n          boost::str(\n              boost::format(\n                  \"NoiseModelFactor: NoiseModel has dimension %1% instead of %2%.\") %\n              noiseModel->dim() % b.size()));\n    return 0.5 * noiseModel->distance(b);\n  } else {\n    return 0.0;\n  }\n}\n\n/* -------------------------------------------------------------------------- */\n/*                             Stable Pose Factor                             */\n/* -------------------------------------------------------------------------- */\n\ngtsam::Vector\nStablePoseFactor::evaluateError(const gtsam::Pose3 &previousPose,\n                                const gtsam::Pose3 &velocity,\n                 
               const gtsam::Pose3 &nextPose,\n                                boost::optional<gtsam::Matrix &> H1,\n                                boost::optional<gtsam::Matrix &> H2,\n                                boost::optional<gtsam::Matrix &> H3) const {\n  auto identity = gtsam::Pose3::identity();\n\n  auto deltaPoseVec = gtsam::traits<gtsam::Pose3>::Local(identity, velocity) * this->deltaTime;\n  auto deltaPose    = gtsam::traits<gtsam::Pose3>::Retract(identity, deltaPoseVec);\n  gtsam::Pose3 hx   = nextPose.inverse() * previousPose * deltaPose;\n\n  if (H1) *H1 = deltaPose.inverse().AdjointMap();\n  if (H2) *H2 = this->deltaTime * gtsam::Pose3::ExpmapDerivative(deltaPoseVec) * gtsam::Pose3::LogmapDerivative(velocity);\n  if (H3) *H3 = -hx.inverse().AdjointMap();\n\n  return gtsam::traits<gtsam::Pose3>::Local(identity, hx);\n}\n"
  },
  {
    "path": "src/featureExtraction.cpp",
    "content": "#include \"lio_segmot/cloud_info.h\"\n#include \"utility.h\"\n\nstruct smoothness_t {\n  float value;\n  size_t ind;\n};\n\nstruct by_value {\n  bool operator()(smoothness_t const &left, smoothness_t const &right) {\n    return left.value < right.value;\n  }\n};\n\nclass FeatureExtraction : public ParamServer {\n public:\n  ros::Subscriber subLaserCloudInfo;\n\n  ros::Publisher pubLaserCloudInfo;\n  ros::Publisher pubCornerPoints;\n  ros::Publisher pubSurfacePoints;\n\n  pcl::PointCloud<PointType>::Ptr extractedCloud;\n  pcl::PointCloud<PointType>::Ptr cornerCloud;\n  pcl::PointCloud<PointType>::Ptr surfaceCloud;\n\n  pcl::VoxelGrid<PointType> downSizeFilter;\n\n  lio_segmot::cloud_info cloudInfo;\n  std_msgs::Header cloudHeader;\n\n  std::vector<smoothness_t> cloudSmoothness;\n  float *cloudCurvature;\n  int *cloudNeighborPicked;\n  int *cloudLabel;\n\n  FeatureExtraction() {\n    subLaserCloudInfo = nh.subscribe<lio_segmot::cloud_info>(\"lio_segmot/deskew/cloud_info\", 1, &FeatureExtraction::laserCloudInfoHandler, this, ros::TransportHints().tcpNoDelay());\n\n    pubLaserCloudInfo = nh.advertise<lio_segmot::cloud_info>(\"lio_segmot/feature/cloud_info\", 1);\n    pubCornerPoints   = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/feature/cloud_corner\", 1);\n    pubSurfacePoints  = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/feature/cloud_surface\", 1);\n\n    initializationValue();\n  }\n\n  void initializationValue() {\n    cloudSmoothness.resize(N_SCAN * Horizon_SCAN);\n\n    downSizeFilter.setLeafSize(odometrySurfLeafSize, odometrySurfLeafSize, odometrySurfLeafSize);\n\n    extractedCloud.reset(new pcl::PointCloud<PointType>());\n    cornerCloud.reset(new pcl::PointCloud<PointType>());\n    surfaceCloud.reset(new pcl::PointCloud<PointType>());\n\n    cloudCurvature      = new float[N_SCAN * Horizon_SCAN];\n    cloudNeighborPicked = new int[N_SCAN * Horizon_SCAN];\n    cloudLabel          = new int[N_SCAN * Horizon_SCAN];\n  }\n\n  
void laserCloudInfoHandler(const lio_segmot::cloud_infoConstPtr &msgIn) {\n    cloudInfo   = *msgIn;                                     // new cloud info\n    cloudHeader = msgIn->header;                              // new cloud header\n    pcl::fromROSMsg(msgIn->cloud_deskewed, *extractedCloud);  // new cloud for extraction\n\n    calculateSmoothness();\n\n    markOccludedPoints();\n\n    extractFeatures();\n\n    publishFeatureCloud();\n  }\n\n  void calculateSmoothness() {\n    int cloudSize = extractedCloud->points.size();\n    for (int i = 5; i < cloudSize - 5; i++) {\n      float diffRange = cloudInfo.pointRange[i - 5] + cloudInfo.pointRange[i - 4] + cloudInfo.pointRange[i - 3] + cloudInfo.pointRange[i - 2] + cloudInfo.pointRange[i - 1] - cloudInfo.pointRange[i] * 10 + cloudInfo.pointRange[i + 1] + cloudInfo.pointRange[i + 2] + cloudInfo.pointRange[i + 3] + cloudInfo.pointRange[i + 4] + cloudInfo.pointRange[i + 5];\n\n      cloudCurvature[i] = diffRange * diffRange;  //diffX * diffX + diffY * diffY + diffZ * diffZ;\n\n      cloudNeighborPicked[i] = 0;\n      cloudLabel[i]          = 0;\n      // cloudSmoothness for sorting\n      cloudSmoothness[i].value = cloudCurvature[i];\n      cloudSmoothness[i].ind   = i;\n    }\n  }\n\n  void markOccludedPoints() {\n    int cloudSize = extractedCloud->points.size();\n    // mark occluded points and parallel beam points\n    for (int i = 5; i < cloudSize - 6; ++i) {\n      // occluded points\n      float depth1   = cloudInfo.pointRange[i];\n      float depth2   = cloudInfo.pointRange[i + 1];\n      int columnDiff = std::abs(int(cloudInfo.pointColInd[i + 1] - cloudInfo.pointColInd[i]));\n\n      if (columnDiff < 10) {\n        // 10 pixel diff in range image\n        if (depth1 - depth2 > 0.3) {\n          cloudNeighborPicked[i - 5] = 1;\n          cloudNeighborPicked[i - 4] = 1;\n          cloudNeighborPicked[i - 3] = 1;\n          cloudNeighborPicked[i - 2] = 1;\n          cloudNeighborPicked[i - 1] = 1;\n          
cloudNeighborPicked[i]     = 1;\n        } else if (depth2 - depth1 > 0.3) {\n          cloudNeighborPicked[i + 1] = 1;\n          cloudNeighborPicked[i + 2] = 1;\n          cloudNeighborPicked[i + 3] = 1;\n          cloudNeighborPicked[i + 4] = 1;\n          cloudNeighborPicked[i + 5] = 1;\n          cloudNeighborPicked[i + 6] = 1;\n        }\n      }\n      // parallel beam\n      float diff1 = std::abs(float(cloudInfo.pointRange[i - 1] - cloudInfo.pointRange[i]));\n      float diff2 = std::abs(float(cloudInfo.pointRange[i + 1] - cloudInfo.pointRange[i]));\n\n      if (diff1 > 0.02 * cloudInfo.pointRange[i] && diff2 > 0.02 * cloudInfo.pointRange[i])\n        cloudNeighborPicked[i] = 1;\n    }\n  }\n\n  void extractFeatures() {\n    cornerCloud->clear();\n    surfaceCloud->clear();\n\n    pcl::PointCloud<PointType>::Ptr surfaceCloudScan(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr surfaceCloudScanDS(new pcl::PointCloud<PointType>());\n\n    for (int i = 0; i < N_SCAN; i++) {\n      surfaceCloudScan->clear();\n\n      for (int j = 0; j < 6; j++) {\n        int sp = (cloudInfo.startRingIndex[i] * (6 - j) + cloudInfo.endRingIndex[i] * j) / 6;\n        int ep = (cloudInfo.startRingIndex[i] * (5 - j) + cloudInfo.endRingIndex[i] * (j + 1)) / 6 - 1;\n\n        if (sp >= ep)\n          continue;\n\n        std::sort(cloudSmoothness.begin() + sp, cloudSmoothness.begin() + ep, by_value());\n\n        int largestPickedNum = 0;\n        for (int k = ep; k >= sp; k--) {\n          int ind = cloudSmoothness[k].ind;\n          if (cloudNeighborPicked[ind] == 0 && cloudCurvature[ind] > edgeThreshold) {\n            largestPickedNum++;\n            if (largestPickedNum <= 20) {\n              cloudLabel[ind] = 1;\n              cornerCloud->push_back(extractedCloud->points[ind]);\n            } else {\n              break;\n            }\n\n            cloudNeighborPicked[ind] = 1;\n            for (int l = 1; l <= 5; l++) {\n              int columnDiff 
= std::abs(int(cloudInfo.pointColInd[ind + l] - cloudInfo.pointColInd[ind + l - 1]));\n              if (columnDiff > 10)\n                break;\n              cloudNeighborPicked[ind + l] = 1;\n            }\n            for (int l = -1; l >= -5; l--) {\n              int columnDiff = std::abs(int(cloudInfo.pointColInd[ind + l] - cloudInfo.pointColInd[ind + l + 1]));\n              if (columnDiff > 10)\n                break;\n              cloudNeighborPicked[ind + l] = 1;\n            }\n          }\n        }\n\n        for (int k = sp; k <= ep; k++) {\n          int ind = cloudSmoothness[k].ind;\n          if (cloudNeighborPicked[ind] == 0 && cloudCurvature[ind] < surfThreshold) {\n            cloudLabel[ind]          = -1;\n            cloudNeighborPicked[ind] = 1;\n\n            for (int l = 1; l <= 5; l++) {\n              int columnDiff = std::abs(int(cloudInfo.pointColInd[ind + l] - cloudInfo.pointColInd[ind + l - 1]));\n              if (columnDiff > 10)\n                break;\n\n              cloudNeighborPicked[ind + l] = 1;\n            }\n            for (int l = -1; l >= -5; l--) {\n              int columnDiff = std::abs(int(cloudInfo.pointColInd[ind + l] - cloudInfo.pointColInd[ind + l + 1]));\n              if (columnDiff > 10)\n                break;\n\n              cloudNeighborPicked[ind + l] = 1;\n            }\n          }\n        }\n\n        for (int k = sp; k <= ep; k++) {\n          if (cloudLabel[k] <= 0) {\n            surfaceCloudScan->push_back(extractedCloud->points[k]);\n          }\n        }\n      }\n\n      surfaceCloudScanDS->clear();\n      downSizeFilter.setInputCloud(surfaceCloudScan);\n      downSizeFilter.filter(*surfaceCloudScanDS);\n\n      *surfaceCloud += *surfaceCloudScanDS;\n    }\n  }\n\n  void freeCloudInfoMemory() {\n    cloudInfo.startRingIndex.clear();\n    cloudInfo.endRingIndex.clear();\n    cloudInfo.pointColInd.clear();\n    cloudInfo.pointRange.clear();\n  }\n\n  void publishFeatureCloud() {\n    // 
free cloud info memory\n    freeCloudInfoMemory();\n    // save newly extracted features\n    cloudInfo.cloud_corner  = publishCloud(&pubCornerPoints, cornerCloud, cloudHeader.stamp, lidarFrame);\n    cloudInfo.cloud_surface = publishCloud(&pubSurfacePoints, surfaceCloud, cloudHeader.stamp, lidarFrame);\n    // publish to mapOptimization\n    pubLaserCloudInfo.publish(cloudInfo);\n  }\n};\n\nint main(int argc, char **argv) {\n  ros::init(argc, argv, \"lio_segmot\");\n\n  FeatureExtraction FE;\n\n  ROS_INFO(\"\\033[1;32m----> Feature Extraction Started.\\033[0m\");\n\n  ros::spin();\n\n  return 0;\n}"
  },
  {
    "path": "src/imageProjection.cpp",
    "content": "#include \"lio_segmot/cloud_info.h\"\n#include \"utility.h\"\n\nstruct VelodynePointXYZIRT {\n  PCL_ADD_POINT4D\n  PCL_ADD_INTENSITY;\n  uint16_t ring;\n  float time;\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n} EIGEN_ALIGN16;\nPOINT_CLOUD_REGISTER_POINT_STRUCT(VelodynePointXYZIRT,\n                                  (float, x, x)(float, y, y)(float, z, z)(float, intensity, intensity)(uint16_t, ring, ring)(float, time, time))\n\nstruct OusterPointXYZIRT {\n  PCL_ADD_POINT4D;\n  float intensity;\n  uint32_t t;\n  uint16_t reflectivity;\n  uint8_t ring;\n  uint16_t noise;\n  uint32_t range;\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n} EIGEN_ALIGN16;\nPOINT_CLOUD_REGISTER_POINT_STRUCT(OusterPointXYZIRT,\n                                  (float, x, x)(float, y, y)(float, z, z)(float, intensity, intensity)(uint32_t, t, t)(uint16_t, reflectivity, reflectivity)(uint8_t, ring, ring)(uint16_t, noise, noise)(uint32_t, range, range))\n\n// Use the Velodyne point format as a common representation\nusing PointXYZIRT = VelodynePointXYZIRT;\n\nconst int queueLength = 2000;\n\nclass ImageProjection : public ParamServer {\n private:\n  std::mutex imuLock;\n  std::mutex odoLock;\n\n  ros::Subscriber subLaserCloud;\n  ros::Publisher pubLaserCloud;\n\n  ros::Publisher pubExtractedCloud;\n  ros::Publisher pubLaserCloudInfo;\n\n  ros::Publisher pubReady;\n\n  ros::Subscriber subImu;\n  std::deque<sensor_msgs::Imu> imuQueue;\n\n  ros::Subscriber subOdom;\n  std::deque<nav_msgs::Odometry> odomQueue;\n\n  std::deque<sensor_msgs::PointCloud2> cloudQueue;\n  sensor_msgs::PointCloud2 currentCloudMsg;\n\n  double *imuTime = new double[queueLength];\n  double *imuRotX = new double[queueLength];\n  double *imuRotY = new double[queueLength];\n  double *imuRotZ = new double[queueLength];\n\n  int imuPointerCur;\n  bool firstPointFlag;\n  Eigen::Affine3f transStartInverse;\n\n  pcl::PointCloud<PointXYZIRT>::Ptr laserCloudIn;\n  pcl::PointCloud<PointXYZIRT>::Ptr rawCloudIn;\n  
pcl::PointCloud<OusterPointXYZIRT>::Ptr tmpOusterCloudIn;\n  pcl::PointCloud<PointType>::Ptr rawCloud;\n  pcl::PointCloud<PointType>::Ptr fullCloud;\n  pcl::PointCloud<PointType>::Ptr extractedCloud;\n\n  int deskewFlag;\n  cv::Mat rangeMat;\n\n  bool odomDeskewFlag;\n  float odomIncreX;\n  float odomIncreY;\n  float odomIncreZ;\n\n  lio_segmot::cloud_info cloudInfo;\n  double timeScanCur;\n  double timeScanEnd;\n  std_msgs::Header cloudHeader;\n\n public:\n  ImageProjection() : deskewFlag(0) {\n    subImu        = nh.subscribe<sensor_msgs::Imu>(imuTopic, 2000, &ImageProjection::imuHandler, this, ros::TransportHints().tcpNoDelay());\n    subOdom       = nh.subscribe<nav_msgs::Odometry>(odomTopic + \"_incremental\", 2000, &ImageProjection::odometryHandler, this, ros::TransportHints().tcpNoDelay());\n    subLaserCloud = nh.subscribe<sensor_msgs::PointCloud2>(pointCloudTopic, 5, &ImageProjection::cloudHandler, this, ros::TransportHints().tcpNoDelay());\n\n    pubExtractedCloud = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/deskew/cloud_deskewed\", 1);\n    pubLaserCloudInfo = nh.advertise<lio_segmot::cloud_info>(\"lio_segmot/deskew/cloud_info\", 1);\n    pubReady          = nh.advertise<std_msgs::Empty>(\"lio_segmot/ready\", 1);\n\n    allocateMemory();\n    resetParameters();\n\n    pcl::console::setVerbosityLevel(pcl::console::L_ERROR);\n  }\n\n  void allocateMemory() {\n    laserCloudIn.reset(new pcl::PointCloud<PointXYZIRT>());\n    rawCloudIn.reset(new pcl::PointCloud<PointXYZIRT>());\n    tmpOusterCloudIn.reset(new pcl::PointCloud<OusterPointXYZIRT>());\n    rawCloud.reset(new pcl::PointCloud<PointType>());\n    fullCloud.reset(new pcl::PointCloud<PointType>());\n    extractedCloud.reset(new pcl::PointCloud<PointType>());\n\n    fullCloud->points.resize(N_SCAN * Horizon_SCAN);\n\n    cloudInfo.startRingIndex.assign(N_SCAN, 0);\n    cloudInfo.endRingIndex.assign(N_SCAN, 0);\n\n    cloudInfo.pointColInd.assign(N_SCAN * Horizon_SCAN, 0);\n    
cloudInfo.pointRange.assign(N_SCAN * Horizon_SCAN, 0);\n\n    resetParameters();\n  }\n\n  void resetParameters() {\n    laserCloudIn->clear();\n    extractedCloud->clear();\n    // reset range matrix for range image projection\n    rangeMat = cv::Mat(N_SCAN, Horizon_SCAN, CV_32F, cv::Scalar::all(FLT_MAX));\n\n    imuPointerCur  = 0;\n    firstPointFlag = true;\n    odomDeskewFlag = false;\n\n    for (int i = 0; i < queueLength; ++i) {\n      imuTime[i] = 0;\n      imuRotX[i] = 0;\n      imuRotY[i] = 0;\n      imuRotZ[i] = 0;\n    }\n  }\n\n  ~ImageProjection() {}\n\n  void imuHandler(const sensor_msgs::Imu::ConstPtr &imuMsg) {\n    sensor_msgs::Imu thisImu = imuConverter(*imuMsg);\n\n    std::lock_guard<std::mutex> lock1(imuLock);\n    imuQueue.push_back(thisImu);\n\n    // debug IMU data\n    // cout << std::setprecision(6);\n    // cout << \"IMU acc: \" << endl;\n    // cout << \"x: \" << thisImu.linear_acceleration.x <<\n    //       \", y: \" << thisImu.linear_acceleration.y <<\n    //       \", z: \" << thisImu.linear_acceleration.z << endl;\n    // cout << \"IMU gyro: \" << endl;\n    // cout << \"x: \" << thisImu.angular_velocity.x <<\n    //       \", y: \" << thisImu.angular_velocity.y <<\n    //       \", z: \" << thisImu.angular_velocity.z << endl;\n    // double imuRoll, imuPitch, imuYaw;\n    // tf::Quaternion orientation;\n    // tf::quaternionMsgToTF(thisImu.orientation, orientation);\n    // tf::Matrix3x3(orientation).getRPY(imuRoll, imuPitch, imuYaw);\n    // cout << \"IMU roll pitch yaw: \" << endl;\n    // cout << \"roll: \" << imuRoll << \", pitch: \" << imuPitch << \", yaw: \" << imuYaw << endl << endl;\n  }\n\n  void odometryHandler(const nav_msgs::Odometry::ConstPtr &odometryMsg) {\n    std::lock_guard<std::mutex> lock2(odoLock);\n    odomQueue.push_back(*odometryMsg);\n  }\n\n  void cloudHandler(const sensor_msgs::PointCloud2ConstPtr &laserCloudMsg) {\n    if (!cachePointCloud(laserCloudMsg) || !deskewInfo()) {\n      
pubReady.publish(std_msgs::Empty());\n      return;\n    }\n\n    projectPointCloud();\n\n    cloudExtraction();\n\n    publishClouds();\n\n    resetParameters();\n  }\n\n  bool cachePointCloud(const sensor_msgs::PointCloud2ConstPtr &laserCloudMsg) {\n    // cache point cloud\n    cloudQueue.push_back(*laserCloudMsg);\n    if (cloudQueue.size() <= 2)\n      return false;\n\n    // convert cloud\n    currentCloudMsg = std::move(cloudQueue.front());\n    cloudQueue.pop_front();\n    if (sensor == SensorType::VELODYNE) {\n      pcl::moveFromROSMsg(currentCloudMsg, *rawCloudIn);\n      *laserCloudIn += *rawCloudIn;\n\n      rawCloud->clear();\n      for (const auto &p : *rawCloudIn) {\n        PointType xyzi;\n        xyzi.x         = p.x;\n        xyzi.y         = p.y;\n        xyzi.z         = p.z;\n        xyzi.intensity = p.intensity;\n\n        rawCloud->push_back(xyzi);\n      }\n    } else if (sensor == SensorType::OUSTER) {\n      // Convert to Velodyne format\n      pcl::moveFromROSMsg(currentCloudMsg, *tmpOusterCloudIn);\n      laserCloudIn->points.resize(tmpOusterCloudIn->size());\n      laserCloudIn->is_dense = tmpOusterCloudIn->is_dense;\n      for (size_t i = 0; i < tmpOusterCloudIn->size(); i++) {\n        auto &src     = tmpOusterCloudIn->points[i];\n        auto &dst     = laserCloudIn->points[i];\n        dst.x         = src.x;\n        dst.y         = src.y;\n        dst.z         = src.z;\n        dst.intensity = src.intensity;\n        dst.ring      = src.ring;\n        dst.time      = src.t * 1e-9f;\n      }\n    } else {\n      ROS_ERROR_STREAM(\"Unknown sensor type: \" << int(sensor));\n      ros::shutdown();\n    }\n\n    // get timestamp\n    cloudHeader = currentCloudMsg.header;\n    timeScanCur = cloudHeader.stamp.toSec();\n    timeScanEnd = timeScanCur + laserCloudIn->points.back().time;\n\n    // check dense flag\n    if (laserCloudIn->is_dense == false) {\n      ROS_ERROR(\"Point cloud is not in dense format, please remove NaN points 
first!\");\n      ros::shutdown();\n    }\n\n    // check ring channel\n    static int ringFlag = 0;\n    if (ringFlag == 0) {\n      ringFlag = -1;\n      for (int i = 0; i < (int)currentCloudMsg.fields.size(); ++i) {\n        if (currentCloudMsg.fields[i].name == \"ring\") {\n          ringFlag = 1;\n          break;\n        }\n      }\n      if (ringFlag == -1) {\n        ROS_ERROR(\"Point cloud ring channel not available, please configure your point cloud data!\");\n        ros::shutdown();\n      }\n    }\n\n    // check point time\n    if (deskewFlag == 0) {\n      deskewFlag = -1;\n      for (auto &field : currentCloudMsg.fields) {\n        if (field.name == \"time\" || field.name == \"t\") {\n          deskewFlag = 1;\n          break;\n        }\n      }\n      if (deskewFlag == -1)\n        ROS_WARN(\"Point cloud timestamp not available, deskew function disabled, system will drift significantly!\");\n    }\n\n    return true;\n  }\n\n  bool deskewInfo() {\n    std::lock_guard<std::mutex> lock1(imuLock);\n    std::lock_guard<std::mutex> lock2(odoLock);\n\n    // make sure IMU data available for the scan\n    if (imuQueue.empty() || imuQueue.front().header.stamp.toSec() > timeScanCur || imuQueue.back().header.stamp.toSec() < timeScanEnd) {\n      ROS_DEBUG(\"Waiting for IMU data ...\");\n      return false;\n    }\n\n    imuDeskewInfo();\n\n    odomDeskewInfo();\n\n    return true;\n  }\n\n  void imuDeskewInfo() {\n    cloudInfo.imuAvailable = false;\n\n    while (!imuQueue.empty()) {\n      if (imuQueue.front().header.stamp.toSec() < timeScanCur - 0.01)\n        imuQueue.pop_front();\n      else\n        break;\n    }\n\n    if (imuQueue.empty())\n      return;\n\n    imuPointerCur = 0;\n\n    for (int i = 0; i < (int)imuQueue.size(); ++i) {\n      sensor_msgs::Imu thisImuMsg = imuQueue[i];\n      double currentImuTime       = thisImuMsg.header.stamp.toSec();\n\n      // get roll, pitch, and yaw estimation for this scan\n      if (currentImuTime <= 
timeScanCur)\n        imuRPY2rosRPY(&thisImuMsg, &cloudInfo.imuRollInit, &cloudInfo.imuPitchInit, &cloudInfo.imuYawInit);\n\n      if (currentImuTime > timeScanEnd + 0.01)\n        break;\n\n      if (imuPointerCur == 0) {\n        imuRotX[0] = 0;\n        imuRotY[0] = 0;\n        imuRotZ[0] = 0;\n        imuTime[0] = currentImuTime;\n        ++imuPointerCur;\n        continue;\n      }\n\n      // get angular velocity\n      double angular_x, angular_y, angular_z;\n      imuAngular2rosAngular(&thisImuMsg, &angular_x, &angular_y, &angular_z);\n\n      // integrate rotation\n      double timeDiff        = currentImuTime - imuTime[imuPointerCur - 1];\n      imuRotX[imuPointerCur] = imuRotX[imuPointerCur - 1] + angular_x * timeDiff;\n      imuRotY[imuPointerCur] = imuRotY[imuPointerCur - 1] + angular_y * timeDiff;\n      imuRotZ[imuPointerCur] = imuRotZ[imuPointerCur - 1] + angular_z * timeDiff;\n      imuTime[imuPointerCur] = currentImuTime;\n      ++imuPointerCur;\n    }\n\n    --imuPointerCur;\n\n    if (imuPointerCur <= 0)\n      return;\n\n    cloudInfo.imuAvailable = true;\n  }\n\n  void odomDeskewInfo() {\n    cloudInfo.odomAvailable = false;\n\n    while (!odomQueue.empty()) {\n      if (odomQueue.front().header.stamp.toSec() < timeScanCur - 0.01)\n        odomQueue.pop_front();\n      else\n        break;\n    }\n\n    if (odomQueue.empty())\n      return;\n\n    if (odomQueue.front().header.stamp.toSec() > timeScanCur)\n      return;\n\n    // get start odometry at the beinning of the scan\n    nav_msgs::Odometry startOdomMsg;\n\n    for (int i = 0; i < (int)odomQueue.size(); ++i) {\n      startOdomMsg = odomQueue[i];\n\n      if (ROS_TIME(&startOdomMsg) < timeScanCur)\n        continue;\n      else\n        break;\n    }\n\n    tf::Quaternion orientation;\n    tf::quaternionMsgToTF(startOdomMsg.pose.pose.orientation, orientation);\n\n    double roll, pitch, yaw;\n    tf::Matrix3x3(orientation).getRPY(roll, pitch, yaw);\n\n    // Initial guess used in 
mapOptimization\n    cloudInfo.initialGuessX     = startOdomMsg.pose.pose.position.x;\n    cloudInfo.initialGuessY     = startOdomMsg.pose.pose.position.y;\n    cloudInfo.initialGuessZ     = startOdomMsg.pose.pose.position.z;\n    cloudInfo.initialGuessRoll  = roll;\n    cloudInfo.initialGuessPitch = pitch;\n    cloudInfo.initialGuessYaw   = yaw;\n\n    cloudInfo.odomAvailable = true;\n\n    // get end odometry at the end of the scan\n    odomDeskewFlag = false;\n\n    if (odomQueue.back().header.stamp.toSec() < timeScanEnd)\n      return;\n\n    nav_msgs::Odometry endOdomMsg;\n\n    for (int i = 0; i < (int)odomQueue.size(); ++i) {\n      endOdomMsg = odomQueue[i];\n\n      if (ROS_TIME(&endOdomMsg) < timeScanEnd)\n        continue;\n      else\n        break;\n    }\n\n    if (int(round(startOdomMsg.pose.covariance[0])) != int(round(endOdomMsg.pose.covariance[0])))\n      return;\n\n    Eigen::Affine3f transBegin = pcl::getTransformation(startOdomMsg.pose.pose.position.x, startOdomMsg.pose.pose.position.y, startOdomMsg.pose.pose.position.z, roll, pitch, yaw);\n\n    tf::quaternionMsgToTF(endOdomMsg.pose.pose.orientation, orientation);\n    tf::Matrix3x3(orientation).getRPY(roll, pitch, yaw);\n    Eigen::Affine3f transEnd = pcl::getTransformation(endOdomMsg.pose.pose.position.x, endOdomMsg.pose.pose.position.y, endOdomMsg.pose.pose.position.z, roll, pitch, yaw);\n\n    Eigen::Affine3f transBt = transBegin.inverse() * transEnd;\n\n    float rollIncre, pitchIncre, yawIncre;\n    pcl::getTranslationAndEulerAngles(transBt, odomIncreX, odomIncreY, odomIncreZ, rollIncre, pitchIncre, yawIncre);\n\n    odomDeskewFlag = true;\n  }\n\n  void findRotation(double pointTime, float *rotXCur, float *rotYCur, float *rotZCur) {\n    *rotXCur = 0;\n    *rotYCur = 0;\n    *rotZCur = 0;\n\n    int imuPointerFront = 0;\n    while (imuPointerFront < imuPointerCur) {\n      if (pointTime < imuTime[imuPointerFront])\n        break;\n      ++imuPointerFront;\n    }\n\n    if (pointTime > 
imuTime[imuPointerFront] || imuPointerFront == 0) {\n      *rotXCur = imuRotX[imuPointerFront];\n      *rotYCur = imuRotY[imuPointerFront];\n      *rotZCur = imuRotZ[imuPointerFront];\n    } else {\n      int imuPointerBack = imuPointerFront - 1;\n      double ratioFront  = (pointTime - imuTime[imuPointerBack]) / (imuTime[imuPointerFront] - imuTime[imuPointerBack]);\n      double ratioBack   = (imuTime[imuPointerFront] - pointTime) / (imuTime[imuPointerFront] - imuTime[imuPointerBack]);\n      *rotXCur           = imuRotX[imuPointerFront] * ratioFront + imuRotX[imuPointerBack] * ratioBack;\n      *rotYCur           = imuRotY[imuPointerFront] * ratioFront + imuRotY[imuPointerBack] * ratioBack;\n      *rotZCur           = imuRotZ[imuPointerFront] * ratioFront + imuRotZ[imuPointerBack] * ratioBack;\n    }\n  }\n\n  void findPosition(double relTime, float *posXCur, float *posYCur, float *posZCur) {\n    *posXCur = 0;\n    *posYCur = 0;\n    *posZCur = 0;\n\n    // If the sensor moves relatively slow, like walking speed, positional deskew seems to have little benefits. 
Thus code below is commented.\n\n    // if (cloudInfo.odomAvailable == false || odomDeskewFlag == false)\n    //     return;\n\n    // float ratio = relTime / (timeScanEnd - timeScanCur);\n\n    // *posXCur = ratio * odomIncreX;\n    // *posYCur = ratio * odomIncreY;\n    // *posZCur = ratio * odomIncreZ;\n  }\n\n  PointType deskewPoint(PointType *point, double relTime) {\n    if (deskewFlag == -1 || cloudInfo.imuAvailable == false)\n      return *point;\n\n    double pointTime = timeScanCur + relTime;\n\n    float rotXCur, rotYCur, rotZCur;\n    findRotation(pointTime, &rotXCur, &rotYCur, &rotZCur);\n\n    float posXCur, posYCur, posZCur;\n    findPosition(relTime, &posXCur, &posYCur, &posZCur);\n\n    if (firstPointFlag == true) {\n      transStartInverse = (pcl::getTransformation(posXCur, posYCur, posZCur, rotXCur, rotYCur, rotZCur)).inverse();\n      firstPointFlag    = false;\n    }\n\n    // transform points to start\n    Eigen::Affine3f transFinal = pcl::getTransformation(posXCur, posYCur, posZCur, rotXCur, rotYCur, rotZCur);\n    Eigen::Affine3f transBt    = transStartInverse * transFinal;\n\n    PointType newPoint;\n    newPoint.x         = transBt(0, 0) * point->x + transBt(0, 1) * point->y + transBt(0, 2) * point->z + transBt(0, 3);\n    newPoint.y         = transBt(1, 0) * point->x + transBt(1, 1) * point->y + transBt(1, 2) * point->z + transBt(1, 3);\n    newPoint.z         = transBt(2, 0) * point->x + transBt(2, 1) * point->y + transBt(2, 2) * point->z + transBt(2, 3);\n    newPoint.intensity = point->intensity;\n\n    return newPoint;\n  }\n\n  void projectPointCloud() {\n    int cloudSize = laserCloudIn->points.size();\n    // range image projection\n    for (int i = 0; i < cloudSize; ++i) {\n      PointType thisPoint;\n      thisPoint.x         = laserCloudIn->points[i].x;\n      thisPoint.y         = laserCloudIn->points[i].y;\n      thisPoint.z         = laserCloudIn->points[i].z;\n      thisPoint.intensity = 
laserCloudIn->points[i].intensity;\n\n      float range = pointDistance(thisPoint);\n      if (range < lidarMinRange || range > lidarMaxRange)\n        continue;\n\n      int rowIdn = laserCloudIn->points[i].ring;\n      if (rowIdn < 0 || rowIdn >= N_SCAN)\n        continue;\n\n      if (rowIdn % downsampleRate != 0)\n        continue;\n\n      float horizonAngle = atan2(thisPoint.x, thisPoint.y) * 180 / M_PI;\n\n      static float ang_res_x = 360.0 / float(Horizon_SCAN);\n      int columnIdn          = -round((horizonAngle - 90.0) / ang_res_x) + Horizon_SCAN / 2;\n      if (columnIdn >= Horizon_SCAN)\n        columnIdn -= Horizon_SCAN;\n\n      if (columnIdn < 0 || columnIdn >= Horizon_SCAN)\n        continue;\n\n      if (rangeMat.at<float>(rowIdn, columnIdn) != FLT_MAX)\n        continue;\n\n      thisPoint = deskewPoint(&thisPoint, laserCloudIn->points[i].time);\n\n      rangeMat.at<float>(rowIdn, columnIdn) = range;\n\n      int index                = columnIdn + rowIdn * Horizon_SCAN;\n      fullCloud->points[index] = thisPoint;\n    }\n  }\n\n  void cloudExtraction() {\n    int count = 0;\n    // extract segmented cloud for lidar odometry\n    for (int i = 0; i < N_SCAN; ++i) {\n      cloudInfo.startRingIndex[i] = count - 1 + 5;\n\n      for (int j = 0; j < Horizon_SCAN; ++j) {\n        if (rangeMat.at<float>(i, j) != FLT_MAX) {\n          // mark the points' column index for marking occlusion later\n          cloudInfo.pointColInd[count] = j;\n          // save range info\n          cloudInfo.pointRange[count] = rangeMat.at<float>(i, j);\n          // save extracted cloud\n          extractedCloud->push_back(fullCloud->points[j + i * Horizon_SCAN]);\n          // size of extracted cloud\n          ++count;\n        }\n      }\n      cloudInfo.endRingIndex[i] = count - 1 - 5;\n    }\n  }\n\n  void publishClouds() {\n    cloudInfo.header         = cloudHeader;\n    cloudInfo.cloud_raw      = publishCloud(&pubExtractedCloud, rawCloud, cloudHeader.stamp, 
lidarFrame);\n    cloudInfo.cloud_deskewed = publishCloud(&pubExtractedCloud, extractedCloud, cloudHeader.stamp, lidarFrame);\n    pubLaserCloudInfo.publish(cloudInfo);\n  }\n};\n\nint main(int argc, char **argv) {\n  ros::init(argc, argv, \"lio_segmot\");\n\n  ImageProjection IP;\n\n  ROS_INFO(\"\\033[1;32m----> Image Projection Started.\\033[0m\");\n\n  ros::MultiThreadedSpinner spinner(3);\n  spinner.spin();\n\n  return 0;\n}\n"
  },
  {
    "path": "src/imuPreintegration.cpp",
    "content": "#include \"utility.h\"\n\n#include <gtsam/geometry/Pose3.h>\n#include <gtsam/geometry/Rot3.h>\n#include <gtsam/inference/Symbol.h>\n#include <gtsam/navigation/CombinedImuFactor.h>\n#include <gtsam/navigation/GPSFactor.h>\n#include <gtsam/navigation/ImuFactor.h>\n#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>\n#include <gtsam/nonlinear/Marginals.h>\n#include <gtsam/nonlinear/NonlinearFactorGraph.h>\n#include <gtsam/nonlinear/Values.h>\n#include <gtsam/slam/BetweenFactor.h>\n#include <gtsam/slam/PriorFactor.h>\n\n#include <gtsam/nonlinear/ISAM2.h>\n#include <gtsam_unstable/nonlinear/IncrementalFixedLagSmoother.h>\n\nusing gtsam::symbol_shorthand::B;  // Bias  (ax,ay,az,gx,gy,gz)\nusing gtsam::symbol_shorthand::V;  // Vel   (xdot,ydot,zdot)\nusing gtsam::symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)\n\nclass TransformFusion : public ParamServer {\n public:\n  std::mutex mtx;\n\n  ros::Subscriber subImuOdometry;\n  ros::Subscriber subLaserOdometry;\n\n  ros::Publisher pubImuOdometry;\n  ros::Publisher pubImuPath;\n\n  Eigen::Affine3f lidarOdomAffine;\n  Eigen::Affine3f imuOdomAffineFront;\n  Eigen::Affine3f imuOdomAffineBack;\n\n  tf::TransformListener tfListener;\n  tf::StampedTransform lidar2Baselink;\n\n  double lidarOdomTime = -1;\n  deque<nav_msgs::Odometry> imuOdomQueue;\n\n  TransformFusion() {\n    if (lidarFrame != baselinkFrame) {\n      try {\n        tfListener.waitForTransform(lidarFrame, baselinkFrame, ros::Time(0), ros::Duration(3.0));\n        tfListener.lookupTransform(lidarFrame, baselinkFrame, ros::Time(0), lidar2Baselink);\n      } catch (tf::TransformException ex) {\n        ROS_ERROR(\"%s\", ex.what());\n      }\n    }\n\n    subLaserOdometry = nh.subscribe<nav_msgs::Odometry>(\"lio_segmot/mapping/odometry\", 5, &TransformFusion::lidarOdometryHandler, this, ros::TransportHints().tcpNoDelay());\n    subImuOdometry   = nh.subscribe<nav_msgs::Odometry>(odomTopic + \"_incremental\", 2000, &TransformFusion::imuOdometryHandler, 
this, ros::TransportHints().tcpNoDelay());\n\n    pubImuOdometry = nh.advertise<nav_msgs::Odometry>(odomTopic, 2000);\n    pubImuPath     = nh.advertise<nav_msgs::Path>(\"lio_segmot/imu/path\", 1);\n  }\n\n  Eigen::Affine3f odom2affine(nav_msgs::Odometry odom) {\n    double x, y, z, roll, pitch, yaw;\n    x = odom.pose.pose.position.x;\n    y = odom.pose.pose.position.y;\n    z = odom.pose.pose.position.z;\n    tf::Quaternion orientation;\n    tf::quaternionMsgToTF(odom.pose.pose.orientation, orientation);\n    tf::Matrix3x3(orientation).getRPY(roll, pitch, yaw);\n    return pcl::getTransformation(x, y, z, roll, pitch, yaw);\n  }\n\n  void lidarOdometryHandler(const nav_msgs::Odometry::ConstPtr& odomMsg) {\n    std::lock_guard<std::mutex> lock(mtx);\n\n    lidarOdomAffine = odom2affine(*odomMsg);\n\n    lidarOdomTime = odomMsg->header.stamp.toSec();\n  }\n\n  void imuOdometryHandler(const nav_msgs::Odometry::ConstPtr& odomMsg) {\n    // static tf\n    static tf::TransformBroadcaster tfMap2Odom;\n    static tf::Transform map_to_odom = tf::Transform(tf::createQuaternionFromRPY(0, 0, 0), tf::Vector3(0, 0, 0));\n    tfMap2Odom.sendTransform(tf::StampedTransform(map_to_odom, odomMsg->header.stamp, mapFrame, odometryFrame));\n\n    std::lock_guard<std::mutex> lock(mtx);\n\n    imuOdomQueue.push_back(*odomMsg);\n\n    // get latest odometry (at current IMU stamp)\n    if (lidarOdomTime == -1)\n      return;\n    while (!imuOdomQueue.empty()) {\n      if (imuOdomQueue.front().header.stamp.toSec() <= lidarOdomTime)\n        imuOdomQueue.pop_front();\n      else\n        break;\n    }\n    Eigen::Affine3f imuOdomAffineFront = odom2affine(imuOdomQueue.front());\n    Eigen::Affine3f imuOdomAffineBack  = odom2affine(imuOdomQueue.back());\n    Eigen::Affine3f imuOdomAffineIncre = imuOdomAffineFront.inverse() * imuOdomAffineBack;\n    Eigen::Affine3f imuOdomAffineLast  = lidarOdomAffine * imuOdomAffineIncre;\n    float x, y, z, roll, pitch, yaw;\n    
pcl::getTranslationAndEulerAngles(imuOdomAffineLast, x, y, z, roll, pitch, yaw);\n\n    // publish latest odometry\n    nav_msgs::Odometry laserOdometry    = imuOdomQueue.back();\n    laserOdometry.pose.pose.position.x  = x;\n    laserOdometry.pose.pose.position.y  = y;\n    laserOdometry.pose.pose.position.z  = z;\n    laserOdometry.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(roll, pitch, yaw);\n    pubImuOdometry.publish(laserOdometry);\n\n    // publish tf\n    static tf::TransformBroadcaster tfOdom2BaseLink;\n    tf::Transform tCur;\n    tf::poseMsgToTF(laserOdometry.pose.pose, tCur);\n    if (lidarFrame != baselinkFrame)\n      tCur = tCur * lidar2Baselink;\n    tf::StampedTransform odom_2_baselink = tf::StampedTransform(tCur, odomMsg->header.stamp, odometryFrame, baselinkFrame);\n    tfOdom2BaseLink.sendTransform(odom_2_baselink);\n\n    // publish IMU path\n    static nav_msgs::Path imuPath;\n    static double last_path_time = -1;\n    double imuTime               = imuOdomQueue.back().header.stamp.toSec();\n    if (imuTime - last_path_time > 0.1) {\n      last_path_time = imuTime;\n      geometry_msgs::PoseStamped pose_stamped;\n      pose_stamped.header.stamp    = imuOdomQueue.back().header.stamp;\n      pose_stamped.header.frame_id = odometryFrame;\n      pose_stamped.pose            = laserOdometry.pose.pose;\n      imuPath.poses.push_back(pose_stamped);\n      while (!imuPath.poses.empty() && imuPath.poses.front().header.stamp.toSec() < lidarOdomTime - 1.0)\n        imuPath.poses.erase(imuPath.poses.begin());\n      if (pubImuPath.getNumSubscribers() != 0) {\n        imuPath.header.stamp    = imuOdomQueue.back().header.stamp;\n        imuPath.header.frame_id = odometryFrame;\n        pubImuPath.publish(imuPath);\n      }\n    }\n  }\n};\n\nclass IMUPreintegration : public ParamServer {\n public:\n  std::mutex mtx;\n\n  ros::Subscriber subImu;\n  ros::Subscriber subOdometry;\n  ros::Publisher pubImuOdometry;\n\n  bool 
systemInitialized = false;\n\n  gtsam::noiseModel::Diagonal::shared_ptr priorPoseNoise;\n  gtsam::noiseModel::Diagonal::shared_ptr priorVelNoise;\n  gtsam::noiseModel::Diagonal::shared_ptr priorBiasNoise;\n  gtsam::noiseModel::Diagonal::shared_ptr correctionNoise;\n  gtsam::noiseModel::Diagonal::shared_ptr correctionNoise2;\n  gtsam::Vector noiseModelBetweenBias;\n\n  gtsam::PreintegratedImuMeasurements* imuIntegratorOpt_;\n  gtsam::PreintegratedImuMeasurements* imuIntegratorImu_;\n\n  std::deque<sensor_msgs::Imu> imuQueOpt;\n  std::deque<sensor_msgs::Imu> imuQueImu;\n\n  gtsam::Pose3 prevPose_;\n  gtsam::Vector3 prevVel_;\n  gtsam::NavState prevState_;\n  gtsam::imuBias::ConstantBias prevBias_;\n\n  gtsam::NavState prevStateOdom;\n  gtsam::imuBias::ConstantBias prevBiasOdom;\n\n  bool doneFirstOpt   = false;\n  double lastImuT_imu = -1;\n  double lastImuT_opt = -1;\n\n  gtsam::ISAM2 optimizer;\n  gtsam::NonlinearFactorGraph graphFactors;\n  gtsam::Values graphValues;\n\n  const double delta_t = 0;\n\n  int key = 1;\n\n  gtsam::Pose3 imu2Lidar = gtsam::Pose3(gtsam::Rot3(1, 0, 0, 0), gtsam::Point3(-extTrans.x(), -extTrans.y(), -extTrans.z()));\n  gtsam::Pose3 lidar2Imu = gtsam::Pose3(gtsam::Rot3(1, 0, 0, 0), gtsam::Point3(extTrans.x(), extTrans.y(), extTrans.z()));\n\n  IMUPreintegration() {\n    subImu      = nh.subscribe<sensor_msgs::Imu>(imuTopic, 2000, &IMUPreintegration::imuHandler, this, ros::TransportHints().tcpNoDelay());\n    subOdometry = nh.subscribe<nav_msgs::Odometry>(\"lio_segmot/mapping/odometry_incremental\", 5, &IMUPreintegration::odometryHandler, this, ros::TransportHints().tcpNoDelay());\n\n    pubImuOdometry = nh.advertise<nav_msgs::Odometry>(odomTopic + \"_incremental\", 2000);\n\n    boost::shared_ptr<gtsam::PreintegrationParams> p = gtsam::PreintegrationParams::MakeSharedU(imuGravity);\n    p->accelerometerCovariance                       = gtsam::Matrix33::Identity(3, 3) * pow(imuAccNoise, 2);  // acc white noise in continuous\n    
p->gyroscopeCovariance                           = gtsam::Matrix33::Identity(3, 3) * pow(imuGyrNoise, 2);  // gyro white noise in continuous\n    p->integrationCovariance                         = gtsam::Matrix33::Identity(3, 3) * pow(1e-4, 2);         // error committed in integrating position from velocities\n    gtsam::imuBias::ConstantBias prior_imu_bias((gtsam::Vector(6) << 0, 0, 0, 0, 0, 0).finished());\n    ;  // assume zero initial bias\n\n    priorPoseNoise        = gtsam::noiseModel::Diagonal::Sigmas((gtsam::Vector(6) << 1e-2, 1e-2, 1e-2, 1e-2, 1e-2, 1e-2).finished());  // rad,rad,rad,m, m, m\n    priorVelNoise         = gtsam::noiseModel::Isotropic::Sigma(3, 1e4);                                                               // m/s\n    priorBiasNoise        = gtsam::noiseModel::Isotropic::Sigma(6, 1e-3);                                                              // 1e-2 ~ 1e-3 seems to be good\n    correctionNoise       = gtsam::noiseModel::Diagonal::Sigmas((gtsam::Vector(6) << 0.05, 0.05, 0.05, 0.1, 0.1, 0.1).finished());     // rad,rad,rad,m, m, m\n    correctionNoise2      = gtsam::noiseModel::Diagonal::Sigmas((gtsam::Vector(6) << 1, 1, 1, 1, 1, 1).finished());                    // rad,rad,rad,m, m, m\n    noiseModelBetweenBias = (gtsam::Vector(6) << imuAccBiasN, imuAccBiasN, imuAccBiasN, imuGyrBiasN, imuGyrBiasN, imuGyrBiasN).finished();\n\n    imuIntegratorImu_ = new gtsam::PreintegratedImuMeasurements(p, prior_imu_bias);  // setting up the IMU integration for IMU message thread\n    imuIntegratorOpt_ = new gtsam::PreintegratedImuMeasurements(p, prior_imu_bias);  // setting up the IMU integration for optimization\n  }\n\n  void resetOptimization() {\n    gtsam::ISAM2Params optParameters;\n    optParameters.relinearizeThreshold = 0.1;\n    optParameters.relinearizeSkip      = 1;\n    optimizer                          = gtsam::ISAM2(optParameters);\n\n    gtsam::NonlinearFactorGraph newGraphFactors;\n    graphFactors = newGraphFactors;\n\n    
gtsam::Values NewGraphValues;\n    graphValues = NewGraphValues;\n  }\n\n  void resetParams() {\n    lastImuT_imu      = -1;\n    doneFirstOpt      = false;\n    systemInitialized = false;\n  }\n\n  void odometryHandler(const nav_msgs::Odometry::ConstPtr& odomMsg) {\n    std::lock_guard<std::mutex> lock(mtx);\n\n    double currentCorrectionTime = ROS_TIME(odomMsg);\n\n    // make sure we have imu data to integrate\n    if (imuQueOpt.empty())\n      return;\n\n    float p_x              = odomMsg->pose.pose.position.x;\n    float p_y              = odomMsg->pose.pose.position.y;\n    float p_z              = odomMsg->pose.pose.position.z;\n    float r_x              = odomMsg->pose.pose.orientation.x;\n    float r_y              = odomMsg->pose.pose.orientation.y;\n    float r_z              = odomMsg->pose.pose.orientation.z;\n    float r_w              = odomMsg->pose.pose.orientation.w;\n    bool degenerate        = (int)odomMsg->pose.covariance[0] == 1 ? true : false;\n    gtsam::Pose3 lidarPose = gtsam::Pose3(gtsam::Rot3::Quaternion(r_w, r_x, r_y, r_z), gtsam::Point3(p_x, p_y, p_z));\n\n    // 0. 
initialize system\n    if (systemInitialized == false) {\n      resetOptimization();\n\n      // pop old IMU message\n      while (!imuQueOpt.empty()) {\n        if (ROS_TIME(&imuQueOpt.front()) < currentCorrectionTime - delta_t) {\n          lastImuT_opt = ROS_TIME(&imuQueOpt.front());\n          imuQueOpt.pop_front();\n        } else\n          break;\n      }\n      // initial pose\n      prevPose_ = lidarPose.compose(lidar2Imu);\n      gtsam::PriorFactor<gtsam::Pose3> priorPose(X(0), prevPose_, priorPoseNoise);\n      graphFactors.add(priorPose);\n      // initial velocity\n      prevVel_ = gtsam::Vector3(0, 0, 0);\n      gtsam::PriorFactor<gtsam::Vector3> priorVel(V(0), prevVel_, priorVelNoise);\n      graphFactors.add(priorVel);\n      // initial bias\n      prevBias_ = gtsam::imuBias::ConstantBias();\n      gtsam::PriorFactor<gtsam::imuBias::ConstantBias> priorBias(B(0), prevBias_, priorBiasNoise);\n      graphFactors.add(priorBias);\n      // add values\n      graphValues.insert(X(0), prevPose_);\n      graphValues.insert(V(0), prevVel_);\n      graphValues.insert(B(0), prevBias_);\n      // optimize once\n      optimizer.update(graphFactors, graphValues);\n      graphFactors.resize(0);\n      graphValues.clear();\n\n      imuIntegratorImu_->resetIntegrationAndSetBias(prevBias_);\n      imuIntegratorOpt_->resetIntegrationAndSetBias(prevBias_);\n\n      key               = 1;\n      systemInitialized = true;\n      return;\n    }\n\n    // reset graph for speed\n    if (key == 100) {\n      // get updated noise before reset\n      gtsam::noiseModel::Gaussian::shared_ptr updatedPoseNoise = gtsam::noiseModel::Gaussian::Covariance(optimizer.marginalCovariance(X(key - 1)));\n      gtsam::noiseModel::Gaussian::shared_ptr updatedVelNoise  = gtsam::noiseModel::Gaussian::Covariance(optimizer.marginalCovariance(V(key - 1)));\n      gtsam::noiseModel::Gaussian::shared_ptr updatedBiasNoise = gtsam::noiseModel::Gaussian::Covariance(optimizer.marginalCovariance(B(key - 
1)));\n      // reset graph\n      resetOptimization();\n      // add pose\n      gtsam::PriorFactor<gtsam::Pose3> priorPose(X(0), prevPose_, updatedPoseNoise);\n      graphFactors.add(priorPose);\n      // add velocity\n      gtsam::PriorFactor<gtsam::Vector3> priorVel(V(0), prevVel_, updatedVelNoise);\n      graphFactors.add(priorVel);\n      // add bias\n      gtsam::PriorFactor<gtsam::imuBias::ConstantBias> priorBias(B(0), prevBias_, updatedBiasNoise);\n      graphFactors.add(priorBias);\n      // add values\n      graphValues.insert(X(0), prevPose_);\n      graphValues.insert(V(0), prevVel_);\n      graphValues.insert(B(0), prevBias_);\n      // optimize once\n      optimizer.update(graphFactors, graphValues);\n      graphFactors.resize(0);\n      graphValues.clear();\n\n      key = 1;\n    }\n\n    // 1. integrate imu data and optimize\n    while (!imuQueOpt.empty()) {\n      // pop and integrate imu data that is between two optimizations\n      sensor_msgs::Imu* thisImu = &imuQueOpt.front();\n      double imuTime            = ROS_TIME(thisImu);\n      if (imuTime < currentCorrectionTime - delta_t) {\n        double dt = (lastImuT_opt < 0) ? 
(1.0 / 500.0) : (imuTime - lastImuT_opt);\n        imuIntegratorOpt_->integrateMeasurement(\n            gtsam::Vector3(thisImu->linear_acceleration.x, thisImu->linear_acceleration.y, thisImu->linear_acceleration.z),\n            gtsam::Vector3(thisImu->angular_velocity.x, thisImu->angular_velocity.y, thisImu->angular_velocity.z), dt);\n\n        lastImuT_opt = imuTime;\n        imuQueOpt.pop_front();\n      } else\n        break;\n    }\n    // add imu factor to graph\n    const gtsam::PreintegratedImuMeasurements& preint_imu = dynamic_cast<const gtsam::PreintegratedImuMeasurements&>(*imuIntegratorOpt_);\n    gtsam::ImuFactor imu_factor(X(key - 1), V(key - 1), X(key), V(key), B(key - 1), preint_imu);\n    graphFactors.add(imu_factor);\n    // add imu bias between factor\n    graphFactors.add(gtsam::BetweenFactor<gtsam::imuBias::ConstantBias>(B(key - 1), B(key), gtsam::imuBias::ConstantBias(),\n                                                                        gtsam::noiseModel::Diagonal::Sigmas(sqrt(imuIntegratorOpt_->deltaTij()) * noiseModelBetweenBias)));\n    // add pose factor\n    gtsam::Pose3 curPose = lidarPose.compose(lidar2Imu);\n    gtsam::PriorFactor<gtsam::Pose3> pose_factor(X(key), curPose, degenerate ? 
correctionNoise2 : correctionNoise);\n    graphFactors.add(pose_factor);\n    // insert predicted values\n    gtsam::NavState propState_ = imuIntegratorOpt_->predict(prevState_, prevBias_);\n    graphValues.insert(X(key), propState_.pose());\n    graphValues.insert(V(key), propState_.v());\n    graphValues.insert(B(key), prevBias_);\n    // optimize\n    optimizer.update(graphFactors, graphValues);\n    optimizer.update();\n    graphFactors.resize(0);\n    graphValues.clear();\n    // Overwrite the beginning of the preintegration for the next step.\n    gtsam::Values result = optimizer.calculateEstimate();\n    prevPose_            = result.at<gtsam::Pose3>(X(key));\n    prevVel_             = result.at<gtsam::Vector3>(V(key));\n    prevState_           = gtsam::NavState(prevPose_, prevVel_);\n    prevBias_            = result.at<gtsam::imuBias::ConstantBias>(B(key));\n    // Reset the optimization preintegration object.\n    imuIntegratorOpt_->resetIntegrationAndSetBias(prevBias_);\n    // check optimization\n    if (failureDetection(prevVel_, prevBias_)) {\n      resetParams();\n      return;\n    }\n\n    // 2. after optiization, re-propagate imu odometry preintegration\n    prevStateOdom = prevState_;\n    prevBiasOdom  = prevBias_;\n    // first pop imu message older than current correction data\n    double lastImuQT = -1;\n    while (!imuQueImu.empty() && ROS_TIME(&imuQueImu.front()) < currentCorrectionTime - delta_t) {\n      lastImuQT = ROS_TIME(&imuQueImu.front());\n      imuQueImu.pop_front();\n    }\n    // repropogate\n    if (!imuQueImu.empty()) {\n      // reset bias use the newly optimized bias\n      imuIntegratorImu_->resetIntegrationAndSetBias(prevBiasOdom);\n      // integrate imu message from the beginning of this optimization\n      for (int i = 0; i < (int)imuQueImu.size(); ++i) {\n        sensor_msgs::Imu* thisImu = &imuQueImu[i];\n        double imuTime            = ROS_TIME(thisImu);\n        double dt                 = (lastImuQT < 0) ? 
(1.0 / 500.0) : (imuTime - lastImuQT);\n\n        imuIntegratorImu_->integrateMeasurement(gtsam::Vector3(thisImu->linear_acceleration.x, thisImu->linear_acceleration.y, thisImu->linear_acceleration.z),\n                                                gtsam::Vector3(thisImu->angular_velocity.x, thisImu->angular_velocity.y, thisImu->angular_velocity.z), dt);\n        lastImuQT = imuTime;\n      }\n    }\n\n    ++key;\n    doneFirstOpt = true;\n  }\n\n  bool failureDetection(const gtsam::Vector3& velCur, const gtsam::imuBias::ConstantBias& biasCur) {\n    Eigen::Vector3f vel(velCur.x(), velCur.y(), velCur.z());\n    if (vel.norm() > 30) {\n      ROS_WARN(\"Large velocity, reset IMU-preintegration!\");\n      return true;\n    }\n\n    Eigen::Vector3f ba(biasCur.accelerometer().x(), biasCur.accelerometer().y(), biasCur.accelerometer().z());\n    Eigen::Vector3f bg(biasCur.gyroscope().x(), biasCur.gyroscope().y(), biasCur.gyroscope().z());\n    if (ba.norm() > 1.0 || bg.norm() > 1.0) {\n      ROS_WARN(\"Large bias, reset IMU-preintegration!\");\n      return true;\n    }\n\n    return false;\n  }\n\n  void imuHandler(const sensor_msgs::Imu::ConstPtr& imu_raw) {\n    std::lock_guard<std::mutex> lock(mtx);\n\n    sensor_msgs::Imu thisImu = imuConverter(*imu_raw);\n\n    imuQueOpt.push_back(thisImu);\n    imuQueImu.push_back(thisImu);\n\n    if (doneFirstOpt == false)\n      return;\n\n    double imuTime = ROS_TIME(&thisImu);\n    double dt      = (lastImuT_imu < 0) ? 
(1.0 / 500.0) : (imuTime - lastImuT_imu);\n    lastImuT_imu   = imuTime;\n\n    // integrate this single imu message\n    imuIntegratorImu_->integrateMeasurement(gtsam::Vector3(thisImu.linear_acceleration.x, thisImu.linear_acceleration.y, thisImu.linear_acceleration.z),\n                                            gtsam::Vector3(thisImu.angular_velocity.x, thisImu.angular_velocity.y, thisImu.angular_velocity.z), dt);\n\n    // predict odometry\n    gtsam::NavState currentState = imuIntegratorImu_->predict(prevStateOdom, prevBiasOdom);\n\n    // publish odometry\n    nav_msgs::Odometry odometry;\n    odometry.header.stamp    = thisImu.header.stamp;\n    odometry.header.frame_id = odometryFrame;\n    odometry.child_frame_id  = \"odom_imu\";\n\n    // transform imu pose to ldiar\n    gtsam::Pose3 imuPose   = gtsam::Pose3(currentState.quaternion(), currentState.position());\n    gtsam::Pose3 lidarPose = imuPose.compose(imu2Lidar);\n\n    odometry.pose.pose.position.x    = lidarPose.translation().x();\n    odometry.pose.pose.position.y    = lidarPose.translation().y();\n    odometry.pose.pose.position.z    = lidarPose.translation().z();\n    odometry.pose.pose.orientation.x = lidarPose.rotation().toQuaternion().x();\n    odometry.pose.pose.orientation.y = lidarPose.rotation().toQuaternion().y();\n    odometry.pose.pose.orientation.z = lidarPose.rotation().toQuaternion().z();\n    odometry.pose.pose.orientation.w = lidarPose.rotation().toQuaternion().w();\n\n    odometry.twist.twist.linear.x  = currentState.velocity().x();\n    odometry.twist.twist.linear.y  = currentState.velocity().y();\n    odometry.twist.twist.linear.z  = currentState.velocity().z();\n    odometry.twist.twist.angular.x = thisImu.angular_velocity.x + prevBiasOdom.gyroscope().x();\n    odometry.twist.twist.angular.y = thisImu.angular_velocity.y + prevBiasOdom.gyroscope().y();\n    odometry.twist.twist.angular.z = thisImu.angular_velocity.z + prevBiasOdom.gyroscope().z();\n    
pubImuOdometry.publish(odometry);\n  }\n};\n\nint main(int argc, char** argv) {\n  ros::init(argc, argv, \"roboat_loam\");\n\n  IMUPreintegration ImuP;\n\n  TransformFusion TF;\n\n  ROS_INFO(\"\\033[1;32m----> IMU Preintegration Started.\\033[0m\");\n\n  ros::MultiThreadedSpinner spinner(4);\n  spinner.spin();\n\n  return 0;\n}\n"
  },
  {
    "path": "src/mapOptimization.cpp",
    "content": "#include <jsk_topic_tools/color_utils.h>\n#include \"factor.h\"\n#include \"lio_segmot/Diagnosis.h\"\n#include \"lio_segmot/ObjectStateArray.h\"\n#include \"lio_segmot/cloud_info.h\"\n#include \"lio_segmot/detection.h\"\n#include \"lio_segmot/flags.h\"\n#include \"lio_segmot/save_estimation_result.h\"\n#include \"lio_segmot/save_map.h\"\n#include \"solver.h\"\n#include \"utility.h\"\n\n#include <visualization_msgs/MarkerArray.h>\n\n#include <gtsam/geometry/Pose3.h>\n#include <gtsam/geometry/Rot3.h>\n#include <gtsam/inference/Symbol.h>\n#include <gtsam/navigation/CombinedImuFactor.h>\n#include <gtsam/navigation/GPSFactor.h>\n#include <gtsam/navigation/ImuFactor.h>\n#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>\n#include <gtsam/nonlinear/Marginals.h>\n#include <gtsam/nonlinear/NonlinearFactorGraph.h>\n#include <gtsam/nonlinear/Values.h>\n#include <gtsam/slam/BetweenFactor.h>\n#include <gtsam/slam/PriorFactor.h>\n\n// #define ENABLE_COMPACT_VERSION_OF_FACTOR_GRAPH\n// #define MAP_OPTIMIZATION_DEBUG\n#define ENABLE_SIMULTANEOUS_LOCALIZATION_AND_TRACKING\n#define ENABLE_ASYNCHRONOUS_STATE_ESTIMATE_FOR_SLOT\n// #define ENABLE_MINIMAL_MEMORY_USAGE\n\nusing namespace gtsam;\n\nusing symbol_shorthand::B;  // Bias  (ax,ay,az,gx,gy,gz)\nusing symbol_shorthand::G;  // GPS pose\nusing symbol_shorthand::V;  // Vel   (xdot,ydot,zdot)\nusing symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)\n\nusing BoundingBox              = jsk_recognition_msgs::BoundingBox;\nusing BoundingBoxPtr           = jsk_recognition_msgs::BoundingBoxPtr;\nusing BoundingBoxConstPtr      = jsk_recognition_msgs::BoundingBoxConstPtr;\nusing BoundingBoxArray         = jsk_recognition_msgs::BoundingBoxArray;\nusing BoundingBoxArrayPtr      = jsk_recognition_msgs::BoundingBoxArrayPtr;\nusing BoundingBoxArrayConstPtr = jsk_recognition_msgs::BoundingBoxArrayConstPtr;\n/*\n * A point cloud type that has 6D pose info ([x,y,z,roll,pitch,yaw] intensity is time stamp)\n */\nstruct PointXYZIRPYT 
{\n  PCL_ADD_POINT4D\n  PCL_ADD_INTENSITY;  // preferred way of adding a XYZ+padding\n  float roll;\n  float pitch;\n  float yaw;\n  double time;\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW  // make sure our new allocators are aligned\n} EIGEN_ALIGN16;                   // enforce SSE padding for correct memory alignment\n\nPOINT_CLOUD_REGISTER_POINT_STRUCT(PointXYZIRPYT,\n                                  (float, x, x)(float, y, y)(float, z, z)(float, intensity, intensity)(float, roll, roll)(float, pitch, pitch)(float, yaw, yaw)(double, time, time))\n\ntypedef PointXYZIRPYT PointTypePose;\n\ngeometry_msgs::Pose gtsamPose2ROSPose(const Pose3& pose) {\n  geometry_msgs::Pose p;\n  auto trans   = pose.translation();\n  p.position.x = trans.x();\n  p.position.y = trans.y();\n  p.position.z = trans.z();\n\n  auto quat       = pose.rotation().toQuaternion();\n  p.orientation.w = quat.w();\n  p.orientation.x = quat.x();\n  p.orientation.y = quat.y();\n  p.orientation.z = quat.z();\n\n  return p;\n}\n\nclass ObjectState {\n public:\n  Pose3 pose                      = Pose3::identity();\n  Pose3 velocity                  = Pose3::identity();\n  uint64_t poseNodeIndex          = 0;\n  uint64_t velocityNodeIndex      = 0;\n  uint64_t objectIndex            = 0;\n  uint64_t objectIndexForTracking = 0;\n  int lostCount                   = 0;\n  int trackScore                  = 0;\n  ros::Time timestamp             = ros::Time();\n\n  BoundingBox box       = BoundingBox();\n  BoundingBox detection = BoundingBox();\n  double confidence     = 0;\n\n  bool isTightlyCoupled = false;\n  bool isFirst          = false;\n\n  TightlyCoupledDetectionFactor::shared_ptr tightlyCoupledDetectionFactorPtr = nullptr;\n  LooselyCoupledDetectionFactor::shared_ptr looselyCoupledDetectionFactorPtr = nullptr;\n  StablePoseFactor::shared_ptr motionFactorPtr                               = nullptr;\n\n  double initialDetectionError = 0;\n  double initialMotionError    = 0;\n\n  std::vector<uint64_t> 
previousVelocityNodeIndices;\n\n  ObjectState(Pose3 pose                                        = Pose3::identity(),\n              Pose3 velocity                                    = Pose3::identity(),\n              uint64_t poseNodeIndex                            = 0,\n              uint64_t velocityNodeIndex                        = 0,\n              uint64_t objectIndex                              = 0,\n              uint64_t objectIndexForTracking                   = 0,\n              int lostCount                                     = 0,\n              int trackScore                                    = 0,\n              ros::Time timestamp                               = ros::Time(),\n              BoundingBox box                                   = BoundingBox(),\n              BoundingBox detection                             = BoundingBox(),\n              double confidence                                 = 0,\n              bool isTightlyCoupled                             = false,\n              bool isFirst                                      = false,\n              std::vector<uint64_t> previousVelocityNodeIndices = std::vector<uint64_t>())\n      : pose(pose),\n        velocity(velocity),\n        poseNodeIndex(poseNodeIndex),\n        velocityNodeIndex(velocityNodeIndex),\n        objectIndex(objectIndex),\n        objectIndexForTracking(objectIndexForTracking),\n        lostCount(lostCount),\n        trackScore(trackScore),\n        timestamp(timestamp),\n        box(box),\n        detection(detection),\n        confidence(confidence),\n        isTightlyCoupled(isTightlyCoupled),\n        isFirst(isFirst),\n        previousVelocityNodeIndices(previousVelocityNodeIndices) {\n  }\n\n  ObjectState clone() const {\n    return ObjectState(pose,\n                       velocity,\n                       poseNodeIndex,\n                       velocityNodeIndex,\n                       objectIndex,\n                       objectIndexForTracking,\n       
                lostCount,\n                       trackScore,\n                       timestamp,\n                       box,\n                       detection,\n                       confidence,\n                       isTightlyCoupled,\n                       isFirst,\n                       previousVelocityNodeIndices);\n  }\n\n  bool isTurning(float threshold) const {\n    auto rot = gtsam::traits<gtsam::Rot3>::Local(gtsam::Rot3::identity(), this->velocity.rotation());\n    return rot.maxCoeff() > threshold;\n  }\n\n  bool isMovingFast(float threshold) const {\n    auto v = gtsam::traits<gtsam::Pose3>::Local(gtsam::Pose3::identity(), this->velocity);\n    return sqrt(pow(v(3), 2) + pow(v(4), 2) + pow(v(5), 2)) > threshold;\n  }\n\n  bool velocityIsConsistent(int samplingSize,\n                            Values& currentEstimates,\n                            double angleThreshold,\n                            double velocityThreshold) const {\n    int size = previousVelocityNodeIndices.size();\n\n    if (size < samplingSize) return false;\n\n    Eigen::VectorXd angles     = Eigen::VectorXd::Zero(samplingSize);\n    Eigen::VectorXd velocities = Eigen::VectorXd::Zero(samplingSize);\n    std::vector<gtsam::Vector6> vs;\n    gtsam::Vector6 vMean = gtsam::Vector6::Zero();\n    for (int i = 0; i < samplingSize; ++i) {\n      auto vi       = currentEstimates.at<gtsam::Pose3>(previousVelocityNodeIndices[size - i - 1]);\n      auto v        = gtsam::traits<gtsam::Pose3>::Local(gtsam::Pose3::identity(), vi);\n      angles(i)     = sqrt(pow(v(0), 2) + pow(v(1), 2) + pow(v(2), 2));\n      velocities(i) = sqrt(pow(v(3), 2) + pow(v(4), 2) + pow(v(5), 2));\n      vs.push_back(v);\n      vMean += v;\n    }\n    vMean /= samplingSize;\n    gtsam::Matrix6 covariance = gtsam::Matrix6::Zero();\n    covariance(0, 0) = covariance(1, 1) = covariance(2, 2) = angleThreshold;\n    covariance(3, 3) = covariance(4, 4) = covariance(5, 5) = velocityThreshold;\n    auto covarianceInverse   
                              = covariance.inverse();\n    double error                                           = 0.0;\n    for (int i = 0; i < samplingSize; ++i) {\n      auto v = vs[i] - vMean;\n      error += v.transpose() * covarianceInverse * v;\n    }\n    error /= samplingSize;\n\n    double angleVar    = (angles.array() - angles.mean()).pow(2).mean();\n    double velocityVar = (velocities.array() - velocities.mean()).pow(2).mean();\n\n    // return angleVar < angleThreshold && velocityVar < velocityThreshold;\n\n    return error < 1.0 * 1.0;\n  }\n};\n\nclass Timer {\n private:\n  std::chrono::time_point<std::chrono::high_resolution_clock> start;\n  std::chrono::time_point<std::chrono::high_resolution_clock> end;\n\n public:\n  Timer() {\n    start = std::chrono::high_resolution_clock::now();\n  }\n\n  void reset() {\n    start = std::chrono::high_resolution_clock::now();\n  }\n\n  void stop() {\n    end = std::chrono::high_resolution_clock::now();\n  }\n\n  double elapsed() const {\n    return std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n  }\n};\n\nclass mapOptimization : public ParamServer {\n public:\n  // gtsam\n  NonlinearFactorGraph gtSAMgraph;\n  NonlinearFactorGraph gtSAMgraphForLooselyCoupledObjects;\n  Values initialEstimate;\n  Values initialEstimateForLooselyCoupledObjects;\n  Values initialEstimateForAnalysis;\n  Values optimizedEstimate;\n  MaxMixtureISAM2* isam;\n  Values isamCurrentEstimate;\n  Eigen::MatrixXd poseCovariance;\n\n  ros::Publisher pubLaserCloudSurround;\n  ros::Publisher pubLaserOdometryGlobal;\n  ros::Publisher pubLaserOdometryIncremental;\n  ros::Publisher pubKeyPoses;\n  ros::Publisher pubPath;\n  ros::Publisher pubKeyFrameCloud;\n\n  ros::Publisher pubHistoryKeyFrames;\n  ros::Publisher pubIcpKeyFrames;\n  ros::Publisher pubRecentKeyFrames;\n  ros::Publisher pubRecentKeyFrame;\n  ros::Publisher pubCloudRegisteredRaw;\n  ros::Publisher pubLoopConstraintEdge;\n\n  ros::Subscriber subCloud;\n 
 ros::Subscriber subGPS;\n  ros::Subscriber subLoop;\n\n  ros::Publisher pubDetection;\n  ros::Publisher pubLaserCloudDeskewed;\n  ros::Publisher pubObjects;\n  ros::Publisher pubObjectPaths;\n  ros::Publisher pubTightlyCoupledObjectPoints;\n  ros::Publisher pubObjectLabels;\n  ros::Publisher pubObjectVelocities;\n  ros::Publisher pubObjectVelocityArrows;\n  ros::Publisher pubObjectStates;\n  ros::Publisher pubTrackingObjects;\n  ros::Publisher pubTrackingObjectPaths;\n  ros::Publisher pubTrackingObjectLabels;\n  ros::Publisher pubTrackingObjectVelocities;\n  ros::Publisher pubTrackingObjectVelocityArrows;\n\n  ros::Publisher pubDiagnosis;\n\n  ros::Publisher pubReady;\n\n  ros::ServiceServer srvSaveMap;\n  ros::ServiceServer srvSaveEstimationResult;\n\n  ros::ServiceClient detectionClient;\n  lio_segmot::detection detectionSrv;\n\n  std::deque<nav_msgs::Odometry> gpsQueue;\n  lio_segmot::cloud_info cloudInfo;\n\n  vector<pcl::PointCloud<PointType>::Ptr> cornerCloudKeyFrames;\n  vector<pcl::PointCloud<PointType>::Ptr> surfCloudKeyFrames;\n\n  pcl::PointCloud<PointType>::Ptr cloudKeyPoses3D;\n  pcl::PointCloud<PointTypePose>::Ptr cloudKeyPoses6D;\n  pcl::PointCloud<PointType>::Ptr copy_cloudKeyPoses3D;\n  pcl::PointCloud<PointTypePose>::Ptr copy_cloudKeyPoses6D;\n  std::vector<uint64_t> keyPoseIndices;\n\n  pcl::PointCloud<PointType>::Ptr laserCloudCornerLast;    // corner feature set from odoOptimization\n  pcl::PointCloud<PointType>::Ptr laserCloudSurfLast;      // surf feature set from odoOptimization\n  pcl::PointCloud<PointType>::Ptr laserCloudCornerLastDS;  // downsampled corner featuer set from odoOptimization\n  pcl::PointCloud<PointType>::Ptr laserCloudSurfLastDS;    // downsampled surf featuer set from odoOptimization\n\n  pcl::PointCloud<PointType>::Ptr laserCloudOri;\n  pcl::PointCloud<PointType>::Ptr coeffSel;\n\n  std::vector<PointType> laserCloudOriCornerVec;  // corner point holder for parallel computation\n  std::vector<PointType> 
coeffSelCornerVec;\n  std::vector<bool> laserCloudOriCornerFlag;\n  std::vector<PointType> laserCloudOriSurfVec;  // surf point holder for parallel computation\n  std::vector<PointType> coeffSelSurfVec;\n  std::vector<bool> laserCloudOriSurfFlag;\n\n  map<int, pair<pcl::PointCloud<PointType>, pcl::PointCloud<PointType>>> laserCloudMapContainer;\n  pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMap;\n  pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMap;\n  pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMapDS;\n  pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMapDS;\n\n  pcl::KdTreeFLANN<PointType>::Ptr kdtreeCornerFromMap;\n  pcl::KdTreeFLANN<PointType>::Ptr kdtreeSurfFromMap;\n\n  pcl::KdTreeFLANN<PointType>::Ptr kdtreeSurroundingKeyPoses;\n  pcl::KdTreeFLANN<PointType>::Ptr kdtreeHistoryKeyPoses;\n\n  pcl::VoxelGrid<PointType> downSizeFilterCorner;\n  pcl::VoxelGrid<PointType> downSizeFilterSurf;\n  pcl::VoxelGrid<PointType> downSizeFilterICP;\n  pcl::VoxelGrid<PointType> downSizeFilterSurroundingKeyPoses;  // for surrounding key poses of scan-to-map optimization\n\n  ros::Time timeLaserInfoStamp;\n  double timeLaserInfoCur;\n  double deltaTime;\n\n  float transformTobeMapped[6];\n\n  std::mutex mtx;\n  std::mutex mtxLoopInfo;\n\n  bool isDegenerate = false;\n  cv::Mat matP;\n\n  int laserCloudCornerFromMapDSNum = 0;\n  int laserCloudSurfFromMapDSNum   = 0;\n  int laserCloudCornerLastDSNum    = 0;\n  int laserCloudSurfLastDSNum      = 0;\n\n  bool aLoopIsClosed = false;\n  map<int, int> loopIndexContainer;  // from new to old\n  vector<pair<int, int>> loopIndexQueue;\n  vector<gtsam::Pose3> loopPoseQueue;\n  vector<gtsam::noiseModel::Diagonal::shared_ptr> loopNoiseQueue;\n  deque<std_msgs::Float64MultiArray> loopInfoVec;\n\n  nav_msgs::Path globalPath;\n\n  Eigen::Affine3f transPointAssociateToMap;\n  Eigen::Affine3f incrementalOdometryAffineFront;\n  Eigen::Affine3f incrementalOdometryAffineBack;\n\n  BoundingBoxArrayPtr detections;\n  
std::vector<Detection> detectionVector;\n  std::vector<Detection> tightlyCoupledDetectionVector;\n  std::vector<Detection> earlyLooselyCoupledMatchingVector;\n  std::vector<Detection> looselyCoupledMatchingVector;\n  std::vector<Detection> tightlyCoupledMatchingVector;\n  std::vector<Detection> dataAssociationVector;\n  bool detectionIsActive = false;\n  std::vector<std::map<uint64_t, ObjectState>> objects;\n  visualization_msgs::MarkerArray objectPaths;\n  visualization_msgs::Marker tightlyCoupledObjectPoints;\n  visualization_msgs::MarkerArray objectLabels;\n  visualization_msgs::MarkerArray objectVelocities;\n  visualization_msgs::MarkerArray objectVelocityArrows;\n  visualization_msgs::MarkerArray trackingObjectPaths;\n  visualization_msgs::MarkerArray trackingObjectLabels;\n  visualization_msgs::MarkerArray trackingObjectVelocities;\n  visualization_msgs::MarkerArray trackingObjectVelocityArrows;\n  lio_segmot::ObjectStateArray objectStates;\n  uint64_t numberOfRegisteredObjects = 0;\n  uint64_t numberOfTrackingObjects   = 0;\n  bool anyObjectIsTightlyCoupled     = false;\n\n  uint64_t numberOfNodes = 0;\n\n  Timer timer;\n  int numberOfTightlyCoupledObjectsAtThisMoment = 0;\n\n  mapOptimization() {\n    ISAM2Params parameters;\n    parameters.relinearizeThreshold = 0.1;\n    parameters.relinearizeSkip      = 1;\n    isam                            = new MaxMixtureISAM2(parameters);\n\n    pubKeyPoses                 = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/trajectory\", 1);\n    pubLaserCloudSurround       = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/map_global\", 1);\n    pubLaserOdometryGlobal      = nh.advertise<nav_msgs::Odometry>(\"lio_segmot/mapping/odometry\", 1);\n    pubLaserOdometryIncremental = nh.advertise<nav_msgs::Odometry>(\"lio_segmot/mapping/odometry_incremental\", 1);\n    pubPath                     = nh.advertise<nav_msgs::Path>(\"lio_segmot/mapping/path\", 1);\n\n    subCloud = 
nh.subscribe<lio_segmot::cloud_info>(\"lio_segmot/feature/cloud_info\", 1, &mapOptimization::laserCloudInfoHandler, this, ros::TransportHints().tcpNoDelay());\n    subGPS   = nh.subscribe<nav_msgs::Odometry>(gpsTopic, 200, &mapOptimization::gpsHandler, this, ros::TransportHints().tcpNoDelay());\n    subLoop  = nh.subscribe<std_msgs::Float64MultiArray>(\"lio_loop/loop_closure_detection\", 1, &mapOptimization::loopInfoHandler, this, ros::TransportHints().tcpNoDelay());\n\n    srvSaveMap              = nh.advertiseService(\"lio_segmot/save_map\", &mapOptimization::saveMapService, this);\n    srvSaveEstimationResult = nh.advertiseService(\"lio_segmot/save_estimation_result\", &mapOptimization::saveEstimationResultService, this);\n    detectionClient         = nh.serviceClient<lio_segmot::detection>(\"lio_segmot_detector\");\n\n    pubHistoryKeyFrames   = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/icp_loop_closure_history_cloud\", 1);\n    pubIcpKeyFrames       = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/icp_loop_closure_corrected_cloud\", 1);\n    pubLoopConstraintEdge = nh.advertise<visualization_msgs::MarkerArray>(\"/lio_segmot/mapping/loop_closure_constraints\", 1);\n\n    pubRecentKeyFrames    = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/map_local\", 1);\n    pubRecentKeyFrame     = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/cloud_registered\", 1);\n    pubCloudRegisteredRaw = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/cloud_registered_raw\", 1);\n\n    pubDetection                  = nh.advertise<BoundingBoxArray>(\"lio_segmot/mapping/detections\", 1);\n    pubLaserCloudDeskewed         = nh.advertise<sensor_msgs::PointCloud2>(\"lio_segmot/mapping/cloud_deskewed\", 1);\n    pubObjects                    = nh.advertise<BoundingBoxArray>(\"lio_segmot/mapping/objects\", 1);\n    pubObjectPaths                = 
nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/mapping/object_paths\", 1);\n    pubTightlyCoupledObjectPoints = nh.advertise<visualization_msgs::Marker>(\"lio_segmot/mapping/tightly_coupled_object_points\", 1);\n    pubObjectLabels               = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/mapping/object_labels\", 1);\n    pubObjectVelocities           = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/mapping/object_velocities\", 1);\n    pubObjectVelocityArrows       = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/mapping/object_velocity_arrows\", 1);\n    pubObjectStates               = nh.advertise<lio_segmot::ObjectStateArray>(\"lio_segmot/mapping/object_states\", 1);\n\n    pubTrackingObjects              = nh.advertise<BoundingBoxArray>(\"lio_segmot/tracking/objects\", 1);\n    pubTrackingObjectPaths          = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/tracking/object_paths\", 1);\n    pubTrackingObjectLabels         = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/tracking/object_labels\", 1);\n    pubTrackingObjectVelocities     = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/tracking/object_velocities\", 1);\n    pubTrackingObjectVelocityArrows = nh.advertise<visualization_msgs::MarkerArray>(\"lio_segmot/tracking/object_velocity_arrows\", 1);\n\n    pubDiagnosis = nh.advertise<lio_segmot::Diagnosis>(\"lio_segmot/diagnosis\", 1);\n\n    pubReady = nh.advertise<std_msgs::Empty>(\"lio_segmot/ready\", 1);\n\n    downSizeFilterCorner.setLeafSize(mappingCornerLeafSize, mappingCornerLeafSize, mappingCornerLeafSize);\n    downSizeFilterSurf.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize);\n    downSizeFilterICP.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize);\n    downSizeFilterSurroundingKeyPoses.setLeafSize(surroundingKeyframeDensity, surroundingKeyframeDensity, surroundingKeyframeDensity);  // for surrounding key 
poses of scan-to-map optimization\n\n    allocateMemory();\n  }\n\n  void allocateMemory() {\n    cloudKeyPoses3D.reset(new pcl::PointCloud<PointType>());\n    cloudKeyPoses6D.reset(new pcl::PointCloud<PointTypePose>());\n    copy_cloudKeyPoses3D.reset(new pcl::PointCloud<PointType>());\n    copy_cloudKeyPoses6D.reset(new pcl::PointCloud<PointTypePose>());\n\n    kdtreeSurroundingKeyPoses.reset(new pcl::KdTreeFLANN<PointType>());\n    kdtreeHistoryKeyPoses.reset(new pcl::KdTreeFLANN<PointType>());\n\n    laserCloudCornerLast.reset(new pcl::PointCloud<PointType>());    // corner feature set from odoOptimization\n    laserCloudSurfLast.reset(new pcl::PointCloud<PointType>());      // surf feature set from odoOptimization\n    laserCloudCornerLastDS.reset(new pcl::PointCloud<PointType>());  // downsampled corner featuer set from odoOptimization\n    laserCloudSurfLastDS.reset(new pcl::PointCloud<PointType>());    // downsampled surf featuer set from odoOptimization\n\n    laserCloudOri.reset(new pcl::PointCloud<PointType>());\n    coeffSel.reset(new pcl::PointCloud<PointType>());\n\n    laserCloudOriCornerVec.resize(N_SCAN * Horizon_SCAN);\n    coeffSelCornerVec.resize(N_SCAN * Horizon_SCAN);\n    laserCloudOriCornerFlag.resize(N_SCAN * Horizon_SCAN);\n    laserCloudOriSurfVec.resize(N_SCAN * Horizon_SCAN);\n    coeffSelSurfVec.resize(N_SCAN * Horizon_SCAN);\n    laserCloudOriSurfFlag.resize(N_SCAN * Horizon_SCAN);\n\n    std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false);\n    std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false);\n\n    laserCloudCornerFromMap.reset(new pcl::PointCloud<PointType>());\n    laserCloudSurfFromMap.reset(new pcl::PointCloud<PointType>());\n    laserCloudCornerFromMapDS.reset(new pcl::PointCloud<PointType>());\n    laserCloudSurfFromMapDS.reset(new pcl::PointCloud<PointType>());\n\n    kdtreeCornerFromMap.reset(new pcl::KdTreeFLANN<PointType>());\n    kdtreeSurfFromMap.reset(new 
pcl::KdTreeFLANN<PointType>());\n\n    for (int i = 0; i < 6; ++i) {\n      transformTobeMapped[i] = 0;\n    }\n\n    matP = cv::Mat(6, 6, CV_32F, cv::Scalar::all(0));\n\n    detections.reset(new BoundingBoxArray());\n\n    tightlyCoupledObjectPoints.action             = visualization_msgs::Marker::ADD;\n    tightlyCoupledObjectPoints.type               = visualization_msgs::Marker::SPHERE_LIST;\n    tightlyCoupledObjectPoints.color.a            = 0.4;\n    tightlyCoupledObjectPoints.color.r            = 1.0;\n    tightlyCoupledObjectPoints.color.g            = 1.0;\n    tightlyCoupledObjectPoints.color.b            = 1.0;\n    tightlyCoupledObjectPoints.scale.x            = 1.0;\n    tightlyCoupledObjectPoints.scale.y            = 1.0;\n    tightlyCoupledObjectPoints.scale.z            = 1.0;\n    tightlyCoupledObjectPoints.pose.orientation.w = 1.0;\n  }\n\n  void laserCloudInfoHandler(const lio_segmot::cloud_infoConstPtr& msgIn) {\n    // extract time stamp\n    timeLaserInfoStamp = msgIn->header.stamp;\n    timeLaserInfoCur   = msgIn->header.stamp.toSec();\n\n    // extract info and feature cloud\n    cloudInfo = *msgIn;\n    pcl::fromROSMsg(msgIn->cloud_corner, *laserCloudCornerLast);\n    pcl::fromROSMsg(msgIn->cloud_surface, *laserCloudSurfLast);\n\n    std::lock_guard<std::mutex> lock(mtx);\n\n    timer.reset();\n    numberOfTightlyCoupledObjectsAtThisMoment = 0;\n\n    static double timeLastProcessing = -1;\n    if (timeLaserInfoCur - timeLastProcessing >= mappingProcessInterval) {\n#ifdef ENABLE_SIMULTANEOUS_LOCALIZATION_AND_TRACKING\n      std::thread t(&mapOptimization::getDetections, this);\n\n      deltaTime          = timeLaserInfoCur - timeLastProcessing;\n      timeLastProcessing = timeLaserInfoCur;\n#endif\n\n      updateInitialGuess();\n\n      extractSurroundingKeyFrames();\n\n      downsampleCurrentScan();\n\n      scan2MapOptimization();\n\n#ifdef ENABLE_SIMULTANEOUS_LOCALIZATION_AND_TRACKING\n      t.join();\n#endif\n\n      
saveKeyFramesAndFactor();\n\n      correctPoses();\n\n      timer.stop();\n\n      publishOdometry();\n\n      publishFrames();\n    }\n\n    pubReady.publish(std_msgs::Empty());\n  }\n\n  void getDetections() {\n    detectionIsActive          = false;\n    detectionSrv.request.cloud = cloudInfo.cloud_raw;\n    if (detectionClient.call(detectionSrv)) {\n      *detections       = detectionSrv.response.detections;\n      detectionIsActive = true;\n    }\n  }\n\n  void gpsHandler(const nav_msgs::Odometry::ConstPtr& gpsMsg) {\n    gpsQueue.push_back(*gpsMsg);\n  }\n\n  void pointAssociateToMap(PointType const* const pi, PointType* const po) {\n    po->x         = transPointAssociateToMap(0, 0) * pi->x + transPointAssociateToMap(0, 1) * pi->y + transPointAssociateToMap(0, 2) * pi->z + transPointAssociateToMap(0, 3);\n    po->y         = transPointAssociateToMap(1, 0) * pi->x + transPointAssociateToMap(1, 1) * pi->y + transPointAssociateToMap(1, 2) * pi->z + transPointAssociateToMap(1, 3);\n    po->z         = transPointAssociateToMap(2, 0) * pi->x + transPointAssociateToMap(2, 1) * pi->y + transPointAssociateToMap(2, 2) * pi->z + transPointAssociateToMap(2, 3);\n    po->intensity = pi->intensity;\n  }\n\n  pcl::PointCloud<PointType>::Ptr transformPointCloud(pcl::PointCloud<PointType>::Ptr cloudIn, PointTypePose* transformIn) {\n    pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());\n\n    int cloudSize = cloudIn->size();\n    cloudOut->resize(cloudSize);\n\n    Eigen::Affine3f transCur = pcl::getTransformation(transformIn->x, transformIn->y, transformIn->z, transformIn->roll, transformIn->pitch, transformIn->yaw);\n\n#pragma omp parallel for num_threads(numberOfCores)\n    for (int i = 0; i < cloudSize; ++i) {\n      const auto& pointFrom         = cloudIn->points[i];\n      cloudOut->points[i].x         = transCur(0, 0) * pointFrom.x + transCur(0, 1) * pointFrom.y + transCur(0, 2) * pointFrom.z + transCur(0, 3);\n      cloudOut->points[i].y     
    = transCur(1, 0) * pointFrom.x + transCur(1, 1) * pointFrom.y + transCur(1, 2) * pointFrom.z + transCur(1, 3);\n      cloudOut->points[i].z         = transCur(2, 0) * pointFrom.x + transCur(2, 1) * pointFrom.y + transCur(2, 2) * pointFrom.z + transCur(2, 3);\n      cloudOut->points[i].intensity = pointFrom.intensity;\n    }\n    return cloudOut;\n  }\n\n  gtsam::Pose3 pclPointTogtsamPose3(PointTypePose thisPoint) {\n    return gtsam::Pose3(gtsam::Rot3::RzRyRx(double(thisPoint.roll), double(thisPoint.pitch), double(thisPoint.yaw)),\n                        gtsam::Point3(double(thisPoint.x), double(thisPoint.y), double(thisPoint.z)));\n  }\n\n  gtsam::Pose3 trans2gtsamPose(float transformIn[]) {\n    return gtsam::Pose3(gtsam::Rot3::RzRyRx(transformIn[0], transformIn[1], transformIn[2]),\n                        gtsam::Point3(transformIn[3], transformIn[4], transformIn[5]));\n  }\n\n  Eigen::Affine3f pclPointToAffine3f(PointTypePose thisPoint) {\n    return pcl::getTransformation(thisPoint.x, thisPoint.y, thisPoint.z, thisPoint.roll, thisPoint.pitch, thisPoint.yaw);\n  }\n\n  Eigen::Affine3f trans2Affine3f(float transformIn[]) {\n    return pcl::getTransformation(transformIn[3], transformIn[4], transformIn[5], transformIn[0], transformIn[1], transformIn[2]);\n  }\n\n  PointTypePose trans2PointTypePose(float transformIn[]) {\n    PointTypePose thisPose6D;\n    thisPose6D.x     = transformIn[3];\n    thisPose6D.y     = transformIn[4];\n    thisPose6D.z     = transformIn[5];\n    thisPose6D.roll  = transformIn[0];\n    thisPose6D.pitch = transformIn[1];\n    thisPose6D.yaw   = transformIn[2];\n    return thisPose6D;\n  }\n\n  bool saveMapService(lio_segmot::save_mapRequest& req, lio_segmot::save_mapResponse& res) {\n    string saveMapDirectory;\n\n    cout << \"****************************************************\" << endl;\n    cout << \"Saving map to pcd files ...\" << endl;\n    if (req.destination.empty())\n      saveMapDirectory = std::getenv(\"HOME\") + 
savePCDDirectory;\n    else\n      saveMapDirectory = std::getenv(\"HOME\") + req.destination;\n    cout << \"Save destination: \" << saveMapDirectory << endl;\n    // create directory and remove old files;\n    int unused = system((std::string(\"exec rm -r \") + saveMapDirectory).c_str());\n    unused     = system((std::string(\"mkdir -p \") + saveMapDirectory).c_str());\n    // save key frame transformations\n    pcl::io::savePCDFileBinary(saveMapDirectory + \"/trajectory.pcd\", *cloudKeyPoses3D);\n    pcl::io::savePCDFileBinary(saveMapDirectory + \"/transformations.pcd\", *cloudKeyPoses6D);\n    // extract global point cloud map\n    pcl::PointCloud<PointType>::Ptr globalCornerCloud(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalCornerCloudDS(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalSurfCloud(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalSurfCloudDS(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalMapCloud(new pcl::PointCloud<PointType>());\n    for (int i = 0; i < (int)cloudKeyPoses3D->size(); i++) {\n      *globalCornerCloud += *transformPointCloud(cornerCloudKeyFrames[i], &cloudKeyPoses6D->points[i]);\n      *globalSurfCloud += *transformPointCloud(surfCloudKeyFrames[i], &cloudKeyPoses6D->points[i]);\n      cout << \"\\r\" << std::flush << \"Processing feature cloud \" << i << \" of \" << cloudKeyPoses6D->size() << \" ...\";\n    }\n\n    if (req.resolution != 0) {\n      cout << \"\\n\\nSave resolution: \" << req.resolution << endl;\n\n      // down-sample and save corner cloud\n      downSizeFilterCorner.setInputCloud(globalCornerCloud);\n      downSizeFilterCorner.setLeafSize(req.resolution, req.resolution, req.resolution);\n      downSizeFilterCorner.filter(*globalCornerCloudDS);\n      pcl::io::savePCDFileBinary(saveMapDirectory + \"/CornerMap.pcd\", *globalCornerCloudDS);\n      // down-sample and save surf cloud\n      
downSizeFilterSurf.setInputCloud(globalSurfCloud);\n      downSizeFilterSurf.setLeafSize(req.resolution, req.resolution, req.resolution);\n      downSizeFilterSurf.filter(*globalSurfCloudDS);\n      pcl::io::savePCDFileBinary(saveMapDirectory + \"/SurfMap.pcd\", *globalSurfCloudDS);\n    } else {\n      // save corner cloud\n      pcl::io::savePCDFileBinary(saveMapDirectory + \"/CornerMap.pcd\", *globalCornerCloud);\n      // save surf cloud\n      pcl::io::savePCDFileBinary(saveMapDirectory + \"/SurfMap.pcd\", *globalSurfCloud);\n    }\n\n    // save global point cloud map\n    *globalMapCloud += *globalCornerCloud;\n    *globalMapCloud += *globalSurfCloud;\n\n    int ret     = pcl::io::savePCDFileBinary(saveMapDirectory + \"/GlobalMap.pcd\", *globalMapCloud);\n    res.success = ret == 0;\n\n    downSizeFilterCorner.setLeafSize(mappingCornerLeafSize, mappingCornerLeafSize, mappingCornerLeafSize);\n    downSizeFilterSurf.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize);\n\n    cout << \"****************************************************\" << endl;\n    cout << \"Saving map to pcd files completed\\n\"\n         << endl;\n\n    return true;\n  }\n\n  bool saveEstimationResultService(lio_segmot::save_estimation_resultRequest& req, lio_segmot::save_estimation_resultResponse& res) {\n    res.robotTrajectory            = globalPath;\n    res.objectTrajectories         = std::vector<nav_msgs::Path>(numberOfRegisteredObjects, nav_msgs::Path());\n    res.objectVelocities           = std::vector<nav_msgs::Path>(numberOfRegisteredObjects, nav_msgs::Path());\n    res.trackingObjectTrajectories = std::vector<nav_msgs::Path>(numberOfTrackingObjects, nav_msgs::Path());\n    res.trackingObjectVelocities   = std::vector<nav_msgs::Path>(numberOfTrackingObjects, nav_msgs::Path());\n    res.trackingObjectStates       = std::vector<lio_segmot::ObjectStateArray>(numberOfTrackingObjects, lio_segmot::ObjectStateArray());\n    res.objectFlags                = 
std::vector<lio_segmot::flags>(numberOfRegisteredObjects, lio_segmot::flags());\n    res.trackingObjectFlags        = std::vector<lio_segmot::flags>(numberOfTrackingObjects, lio_segmot::flags());\n    for (int t = 0; t < objects.size(); ++t) {\n      for (const auto& pairedObject : objects[t]) {\n        const auto& object = pairedObject.second;\n\n        if (object.lostCount > 0) continue;\n\n        geometry_msgs::PoseStamped ps;\n        ps.header.frame_id = odometryFrame;\n        ps.header.stamp    = object.timestamp;\n        ps.pose            = gtsamPose2ROSPose(isamCurrentEstimate.at<Pose3>(object.poseNodeIndex));\n        res.objectTrajectories[object.objectIndex].poses.push_back(ps);\n        res.trackingObjectTrajectories[object.objectIndexForTracking].poses.push_back(ps);\n\n        ps.pose = gtsamPose2ROSPose(isamCurrentEstimate.at<Pose3>(object.velocityNodeIndex));\n        res.objectVelocities[object.objectIndex].poses.push_back(ps);\n        res.trackingObjectVelocities[object.objectIndexForTracking].poses.push_back(ps);\n\n        res.objectFlags[object.objectIndex].flags.push_back(object.isTightlyCoupled ? 1 : 0);\n        res.trackingObjectFlags[object.objectIndexForTracking].flags.push_back(object.isTightlyCoupled ? 
1 : 0);\n\n        lio_segmot::ObjectState state;\n        state.header.frame_id = odometryFrame;\n        state.header.stamp    = object.timestamp;\n        state.pose            = gtsamPose2ROSPose(isamCurrentEstimate.at<Pose3>(object.poseNodeIndex));\n        state.velocity        = gtsamPose2ROSPose(isamCurrentEstimate.at<Pose3>(object.velocityNodeIndex));\n        state.detection       = object.detection;\n        res.trackingObjectStates[object.objectIndexForTracking].objects.push_back(state);\n      }\n    }\n    return true;\n  }\n\n  void visualizeGlobalMapThread() {\n    ros::Rate rate(0.2);\n    while (ros::ok()) {\n      rate.sleep();\n      publishGlobalMap();\n    }\n\n    if (savePCD == false)\n      return;\n\n    lio_segmot::save_mapRequest req;\n    lio_segmot::save_mapResponse res;\n\n    if (!saveMapService(req, res)) {\n      cout << \"Fail to save map\" << endl;\n    }\n  }\n\n  void publishGlobalMap() {\n    if (pubLaserCloudSurround.getNumSubscribers() == 0)\n      return;\n\n    if (cloudKeyPoses3D->points.empty() == true)\n      return;\n\n    pcl::KdTreeFLANN<PointType>::Ptr kdtreeGlobalMap(new pcl::KdTreeFLANN<PointType>());\n    ;\n    pcl::PointCloud<PointType>::Ptr globalMapKeyPoses(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalMapKeyPosesDS(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalMapKeyFrames(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr globalMapKeyFramesDS(new pcl::PointCloud<PointType>());\n\n    // kd-tree to find near key frames to visualize\n    std::vector<int> pointSearchIndGlobalMap;\n    std::vector<float> pointSearchSqDisGlobalMap;\n    // search near key frames to visualize\n    mtx.lock();\n    kdtreeGlobalMap->setInputCloud(cloudKeyPoses3D);\n    kdtreeGlobalMap->radiusSearch(cloudKeyPoses3D->back(), globalMapVisualizationSearchRadius, pointSearchIndGlobalMap, pointSearchSqDisGlobalMap, 0);\n    mtx.unlock();\n\n    for (int 
i = 0; i < (int)pointSearchIndGlobalMap.size(); ++i)\n      globalMapKeyPoses->push_back(cloudKeyPoses3D->points[pointSearchIndGlobalMap[i]]);\n    // downsample near selected key frames\n    pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyPoses;                                                                                             // for global map visualization\n    downSizeFilterGlobalMapKeyPoses.setLeafSize(globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity);  // for global map visualization\n    downSizeFilterGlobalMapKeyPoses.setInputCloud(globalMapKeyPoses);\n    downSizeFilterGlobalMapKeyPoses.filter(*globalMapKeyPosesDS);\n    for (auto& pt : globalMapKeyPosesDS->points) {\n      kdtreeGlobalMap->nearestKSearch(pt, 1, pointSearchIndGlobalMap, pointSearchSqDisGlobalMap);\n      pt.intensity = cloudKeyPoses3D->points[pointSearchIndGlobalMap[0]].intensity;\n    }\n\n    // extract visualized and downsampled key frames\n    for (int i = 0; i < (int)globalMapKeyPosesDS->size(); ++i) {\n      if (pointDistance(globalMapKeyPosesDS->points[i], cloudKeyPoses3D->back()) > globalMapVisualizationSearchRadius)\n        continue;\n      int thisKeyInd = (int)globalMapKeyPosesDS->points[i].intensity;\n      *globalMapKeyFrames += *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);\n      *globalMapKeyFrames += *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);\n    }\n    // downsample visualized points\n    pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyFrames;                                                                                    // for global map visualization\n    downSizeFilterGlobalMapKeyFrames.setLeafSize(globalMapVisualizationLeafSize, globalMapVisualizationLeafSize, globalMapVisualizationLeafSize);  // for global map visualization\n    
downSizeFilterGlobalMapKeyFrames.setInputCloud(globalMapKeyFrames);\n    downSizeFilterGlobalMapKeyFrames.filter(*globalMapKeyFramesDS);\n    publishCloud(&pubLaserCloudSurround, globalMapKeyFramesDS, timeLaserInfoStamp, odometryFrame);\n  }\n\n  void loopClosureThread() {\n    if (loopClosureEnableFlag == false)\n      return;\n\n    ros::Rate rate(loopClosureFrequency);\n    while (ros::ok()) {\n      rate.sleep();\n      performLoopClosure();\n      visualizeLoopClosure();\n    }\n  }\n\n  void loopInfoHandler(const std_msgs::Float64MultiArray::ConstPtr& loopMsg) {\n    std::lock_guard<std::mutex> lock(mtxLoopInfo);\n    if (loopMsg->data.size() != 2)\n      return;\n\n    loopInfoVec.push_back(*loopMsg);\n\n    while (loopInfoVec.size() > 5)\n      loopInfoVec.pop_front();\n  }\n\n  void performLoopClosure() {\n    if (cloudKeyPoses3D->points.empty() == true)\n      return;\n\n    mtx.lock();\n    *copy_cloudKeyPoses3D = *cloudKeyPoses3D;\n    *copy_cloudKeyPoses6D = *cloudKeyPoses6D;\n    mtx.unlock();\n\n    // find keys\n    int loopKeyCur;\n    int loopKeyPre;\n    if (detectLoopClosureExternal(&loopKeyCur, &loopKeyPre) == false)\n      if (detectLoopClosureDistance(&loopKeyCur, &loopKeyPre) == false)\n        return;\n\n    // extract cloud\n    pcl::PointCloud<PointType>::Ptr cureKeyframeCloud(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr prevKeyframeCloud(new pcl::PointCloud<PointType>());\n    {\n      loopFindNearKeyframes(cureKeyframeCloud, loopKeyCur, 0);\n      loopFindNearKeyframes(prevKeyframeCloud, loopKeyPre, historyKeyframeSearchNum);\n      if (cureKeyframeCloud->size() < 300 || prevKeyframeCloud->size() < 1000)\n        return;\n      if (pubHistoryKeyFrames.getNumSubscribers() != 0)\n        publishCloud(&pubHistoryKeyFrames, prevKeyframeCloud, timeLaserInfoStamp, odometryFrame);\n    }\n\n    // ICP Settings\n    static pcl::IterativeClosestPoint<PointType, PointType> icp;\n    
icp.setMaxCorrespondenceDistance(historyKeyframeSearchRadius * 2);\n    icp.setMaximumIterations(100);\n    icp.setTransformationEpsilon(1e-6);\n    icp.setEuclideanFitnessEpsilon(1e-6);\n    icp.setRANSACIterations(0);\n\n    // Align clouds\n    icp.setInputSource(cureKeyframeCloud);\n    icp.setInputTarget(prevKeyframeCloud);\n    pcl::PointCloud<PointType>::Ptr unused_result(new pcl::PointCloud<PointType>());\n    icp.align(*unused_result);\n\n    if (icp.hasConverged() == false || icp.getFitnessScore() > historyKeyframeFitnessScore)\n      return;\n\n    // publish corrected cloud\n    if (pubIcpKeyFrames.getNumSubscribers() != 0) {\n      pcl::PointCloud<PointType>::Ptr closed_cloud(new pcl::PointCloud<PointType>());\n      pcl::transformPointCloud(*cureKeyframeCloud, *closed_cloud, icp.getFinalTransformation());\n      publishCloud(&pubIcpKeyFrames, closed_cloud, timeLaserInfoStamp, odometryFrame);\n    }\n\n    // Get pose transformation\n    float x, y, z, roll, pitch, yaw;\n    Eigen::Affine3f correctionLidarFrame;\n    correctionLidarFrame = icp.getFinalTransformation();\n    // transform from world origin to wrong pose\n    Eigen::Affine3f tWrong = pclPointToAffine3f(copy_cloudKeyPoses6D->points[loopKeyCur]);\n    // transform from world origin to corrected pose\n    Eigen::Affine3f tCorrect = correctionLidarFrame * tWrong;  // pre-multiplying -> successive rotation about a fixed frame\n    pcl::getTranslationAndEulerAngles(tCorrect, x, y, z, roll, pitch, yaw);\n    gtsam::Pose3 poseFrom = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z));\n    gtsam::Pose3 poseTo   = pclPointTogtsamPose3(copy_cloudKeyPoses6D->points[loopKeyPre]);\n    gtsam::Vector Vector6(6);\n    float noiseScore = icp.getFitnessScore();\n    Vector6 << noiseScore, noiseScore, noiseScore, noiseScore, noiseScore, noiseScore;\n    noiseModel::Diagonal::shared_ptr constraintNoise = noiseModel::Diagonal::Variances(Vector6);\n\n    // Add pose constraint\n    mtx.lock();\n    
loopIndexQueue.push_back(make_pair(loopKeyCur, loopKeyPre));\n    loopPoseQueue.push_back(poseFrom.between(poseTo));\n    loopNoiseQueue.push_back(constraintNoise);\n    mtx.unlock();\n\n    // add loop constriant\n    loopIndexContainer[loopKeyCur] = loopKeyPre;\n  }\n\n  bool detectLoopClosureDistance(int* latestID, int* closestID) {\n    int loopKeyCur = copy_cloudKeyPoses3D->size() - 1;\n    int loopKeyPre = -1;\n\n    // check loop constraint added before\n    auto it = loopIndexContainer.find(loopKeyCur);\n    if (it != loopIndexContainer.end())\n      return false;\n\n    // find the closest history key frame\n    std::vector<int> pointSearchIndLoop;\n    std::vector<float> pointSearchSqDisLoop;\n    kdtreeHistoryKeyPoses->setInputCloud(copy_cloudKeyPoses3D);\n    kdtreeHistoryKeyPoses->radiusSearch(copy_cloudKeyPoses3D->back(), historyKeyframeSearchRadius, pointSearchIndLoop, pointSearchSqDisLoop, 0);\n\n    for (int i = 0; i < (int)pointSearchIndLoop.size(); ++i) {\n      int id = pointSearchIndLoop[i];\n      if (abs(copy_cloudKeyPoses6D->points[id].time - timeLaserInfoCur) > historyKeyframeSearchTimeDiff) {\n        loopKeyPre = id;\n        break;\n      }\n    }\n\n    if (loopKeyPre == -1 || loopKeyCur == loopKeyPre)\n      return false;\n\n    *latestID  = loopKeyCur;\n    *closestID = loopKeyPre;\n\n    return true;\n  }\n\n  bool detectLoopClosureExternal(int* latestID, int* closestID) {\n    // this function is not used yet, please ignore it\n    int loopKeyCur = -1;\n    int loopKeyPre = -1;\n\n    std::lock_guard<std::mutex> lock(mtxLoopInfo);\n    if (loopInfoVec.empty())\n      return false;\n\n    double loopTimeCur = loopInfoVec.front().data[0];\n    double loopTimePre = loopInfoVec.front().data[1];\n    loopInfoVec.pop_front();\n\n    if (abs(loopTimeCur - loopTimePre) < historyKeyframeSearchTimeDiff)\n      return false;\n\n    int cloudSize = copy_cloudKeyPoses6D->size();\n    if (cloudSize < 2)\n      return false;\n\n    // latest key\n 
   loopKeyCur = cloudSize - 1;\n    for (int i = cloudSize - 1; i >= 0; --i) {\n      if (copy_cloudKeyPoses6D->points[i].time >= loopTimeCur)\n        loopKeyCur = round(copy_cloudKeyPoses6D->points[i].intensity);\n      else\n        break;\n    }\n\n    // previous key\n    loopKeyPre = 0;\n    for (int i = 0; i < cloudSize; ++i) {\n      if (copy_cloudKeyPoses6D->points[i].time <= loopTimePre)\n        loopKeyPre = round(copy_cloudKeyPoses6D->points[i].intensity);\n      else\n        break;\n    }\n\n    if (loopKeyCur == loopKeyPre)\n      return false;\n\n    auto it = loopIndexContainer.find(loopKeyCur);\n    if (it != loopIndexContainer.end())\n      return false;\n\n    *latestID  = loopKeyCur;\n    *closestID = loopKeyPre;\n\n    return true;\n  }\n\n  void loopFindNearKeyframes(pcl::PointCloud<PointType>::Ptr& nearKeyframes, const int& key, const int& searchNum) {\n    // extract near keyframes\n    nearKeyframes->clear();\n    int cloudSize = copy_cloudKeyPoses6D->size();\n    for (int i = -searchNum; i <= searchNum; ++i) {\n      int keyNear = key + i;\n      if (keyNear < 0 || keyNear >= cloudSize)\n        continue;\n      *nearKeyframes += *transformPointCloud(cornerCloudKeyFrames[keyNear], &copy_cloudKeyPoses6D->points[keyNear]);\n      *nearKeyframes += *transformPointCloud(surfCloudKeyFrames[keyNear], &copy_cloudKeyPoses6D->points[keyNear]);\n    }\n\n    if (nearKeyframes->empty())\n      return;\n\n    // downsample near keyframes\n    pcl::PointCloud<PointType>::Ptr cloud_temp(new pcl::PointCloud<PointType>());\n    downSizeFilterICP.setInputCloud(nearKeyframes);\n    downSizeFilterICP.filter(*cloud_temp);\n    *nearKeyframes = *cloud_temp;\n  }\n\n  void visualizeLoopClosure() {\n    if (loopIndexContainer.empty())\n      return;\n\n    visualization_msgs::MarkerArray markerArray;\n    // loop nodes\n    visualization_msgs::Marker markerNode;\n    markerNode.header.frame_id    = odometryFrame;\n    markerNode.header.stamp       = 
timeLaserInfoStamp;\n    markerNode.action             = visualization_msgs::Marker::ADD;\n    markerNode.type               = visualization_msgs::Marker::SPHERE_LIST;\n    markerNode.ns                 = \"loop_nodes\";\n    markerNode.id                 = 0;\n    markerNode.pose.orientation.w = 1;\n    markerNode.scale.x            = 0.3;\n    markerNode.scale.y            = 0.3;\n    markerNode.scale.z            = 0.3;\n    markerNode.color.r            = 0;\n    markerNode.color.g            = 0.8;\n    markerNode.color.b            = 1;\n    markerNode.color.a            = 1;\n    // loop edges\n    visualization_msgs::Marker markerEdge;\n    markerEdge.header.frame_id    = odometryFrame;\n    markerEdge.header.stamp       = timeLaserInfoStamp;\n    markerEdge.action             = visualization_msgs::Marker::ADD;\n    markerEdge.type               = visualization_msgs::Marker::LINE_LIST;\n    markerEdge.ns                 = \"loop_edges\";\n    markerEdge.id                 = 1;\n    markerEdge.pose.orientation.w = 1;\n    markerEdge.scale.x            = 0.1;\n    markerEdge.color.r            = 0.9;\n    markerEdge.color.g            = 0.9;\n    markerEdge.color.b            = 0;\n    markerEdge.color.a            = 1;\n\n    for (auto it = loopIndexContainer.begin(); it != loopIndexContainer.end(); ++it) {\n      int key_cur = it->first;\n      int key_pre = it->second;\n      geometry_msgs::Point p;\n      p.x = copy_cloudKeyPoses6D->points[key_cur].x;\n      p.y = copy_cloudKeyPoses6D->points[key_cur].y;\n      p.z = copy_cloudKeyPoses6D->points[key_cur].z;\n      markerNode.points.push_back(p);\n      markerEdge.points.push_back(p);\n      p.x = copy_cloudKeyPoses6D->points[key_pre].x;\n      p.y = copy_cloudKeyPoses6D->points[key_pre].y;\n      p.z = copy_cloudKeyPoses6D->points[key_pre].z;\n      markerNode.points.push_back(p);\n      markerEdge.points.push_back(p);\n    }\n\n    markerArray.markers.push_back(markerNode);\n    
markerArray.markers.push_back(markerEdge);\n    pubLoopConstraintEdge.publish(markerArray);\n  }\n\n  void updateInitialGuess() {\n    // save current transformation before any processing\n    incrementalOdometryAffineFront = trans2Affine3f(transformTobeMapped);\n\n    static Eigen::Affine3f lastImuTransformation;\n    // initialization\n    if (cloudKeyPoses3D->points.empty()) {\n      transformTobeMapped[0] = cloudInfo.imuRollInit;\n      transformTobeMapped[1] = cloudInfo.imuPitchInit;\n      transformTobeMapped[2] = cloudInfo.imuYawInit;\n\n      if (!useImuHeadingInitialization)\n        transformTobeMapped[2] = 0;\n\n      lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);  // save imu before return;\n      return;\n    }\n\n    // use imu pre-integration estimation for pose guess\n    static bool lastImuPreTransAvailable = false;\n    static Eigen::Affine3f lastImuPreTransformation;\n    if (cloudInfo.odomAvailable == true) {\n      Eigen::Affine3f transBack = pcl::getTransformation(cloudInfo.initialGuessX, cloudInfo.initialGuessY, cloudInfo.initialGuessZ,\n                                                         cloudInfo.initialGuessRoll, cloudInfo.initialGuessPitch, cloudInfo.initialGuessYaw);\n      if (lastImuPreTransAvailable == false) {\n        lastImuPreTransformation = transBack;\n        lastImuPreTransAvailable = true;\n      } else {\n        Eigen::Affine3f transIncre = lastImuPreTransformation.inverse() * transBack;\n        Eigen::Affine3f transTobe  = trans2Affine3f(transformTobeMapped);\n        Eigen::Affine3f transFinal = transTobe * transIncre;\n        pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],\n                                          transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);\n\n        lastImuPreTransformation = transBack;\n\n        lastImuTransformation 
= pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);  // save imu before return;\n        return;\n      }\n    }\n\n    // use imu incremental estimation for pose guess (only rotation)\n    if (cloudInfo.imuAvailable == true) {\n      Eigen::Affine3f transBack  = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);\n      Eigen::Affine3f transIncre = lastImuTransformation.inverse() * transBack;\n\n      Eigen::Affine3f transTobe  = trans2Affine3f(transformTobeMapped);\n      Eigen::Affine3f transFinal = transTobe * transIncre;\n      pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],\n                                        transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);\n\n      lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit);  // save imu before return;\n      return;\n    }\n  }\n\n  void extractForLoopClosure() {\n    pcl::PointCloud<PointType>::Ptr cloudToExtract(new pcl::PointCloud<PointType>());\n    int numPoses = cloudKeyPoses3D->size();\n    for (int i = numPoses - 1; i >= 0; --i) {\n      if ((int)cloudToExtract->size() <= surroundingKeyframeSize)\n        cloudToExtract->push_back(cloudKeyPoses3D->points[i]);\n      else\n        break;\n    }\n\n    extractCloud(cloudToExtract);\n  }\n\n  void extractNearby() {\n    pcl::PointCloud<PointType>::Ptr surroundingKeyPoses(new pcl::PointCloud<PointType>());\n    pcl::PointCloud<PointType>::Ptr surroundingKeyPosesDS(new pcl::PointCloud<PointType>());\n    std::vector<int> pointSearchInd;\n    std::vector<float> pointSearchSqDis;\n\n    // extract all the nearby key poses and downsample them\n    kdtreeSurroundingKeyPoses->setInputCloud(cloudKeyPoses3D);  // create kd-tree\n    kdtreeSurroundingKeyPoses->radiusSearch(cloudKeyPoses3D->back(), 
(double)surroundingKeyframeSearchRadius, pointSearchInd, pointSearchSqDis);\n    for (int i = 0; i < (int)pointSearchInd.size(); ++i) {\n      int id = pointSearchInd[i];\n      surroundingKeyPoses->push_back(cloudKeyPoses3D->points[id]);\n    }\n\n    downSizeFilterSurroundingKeyPoses.setInputCloud(surroundingKeyPoses);\n    downSizeFilterSurroundingKeyPoses.filter(*surroundingKeyPosesDS);\n    for (auto& pt : surroundingKeyPosesDS->points) {\n      kdtreeSurroundingKeyPoses->nearestKSearch(pt, 1, pointSearchInd, pointSearchSqDis);\n      pt.intensity = cloudKeyPoses3D->points[pointSearchInd[0]].intensity;\n    }\n\n    // also extract some latest key frames in case the robot rotates in one position\n    int numPoses = cloudKeyPoses3D->size();\n    for (int i = numPoses - 1; i >= 0; --i) {\n      if (timeLaserInfoCur - cloudKeyPoses6D->points[i].time < 10.0)\n        surroundingKeyPosesDS->push_back(cloudKeyPoses3D->points[i]);\n      else\n        break;\n    }\n\n    extractCloud(surroundingKeyPosesDS);\n  }\n\n  void extractCloud(pcl::PointCloud<PointType>::Ptr cloudToExtract) {\n    // fuse the map\n    laserCloudCornerFromMap->clear();\n    laserCloudSurfFromMap->clear();\n    for (int i = 0; i < (int)cloudToExtract->size(); ++i) {\n      if (pointDistance(cloudToExtract->points[i], cloudKeyPoses3D->back()) > surroundingKeyframeSearchRadius)\n        continue;\n\n      int thisKeyInd = (int)cloudToExtract->points[i].intensity;\n      if (laserCloudMapContainer.find(thisKeyInd) != laserCloudMapContainer.end()) {\n        // transformed cloud available\n        *laserCloudCornerFromMap += laserCloudMapContainer[thisKeyInd].first;\n        *laserCloudSurfFromMap += laserCloudMapContainer[thisKeyInd].second;\n      } else {\n        // transformed cloud not available\n        pcl::PointCloud<PointType> laserCloudCornerTemp = *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);\n        pcl::PointCloud<PointType> 
laserCloudSurfTemp   = *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]);\n        *laserCloudCornerFromMap += laserCloudCornerTemp;\n        *laserCloudSurfFromMap += laserCloudSurfTemp;\n        laserCloudMapContainer[thisKeyInd] = make_pair(laserCloudCornerTemp, laserCloudSurfTemp);\n      }\n    }\n\n    // Downsample the surrounding corner key frames (or map)\n    downSizeFilterCorner.setInputCloud(laserCloudCornerFromMap);\n    downSizeFilterCorner.filter(*laserCloudCornerFromMapDS);\n    laserCloudCornerFromMapDSNum = laserCloudCornerFromMapDS->size();\n    // Downsample the surrounding surf key frames (or map)\n    downSizeFilterSurf.setInputCloud(laserCloudSurfFromMap);\n    downSizeFilterSurf.filter(*laserCloudSurfFromMapDS);\n    laserCloudSurfFromMapDSNum = laserCloudSurfFromMapDS->size();\n\n    // clear map cache if too large\n    if (laserCloudMapContainer.size() > 1000)\n      laserCloudMapContainer.clear();\n  }\n\n  void extractSurroundingKeyFrames() {\n    if (cloudKeyPoses3D->points.empty() == true)\n      return;\n\n    // if (loopClosureEnableFlag == true)\n    // {\n    //     extractForLoopClosure();\n    // } else {\n    //     extractNearby();\n    // }\n\n    extractNearby();\n  }\n\n  void downsampleCurrentScan() {\n    // Downsample cloud from current scan\n    laserCloudCornerLastDS->clear();\n    downSizeFilterCorner.setInputCloud(laserCloudCornerLast);\n    downSizeFilterCorner.filter(*laserCloudCornerLastDS);\n    laserCloudCornerLastDSNum = laserCloudCornerLastDS->size();\n\n    laserCloudSurfLastDS->clear();\n    downSizeFilterSurf.setInputCloud(laserCloudSurfLast);\n    downSizeFilterSurf.filter(*laserCloudSurfLastDS);\n    laserCloudSurfLastDSNum = laserCloudSurfLastDS->size();\n  }\n\n  void updatePointAssociateToMap() {\n    transPointAssociateToMap = trans2Affine3f(transformTobeMapped);\n  }\n\n  void cornerOptimization() {\n    updatePointAssociateToMap();\n\n#pragma omp parallel for 
num_threads(numberOfCores)\n    for (int i = 0; i < laserCloudCornerLastDSNum; i++) {\n      PointType pointOri, pointSel, coeff;\n      std::vector<int> pointSearchInd;\n      std::vector<float> pointSearchSqDis;\n\n      pointOri = laserCloudCornerLastDS->points[i];\n      pointAssociateToMap(&pointOri, &pointSel);\n      kdtreeCornerFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis);\n\n      cv::Mat matA1(3, 3, CV_32F, cv::Scalar::all(0));\n      cv::Mat matD1(1, 3, CV_32F, cv::Scalar::all(0));\n      cv::Mat matV1(3, 3, CV_32F, cv::Scalar::all(0));\n\n      if (pointSearchSqDis[4] < 1.0) {\n        float cx = 0, cy = 0, cz = 0;\n        for (int j = 0; j < 5; j++) {\n          cx += laserCloudCornerFromMapDS->points[pointSearchInd[j]].x;\n          cy += laserCloudCornerFromMapDS->points[pointSearchInd[j]].y;\n          cz += laserCloudCornerFromMapDS->points[pointSearchInd[j]].z;\n        }\n        cx /= 5;\n        cy /= 5;\n        cz /= 5;\n\n        float a11 = 0, a12 = 0, a13 = 0, a22 = 0, a23 = 0, a33 = 0;\n        for (int j = 0; j < 5; j++) {\n          float ax = laserCloudCornerFromMapDS->points[pointSearchInd[j]].x - cx;\n          float ay = laserCloudCornerFromMapDS->points[pointSearchInd[j]].y - cy;\n          float az = laserCloudCornerFromMapDS->points[pointSearchInd[j]].z - cz;\n\n          a11 += ax * ax;\n          a12 += ax * ay;\n          a13 += ax * az;\n          a22 += ay * ay;\n          a23 += ay * az;\n          a33 += az * az;\n        }\n        a11 /= 5;\n        a12 /= 5;\n        a13 /= 5;\n        a22 /= 5;\n        a23 /= 5;\n        a33 /= 5;\n\n        matA1.at<float>(0, 0) = a11;\n        matA1.at<float>(0, 1) = a12;\n        matA1.at<float>(0, 2) = a13;\n        matA1.at<float>(1, 0) = a12;\n        matA1.at<float>(1, 1) = a22;\n        matA1.at<float>(1, 2) = a23;\n        matA1.at<float>(2, 0) = a13;\n        matA1.at<float>(2, 1) = a23;\n        matA1.at<float>(2, 2) = a33;\n\n        
cv::eigen(matA1, matD1, matV1);\n\n        if (matD1.at<float>(0, 0) > 3 * matD1.at<float>(0, 1)) {\n          float x0 = pointSel.x;\n          float y0 = pointSel.y;\n          float z0 = pointSel.z;\n          float x1 = cx + 0.1 * matV1.at<float>(0, 0);\n          float y1 = cy + 0.1 * matV1.at<float>(0, 1);\n          float z1 = cz + 0.1 * matV1.at<float>(0, 2);\n          float x2 = cx - 0.1 * matV1.at<float>(0, 0);\n          float y2 = cy - 0.1 * matV1.at<float>(0, 1);\n          float z2 = cz - 0.1 * matV1.at<float>(0, 2);\n\n          // clang-format off\n          float a012 = sqrt(((x0 - x1) * (y0 - y2) - (x0 - x2) * (y0 - y1)) * ((x0 - x1) * (y0 - y2) - (x0 - x2) * (y0 - y1))\n                          + ((x0 - x1) * (z0 - z2) - (x0 - x2) * (z0 - z1)) * ((x0 - x1) * (z0 - z2) - (x0 - x2) * (z0 - z1))\n                          + ((y0 - y1) * (z0 - z2) - (y0 - y2) * (z0 - z1)) * ((y0 - y1) * (z0 - z2) - (y0 - y2) * (z0 - z1)));\n\n          float l12 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2));\n\n          float la = ((y1 - y2) * ((x0 - x1) * (y0 - y2) - (x0 - x2) * (y0 - y1))\n                    + (z1 - z2) * ((x0 - x1) * (z0 - z2) - (x0 - x2) * (z0 - z1))) / a012 / l12;\n\n          float lb = -((x1 - x2) * ((x0 - x1) * (y0 - y2) - (x0 - x2) * (y0 - y1)) \n                     - (z1 - z2) * ((y0 - y1) * (z0 - z2) - (y0 - y2) * (z0 - z1))) / a012 / l12;\n\n          float lc = -((x1 - x2) * ((x0 - x1) * (z0 - z2) - (x0 - x2) * (z0 - z1))\n                     + (y1 - y2) * ((y0 - y1) * (z0 - z2) - (y0 - y2) * (z0 - z1))) / a012 / l12;\n          // clang-format on\n\n          float ld2 = a012 / l12;\n\n          float s = 1 - 0.9 * fabs(ld2);\n\n          coeff.x         = s * la;\n          coeff.y         = s * lb;\n          coeff.z         = s * lc;\n          coeff.intensity = s * ld2;\n\n          if (s > 0.1) {\n            laserCloudOriCornerVec[i]  = pointOri;\n            coeffSelCornerVec[i]       = 
coeff;\n            laserCloudOriCornerFlag[i] = true;\n          }\n        }\n      }\n    }\n  }\n\n  void surfOptimization() {\n    updatePointAssociateToMap();\n\n#pragma omp parallel for num_threads(numberOfCores)\n    for (int i = 0; i < laserCloudSurfLastDSNum; i++) {\n      PointType pointOri, pointSel, coeff;\n      std::vector<int> pointSearchInd;\n      std::vector<float> pointSearchSqDis;\n\n      pointOri = laserCloudSurfLastDS->points[i];\n      pointAssociateToMap(&pointOri, &pointSel);\n      kdtreeSurfFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis);\n\n      Eigen::Matrix<float, 5, 3> matA0;\n      Eigen::Matrix<float, 5, 1> matB0;\n      Eigen::Vector3f matX0;\n\n      matA0.setZero();\n      matB0.fill(-1);\n      matX0.setZero();\n\n      if (pointSearchSqDis[4] < 1.0) {\n        for (int j = 0; j < 5; j++) {\n          matA0(j, 0) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].x;\n          matA0(j, 1) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].y;\n          matA0(j, 2) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].z;\n        }\n\n        matX0 = matA0.colPivHouseholderQr().solve(matB0);\n\n        float pa = matX0(0, 0);\n        float pb = matX0(1, 0);\n        float pc = matX0(2, 0);\n        float pd = 1;\n\n        float ps = sqrt(pa * pa + pb * pb + pc * pc);\n        pa /= ps;\n        pb /= ps;\n        pc /= ps;\n        pd /= ps;\n\n        bool planeValid = true;\n        for (int j = 0; j < 5; j++) {\n          if (fabs(pa * laserCloudSurfFromMapDS->points[pointSearchInd[j]].x +\n                   pb * laserCloudSurfFromMapDS->points[pointSearchInd[j]].y +\n                   pc * laserCloudSurfFromMapDS->points[pointSearchInd[j]].z + pd) > 0.2) {\n            planeValid = false;\n            break;\n          }\n        }\n\n        if (planeValid) {\n          float pd2 = pa * pointSel.x + pb * pointSel.y + pc * pointSel.z + pd;\n\n          float s = 1 - 0.9 * fabs(pd2) / 
sqrt(sqrt(pointSel.x * pointSel.x + pointSel.y * pointSel.y + pointSel.z * pointSel.z));\n\n          coeff.x         = s * pa;\n          coeff.y         = s * pb;\n          coeff.z         = s * pc;\n          coeff.intensity = s * pd2;\n\n          if (s > 0.1) {\n            laserCloudOriSurfVec[i]  = pointOri;\n            coeffSelSurfVec[i]       = coeff;\n            laserCloudOriSurfFlag[i] = true;\n          }\n        }\n      }\n    }\n  }\n\n  void combineOptimizationCoeffs() {\n    // combine corner coeffs\n    for (int i = 0; i < laserCloudCornerLastDSNum; ++i) {\n      if (laserCloudOriCornerFlag[i] == true) {\n        laserCloudOri->push_back(laserCloudOriCornerVec[i]);\n        coeffSel->push_back(coeffSelCornerVec[i]);\n      }\n    }\n    // combine surf coeffs\n    for (int i = 0; i < laserCloudSurfLastDSNum; ++i) {\n      if (laserCloudOriSurfFlag[i] == true) {\n        laserCloudOri->push_back(laserCloudOriSurfVec[i]);\n        coeffSel->push_back(coeffSelSurfVec[i]);\n      }\n    }\n    // reset flag for next iteration\n    std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false);\n    std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false);\n  }\n\n  bool LMOptimization(int iterCount) {\n    // This optimization is from the original loam_velodyne by Ji Zhang, need to cope with coordinate transformation\n    // lidar <- camera      ---     camera <- lidar\n    // x = z                ---     x = y\n    // y = x                ---     y = z\n    // z = y                ---     z = x\n    // roll = yaw           ---     roll = pitch\n    // pitch = roll         ---     pitch = yaw\n    // yaw = pitch          ---     yaw = roll\n\n    // lidar -> camera\n    float srx = sin(transformTobeMapped[1]);\n    float crx = cos(transformTobeMapped[1]);\n    float sry = sin(transformTobeMapped[2]);\n    float cry = cos(transformTobeMapped[2]);\n    float srz = sin(transformTobeMapped[0]);\n    float crz = 
cos(transformTobeMapped[0]);

    // Require enough associated features for a well-conditioned 6-DoF solve.
    int laserCloudSelNum = laserCloudOri->size();
    if (laserCloudSelNum < 50) {
      return false;
    }

    cv::Mat matA(laserCloudSelNum, 6, CV_32F, cv::Scalar::all(0));
    cv::Mat matAt(6, laserCloudSelNum, CV_32F, cv::Scalar::all(0));
    cv::Mat matAtA(6, 6, CV_32F, cv::Scalar::all(0));
    cv::Mat matB(laserCloudSelNum, 1, CV_32F, cv::Scalar::all(0));
    cv::Mat matAtB(6, 1, CV_32F, cv::Scalar::all(0));
    cv::Mat matX(6, 1, CV_32F, cv::Scalar::all(0));

    PointType pointOri, coeff;

    // Build one Jacobian row (matA) and residual entry (matB) per feature.
    for (int i = 0; i < laserCloudSelNum; i++) {
      // lidar -> camera
      pointOri.x = laserCloudOri->points[i].y;
      pointOri.y = laserCloudOri->points[i].z;
      pointOri.z = laserCloudOri->points[i].x;
      // lidar -> camera
      coeff.x         = coeffSel->points[i].y;
      coeff.y         = coeffSel->points[i].z;
      coeff.z         = coeffSel->points[i].x;
      coeff.intensity = coeffSel->points[i].intensity;
      // in camera
      // Analytic partial derivatives of the residual w.r.t. the three
      // rotation angles, in the camera-frame convention documented above.
      float arx = (crx * sry * srz * pointOri.x + crx * crz * sry * pointOri.y - srx * sry * pointOri.z) * coeff.x + (-srx * srz * pointOri.x - crz * srx * pointOri.y - crx * pointOri.z) * coeff.y + (crx * cry * srz * pointOri.x + crx * cry * crz * pointOri.y - cry * srx * pointOri.z) * coeff.z;

      float ary = ((cry * srx * srz - crz * sry) * pointOri.x + (sry * srz + cry * crz * srx) * pointOri.y + crx * cry * pointOri.z) * coeff.x + ((-cry * crz - srx * sry * srz) * pointOri.x + (cry * srz - crz * srx * sry) * pointOri.y - crx * sry * pointOri.z) * coeff.z;

      float arz = ((crz * srx * sry - cry * srz) * pointOri.x + (-cry * crz - srx * sry * srz) * pointOri.y) * coeff.x + (crx * crz * pointOri.x - crx * srz * pointOri.y) * coeff.y + ((sry * srz + cry * crz * srx) * pointOri.x + (crz * sry - cry * srx * srz) * pointOri.y) * coeff.z;
      // lidar -> camera
      matA.at<float>(i, 0) = arz;
      matA.at<float>(i, 1) = arx;
      matA.at<float>(i, 2) = ary;
      matA.at<float>(i, 3) = coeff.z;
      matA.at<float>(i, 4) = coeff.x;
      matA.at<float>(i, 5) = coeff.y;
      matB.at<float>(i, 0) = -coeff.intensity;
    }

    // Normal equations: (A^T A) x = A^T b, solved via QR.
    cv::transpose(matA, matAt);
    matAtA = matAt * matA;
    matAtB = matAt * matB;
    cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR);

    // On the first iteration only: detect degenerate directions of A^T A
    // (eigenvalues below 100) and build a projection matrix matP that zeroes
    // updates along those directions for all subsequent iterations.
    if (iterCount == 0) {
      cv::Mat matE(1, 6, CV_32F, cv::Scalar::all(0));
      cv::Mat matV(6, 6, CV_32F, cv::Scalar::all(0));
      cv::Mat matV2(6, 6, CV_32F, cv::Scalar::all(0));

      cv::eigen(matAtA, matE, matV);
      matV.copyTo(matV2);

      isDegenerate      = false;
      float eignThre[6] = {100, 100, 100, 100, 100, 100};
      // Eigenvalues are sorted descending; scan from the smallest upward.
      for (int i = 5; i >= 0; i--) {
        if (matE.at<float>(0, i) < eignThre[i]) {
          for (int j = 0; j < 6; j++) {
            matV2.at<float>(i, j) = 0;
          }
          isDegenerate = true;
        } else {
          break;
        }
      }
      matP = matV.inv() * matV2;
    }

    // Project the increment away from degenerate directions.
    if (isDegenerate) {
      cv::Mat matX2(6, 1, CV_32F, cv::Scalar::all(0));
      matX.copyTo(matX2);
      matX = matP * matX2;
    }

    // Apply the increment: [roll, pitch, yaw, x, y, z].
    transformTobeMapped[0] += matX.at<float>(0, 0);
    transformTobeMapped[1] += matX.at<float>(1, 0);
    transformTobeMapped[2] += matX.at<float>(2, 0);
    transformTobeMapped[3] += matX.at<float>(3, 0);
    transformTobeMapped[4] += matX.at<float>(4, 0);
    transformTobeMapped[5] += matX.at<float>(5, 0);

    // Convergence test: rotation increment in degrees, translation in cm.
    float deltaR = sqrt(
        pow(pcl::rad2deg(matX.at<float>(0, 0)), 2) +
        pow(pcl::rad2deg(matX.at<float>(1, 0)), 2) +
        pow(pcl::rad2deg(matX.at<float>(2, 0)), 2));
    float deltaT = sqrt(
        pow(matX.at<float>(3, 0) * 100, 2) +
        pow(matX.at<float>(4, 0) * 100, 2) +
        pow(matX.at<float>(5, 0) * 100, 2));

    if (deltaR < 0.05 && deltaT < 0.05) {
      return true;  // converged
    }
    return false;  // keep optimizing
  }

  // Iterative scan-to-map registration: alternate feature association and
  // Gauss-Newton steps (up to 30 iterations), then fuse the result with the
  // IMU attitude in transformUpdate().
  void scan2MapOptimization() {
    if
(cloudKeyPoses3D->points.empty())
      return;

    // Only optimize when enough features of both types are available.
    if (laserCloudCornerLastDSNum > edgeFeatureMinValidNum && laserCloudSurfLastDSNum > surfFeatureMinValidNum) {
      kdtreeCornerFromMap->setInputCloud(laserCloudCornerFromMapDS);
      kdtreeSurfFromMap->setInputCloud(laserCloudSurfFromMapDS);

      for (int iterCount = 0; iterCount < 30; iterCount++) {
        laserCloudOri->clear();
        coeffSel->clear();

        cornerOptimization();
        surfOptimization();

        combineOptimizationCoeffs();

        if (LMOptimization(iterCount) == true)
          break;
      }

      transformUpdate();
    } else {
      ROS_WARN("Not enough features! Only %d edge and %d planar features available.", laserCloudCornerLastDSNum, laserCloudSurfLastDSNum);
    }
  }

  // Blend the optimized roll/pitch with the IMU attitude (slerp weighted by
  // imuRPYWeight), clamp roll/pitch and z to their configured tolerances,
  // and cache the resulting transform for incremental odometry output.
  void transformUpdate() {
    if (cloudInfo.imuAvailable == true) {
      // Skip the fusion near +/-90 deg pitch where RPY from the IMU is
      // unreliable (1.4 rad ~ 80 deg).
      if (std::abs(cloudInfo.imuPitchInit) < 1.4) {
        double imuWeight = imuRPYWeight;
        tf::Quaternion imuQuaternion;
        tf::Quaternion transformQuaternion;
        double rollMid, pitchMid, yawMid;

        // slerp roll
        transformQuaternion.setRPY(transformTobeMapped[0], 0, 0);
        imuQuaternion.setRPY(cloudInfo.imuRollInit, 0, 0);
        tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);
        transformTobeMapped[0] = rollMid;

        // slerp pitch
        transformQuaternion.setRPY(0, transformTobeMapped[1], 0);
        imuQuaternion.setRPY(0, cloudInfo.imuPitchInit, 0);
        tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);
        transformTobeMapped[1] = pitchMid;
      }
    }

    transformTobeMapped[0] = constraintTransformation(transformTobeMapped[0], rotation_tollerance);
    transformTobeMapped[1] = constraintTransformation(transformTobeMapped[1], rotation_tollerance);
    transformTobeMapped[5] = constraintTransformation(transformTobeMapped[5], z_tollerance);

    incrementalOdometryAffineBack = trans2Affine3f(transformTobeMapped);
  }

  // Clamp `value` to the symmetric interval [-limit, limit].
  float constraintTransformation(float value, float limit) {
    if (value < -limit)
      value = -limit;
    if (value > limit)
      value = limit;

    return value;
  }

  // Decide whether the current pose should become a new key frame: true for
  // the very first frame, or when the pose has rotated/translated beyond the
  // configured thresholds relative to the previous key pose.
  bool saveFrame() {
    if (cloudKeyPoses3D->points.empty())
      return true;

    // Relative motion from the last key pose to the current estimate.
    Eigen::Affine3f transStart   = pclPointToAffine3f(cloudKeyPoses6D->back());
    Eigen::Affine3f transFinal   = pcl::getTransformation(transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],
                                                          transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);
    Eigen::Affine3f transBetween = transStart.inverse() * transFinal;
    float x, y, z, roll, pitch, yaw;
    pcl::getTranslationAndEulerAngles(transBetween, x, y, z, roll, pitch, yaw);

    if (abs(roll) < surroundingkeyframeAddingAngleThreshold &&
        abs(pitch) < surroundingkeyframeAddingAngleThreshold &&
        abs(yaw) < surroundingkeyframeAddingAngleThreshold &&
        sqrt(x * x + y * y + z * z) < surroundingkeyframeAddingDistThreshold)
      return false;

    return true;
  }

  // Add the odometry factor for the current key pose: a prior on the very
  // first pose, otherwise a between-factor relative to the previous key
  // pose. Graph keys come from the shared numberOfNodes counter (also used
  // for object pose/velocity nodes) and are recorded in keyPoseIndices.
  void addOdomFactor() {
    if (cloudKeyPoses3D->points.empty()) {
      auto currentKeyIndex = numberOfNodes++;  // 0
      keyPoseIndices.push_back(currentKeyIndex);

      noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Variances(priorOdometryDiagonalVarianceEigenVector);  // rad*rad, meter*meter
      gtSAMgraph.add(PriorFactor<Pose3>(0, trans2gtsamPose(transformTobeMapped), priorNoise));
      initialEstimate.insert(currentKeyIndex, trans2gtsamPose(transformTobeMapped));
      initialEstimateForAnalysis.insert(currentKeyIndex, trans2gtsamPose(transformTobeMapped));
    } else {
      auto previousKeyIndex = keyPoseIndices.back();
      auto currentKeyIndex  = numberOfNodes++;
  keyPoseIndices.push_back(currentKeyIndex);\n\n      noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Variances(odometryDiagonalVarianceEigenVector);\n      gtsam::Pose3 poseFrom                          = pclPointTogtsamPose3(cloudKeyPoses6D->points.back());\n      gtsam::Pose3 poseTo                            = trans2gtsamPose(transformTobeMapped);\n      gtSAMgraph.add(BetweenFactor<Pose3>(previousKeyIndex,\n                                          currentKeyIndex,\n                                          poseFrom.between(poseTo),\n                                          odometryNoise));\n      initialEstimate.insert(currentKeyIndex, poseTo);\n      initialEstimateForAnalysis.insert(currentKeyIndex, poseTo);\n    }\n  }\n\n  void addGPSFactor() {\n    if (gpsQueue.empty())\n      return;\n\n    // wait for system initialized and settles down\n    if (cloudKeyPoses3D->points.empty())\n      return;\n    else {\n      if (pointDistance(cloudKeyPoses3D->front(), cloudKeyPoses3D->back()) < 5.0)\n        return;\n    }\n\n    // pose covariance small, no need to correct\n    if (poseCovariance(3, 3) < poseCovThreshold && poseCovariance(4, 4) < poseCovThreshold)\n      return;\n\n    // last gps position\n    static PointType lastGPSPoint;\n\n    while (!gpsQueue.empty()) {\n      if (gpsQueue.front().header.stamp.toSec() < timeLaserInfoCur - 0.2) {\n        // message too old\n        gpsQueue.pop_front();\n      } else if (gpsQueue.front().header.stamp.toSec() > timeLaserInfoCur + 0.2) {\n        // message too new\n        break;\n      } else {\n        nav_msgs::Odometry thisGPS = gpsQueue.front();\n        gpsQueue.pop_front();\n\n        // GPS too noisy, skip\n        float noise_x = thisGPS.pose.covariance[0];\n        float noise_y = thisGPS.pose.covariance[7];\n        float noise_z = thisGPS.pose.covariance[14];\n        if (noise_x > gpsCovThreshold || noise_y > gpsCovThreshold)\n          continue;\n\n        float gps_x = 
thisGPS.pose.pose.position.x;\n        float gps_y = thisGPS.pose.pose.position.y;\n        float gps_z = thisGPS.pose.pose.position.z;\n        if (!useGpsElevation) {\n          gps_z   = transformTobeMapped[5];\n          noise_z = 0.01;\n        }\n\n        // GPS not properly initialized (0,0,0)\n        if (abs(gps_x) < 1e-6 && abs(gps_y) < 1e-6)\n          continue;\n\n        // Add GPS every a few meters\n        PointType curGPSPoint;\n        curGPSPoint.x = gps_x;\n        curGPSPoint.y = gps_y;\n        curGPSPoint.z = gps_z;\n        if (pointDistance(curGPSPoint, lastGPSPoint) < 5.0)\n          continue;\n        else\n          lastGPSPoint = curGPSPoint;\n\n        gtsam::Vector Vector3(3);\n        Vector3 << max(noise_x, 1.0f), max(noise_y, 1.0f), max(noise_z, 1.0f);\n        noiseModel::Diagonal::shared_ptr gps_noise = noiseModel::Diagonal::Variances(Vector3);\n        gtsam::GPSFactor gps_factor(cloudKeyPoses3D->size(), gtsam::Point3(gps_x, gps_y, gps_z), gps_noise);\n        gtSAMgraph.add(gps_factor);\n\n        aLoopIsClosed = true;\n        break;\n      }\n    }\n  }\n\n  void addLoopFactor() {\n    if (loopIndexQueue.empty())\n      return;\n\n    for (int i = 0; i < (int)loopIndexQueue.size(); ++i) {\n      int indexFrom                                        = loopIndexQueue[i].first;\n      int indexTo                                          = loopIndexQueue[i].second;\n      gtsam::Pose3 poseBetween                             = loopPoseQueue[i];\n      gtsam::noiseModel::Diagonal::shared_ptr noiseBetween = loopNoiseQueue[i];\n      gtSAMgraph.add(BetweenFactor<Pose3>(indexFrom, indexTo, poseBetween, noiseBetween));\n    }\n\n    loopIndexQueue.clear();\n    loopPoseQueue.clear();\n    loopNoiseQueue.clear();\n    aLoopIsClosed = true;\n  }\n\n  void propagateObjectPoses() {\n    std::map<uint64_t, ObjectState> nextObjects;\n\n    // Only triggered in the beginning.\n    if (objects.empty()) {\n      
objects.push_back(nextObjects);\n      return;\n    }\n\n#ifdef MAP_OPTIMIZATION_DEBUG\n    std::cout << \"DELTA_TIME :: \" << deltaTime << \"\\n\\n\";\n#endif\n\n    for (const auto& pairedObject : objects.back()) {\n      // Drop the object if it is lost for a long time\n      if (pairedObject.second.lostCount > trackingStepsForLostObject) continue;\n\n      // Propagate the object using the constant velocity model as well as\n      // register a new variable node for the object\n      auto identity     = Pose3::identity();\n      auto nextObject   = pairedObject.second.clone();\n      auto deltaPoseVec = gtsam::traits<Pose3>::Local(identity, nextObject.velocity) * deltaTime;\n      auto deltaPose    = gtsam::traits<Pose3>::Retract(identity, deltaPoseVec);\n      nextObject.pose   = nextObject.pose * deltaPose;\n\n      nextObject.isFirst   = false;\n      nextObject.timestamp = timeLaserInfoStamp;\n      if (pairedObject.second.lostCount == 0) {\n        nextObject.poseNodeIndex     = numberOfNodes++;\n        nextObject.velocityNodeIndex = numberOfNodes++;\n\n        initialEstimate.insert(nextObject.poseNodeIndex, nextObject.pose);\n        initialEstimate.insert(nextObject.velocityNodeIndex, nextObject.velocity);\n\n        initialEstimateForAnalysis.insert(nextObject.poseNodeIndex, nextObject.pose);\n        initialEstimateForAnalysis.insert(nextObject.velocityNodeIndex, nextObject.velocity);\n\n        nextObject.previousVelocityNodeIndices.push_back(pairedObject.second.velocityNodeIndex);\n      } else {\n        nextObject.poseNodeIndex     = -1;\n        nextObject.velocityNodeIndex = -1;\n      }\n\n      nextObjects[pairedObject.first] = nextObject;\n    }\n    objects.push_back(nextObjects);\n\n#ifdef ENABLE_MINIMAL_MEMORY_USAGE\n    if (objects.size() > 2 &&\n        objects.size() > numberOfPreLooseCouplingSteps + 1 &&\n        objects.size() > numberOfVelocityConsistencySteps + 1) {\n      objects.erase(objects.begin());\n    }\n#endif\n  }\n\n  
  // Tie consecutive velocity nodes of each tracked object together with a
  // constant-velocity factor. Tightly-coupled objects go into the main
  // graph; loosely-coupled ones into the side graph, using a looser noise
  // model during an object's first few steps.
  void addConstantVelocityFactor() {
    // Skip the process if there are not enough time stamps for adding
    // constant velocity factors
    if (objects.size() < 2) return;

    for (const auto& pairedObject : objects.back()) {
      // Skip if the object is lost at this stamp or is a new object
      if (pairedObject.second.isFirst) continue;
      if (pairedObject.second.lostCount > 0) continue;

      // NOTE: the local variable `noiseModel` shadows the gtsam noiseModel
      // namespace; qualified lookup (noiseModel::Diagonal) still resolves to
      // the namespace, but the shadowing is easy to misread.
      auto noiseModel      = noiseModel::Diagonal::Variances(constantVelocityDiagonalVarianceEigenVector);
      auto earlyNoiseModel = noiseModel::Diagonal::Variances(earlyConstantVelocityDiagonalVarianceEigenVector);
      auto currentObject   = pairedObject.second;
      auto objectIndex     = currentObject.objectIndex;
      auto previousObject  = objects[objects.size() - 2][objectIndex];
      if (pairedObject.second.isTightlyCoupled) {
        gtSAMgraph.add(ConstantVelocityFactor(previousObject.velocityNodeIndex,
                                              currentObject.velocityNodeIndex,
                                              noiseModel));
      } else {
        // Young objects (short path) get the looser "early" noise model so
        // their velocity estimate can settle before being constrained hard.
        if (objectPaths.markers[pairedObject.second.objectIndex].points.size() <= numberOfEarlySteps) {
          gtSAMgraphForLooselyCoupledObjects.add(ConstantVelocityFactor(previousObject.velocityNodeIndex,
                                                                        currentObject.velocityNodeIndex,
                                                                        earlyNoiseModel));
        } else {
          gtSAMgraphForLooselyCoupledObjects.add(ConstantVelocityFactor(previousObject.velocityNodeIndex,
                                                                        currentObject.velocityNodeIndex,
                                                                        noiseModel));
        }
      }
    }
  }

  // Link each object's previous pose, a velocity node, and its current pose
  // with a StablePoseFactor (motion-model factor) over deltaTime, and keep a
  // copy of the factor plus its initial error for later analysis.
  void addStablePoseFactor() {
    // Skip the process if there are not enough time stamps for adding
    // stable pose factors
    if (objects.size() < 2) return;

    for (auto& pairedObject : objects.back()) {
      // Skip if the object is lost at this stamp or is a new object
      if (pairedObject.second.isFirst) continue;
      if (pairedObject.second.lostCount > 0) continue;

      auto noise          = noiseModel::Diagonal::Variances(motionDiagonalVarianceEigenVector);
      auto& currentObject = pairedObject.second;
      auto objectIndex    = currentObject.objectIndex;
      auto previousObject = objects[objects.size() - 2][objectIndex];
      // The compact graph variant ties the factor to the CURRENT velocity
      // node; the default variant uses the PREVIOUS one.
      if (pairedObject.second.isTightlyCoupled) {
        gtSAMgraph.add(StablePoseFactor(previousObject.poseNodeIndex,
#ifdef ENABLE_COMPACT_VERSION_OF_FACTOR_GRAPH
                                        currentObject.velocityNodeIndex,
#else
                                        previousObject.velocityNodeIndex,
#endif
                                        currentObject.poseNodeIndex,
                                        deltaTime,
                                        noise));
      } else {
        gtSAMgraphForLooselyCoupledObjects.add(StablePoseFactor(previousObject.poseNodeIndex,
#ifdef ENABLE_COMPACT_VERSION_OF_FACTOR_GRAPH
                                                                currentObject.velocityNodeIndex,
#else
                                                                previousObject.velocityNodeIndex,
#endif
                                                                currentObject.poseNodeIndex,
                                                                deltaTime,
                                                                noise));
      }
      // Keep a copy of the motion factor so its error against the initial
      // estimate can be evaluated below.
      currentObject.motionFactorPtr = boost::make_shared<StablePoseFactor>(previousObject.poseNodeIndex,
#ifdef ENABLE_COMPACT_VERSION_OF_FACTOR_GRAPH
                                                                           currentObject.velocityNodeIndex,
#else
                                                                           previousObject.velocityNodeIndex,
#endif
                                                                           currentObject.poseNodeIndex,
                                                                           deltaTime,
                                                                           noise);
      // NOTE(review): gtsam::Values::insert throws on duplicate keys; this
      // assumes the previous pose/velocity keys are not already present in
      // initialEstimateForAnalysis at this point -- verify against
      // propagateObjectPoses(), which also inserts into it.
      initialEstimateForAnalysis.insert(previousObject.poseNodeIndex, previousObject.pose);
      initialEstimateForAnalysis.insert(previousObject.velocityNodeIndex, previousObject.velocity);
      currentObject.initialMotionError = currentObject.motionFactorPtr->error(initialEstimateForAnalysis);
    }
  }

  // Data-associate 3-D detections with tracked objects and add the
  // corresponding detection factors (definition continues past this view).
  void addDetectionFactor(bool requiredMockDetection = false) {
    anyObjectIsTightlyCoupled = false;

    if (detections->boxes.size() == 0 && objects.size() == 0) {
      // Skip the process if there is no detection and no active moving objects
      return;
    } else if (detections->boxes.size() == 0 && objects.size() > 0) {
      // Set every moving object as lost if there is no detection
      for (auto& pairedObject : objects.back()) {
        auto& object = pairedObject.second;
        ++object.lostCount;
        object.confidence                               = 0.0;
        // Shrink the path markers of lost objects in the visualization.
        objectPaths.markers[object.objectIndex].scale.x = 0.3;
        objectPaths.markers[object.objectIndex].scale.y = 0.3;
        objectPaths.markers[object.objectIndex].scale.z = 0.3;
      }
      return;
    }
    // After the above condition, the following process will suppose that there
    // exist at least one detection in this step

    // Initialize an indicator matrix for data association of objects and
    // detections, where we secretly add a row to the matrix in case of no
    // active objects at the current stamp
    Eigen::MatrixXi indicator = Eigen::MatrixXi::Zero(objects.back().size() + 1,
                                                      detections->boxes.size());
    std::vector<int>
trackingObjectIndices(detections->boxes.size(), -1);\n\n    // Create a vector of Detections.\n    auto smallEgoMotion = Pose3(Rot3::RzRyRx(0, 0, 0), Point3(0, 0, 0));\n    if (requiredMockDetection) {\n      Eigen::Affine3f transStart   = pclPointToAffine3f(cloudKeyPoses6D->back());\n      Eigen::Affine3f transFinal   = pcl::getTransformation(transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5],\n                                                            transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);\n      Eigen::Affine3f transBetween = transStart.inverse() * transFinal;\n      float x, y, z, roll, pitch, yaw;\n      pcl::getTranslationAndEulerAngles(transBetween, x, y, z, roll, pitch, yaw);\n      smallEgoMotion = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z));\n    }\n    detectionVector.clear();\n    tightlyCoupledDetectionVector.clear();\n    earlyLooselyCoupledMatchingVector.clear();\n    looselyCoupledMatchingVector.clear();\n    tightlyCoupledMatchingVector.clear();\n    dataAssociationVector.clear();\n    for (auto box : detections->boxes) {\n      if (requiredMockDetection) {\n        auto pose = Pose3(Rot3::Quaternion(box.pose.orientation.w, box.pose.orientation.x, box.pose.orientation.y, box.pose.orientation.z),\n                          Point3(box.pose.position.x, box.pose.position.y, box.pose.position.z));\n        pose      = smallEgoMotion * pose;\n\n        auto quat              = pose.rotation().toQuaternion();\n        box.pose.orientation.w = quat.w();\n        box.pose.orientation.x = quat.x();\n        box.pose.orientation.y = quat.y();\n        box.pose.orientation.z = quat.z();\n\n        auto pos            = pose.translation();\n        box.pose.position.x = pos.x();\n        box.pose.position.y = pos.y();\n        box.pose.position.z = pos.z();\n      }\n      detectionVector.emplace_back(box, looselyCoupledDetectionVarianceEigenVector);\n      
tightlyCoupledDetectionVector.emplace_back(box, tightlyCoupledDetectionVarianceEigenVector);\n      earlyLooselyCoupledMatchingVector.emplace_back(box, earlyLooselyCoupledMatchingVarianceEigenVector);\n      looselyCoupledMatchingVector.emplace_back(box, looselyCoupledMatchingVarianceEigenVector);\n      tightlyCoupledMatchingVector.emplace_back(box, tightlyCoupledMatchingVarianceEigenVector);\n      dataAssociationVector.emplace_back(box, dataAssociationVarianceEigenVector);\n    }\n\n    auto egoPoseKey = keyPoseIndices.back();\n    auto egoPose    = initialEstimate.at<Pose3>(egoPoseKey);\n    auto invEgoPose = egoPose.inverse();\n\n    // Perform the data association for each object, and determine if this\n    // object is lost at this stamp\n#ifdef MAP_OPTIMIZATION_DEBUG\n    std::cout << \"OBJECT_INDICES ::\\n\";\n#endif\n    size_t i = 0;  // object index with respect to the indicator matrix\n    for (auto& pairedObject : objects.back()) {\n      auto& object = pairedObject.second;\n      size_t j;  // detection index with respect to the indicator matrix\n      double error;\n\n      size_t dataAssociationJ;\n      double dataAssociationError;\n\n#ifdef MAP_OPTIMIZATION_DEBUG\n      std::cout << object.objectIndex << ' ';\n#endif\n\n      auto&& predictedPose = invEgoPose * object.pose;\n      if (object.trackScore >= numberOfPreLooseCouplingSteps) {\n        // std::tie(j, error) = getDetectionIndexAndError(predictedPose, tightlyCoupledMatchingVector);\n        std::tie(j, error) = getDetectionIndexAndError(predictedPose, looselyCoupledMatchingVector);\n      } else if (objectPaths.markers[object.objectIndex].points.size() <= numberOfEarlySteps) {\n        // Set the detection factor error to the early-loosely-coupled detection\n        // error for new objects, as they need some chances tp obtain velocity\n        std::tie(j, error) = getDetectionIndexAndError(predictedPose, earlyLooselyCoupledMatchingVector);\n      } else {\n        std::tie(j, error) = 
getDetectionIndexAndError(predictedPose, looselyCoupledMatchingVector);\n      }\n      std::tie(dataAssociationJ, dataAssociationError) = getDetectionIndexAndError(predictedPose, dataAssociationVector);\n\n      // TODO: Increase code readability\n      if (error < detectionMatchThreshold) {  // found\n        // If the object is lost, we still need to create a new moving object\n        // for it. In this way we would temporarily skip it and later we will\n        // add the object properly\n        if (object.lostCount > 0) {\n          trackingObjectIndices[j] = object.objectIndexForTracking;\n          // A tricky part: set the lostCount to a large number so that the\n          // system will subtly remove this ``retired'' object\n          object.lostCount = std::numeric_limits<int>::max();\n        } else {\n          indicator(i, j)  = 1;\n          object.lostCount = 0;\n          // The maximum of `trackScore` is `numberOfPreLooseCouplingSteps` + 1\n          if (object.trackScore <= numberOfPreLooseCouplingSteps) {\n            ++object.trackScore;\n          }\n          object.detection = detections->boxes[j];\n\n          if (object.trackScore >= numberOfPreLooseCouplingSteps + 1) {\n            // Use a tighter threshold to determine if this detection is good\n            // for coupling. 
If this detection does not meet the requirement,\n            // the object will be loosely-coupled, and the corresponding\n            // `trackScore` will be deducted.\n            auto tightlyCoupledDetectionFactorPtr = boost::make_shared<TightlyCoupledDetectionFactor>(egoPoseKey,\n                                                                                                      object.poseNodeIndex,\n                                                                                                      tightlyCoupledDetectionVector);\n            // auto&& detectionError                 = tightlyCoupledDetectionFactorPtr->error(initialEstimateForAnalysis);\n            double detectionError;\n            std::tie(j, detectionError) = getDetectionIndexAndError(predictedPose, tightlyCoupledMatchingVector);\n\n            auto spatialConsistencyTest  = detectionError <= tightCouplingDetectionErrorThreshold;\n            auto temporalConsistencyTest = object.velocityIsConsistent(numberOfVelocityConsistencySteps, isamCurrentEstimate,\n                                                                       objectAngularVelocityConsistencyVarianceThreshold,\n                                                                       objectLinearVelocityConsistencyVarianceThreshold);\n            if (spatialConsistencyTest && temporalConsistencyTest) {\n              ++numberOfTightlyCoupledObjectsAtThisMoment;\n              anyObjectIsTightlyCoupled = true;\n              object.isTightlyCoupled   = true;\n              gtSAMgraph.add(TightlyCoupledDetectionFactor(egoPoseKey,\n                                                           object.poseNodeIndex,\n                                                           tightlyCoupledDetectionVector,\n                                                           j));\n              object.tightlyCoupledDetectionFactorPtr = tightlyCoupledDetectionFactorPtr;\n              object.initialDetectionError            = detectionError;\n 
           } else {\n              object.trackScore -= numberOfInterLooseCouplingSteps;\n              object.isTightlyCoupled = false;\n              initialEstimateForLooselyCoupledObjects.insert(object.poseNodeIndex, initialEstimate.at(object.poseNodeIndex));\n              initialEstimateForLooselyCoupledObjects.insert(object.velocityNodeIndex, initialEstimate.at(object.velocityNodeIndex));\n              initialEstimate.erase(object.poseNodeIndex);\n              initialEstimate.erase(object.velocityNodeIndex);\n              gtSAMgraphForLooselyCoupledObjects.add(LooselyCoupledDetectionFactor(egoPoseKey,\n                                                                                   object.poseNodeIndex,\n                                                                                   detectionVector,\n                                                                                   j));\n              object.looselyCoupledDetectionFactorPtr = boost::make_shared<LooselyCoupledDetectionFactor>(egoPoseKey,\n                                                                                                          object.poseNodeIndex,\n                                                                                                          detectionVector);\n              object.initialDetectionError            = object.looselyCoupledDetectionFactorPtr->error(initialEstimateForAnalysis);\n            }\n          } else {\n            // Pre-loosely coupled detection to stabilize the object velocity\n            object.isTightlyCoupled = false;\n            initialEstimateForLooselyCoupledObjects.insert(object.poseNodeIndex, initialEstimate.at(object.poseNodeIndex));\n            initialEstimateForLooselyCoupledObjects.insert(object.velocityNodeIndex, initialEstimate.at(object.velocityNodeIndex));\n            initialEstimate.erase(object.poseNodeIndex);\n            initialEstimate.erase(object.velocityNodeIndex);\n            
gtSAMgraphForLooselyCoupledObjects.add(LooselyCoupledDetectionFactor(egoPoseKey,\n                                                                                 object.poseNodeIndex,\n                                                                                 detectionVector,\n                                                                                 j));\n            object.looselyCoupledDetectionFactorPtr = boost::make_shared<LooselyCoupledDetectionFactor>(egoPoseKey,\n                                                                                                        object.poseNodeIndex,\n                                                                                                        detectionVector);\n            object.initialDetectionError            = object.looselyCoupledDetectionFactorPtr->error(initialEstimateForAnalysis);\n          }\n        }\n      } else {  // lost\n        ++object.lostCount;\n        object.confidence                               = 0.0;\n        object.trackScore                               = 0.0;\n        objectPaths.markers[object.objectIndex].scale.x = 0.3;\n        objectPaths.markers[object.objectIndex].scale.y = 0.3;\n        objectPaths.markers[object.objectIndex].scale.z = 0.3;\n\n        // Use a larger threshold to track object without association in the\n        // factor graph. 
They are different objects in the factor graph, but in\n        // the tracking (visualization) system, they are the same object.\n        if (dataAssociationError < detectionMatchThreshold) {\n          trackingObjectIndices[dataAssociationJ] = object.objectIndexForTracking;\n          // A tricky part: set the lostCount to a large number so that the\n          // system will subtly remove this ``retired'' object\n          object.lostCount = std::numeric_limits<int>::max();\n        } else {\n          trackingObjectPaths.markers[object.objectIndexForTracking].scale.x = 0.3;\n          trackingObjectPaths.markers[object.objectIndexForTracking].scale.y = 0.3;\n          trackingObjectPaths.markers[object.objectIndexForTracking].scale.z = 0.3;\n        }\n      }\n\n      ++i;\n    }\n\n#ifdef MAP_OPTIMIZATION_DEBUG\n    cout << \"\\n\\n\"\n         << \"INDICATOR ::\\n\"\n         << indicator << \"\\n\\n\";\n#endif\n\n    // Register a new dynamic object if the detection does not belong to any\n    // current active objects\n    for (size_t idx = 0; idx < detectionVector.size(); ++idx) {\n      if (indicator.col(idx).sum() == 0) {\n        // Initialize the object\n        ObjectState object;\n        object.detection         = detections->boxes[idx];\n        object.pose              = egoPose * detectionVector[idx].getPose();\n        object.velocity          = Pose3::identity();\n        object.poseNodeIndex     = numberOfNodes++;\n        object.velocityNodeIndex = numberOfNodes++;\n        object.objectIndex       = numberOfRegisteredObjects++;\n        object.isFirst           = true;\n        object.timestamp         = timeLaserInfoStamp;\n        if (trackingObjectIndices[idx] < 0) {\n          object.objectIndexForTracking = numberOfTrackingObjects++;\n\n          // Initialize a path object (marker) for visualizing path\n          visualization_msgs::Marker marker;\n          marker.id                 = object.objectIndexForTracking;\n          
marker.type               = visualization_msgs::Marker::SPHERE_LIST;\n          std_msgs::ColorRGBA color = jsk_topic_tools::colorCategory20(object.objectIndexForTracking);\n          marker.color.a            = 1.0;\n          marker.color.r            = color.r;\n          marker.color.g            = color.g;\n          marker.color.b            = color.b;\n          marker.scale.x            = 0.6;\n          marker.scale.y            = 0.6;\n          marker.scale.z            = 0.6;\n          marker.pose.orientation   = tf::createQuaternionMsgFromYaw(0);\n          trackingObjectPaths.markers.push_back(marker);\n\n          visualization_msgs::Marker labelMarker;\n          labelMarker.id      = object.objectIndexForTracking;\n          labelMarker.type    = visualization_msgs::Marker::TEXT_VIEW_FACING;\n          labelMarker.color.a = 1.0;\n          labelMarker.color.r = color.r;\n          labelMarker.color.g = color.g;\n          labelMarker.color.b = color.b;\n          labelMarker.scale.z = 1.2;\n          labelMarker.text    = \"Object \" + std::to_string(object.objectIndexForTracking);\n          trackingObjectLabels.markers.push_back(labelMarker);\n\n          visualization_msgs::Marker velocityMarker;\n          velocityMarker.id               = object.objectIndexForTracking;\n          velocityMarker.type             = visualization_msgs::Marker::LINE_STRIP;\n          velocityMarker.color.a          = 0.7;\n          velocityMarker.color.r          = color.r;\n          velocityMarker.color.g          = color.g;\n          velocityMarker.color.b          = color.b;\n          velocityMarker.scale.x          = 0.4;\n          velocityMarker.scale.y          = 0.4;\n          velocityMarker.scale.z          = 0.4;\n          velocityMarker.pose.orientation = tf::createQuaternionMsgFromYaw(0);\n          trackingObjectVelocities.markers.push_back(velocityMarker);\n\n          velocityMarker.type = visualization_msgs::Marker::ARROW;\n          
trackingObjectVelocityArrows.markers.push_back(velocityMarker);\n        } else {\n          object.objectIndexForTracking                                      = trackingObjectIndices[idx];\n          trackingObjectPaths.markers[object.objectIndexForTracking].scale.x = 0.6;\n          trackingObjectPaths.markers[object.objectIndexForTracking].scale.y = 0.6;\n          trackingObjectPaths.markers[object.objectIndexForTracking].scale.z = 0.6;\n          trackingObjectLabels.markers[object.objectIndexForTracking].text   = \"Object \" + std::to_string(object.objectIndexForTracking);\n        }\n\n        // TODO: Propagate the bounding box in the post-processing\n        object.box = detectionVector[idx].getBoundingBox();\n\n        objects.back()[object.objectIndex] = object;\n\n        // Initialize a path object (marker) for visualizing path\n        visualization_msgs::Marker marker;\n        marker.id                 = object.objectIndex;\n        marker.type               = visualization_msgs::Marker::SPHERE_LIST;\n        std_msgs::ColorRGBA color = jsk_topic_tools::colorCategory20(object.objectIndex);\n        marker.color.a            = 1.0;\n        marker.color.r            = color.r;\n        marker.color.g            = color.g;\n        marker.color.b            = color.b;\n        marker.scale.x            = 0.6;\n        marker.scale.y            = 0.6;\n        marker.scale.z            = 0.6;\n        marker.pose.orientation   = tf::createQuaternionMsgFromYaw(0);\n        objectPaths.markers.push_back(marker);\n\n        visualization_msgs::Marker labelMarker;\n        labelMarker.id      = object.objectIndex;\n        labelMarker.type    = visualization_msgs::Marker::TEXT_VIEW_FACING;\n        labelMarker.color.a = 1.0;\n        labelMarker.color.r = color.r;\n        labelMarker.color.g = color.g;\n        labelMarker.color.b = color.b;\n        labelMarker.scale.z = 1.2;\n        labelMarker.text    = \"Object \" + 
std::to_string(object.objectIndex);\n        objectLabels.markers.push_back(labelMarker);\n\n        visualization_msgs::Marker velocityMarker;\n        velocityMarker.id               = object.objectIndex;\n        velocityMarker.type             = visualization_msgs::Marker::LINE_STRIP;\n        velocityMarker.color.a          = 0.7;\n        velocityMarker.color.r          = color.r;\n        velocityMarker.color.g          = color.g;\n        velocityMarker.color.b          = color.b;\n        velocityMarker.scale.x          = 0.4;\n        velocityMarker.scale.y          = 0.4;\n        velocityMarker.scale.z          = 0.4;\n        velocityMarker.pose.orientation = tf::createQuaternionMsgFromYaw(0);\n        objectVelocities.markers.push_back(velocityMarker);\n\n        velocityMarker.type = visualization_msgs::Marker::ARROW;\n        objectVelocityArrows.markers.push_back(velocityMarker);\n\n        initialEstimateForLooselyCoupledObjects.insert(object.poseNodeIndex, object.pose);\n        initialEstimateForLooselyCoupledObjects.insert(object.velocityNodeIndex, object.velocity);\n\n        initialEstimateForAnalysis.insert(object.poseNodeIndex, object.pose);\n        initialEstimateForAnalysis.insert(object.velocityNodeIndex, object.velocity);\n\n        // detection factor\n        gtSAMgraphForLooselyCoupledObjects.add(LooselyCoupledDetectionFactor(egoPoseKey,\n                                                                             object.poseNodeIndex,\n                                                                             detectionVector,\n                                                                             idx));\n        objects.back()[object.objectIndex].looselyCoupledDetectionFactorPtr = boost::make_shared<LooselyCoupledDetectionFactor>(egoPoseKey,\n                                                                                                                                object.poseNodeIndex,\n                                  
                                                                                              detectionVector);\n\n#ifndef ENABLE_COMPACT_VERSION_OF_FACTOR_GRAPH\n        // prior velocity factor (the noise should be large)\n        auto noise = noiseModel::Diagonal::Variances((Vector(6) << 1e-2, 1e-2, 1e0, 1e8, 1e2, 1e2).finished());\n        gtSAMgraphForLooselyCoupledObjects.add(PriorFactor<Pose3>(object.velocityNodeIndex, object.velocity, noise));\n#endif\n      }\n    }\n  }\n\n  void saveKeyFramesAndFactor() {\n    bool requiredSaveFrame = saveFrame();\n\n#ifdef ENABLE_SIMULTANEOUS_LOCALIZATION_AND_TRACKING\n    if (requiredSaveFrame) {\n      // odom factor\n      addOdomFactor();\n\n      // gps factor\n      addGPSFactor();\n\n      // loop factor\n      addLoopFactor();\n    } else {\n#ifdef ENABLE_ASYNCHRONOUS_STATE_ESTIMATE_FOR_SLOT\n      // add the latest ego-pose to the initial guess set\n      auto egoPose6D      = cloudKeyPoses6D->back();\n      Pose3 latestEgoPose = Pose3(Rot3::RzRyRx((Vector3() << egoPose6D.roll, egoPose6D.pitch, egoPose6D.yaw).finished()),\n                                  Point3((Vector3() << egoPose6D.x, egoPose6D.y, egoPose6D.z).finished()));\n      initialEstimate.insert(keyPoseIndices.back(), latestEgoPose);\n      initialEstimateForAnalysis.insert(keyPoseIndices.back(), latestEgoPose);\n#else\n      return;\n#endif\n    }\n#else  // LIO-SAM\n    if (!requiredSaveFrame)\n      return;\n\n    // odom factor\n    addOdomFactor();\n\n    // gps factor\n    addGPSFactor();\n\n    // loop factor\n    addLoopFactor();\n#endif\n\n#ifdef ENABLE_SIMULTANEOUS_LOCALIZATION_AND_TRACKING\n    // perform dynamic object propagation\n    propagateObjectPoses();\n\n    // detection factor (for multi-object tracking tracking)\n    addDetectionFactor(!requiredSaveFrame);\n\n    // // constant velocity factor (for multi-object tracking)\n    addConstantVelocityFactor();\n\n    // stable pose factor (for multi-object tracking)\n    
addStablePoseFactor();\n#endif\n\n#ifdef MAP_OPTIMIZATION_DEBUG\n    std::cout << \"****************************************************\" << endl;\n    gtSAMgraph.print(\"GTSAM Graph:\\n\");\n#endif\n\n    if (!requiredSaveFrame) {\n      initialEstimate.erase(keyPoseIndices.back());\n      initialEstimateForAnalysis.erase(keyPoseIndices.back());\n    }\n\n    // update iSAM\n    isam->update(gtSAMgraph, initialEstimate);\n    isam->update();\n\n    if (aLoopIsClosed == true) {\n      isam->update();\n      isam->update();\n      isam->update();\n      isam->update();\n      isam->update();\n    }\n\n    gtSAMgraph.resize(0);\n    initialEstimate.clear();\n    initialEstimateForAnalysis.clear();\n\n#ifdef ENABLE_SIMULTANEOUS_LOCALIZATION_AND_TRACKING\n    if (!gtSAMgraphForLooselyCoupledObjects.empty()) {\n      isam->update(gtSAMgraphForLooselyCoupledObjects, initialEstimateForLooselyCoupledObjects);\n      isam->update();\n    }\n\n    gtSAMgraphForLooselyCoupledObjects.resize(0);\n    initialEstimateForLooselyCoupledObjects.clear();\n#endif\n\n    isamCurrentEstimate = isam->calculateEstimate();\n\n    if (requiredSaveFrame) {\n      // save key poses\n      PointType thisPose3D;\n      PointTypePose thisPose6D;\n      Pose3 latestEstimate;\n\n      latestEstimate = isamCurrentEstimate.at<Pose3>(keyPoseIndices.back());\n      // cout << \"****************************************************\" << endl;\n      // isamCurrentEstimate.print(\"Current estimate: \");\n\n      thisPose3D.x         = latestEstimate.translation().x();\n      thisPose3D.y         = latestEstimate.translation().y();\n      thisPose3D.z         = latestEstimate.translation().z();\n      thisPose3D.intensity = cloudKeyPoses3D->size();  // this can be used as index\n      cloudKeyPoses3D->push_back(thisPose3D);\n\n      thisPose6D.x         = thisPose3D.x;\n      thisPose6D.y         = thisPose3D.y;\n      thisPose6D.z         = thisPose3D.z;\n      thisPose6D.intensity = 
thisPose3D.intensity;  // this can be used as index\n      thisPose6D.roll      = latestEstimate.rotation().roll();\n      thisPose6D.pitch     = latestEstimate.rotation().pitch();\n      thisPose6D.yaw       = latestEstimate.rotation().yaw();\n      thisPose6D.time      = timeLaserInfoCur;\n      cloudKeyPoses6D->push_back(thisPose6D);\n\n      // cout << \"****************************************************\" << endl;\n      // cout << \"Pose covariance:\" << endl;\n      // cout << isam->marginalCovariance(isamCurrentEstimate.size()-1) << endl << endl;\n      poseCovariance = isam->marginalCovariance(keyPoseIndices.back());\n\n      // save updated transform\n      transformTobeMapped[0] = latestEstimate.rotation().roll();\n      transformTobeMapped[1] = latestEstimate.rotation().pitch();\n      transformTobeMapped[2] = latestEstimate.rotation().yaw();\n      transformTobeMapped[3] = latestEstimate.translation().x();\n      transformTobeMapped[4] = latestEstimate.translation().y();\n      transformTobeMapped[5] = latestEstimate.translation().z();\n\n      // save all the received edge and surf points\n      pcl::PointCloud<PointType>::Ptr thisCornerKeyFrame(new pcl::PointCloud<PointType>());\n      pcl::PointCloud<PointType>::Ptr thisSurfKeyFrame(new pcl::PointCloud<PointType>());\n      pcl::copyPointCloud(*laserCloudCornerLastDS, *thisCornerKeyFrame);\n      pcl::copyPointCloud(*laserCloudSurfLastDS, *thisSurfKeyFrame);\n\n      // save key frame cloud\n      cornerCloudKeyFrames.push_back(thisCornerKeyFrame);\n      surfCloudKeyFrames.push_back(thisSurfKeyFrame);\n\n      // save path for visualization\n      updatePath(thisPose6D);\n    }\n\n    // save dynamic objects\n    if (objects.size() > 0) {\n      size_t i = 0;\n      for (auto& pairedObject : objects.back()) {\n        auto& object = pairedObject.second;\n\n        // Since lost objects do not join the optimization, skip the value\n        // update for them\n        if (object.lostCount > 0) 
continue;\n\n#ifdef MAP_OPTIMIZATION_DEBUG\n        std::cout << \"(OBJECT \" << object.objectIndex << \") [BEFORE]\\nPOSITION ::\\n\"\n                  << object.pose << \"\\n\"\n                  << \"VELOCITY ::\\n\"\n                  << object.velocity << \"\\n\";\n#endif\n\n        object.pose = isamCurrentEstimate.at<Pose3>(object.poseNodeIndex);\n#ifdef ENABLE_COMPACT_VERSION_OF_FACTOR_GRAPH\n        if (!object.isFirst) {\n          object.velocity = isamCurrentEstimate.at<Pose3>(object.velocityNodeIndex);\n        }\n#else\n        object.velocity = isamCurrentEstimate.at<Pose3>(object.velocityNodeIndex);\n#endif\n\n#ifdef MAP_OPTIMIZATION_DEBUG\n        std::cout << \"(OBJECT \" << object.objectIndex << \") [AFTER ]\\nPOSITION ::\\n\"\n                  << object.pose << \"\\n\"\n                  << \"VELOCITY ::\\n\"\n                  << object.velocity << \"\\n\\n\";\n#endif\n\n        // TODO: Reproduce the post-processing of tracking (refer to FG-3DMOT)\n        // // Check if there is any detection belongs to the object.\n        // int index;\n        // double error;\n        // std::tie(index, error) = getDetectionIndexAndError(object.pose, detectionVector);\n\n        // if (error < detectionMatchThreshold) {  // found\n        // object.box = detections->boxes[index];\n\n        auto p                     = object.pose.translation();\n        object.box.pose.position.x = p.x();\n        object.box.pose.position.y = p.y();\n        object.box.pose.position.z = p.z();\n\n        auto r                      = object.pose.rotation();\n        object.box.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(r.roll(), r.pitch(), r.yaw());\n\n        object.box.header.frame_id = odometryFrame;\n        object.box.label           = object.objectIndex;\n        // object.confidence          = object.box.value;\n\n        // } else {  // lost\n        //   object.confidence = 0;\n        // }\n\n        ++i;\n      }\n    }\n  }\n\n  void 
correctPoses() {\n    if (cloudKeyPoses3D->points.empty())\n      return;\n\n    if (aLoopIsClosed || anyObjectIsTightlyCoupled) {\n      // clear map cache\n      laserCloudMapContainer.clear();\n      // clear path\n      globalPath.poses.clear();\n      // update key poses\n      int numPoses = keyPoseIndices.size();\n      for (int i = 0; i < numPoses; ++i) {\n        auto poseIndex               = keyPoseIndices[i];\n        cloudKeyPoses3D->points[i].x = isamCurrentEstimate.at<Pose3>(poseIndex).translation().x();\n        cloudKeyPoses3D->points[i].y = isamCurrentEstimate.at<Pose3>(poseIndex).translation().y();\n        cloudKeyPoses3D->points[i].z = isamCurrentEstimate.at<Pose3>(poseIndex).translation().z();\n\n        cloudKeyPoses6D->points[i].x     = cloudKeyPoses3D->points[i].x;\n        cloudKeyPoses6D->points[i].y     = cloudKeyPoses3D->points[i].y;\n        cloudKeyPoses6D->points[i].z     = cloudKeyPoses3D->points[i].z;\n        cloudKeyPoses6D->points[i].roll  = isamCurrentEstimate.at<Pose3>(poseIndex).rotation().roll();\n        cloudKeyPoses6D->points[i].pitch = isamCurrentEstimate.at<Pose3>(poseIndex).rotation().pitch();\n        cloudKeyPoses6D->points[i].yaw   = isamCurrentEstimate.at<Pose3>(poseIndex).rotation().yaw();\n\n        updatePath(cloudKeyPoses6D->points[i]);\n      }\n\n      aLoopIsClosed = false;\n    }\n  }\n\n  void updatePath(const PointTypePose& pose_in) {\n    geometry_msgs::PoseStamped pose_stamped;\n    pose_stamped.header.stamp       = ros::Time().fromSec(pose_in.time);\n    pose_stamped.header.frame_id    = odometryFrame;\n    pose_stamped.pose.position.x    = pose_in.x;\n    pose_stamped.pose.position.y    = pose_in.y;\n    pose_stamped.pose.position.z    = pose_in.z;\n    tf::Quaternion q                = tf::createQuaternionFromRPY(pose_in.roll, pose_in.pitch, pose_in.yaw);\n    pose_stamped.pose.orientation.x = q.x();\n    pose_stamped.pose.orientation.y = q.y();\n    pose_stamped.pose.orientation.z = q.z();\n    
pose_stamped.pose.orientation.w = q.w();\n\n    globalPath.poses.push_back(pose_stamped);\n  }\n\n  void publishOdometry() {\n    // Publish odometry for ROS (global)\n    nav_msgs::Odometry laserOdometryROS;\n    laserOdometryROS.header.stamp          = timeLaserInfoStamp;\n    laserOdometryROS.header.frame_id       = odometryFrame;\n    laserOdometryROS.child_frame_id        = \"odom_mapping\";\n    laserOdometryROS.pose.pose.position.x  = transformTobeMapped[3];\n    laserOdometryROS.pose.pose.position.y  = transformTobeMapped[4];\n    laserOdometryROS.pose.pose.position.z  = transformTobeMapped[5];\n    laserOdometryROS.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]);\n    pubLaserOdometryGlobal.publish(laserOdometryROS);\n\n    // Publish TF\n    static tf::TransformBroadcaster br;\n    tf::Transform t_odom_to_lidar            = tf::Transform(tf::createQuaternionFromRPY(transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]),\n                                                             tf::Vector3(transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5]));\n    tf::StampedTransform trans_odom_to_lidar = tf::StampedTransform(t_odom_to_lidar, timeLaserInfoStamp, odometryFrame, \"lidar_link\");\n    br.sendTransform(trans_odom_to_lidar);\n\n    // Publish odometry for ROS (incremental)\n    static bool lastIncreOdomPubFlag = false;\n    static nav_msgs::Odometry laserOdomIncremental;  // incremental odometry msg\n    static Eigen::Affine3f increOdomAffine;          // incremental odometry in affine\n    if (lastIncreOdomPubFlag == false) {\n      lastIncreOdomPubFlag = true;\n      laserOdomIncremental = laserOdometryROS;\n      increOdomAffine      = trans2Affine3f(transformTobeMapped);\n    } else {\n      Eigen::Affine3f affineIncre = incrementalOdometryAffineFront.inverse() * incrementalOdometryAffineBack;\n      increOdomAffine             = 
increOdomAffine * affineIncre;\n      float x, y, z, roll, pitch, yaw;\n      pcl::getTranslationAndEulerAngles(increOdomAffine, x, y, z, roll, pitch, yaw);\n      if (cloudInfo.imuAvailable == true) {\n        if (std::abs(cloudInfo.imuPitchInit) < 1.4) {\n          double imuWeight = 0.1;\n          tf::Quaternion imuQuaternion;\n          tf::Quaternion transformQuaternion;\n          double rollMid, pitchMid, yawMid;\n\n          // slerp roll\n          transformQuaternion.setRPY(roll, 0, 0);\n          imuQuaternion.setRPY(cloudInfo.imuRollInit, 0, 0);\n          tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);\n          roll = rollMid;\n\n          // slerp pitch\n          transformQuaternion.setRPY(0, pitch, 0);\n          imuQuaternion.setRPY(0, cloudInfo.imuPitchInit, 0);\n          tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid);\n          pitch = pitchMid;\n        }\n      }\n      laserOdomIncremental.header.stamp          = timeLaserInfoStamp;\n      laserOdomIncremental.header.frame_id       = odometryFrame;\n      laserOdomIncremental.child_frame_id        = \"odom_mapping\";\n      laserOdomIncremental.pose.pose.position.x  = x;\n      laserOdomIncremental.pose.pose.position.y  = y;\n      laserOdomIncremental.pose.pose.position.z  = z;\n      laserOdomIncremental.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(roll, pitch, yaw);\n      if (isDegenerate)\n        laserOdomIncremental.pose.covariance[0] = 1;\n      else\n        laserOdomIncremental.pose.covariance[0] = 0;\n    }\n    pubLaserOdometryIncremental.publish(laserOdomIncremental);\n  }\n\n  void publishFrames() {\n    if (cloudKeyPoses3D->points.empty())\n      return;\n    // publish key poses\n    publishCloud(&pubKeyPoses, cloudKeyPoses3D, timeLaserInfoStamp, odometryFrame);\n    // Publish surrounding key frames\n    publishCloud(&pubRecentKeyFrames, 
laserCloudSurfFromMapDS, timeLaserInfoStamp, odometryFrame);\n    // publish registered key frame\n    if (pubRecentKeyFrame.getNumSubscribers() != 0) {\n      pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());\n      PointTypePose thisPose6D = trans2PointTypePose(transformTobeMapped);\n      *cloudOut += *transformPointCloud(laserCloudCornerLastDS, &thisPose6D);\n      *cloudOut += *transformPointCloud(laserCloudSurfLastDS, &thisPose6D);\n      publishCloud(&pubRecentKeyFrame, cloudOut, timeLaserInfoStamp, odometryFrame);\n    }\n    // publish registered high-res raw cloud\n    if (pubCloudRegisteredRaw.getNumSubscribers() != 0) {\n      pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>());\n      pcl::fromROSMsg(cloudInfo.cloud_deskewed, *cloudOut);\n      PointTypePose thisPose6D = trans2PointTypePose(transformTobeMapped);\n      *cloudOut                = *transformPointCloud(cloudOut, &thisPose6D);\n      publishCloud(&pubCloudRegisteredRaw, cloudOut, timeLaserInfoStamp, odometryFrame);\n    }\n    // publish path\n    if (pubPath.getNumSubscribers() != 0) {\n      globalPath.header.stamp    = timeLaserInfoStamp;\n      globalPath.header.frame_id = odometryFrame;\n      pubPath.publish(globalPath);\n    }\n    // publish detections\n    if (pubDetection.getNumSubscribers() != 0 && detectionIsActive) {\n      pubDetection.publish(detections);\n    }\n    if (pubLaserCloudDeskewed.getNumSubscribers() != 0) {\n      cloudInfo.header.stamp = timeLaserInfoStamp;\n      pubLaserCloudDeskewed.publish(cloudInfo.cloud_deskewed);\n    }\n    // public dynamic objects\n    if (detectionIsActive) {\n      BoundingBoxArray objectMessage;\n      BoundingBoxArray trackingObjectMessage;\n      objectMessage.header          = detections->header;\n      objectMessage.header.frame_id = odometryFrame;\n      objectMessage.header.stamp    = timeLaserInfoStamp;\n\n      trackingObjectMessage.header          = detections->header;\n     
 trackingObjectMessage.header.frame_id = odometryFrame;\n      trackingObjectMessage.header.stamp    = timeLaserInfoStamp;\n\n      tightlyCoupledObjectPoints.header.frame_id = odometryFrame;\n      tightlyCoupledObjectPoints.header.stamp    = timeLaserInfoStamp;\n\n      for (auto& marker : objectVelocities.markers) {\n        marker.points.clear();\n      }\n      for (auto& marker : objectVelocityArrows.markers) {\n        marker.points.clear();\n        marker.scale.x = 0;\n        marker.scale.y = 0;\n        marker.scale.z = 0;\n      }\n      for (auto& marker : trackingObjectVelocities.markers) {\n        marker.points.clear();\n      }\n      for (auto& marker : trackingObjectVelocityArrows.markers) {\n        marker.points.clear();\n        marker.scale.x = 0;\n        marker.scale.y = 0;\n        marker.scale.z = 0;\n      }\n\n      objectStates.objects.clear();\n      objectStates.header.frame_id = odometryFrame;\n      objectStates.header.stamp    = timeLaserInfoStamp;\n\n      std::vector<bool> trackingObjectIsActive(numberOfTrackingObjects, false);\n      for (auto& pairedObject : objects.back()) {\n        auto& object = pairedObject.second;\n        // Only publish active objects\n        if (object.lostCount == 0) {\n          // TODO: A better data structure to present moving object with path\n          // Mark the tracking object as an active instance\n          trackingObjectIsActive[object.objectIndexForTracking] = true;\n\n          // Bounding box\n          object.box.header.stamp = timeLaserInfoStamp;\n          objectMessage.boxes.push_back(object.box);\n          trackingObjectMessage.boxes.push_back(object.box);\n          trackingObjectMessage.boxes.back().label = object.objectIndexForTracking;\n\n          // Path\n          geometry_msgs::Point point;\n          point.x = object.box.pose.position.x;\n          point.y = object.box.pose.position.y;\n          point.z = object.box.pose.position.z;\n          
objectPaths.markers[object.objectIndex].points.push_back(point);\n          objectPaths.markers[object.objectIndex].header.frame_id = odometryFrame;\n          objectPaths.markers[object.objectIndex].header.stamp    = timeLaserInfoStamp;\n\n          trackingObjectPaths.markers[object.objectIndexForTracking].points.push_back(point);\n          trackingObjectPaths.markers[object.objectIndexForTracking].header.frame_id = odometryFrame;\n          trackingObjectPaths.markers[object.objectIndexForTracking].header.stamp    = timeLaserInfoStamp;\n\n          // Tightly-coupled nodes (poses)\n          if (object.isTightlyCoupled) {\n            tightlyCoupledObjectPoints.points.push_back(point);\n          }\n\n          // Label\n          objectLabels.markers[object.objectIndex].pose.position.x = object.box.pose.position.x;\n          objectLabels.markers[object.objectIndex].pose.position.y = object.box.pose.position.y;\n          objectLabels.markers[object.objectIndex].pose.position.z = object.box.pose.position.z + 2.0;\n          objectLabels.markers[object.objectIndex].header.frame_id = odometryFrame;\n          objectLabels.markers[object.objectIndex].header.stamp    = timeLaserInfoStamp;\n\n          trackingObjectLabels.markers[object.objectIndexForTracking].pose.position.x = object.box.pose.position.x;\n          trackingObjectLabels.markers[object.objectIndexForTracking].pose.position.y = object.box.pose.position.y;\n          trackingObjectLabels.markers[object.objectIndexForTracking].pose.position.z = object.box.pose.position.z + 2.0;\n          trackingObjectLabels.markers[object.objectIndexForTracking].header.frame_id = odometryFrame;\n          trackingObjectLabels.markers[object.objectIndexForTracking].header.stamp    = timeLaserInfoStamp;\n\n          // Velocity (prediction of path)\n          objectVelocities.markers[object.objectIndex].header.frame_id     = odometryFrame;\n          objectVelocities.markers[object.objectIndex].header.stamp        = 
timeLaserInfoStamp;\n          objectVelocityArrows.markers[object.objectIndex].header.frame_id = odometryFrame;\n          objectVelocityArrows.markers[object.objectIndex].header.stamp    = timeLaserInfoStamp;\n          objectVelocityArrows.markers[object.objectIndex].scale.x         = 0.4;\n          objectVelocityArrows.markers[object.objectIndex].scale.y         = 0.8;\n          objectVelocityArrows.markers[object.objectIndex].scale.z         = 1.0;\n\n          trackingObjectVelocities.markers[object.objectIndexForTracking].header.frame_id     = odometryFrame;\n          trackingObjectVelocities.markers[object.objectIndexForTracking].header.stamp        = timeLaserInfoStamp;\n          trackingObjectVelocityArrows.markers[object.objectIndexForTracking].header.frame_id = odometryFrame;\n          trackingObjectVelocityArrows.markers[object.objectIndexForTracking].header.stamp    = timeLaserInfoStamp;\n          trackingObjectVelocityArrows.markers[object.objectIndexForTracking].scale.x         = 0.4;\n          trackingObjectVelocityArrows.markers[object.objectIndexForTracking].scale.y         = 0.8;\n          trackingObjectVelocityArrows.markers[object.objectIndexForTracking].scale.z         = 1.0;\n\n          // .. Compute the delta pose with respect to the delta time\n          auto identity     = gtsam::Pose3::identity();\n          auto deltaPoseVec = gtsam::traits<gtsam::Pose3>::Local(identity, object.velocity) * 0.1;\n          auto deltaPose    = gtsam::traits<gtsam::Pose3>::Retract(identity, deltaPoseVec);\n          auto nextPose     = object.pose;\n\n          // .. 
Object position at relative time stamp from 1 to 5\n          for (int timeStamp = 1; timeStamp <= 5; ++timeStamp) {\n            nextPose = nextPose * deltaPose;\n            point.x  = nextPose.translation().x();\n            point.y  = nextPose.translation().y();\n            point.z  = nextPose.translation().z();\n            if (timeStamp <= 4) {\n              objectVelocities.markers[object.objectIndex].points.push_back(point);\n              trackingObjectVelocities.markers[object.objectIndexForTracking].points.push_back(point);\n            }\n            if (timeStamp >= 4) {\n              objectVelocityArrows.markers[object.objectIndex].points.push_back(point);\n              trackingObjectVelocityArrows.markers[object.objectIndexForTracking].points.push_back(point);\n            }\n          }\n\n          // Diagnosis\n          lio_segmot::ObjectState state;\n          state.header.frame_id        = odometryFrame;\n          state.header.stamp           = timeLaserInfoStamp;\n          state.detection              = object.detection;\n          state.pose                   = object.box.pose;\n          state.velocity.position.x    = object.velocity.translation().x();\n          state.velocity.position.y    = object.velocity.translation().y();\n          state.velocity.position.z    = object.velocity.translation().z();\n          state.velocity.orientation.x = object.velocity.rotation().quaternion().x();\n          state.velocity.orientation.y = object.velocity.rotation().quaternion().y();\n          state.velocity.orientation.z = object.velocity.rotation().quaternion().z();\n          state.velocity.orientation.w = object.velocity.rotation().quaternion().w();\n          state.index                  = object.objectIndex;\n          state.lostCount              = object.lostCount;\n          state.confidence             = object.confidence;\n          state.isTightlyCoupled       = object.isTightlyCoupled;\n          state.isFirst                = 
object.isFirst;\n\n          state.hasTightlyCoupledDetectionError = false;\n          if (object.tightlyCoupledDetectionFactorPtr) {\n            state.hasTightlyCoupledDetectionError     = true;\n            state.tightlyCoupledDetectionError        = object.tightlyCoupledDetectionFactorPtr->error(isamCurrentEstimate);\n            state.initialTightlyCoupledDetectionError = object.initialDetectionError;\n          }\n\n          state.hasLooselyCoupledDetectionError = false;\n          if (object.looselyCoupledDetectionFactorPtr) {\n            state.hasLooselyCoupledDetectionError     = true;\n            state.looselyCoupledDetectionError        = object.looselyCoupledDetectionFactorPtr->error(isamCurrentEstimate);\n            state.initialLooselyCoupledDetectionError = object.initialDetectionError;\n          }\n\n          state.hasMotionError = false;\n          if (object.motionFactorPtr) {\n            state.hasMotionError     = true;\n            state.motionError        = object.motionFactorPtr->error(isamCurrentEstimate);\n            state.initialMotionError = object.initialMotionError;\n          }\n\n          objectStates.objects.push_back(state);\n        }\n      }\n\n      // Hide moving object if it only appears one time and it is not active\n      for (auto& pairedObject : objects.back()) {\n        auto& object = pairedObject.second;\n        auto& index  = object.objectIndexForTracking;\n        if (!trackingObjectIsActive[index] && object.lostCount != 0) {\n          if (trackingObjectPaths.markers[index].points.size() <= 1) {\n            trackingObjectPaths.markers[index].points.clear();\n            trackingObjectLabels.markers[index].text = \"\";\n          }\n        }\n      }\n\n      pubObjects.publish(objectMessage);\n      pubObjectPaths.publish(objectPaths);\n      pubTightlyCoupledObjectPoints.publish(tightlyCoupledObjectPoints);\n      pubObjectLabels.publish(objectLabels);\n      
pubObjectVelocities.publish(objectVelocities);\n      pubObjectVelocityArrows.publish(objectVelocityArrows);\n      pubTrackingObjects.publish(trackingObjectMessage);\n      pubTrackingObjectPaths.publish(trackingObjectPaths);\n      pubTrackingObjectLabels.publish(trackingObjectLabels);\n      pubTrackingObjectVelocities.publish(trackingObjectVelocities);\n      pubTrackingObjectVelocityArrows.publish(trackingObjectVelocityArrows);\n      pubObjectStates.publish(objectStates);\n    }\n\n    lio_segmot::Diagnosis diagnosis;\n    diagnosis.header.frame_id               = odometryFrame;\n    diagnosis.header.stamp                  = timeLaserInfoStamp;\n    diagnosis.numberOfDetections            = detections ? detections->boxes.size() : 0;\n    diagnosis.computationalTime             = timer.elapsed();\n    diagnosis.numberOfTightlyCoupledObjects = numberOfTightlyCoupledObjectsAtThisMoment;\n    pubDiagnosis.publish(diagnosis);\n  }\n};\n\nint main(int argc, char** argv) {\n  ros::init(argc, argv, \"lio_segmot\");\n\n  mapOptimization MO;\n\n  ROS_INFO(\"\\033[1;32m----> Map Optimization Started.\\033[0m\");\n\n  std::thread loopthread(&mapOptimization::loopClosureThread, &MO);\n  std::thread visualizeMapThread(&mapOptimization::visualizeGlobalMapThread, &MO);\n\n  ros::spin();\n\n  loopthread.join();\n  visualizeMapThread.join();\n\n  return 0;\n}\n"
  },
  {
    "path": "src/offlineBagPlayer.cpp",
    "content": "#include \"utility.h\"\n\n#include <rosbag/bag.h>\n#include <rosbag/view.h>\n\nstd::queue<std::vector<rosbag::MessageInstance>> patchedInstances;\n\nfloat base_rate;\nstd::string bag_filename;\nstd::string imu_topic;\nstd::string lidar_topic;\nstd::string pub_imu_topic;\nstd::string pub_lidar_topic;\n\nros::Publisher pubImu;\nros::Publisher pubLiDAR;\n\nros::Time currentTime;\n\nvoid playInstances(const std::vector<rosbag::MessageInstance>& instances) {\n  for (auto& instance : instances) {\n    auto duration = instance.getTime() - currentTime;\n    if (duration.toSec() > 0) {\n      ros::WallDuration(duration.toSec() / base_rate).sleep();\n    }\n\n    auto imuMsg = instance.instantiate<sensor_msgs::Imu>();\n    if (imuMsg != nullptr) {\n      pubImu.publish(imuMsg);\n    }\n    auto lidarMsg = instance.instantiate<sensor_msgs::PointCloud2>();\n    if (lidarMsg != nullptr) {\n      pubLiDAR.publish(lidarMsg);\n    }\n\n    currentTime = instance.getTime();\n  }\n}\n\nvoid odometryIsDoneCallback(const std_msgs::EmptyConstPtr& msg) {\n  playInstances(patchedInstances.front());\n  patchedInstances.pop();\n  ROS_INFO(\"Remaining patches: %lu\", patchedInstances.size());\n}\n\nint main(int argc, char* argv[]) {\n  ros::init(argc, argv, \"offline_bag_player\");\n  ros::NodeHandle _nh(\"~\");\n  ros::NodeHandle nh;\n\n  _nh.param<float>(\"base_rate\", base_rate, 1.0);\n  _nh.param<std::string>(\"bag_filename\", bag_filename, \"\");\n  _nh.param<std::string>(\"imu_topic\", imu_topic, \"/imu_raw\");\n  _nh.param<std::string>(\"lidar_topic\", lidar_topic, \"/points_raw\");\n  _nh.param<std::string>(\"pub_imu_topic\", pub_imu_topic, \"/imu_raw\");\n  _nh.param<std::string>(\"pub_lidar_topic\", pub_lidar_topic, \"/points_raw\");\n\n  ros::Subscriber sub = nh.subscribe<std_msgs::Empty>(\"lio_segmot/ready\", 10, &odometryIsDoneCallback);\n\n  pubImu   = nh.advertise<sensor_msgs::Imu>(pub_imu_topic, 2000);\n  pubLiDAR = 
nh.advertise<sensor_msgs::PointCloud2>(pub_lidar_topic, 1);\n\n  if (bag_filename.empty()) {\n    ROS_ERROR(\"bag_filename is empty\");\n    return -1;\n  }\n\n  // Read and process the bag file\n  rosbag::Bag bag;\n  ROS_INFO(\"Opening bag file: %s\", bag_filename.c_str());\n  bag.open(bag_filename, rosbag::bagmode::Read);\n\n  std::vector<std::string> topics;\n  topics.push_back(imu_topic);\n  topics.push_back(lidar_topic);\n  rosbag::View view(bag, rosbag::TopicQuery(topics));\n\n  std::vector<rosbag::MessageInstance> instances;\n  bool timeIsInitialized = false;\n  BOOST_FOREACH (rosbag::MessageInstance const m, view) {\n    instances.push_back(m);\n    if (m.getTopic() == lidar_topic) {\n      patchedInstances.push(instances);\n      instances.clear();\n    }\n\n    if (!timeIsInitialized) {\n      currentTime       = m.getTime();\n      timeIsInitialized = true;\n    }\n  }\n\n  // Make sure that the system is ready to subscribe the data\n  ROS_INFO(\"Pending ...\");\n  while (pubImu.getNumSubscribers() < 2 || pubLiDAR.getNumSubscribers() < 1) {\n    ros::spinOnce();\n  }\n\n  ROS_INFO(\"Start playing the bag ...\");\n  // Play the first patch of the data\n  playInstances(patchedInstances.front());\n  patchedInstances.pop();\n\n  while (!patchedInstances.empty() && ros::ok()) {\n    ros::spinOnce();\n  }\n\n  bag.close();\n\n  return 0;\n}"
  },
  {
    "path": "src/solver.cpp",
    "content": "#include \"solver.h\"\n#include <gtsam/nonlinear/ISAM2-impl.h>\n\ngtsam::KeySet gatherMaxMixtureRelinearizationKeys(const gtsam::NonlinearFactorGraph nonlinearFactors,\n                                                  const gtsam::Values theta,\n                                                  const gtsam::VectorValues delta,\n                                                  gtsam::KeySet* markedKeys) {\n  gtsam::KeySet relinKeys;\n\n  LooselyCoupledDetectionFactor* lcdf;\n  TightlyCoupledDetectionFactor* tcdf;\n  for (const auto& factor : nonlinearFactors) {\n    lcdf = dynamic_cast<LooselyCoupledDetectionFactor*>(factor.get());\n    tcdf = dynamic_cast<TightlyCoupledDetectionFactor*>(factor.get());\n\n    // SKip if the factor is not based on a max-mixture model. Currently we\n    // only support max-mixture models for ``LooselyCoupledDetectionFactor``\n    // and ``TightlyCoupledDetectionFactor``\n    if (lcdf == nullptr && tcdf == nullptr) continue;\n\n    int index;\n    int cachedIndex;\n    double error;\n    const std::vector<Detection>* detections;\n    gtsam::Key robotPoseKey;\n    gtsam::Key objectPoseKey;\n    gtsam::Pose3 robotPose;\n    gtsam::Pose3 objectPose;\n\n    if (lcdf != nullptr) {\n      detections    = &lcdf->getDetections();\n      robotPoseKey  = lcdf->robotPoseKey();\n      objectPoseKey = lcdf->objectPoseKey();\n      cachedIndex   = lcdf->getCachedDetectionIndex();\n    } else {  // tcdf != nullptr\n      detections    = &tcdf->getDetections();\n      robotPoseKey  = tcdf->robotPoseKey();\n      objectPoseKey = tcdf->objectPoseKey();\n      cachedIndex   = tcdf->getCachedDetectionIndex();\n    }\n\n    robotPose  = gtsam::traits<gtsam::Pose3>::Retract(theta.at<gtsam::Pose3>(robotPoseKey),\n                                                      delta.at(robotPoseKey));\n    objectPose = gtsam::traits<gtsam::Pose3>::Retract(theta.at<gtsam::Pose3>(objectPoseKey),\n                                                      
delta.at(objectPoseKey));\n\n    std::tie(index, error) = getDetectionIndexAndError(robotPose.inverse() * objectPose, *detections);\n\n    if (index != cachedIndex) {\n      relinKeys.insert(robotPoseKey);\n      relinKeys.insert(objectPoseKey);\n      markedKeys->insert(robotPoseKey);\n      markedKeys->insert(objectPoseKey);\n    }\n  }\n\n  return relinKeys;\n}\n\ngtsam::ISAM2Result\nMaxMixtureISAM2::update(\n    const gtsam::NonlinearFactorGraph& newFactors,\n    const gtsam::Values& newTheta,\n    const gtsam::FactorIndices& removeFactorIndices,\n    const boost::optional<gtsam::FastMap<gtsam::Key, int> >& constrainedKeys,\n    const boost::optional<gtsam::FastList<gtsam::Key> >& noRelinKeys,\n    const boost::optional<gtsam::FastList<gtsam::Key> >& extraReelimKeys,\n    bool force_relinearize) {\n  gtsam::ISAM2UpdateParams params;\n  params.constrainedKeys     = constrainedKeys;\n  params.extraReelimKeys     = extraReelimKeys;\n  params.force_relinearize   = force_relinearize;\n  params.noRelinKeys         = noRelinKeys;\n  params.removeFactorIndices = removeFactorIndices;\n\n  return update(newFactors, newTheta, params);\n}\n\ngtsam::ISAM2Result\nMaxMixtureISAM2::update(const gtsam::NonlinearFactorGraph& newFactors,\n                        const gtsam::Values& newTheta,\n                        const gtsam::ISAM2UpdateParams& updateParams) {\n  gttic(ISAM2_update);\n  this->update_count_ += 1;\n  gtsam::UpdateImpl::LogStartingUpdate(newFactors, *this);\n  gtsam::ISAM2Result result(params_.enableDetailedResults);\n  gtsam::UpdateImpl update(params_, updateParams);\n\n  // Update delta if we need it to check relinearization later\n  if (update.relinarizationNeeded(update_count_))\n    updateDelta(updateParams.forceFullSolve);\n\n  // 1. 
Add any new factors \\Factors:=\\Factors\\cup\\Factors'.\n  update.pushBackFactors(newFactors, &nonlinearFactors_, &linearFactors_,\n                         &variableIndex_, &result.newFactorsIndices,\n                         &result.keysWithRemovedFactors);\n  update.computeUnusedKeys(newFactors, variableIndex_,\n                           result.keysWithRemovedFactors, &result.unusedKeys);\n\n  // 2. Initialize any new variables \\Theta_{new} and add\n  // \\Theta:=\\Theta\\cup\\Theta_{new}.\n  addVariables(newTheta, result.details());\n  if (params_.evaluateNonlinearError)\n    update.error(nonlinearFactors_, calculateEstimate(), &result.errorBefore);\n\n  // 3. Mark linear update\n  update.gatherInvolvedKeys(newFactors, nonlinearFactors_,\n                            result.keysWithRemovedFactors, &result.markedKeys);\n  update.updateKeys(result.markedKeys, &result);\n\n  gtsam::KeySet relinKeys;\n  result.variablesRelinearized = 0;\n  if (update.relinarizationNeeded(update_count_)) {\n    // 4. Mark keys in \\Delta above threshold \\beta:\n    relinKeys = update.gatherRelinearizeKeys(roots_, delta_, fixedVariables_,\n                                             &result.markedKeys);\n    // also mark keys whose max-mixture model is changed\n    relinKeys.merge(gatherMaxMixtureRelinearizationKeys(nonlinearFactors_,\n                                                        theta_,\n                                                        delta_,\n                                                        &result.markedKeys));\n\n    update.recordRelinearizeDetail(relinKeys, result.details());\n    if (!relinKeys.empty()) {\n      // 5. Mark cliques that involve marked variables \\Theta_{J} and ancestors.\n      update.findFluid(roots_, relinKeys, &result.markedKeys, result.details());\n      // 6. 
Update linearization point for marked variables:\n      // \\Theta_{J}:=\\Theta_{J}+\\Delta_{J}.\n      gtsam::UpdateImpl::ExpmapMasked(delta_, relinKeys, &theta_);\n    }\n    result.variablesRelinearized = result.markedKeys.size();\n  }\n\n  // 7. Linearize new factors\n  update.linearizeNewFactors(newFactors, theta_, nonlinearFactors_.size(),\n                             result.newFactorsIndices, &linearFactors_);\n  update.augmentVariableIndex(newFactors, result.newFactorsIndices,\n                              &variableIndex_);\n\n  // 8. Redo top of Bayes tree and update data structures\n  recalculate(updateParams, relinKeys, &result);\n  if (!result.unusedKeys.empty()) removeVariables(result.unusedKeys);\n  result.cliques = this->nodes().size();\n\n  if (params_.evaluateNonlinearError)\n    update.error(nonlinearFactors_, calculateEstimate(), &result.errorAfter);\n  return result;\n}\n"
  },
  {
    "path": "srv/detection.srv",
    "content": "sensor_msgs/PointCloud2 cloud\n---\njsk_recognition_msgs/BoundingBoxArray detections\n"
  },
  {
    "path": "srv/save_estimation_result.srv",
    "content": "---\nnav_msgs/Path robotTrajectory\nnav_msgs/Path[] objectTrajectories\nnav_msgs/Path[] objectVelocities\nnav_msgs/Path[] trackingObjectTrajectories\nnav_msgs/Path[] trackingObjectVelocities\nlio_segmot/ObjectStateArray[] trackingObjectStates\nlio_segmot/flags[] objectFlags\nlio_segmot/flags[] trackingObjectFlags"
  },
  {
    "path": "srv/save_map.srv",
    "content": "float32 resolution\nstring destination\n---\nbool success\n"
  }
]