[
  {
    "path": ".gitignore",
    "content": ""
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.8.3)\nproject(i2ekf_lo)\n\nSET(CMAKE_BUILD_TYPE \"Release\")\n\nADD_COMPILE_OPTIONS(-std=c++14 )\nADD_COMPILE_OPTIONS(-std=c++14 )\nset( CMAKE_CXX_FLAGS \"-std=c++14 -O3\" )\n\nadd_definitions(-DROOT_DIR=\\\"${CMAKE_CURRENT_SOURCE_DIR}/\\\")\n\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -fexceptions\" )\nset(CMAKE_CXX_STANDARD 14)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CXX_EXTENSIONS OFF)\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++14 -pthread -std=c++0x -std=c++14 -fexceptions\")\n\nmessage(\"Current CPU architecture: ${CMAKE_SYSTEM_PROCESSOR}\")\nif(CMAKE_SYSTEM_PROCESSOR MATCHES \"(x86)|(X86)|(amd64)|(AMD64)\" )\n  include(ProcessorCount)\n  ProcessorCount(N)\n  message(\"Processer number:  ${N}\")\n  if(N GREATER 4)\n    add_definitions(-DMP_EN)\n    add_definitions(-DMP_PROC_NUM=3)\n    message(\"core for MP: 3\")\n  elseif(N GREATER 3)\n    add_definitions(-DMP_EN)\n    add_definitions(-DMP_PROC_NUM=2)\n    message(\"core for MP: 2\")\n  else()\n    add_definitions(-DMP_PROC_NUM=1)\n  endif()\nelse()\n  add_definitions(-DMP_PROC_NUM=1)\nendif()\n\nadd_subdirectory(Thirdparty/Sophus)\nadd_subdirectory(Thirdparty/tessil-src)\n\nfind_package(OpenMP QUIET)\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}\")\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS}   ${OpenMP_C_FLAGS}\")\n\nfind_package(PythonLibs REQUIRED)\nfind_path(MATPLOTLIB_CPP_INCLUDE_DIRS \"matplotlibcpp.h\")\n\nfind_package(catkin REQUIRED COMPONENTS\n  geometry_msgs\n  nav_msgs\n  sensor_msgs\n  roscpp\n  rospy\n  std_msgs\n  pcl_ros\n  tf\n  livox_ros_driver\n  message_generation\n  eigen_conversions\n)\n\nfind_package(Eigen3 REQUIRED)\nfind_package(PCL 1.8 REQUIRED)\nfind_package(Ceres REQUIRED)\n\nmessage(Eigen: ${EIGEN3_INCLUDE_DIR})\n\ninclude_directories(\n     ../../devel/include\n\t${catkin_INCLUDE_DIRS} \n  ${EIGEN3_INCLUDE_DIR}\n  ${PCL_INCLUDE_DIRS}\n  ${PYTHON_INCLUDE_DIRS}\n  include)\n\nadd_message_files(\n  FILES\n  
Pose6D.msg\n  States.msg\n)\n\ngenerate_messages(\n DEPENDENCIES\n geometry_msgs\n)\n\ncatkin_package(\n  CATKIN_DEPENDS geometry_msgs nav_msgs roscpp rospy std_msgs message_runtime\n  DEPENDS EIGEN3 PCL\n  INCLUDE_DIRS\n)\n\nadd_executable(i2ekf_lo_mapping\n        src/laserMapping.cpp\n        include/ikd-Tree/ikd_Tree.cpp\n        src/preprocess.cpp)\n\nadd_dependencies(i2ekf_lo_mapping ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\n\ntarget_link_libraries(i2ekf_lo_mapping ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${PYTHON_LIBRARIES} ${CERES_LIBRARIES} Sophus::Sophus tsl::robin_map)\ntarget_include_directories(i2ekf_lo_mapping PRIVATE ${PYTHON_INCLUDE_DIRS})\n\n\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users.  This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it.  (Some other Free Software Foundation software is covered by\nthe GNU Lesser General Public License instead.)  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n  To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have.  You must make sure that they, too, receive or can get the\nsource code.  
And you must show them these terms so they know their\nrights.\n\n  We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n  Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware.  If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n  Finally, any free program is threatened constantly by software\npatents.  We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary.  To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                    GNU GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License.  The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage.  (Hereinafter, translation is included without limitation in\nthe term \"modification\".)  Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  
The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n  1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n  2. You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) You must cause the modified files to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    b) You must cause any work that you distribute or publish, that in\n    whole or in part contains or is derived from the Program or any\n    part thereof, to be licensed as a whole at no charge to all third\n    parties under the terms of this License.\n\n    c) If the modified program normally reads commands interactively\n    when run, you must cause it, when started running for such\n    interactive use in the most ordinary way, to print or display an\n    announcement including an appropriate copyright notice and a\n    notice that there is no warranty (or else, saying that you provide\n    a warranty) and that users may redistribute the program under\n    these conditions, and telling the user how to view a copy of this\n  
  License.  (Exception: if the Program itself is interactive but\n    does not normally print such an announcement, your work based on\n    the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. 
You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n    a) Accompany it with the complete corresponding machine-readable\n    source code, which must be distributed under the terms of Sections\n    1 and 2 above on a medium customarily used for software interchange; or,\n\n    b) Accompany it with a written offer, valid for at least three\n    years, to give any third party, for a charge no more than your\n    cost of physically performing source distribution, a complete\n    machine-readable copy of the corresponding source code, to be\n    distributed under the terms of Sections 1 and 2 above on a medium\n    customarily used for software interchange; or,\n\n    c) Accompany it with the information you received as to the offer\n    to distribute corresponding source code.  (This alternative is\n    allowed only for noncommercial distribution and only if you\n    received the program in object code or executable form with such\n    an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it.  For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable.  
However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License.  Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n  5. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n  6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions.  
You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n  7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all.  For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  8. 
If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded.  In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n  9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation.  If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n  10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission.  For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this.  Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n                            NO WARRANTY\n\n  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  
EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software; you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation; either version 2 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License along\n    with this program; if not, write to the Free Software Foundation, Inc.,\n    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n    Gnomovision version 69, Copyright (C) year name of author\n    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary.  
Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n  `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n  <signature of Ty Coon>, 1 April 1989\n  Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs.  If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n    <h1>I2EKF-LO</h1>\n    <i>A Dual-Iteration Extended Kalman Filter Based  LiDAR Odometry</i>\n    <br>\n    <strong>IROS 2024 Oral</strong>\n    <br>\n    <img src=\"image/overview.png\" width=\"80%\" height=\"auto\" alt=\"Pipeline Image\">\n<br>\n</div>\n\n### Introduction\nLiDAR odometry is a pivotal technology in the fields of autonomous driving and autonomous mobile robotics. However, most of the current works focuse on nonlinear optimization methods, and there are still many challenges in using the traditional Iterative Extended Kalman Filter (IEKF) framework to tackle the problem: IEKF only iterates over the observation equation, relying on a rough estimate of the initial state, which is insufficient to fully eliminate motion distortion in the input point cloud; the system process noise is difficult to be determined during state estimation of the complex motions; and the varying motion models across different sensor carriers. To address these issues, we propose the Dual-Iteration Extended Kalman Filter (I2EKF) and the LiDAR odometry based on I2EKF (I2EKF-LO). This approach not only iterates over the observation equation but also leverages state updates to iteratively mitigate motion distortion in LiDAR point clouds. Moreover, it dynamically adjusts process noise based on the confidence level of prior predictions during state estimation and establishes motion models for different sensor carriers to achieve accurate and efficient state estimation. 
Comprehensive experiments demonstrate that I2EKF-LO achieves outstanding levels of accuracy and computational efficiency in the realm of LiDAR odometry.\n\n**Developers**: The codes of this repo are contributed by [Wenlu Yu (于文录)](https://github.com/YWL0720), [Jie Xu (徐杰)](https://github.com/jiejie567), [Chengwei Zhao (赵成伟)](https://github.com/chengwei0427)\n\n### News\n* **[30/06/2024]**: I2EKF-LO is accepted to IROS 2024.\n* **[01/07/2024]**: We are currently working on organizing and refining the complete code. The full version will be released soon.\n* **[02/07/2024]**: Updated the video link and submitted the paper to arxiv.\n\n### Related Paper\n\nRelated paper available on arxiv: [I2EKF-LO: A Dual-Iteration Extended Kalman Filter Based LiDAR Odometry](https://arxiv.org/abs/2407.02190) \n\n### Related Video:\n\n<div align=\"center\">\n    <a href=\"https://youtu.be/jaNavKRnpiE\" target=\"_blank\"/>\n    <img src=\"image/video_cover.png\" width=70% />\n</div>\n\n## 1. Prerequisites\n\n### 1.1 **Ubuntu** and **ROS**\n\nUbuntu >= 18.04.\n\nROS    >= Melodic. [ROS Installation](http://wiki.ros.org/ROS/Installation)\n\n### 1.2. **PCL && Eigen**\n\nPCL    >= 1.8,   Follow [PCL Installation](http://www.pointclouds.org/downloads/linux.html).\n\nEigen  >= 3.3.4, Follow [Eigen Installation](http://eigen.tuxfamily.org/index.php?title=Main_Page).\n\n### 1.3. **livox_ros_driver**\n\nFollow [livox_ros_driver Installation](https://github.com/Livox-SDK/livox_ros_driver).\n\n\n## 2. Build\n\nClone the repository and catkin_make:\n\n```\ncd ~/catkin_ws/src\ngit clone https://github.com/YWL0720/I2EKF-LO\ncd ..\ncatkin_make -j\nsource devel/setup.bash\n```\n\n## 3. Directly run\n\n```bash\ncd ~/$I2EKF_LO_ROS_DIR$\nsource devel/setup.bash\nroslaunch i2ekf_lo xxx.launch\n```\n## 4. Rosbag Example\nDownload our test bags here: [HIT-TIB Datasets](https://drive.google.com/drive/folders/1L5cX9uyAiZei17oq7ELyd8WyIRReHq8u?usp=drive_link).\n\n## 5. 
Acknowledgments\n\nThanks for [Fast-LIO2](https://github.com/hku-mars/FAST_LIO) (Fast Direct LiDAR-inertial Odometry) and [LI-INIT](https://github.com/hku-mars/LiDAR_IMU_Init)(Robust Real-time LiDAR-inertial Initialization).\n\n## 6. License\n\nThe source code is released under [GPLv2](http://www.gnu.org/licenses/) license.\n\n## 7. TODO(s)\n- [ ] Updated video link\n- [ ] Upload a preprint of our paper\n"
  },
  {
    "path": "Thirdparty/Sophus/.clang-format",
    "content": "Language:        Cpp\n# BasedOnStyle:  Google\nAccessModifierOffset: -1\nAlignAfterOpenBracket: Align\nAlignConsecutiveAssignments: false\nAlignConsecutiveDeclarations: false\nAlignEscapedNewlinesLeft: true\nAlignOperands:   true\nAlignTrailingComments: true\nAllowAllParametersOfDeclarationOnNextLine: true\nAllowShortBlocksOnASingleLine: false\nAllowShortCaseLabelsOnASingleLine: false\nAllowShortFunctionsOnASingleLine: All\nAllowShortIfStatementsOnASingleLine: true\nAllowShortLoopsOnASingleLine: true\nAlwaysBreakAfterDefinitionReturnType: None\nAlwaysBreakAfterReturnType: None\nAlwaysBreakBeforeMultilineStrings: true\nAlwaysBreakTemplateDeclarations: true\nBinPackArguments: true\nBinPackParameters: true\nBraceWrapping:\n  AfterClass:      false\n  AfterControlStatement: false\n  AfterEnum:       false\n  AfterFunction:   false\n  AfterNamespace:  false\n  AfterObjCDeclaration: false\n  AfterStruct:     false\n  AfterUnion:      false\n  BeforeCatch:     false\n  BeforeElse:      false\n  IndentBraces:    false\nBreakBeforeBinaryOperators: None\nBreakBeforeBraces: Attach\nBreakBeforeTernaryOperators: true\nBreakConstructorInitializersBeforeComma: false\nColumnLimit:     80\nCommentPragmas:  '^ IWYU pragma:'\nConstructorInitializerAllOnOneLineOrOnePerLine: true\nConstructorInitializerIndentWidth: 4\nContinuationIndentWidth: 4\nCpp11BracedListStyle: true\nDerivePointerAlignment: true\nDisableFormat:   false\nExperimentalAutoDetectBinPacking: false\nForEachMacros:   [ foreach, Q_FOREACH, BOOST_FOREACH ]\nIncludeCategories:\n  - Regex:           '^<.*\\.h>'\n    Priority:        1\n  - Regex:           '^<.*'\n    Priority:        2\n  - Regex:           '.*'\n    Priority:        3\nIndentCaseLabels: true\nIndentWidth:     2\nIndentWrappedFunctionNames: false\nKeepEmptyLinesAtTheStartOfBlocks: false\nMacroBlockBegin: ''\nMacroBlockEnd:   ''\nMaxEmptyLinesToKeep: 1\nNamespaceIndentation: None\nObjCBlockIndentWidth: 2\nObjCSpaceAfterProperty: 
false\nObjCSpaceBeforeProtocolList: false\nPenaltyBreakBeforeFirstCallParameter: 1\nPenaltyBreakComment: 300\nPenaltyBreakFirstLessLess: 120\nPenaltyBreakString: 1000\nPenaltyExcessCharacter: 1000000\nPenaltyReturnTypeOnItsOwnLine: 200\nPointerAlignment: Left\nReflowComments:  true\nSortIncludes:    true\nSpaceAfterCStyleCast: false\nSpaceBeforeAssignmentOperators: true\nSpaceBeforeParens: ControlStatements\nSpaceInEmptyParentheses: false\nSpacesBeforeTrailingComments: 2\nSpacesInAngles:  false\nSpacesInContainerLiterals: true\nSpacesInCStyleCastParentheses: false\nSpacesInParentheses: false\nSpacesInSquareBrackets: false\nStandard:        Auto\nTabWidth:        8\nUseTab:          Never\n"
  },
  {
    "path": "Thirdparty/Sophus/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.4)\nproject(Sophus VERSION 1.22.10)\n\ninclude(CMakePackageConfigHelpers)\ninclude(GNUInstallDirs)\n\n# Determine if sophus is built as a subproject (using add_subdirectory)\n# or if it is the master project.\nif (NOT DEFINED SOPHUS_MASTER_PROJECT)\n  set(SOPHUS_MASTER_PROJECT OFF)\n  if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)\n    set(SOPHUS_MASTER_PROJECT ON)\n    message(STATUS \"CMake version: ${CMAKE_VERSION}\")\n  endif ()\nendif ()\n\noption(SOPHUS_INSTALL \"Generate the install target.\" ${SOPHUS_MASTER_PROJECT})\noption(SOPHUS_USE_BASIC_LOGGING \"Use basic logging (in ensure and test macros)\" OFF)\n\nif(SOPHUS_MASTER_PROJECT)\n  # Release by default\n  # Turn on Debug with \"-DCMAKE_BUILD_TYPE=Debug\"\n  if(NOT CMAKE_BUILD_TYPE)\n    set(CMAKE_BUILD_TYPE Release)\n  endif()\n\n  set(CMAKE_CXX_STANDARD 14)\n\n  # Set compiler specific settings (FixMe: Should not cmake do this for us automatically?)\n  IF(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"Clang\")\n    SET(CMAKE_CXX_FLAGS_DEBUG  \"-O0 -g\")\n    SET(CMAKE_CXX_FLAGS_RELEASE \"-O3\")\n    SET(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -Werror -Wextra  -Wno-deprecated-register -Qunused-arguments -fcolor-diagnostics\")\n  ELSEIF(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\")\n    SET(CMAKE_CXX_FLAGS_DEBUG  \"-O0 -g\")\n    SET(CMAKE_CXX_FLAGS_RELEASE \"-O3\")\n    SET(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -Werror -Wextra -std=c++14 -Wno-deprecated-declarations -ftemplate-backtrace-limit=0\")\n    SET(CMAKE_CXX_FLAGS_COVERAGE \"${CMAKE_CXX_FLAGS_DEBUG} --coverage -fno-inline -fno-inline-small-functions -fno-default-inline\")\n    SET(CMAKE_EXE_LINKER_FLAGS_COVERAGE \"${CMAKE_EXE_LINKER_FLAGS_DEBUG} --coverage\")\n    SET(CMAKE_SHARED_LINKER_FLAGS_COVERAGE \"${CMAKE_SHARED_LINKER_FLAGS_DEBUG} --coverage\")\n  ELSEIF(CMAKE_CXX_COMPILER_ID MATCHES \"^MSVC$\")\n    ADD_DEFINITIONS(\"-D _USE_MATH_DEFINES /bigobj /wd4305 /wd4244 /MP\")\n  
ENDIF()\n\n  # Add local path for finding packages, set the local version first\n  list(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules\")\nendif()\n\nif(SOPHUS_USE_BASIC_LOGGING)\n  set (CMAKE_DISABLE_FIND_PACKAGE_fmt ON)\nendif()\n\n# Find public dependencies if targets are not yet defined. (Targets might be for example\n# defined by a parent project including Sophus via `add_subdirectory`.)\n\nif(NOT TARGET Eigen3::Eigen)\n  find_package(Eigen3 3.3.0 REQUIRED)\nendif()\n\nif(NOT TARGET fmt::fmt)\n  find_package(fmt)\nendif()\n\n\n# Define interface library target\nadd_library(sophus INTERFACE)\nadd_library (Sophus::Sophus ALIAS sophus)\n\nset(SOPHUS_HEADER_FILES\n  sophus/average.hpp\n  sophus/cartesian.hpp\n  sophus/ceres_local_parameterization.hpp\n  sophus/ceres_manifold.hpp\n  sophus/ceres_typetraits.hpp\n  sophus/common.hpp\n  sophus/geometry.hpp\n  sophus/interpolate.hpp\n  sophus/interpolate_details.hpp\n  sophus/num_diff.hpp\n  sophus/rotation_matrix.hpp\n  sophus/rxso2.hpp\n  sophus/rxso3.hpp\n  sophus/se2.hpp\n  sophus/se3.hpp\n  sophus/sim2.hpp\n  sophus/sim3.hpp\n  sophus/sim_details.hpp\n  sophus/so2.hpp\n  sophus/so3.hpp\n  sophus/spline.hpp\n  sophus/types.hpp\n  sophus/velocities.hpp\n)\n\nset(SOPHUS_OTHER_FILES\n  sophus/test_macros.hpp\n  sophus/example_ensure_handler.cpp\n)\n\nif(MSVC)\n  # Define common math constants if we compile with MSVC\n  target_compile_definitions (sophus INTERFACE _USE_MATH_DEFINES)\nendif (MSVC)\n\n# Add Eigen interface dependency, depending on available cmake info\nif(TARGET Eigen3::Eigen)\n  target_link_libraries(sophus INTERFACE Eigen3::Eigen)\n  set(Eigen3_DEPENDENCY \"find_dependency (Eigen3 ${Eigen3_VERSION})\")\nelse()\n  target_include_directories (sophus SYSTEM INTERFACE ${EIGEN3_INCLUDE_DIR})\nendif()\n\nif(SOPHUS_USE_BASIC_LOGGING OR NOT TARGET fmt::fmt)\n  # NOTE fmt_FOUND does not seem to be defined even though the package config\n  # was found.\n  target_compile_definitions(sophus 
INTERFACE SOPHUS_USE_BASIC_LOGGING=1)\n  message(STATUS \"Turning basic logging ON\")\nelse()\n  target_link_libraries(sophus INTERFACE fmt::fmt)\n  set(fmt_DEPENDENCY \"find_dependency (fmt ${fmt_VERSION})\")\n  message(STATUS \"Turning basic logging OFF\")\nendif()\n\n# Associate target with include directory\ntarget_include_directories(sophus INTERFACE\n    \"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>\"\n    \"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>\"\n)\n\n# Declare all used C++14 features\ntarget_compile_features (sophus INTERFACE\n  cxx_auto_type\n  cxx_decltype\n  cxx_nullptr\n  cxx_right_angle_brackets\n  cxx_variadic_macros\n  cxx_variadic_templates\n)\n\n# Add sources as custom target so that they are shown in IDE's\nadd_custom_target(other SOURCES ${SOPHUS_OTHER_FILES} ${SOPHUS_HEADER_FILES})\n\n# Create 'test' make target using ctest\noption(BUILD_SOPHUS_TESTS \"Build tests.\" ON)\nif(BUILD_SOPHUS_TESTS)\n    enable_testing()\n    add_subdirectory(test)\nendif()\n\n# Create examples make targets using ctest\noption(BUILD_SOPHUS_EXAMPLES \"Build examples.\" ON)\nif(BUILD_SOPHUS_EXAMPLES)\n    add_subdirectory(examples)\nendif()\n\n# Build python sophus bindings\noption(BUILD_PYTHON_BINDINGS \"Build python sophus bindings.\" OFF)\nif(BUILD_PYTHON_BINDINGS)\n  include(FetchContent)\n  FetchContent_Declare(\n      pybind11\n      GIT_REPOSITORY https://github.com/pybind/pybind11.git\n      GIT_TAG master\n  )\n  FetchContent_MakeAvailable(pybind11)\n\n  add_subdirectory(${pybind11_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/pybind)\n  pybind11_add_module(sophus_pybind ${CMAKE_CURRENT_SOURCE_DIR}/sophus_pybind/bindings.cpp)\n  target_link_libraries(sophus_pybind PUBLIC sophus)\nendif(BUILD_PYTHON_BINDINGS)\n\nif(SOPHUS_INSTALL)\n  # Export package for use from the build tree\n  set(SOPHUS_CMAKE_EXPORT_DIR ${CMAKE_INSTALL_DATADIR}/sophus/cmake)\n\n  set_target_properties(sophus PROPERTIES EXPORT_NAME Sophus)\n\n  install(TARGETS sophus EXPORT 
SophusTargets)\n  install(EXPORT SophusTargets\n    NAMESPACE Sophus::\n    DESTINATION ${SOPHUS_CMAKE_EXPORT_DIR}\n  )\n\n  export(TARGETS sophus NAMESPACE Sophus:: FILE SophusTargets.cmake)\n  export(PACKAGE Sophus)\n\n  configure_package_config_file(\n    SophusConfig.cmake.in\n    ${CMAKE_CURRENT_BINARY_DIR}/SophusConfig.cmake\n    INSTALL_DESTINATION ${SOPHUS_CMAKE_EXPORT_DIR}\n    NO_CHECK_REQUIRED_COMPONENTS_MACRO\n  )\n\n  # Remove architecture dependence. Sophus is a header-only library.\n  set(TEMP_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})\n  unset(CMAKE_SIZEOF_VOID_P)\n\n  # Write version to file\n  write_basic_package_version_file (\n    SophusConfigVersion.cmake\n    VERSION ${PROJECT_VERSION}\n    COMPATIBILITY SameMajorVersion\n  )\n\n  # Recover architecture dependence\n  set(CMAKE_SIZEOF_VOID_P ${TEMP_SIZEOF_VOID_P})\n\n  # Install cmake targets\n  install(\n    FILES ${CMAKE_CURRENT_BINARY_DIR}/SophusConfig.cmake\n          ${CMAKE_CURRENT_BINARY_DIR}/SophusConfigVersion.cmake\n    DESTINATION ${SOPHUS_CMAKE_EXPORT_DIR}\n  )\n\n  # Install header files\n  install(\n    FILES ${SOPHUS_HEADER_FILES}\n    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/sophus\n  )\nendif()\n"
  },
  {
    "path": "Thirdparty/Sophus/LICENSE.txt",
    "content": "Copyright 2011-2017 Hauke Strasdat           \n          2012-2017 Steven Lovegrove\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights  to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE."
  },
  {
    "path": "Thirdparty/Sophus/README.rst",
    "content": "|GithubCICpp|_ windows: |AppVeyor|_ |GithubCISympy|_ |ci_cov|_\n\n\nSophus\n======\n\nOverview\n--------\n\nThis is a c++ implementation of Lie groups commonly used for 2d and 3d\ngeometric problems (i.e. for Computer Vision or Robotics applications).\nAmong others, this package includes the special orthogonal groups SO(2) and\nSO(3) to represent rotations in 2d and 3d as well as the special Euclidean group\nSE(2) and SE(3) to represent rigid body transformations (i.e. rotations and\ntranslations) in 2d and 3d.\n\nAPI documentation: https://strasdat.github.io/Sophus/\n\nCross platform support\n----------------------\n\nSophus compiles with clang and gcc on Linux and OS X as well as msvc on Windows.\nThe specific compiler and operating system versions which are supported are\nthe ones which are used in the Continuous Integration (CI): See GitHubCI_ and\nAppVeyor_ for details.\n\nHowever, it should work (with no or minor modification) on many other\nmodern configurations as long as they support c++14, CMake, Eigen 3.3.X and\n(optionally) fmt. The fmt dependency can be eliminated by passing\n\"-DUSE_BASIC_LOGGING=ON\" to cmake when configuring Sophus.\n\n.. _GitHubCI: https://github.com/strasdat/Sophus/actions\n\n.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/um4285lwhs8ci7pt/branch/master?svg=true\n.. _AppVeyor: https://ci.appveyor.com/project/strasdat/sophus/branch/master\n\n.. |ci_cov| image:: https://coveralls.io/repos/github/strasdat/Sophus/badge.svg?branch=master\n.. _ci_cov: https://coveralls.io/github/strasdat/Sophus?branch=master\n\n.. |GithubCICpp| image:: https://github.com/strasdat/Sophus/actions/workflows/main.yml/badge.svg?branch=master\n.. _GithubCICpp: https://github.com/strasdat/Sophus/actions/workflows/main.yml?query=branch%3Amaster\n\n.. |GithubCISympy| image:: https://github.com/strasdat/Sophus/actions/workflows/sympy.yml/badge.svg?branch=master\n.. 
_GithubCISympy: https://github.com/strasdat/Sophus/actions/workflows/sympy.yml?query=branch%3Amaster\n"
  },
  {
    "path": "Thirdparty/Sophus/Sophus.code-workspace",
    "content": "{\n\t\"folders\": [\n\t\t{\n\t\t\t\"path\": \".\"\n\t\t}\n\t],\n\t\"settings\": {}\n}"
  },
  {
    "path": "Thirdparty/Sophus/SophusConfig.cmake.in",
    "content": "@PACKAGE_INIT@\n\ninclude (CMakeFindDependencyMacro)\n\n@Eigen3_DEPENDENCY@\n@fmt_DEPENDENCY@\n\ninclude (\"${CMAKE_CURRENT_LIST_DIR}/SophusTargets.cmake\")\n"
  },
  {
    "path": "Thirdparty/Sophus/appveyor.yml",
    "content": "branches:\n  only:\n    - master\n\nos: Visual Studio 2015\n\nclone_folder: c:\\projects\\sophus\n\nplatform: x64\nconfiguration: Debug\n\nbuild:\n  project: c:\\projects\\sophus\\build\\Sophus.sln\n\ninstall:\n  - ps: wget https://gitlab.com/libeigen/eigen/-/archive/3.3.4/eigen-3.3.4.zip -outfile eigen3.zip\n  - cmd: 7z x eigen3.zip -o\"C:\\projects\" -y > nul\n  - git clone https://github.com/fmtlib/fmt.git\n  - cd fmt\n  - git checkout 5.3.0\n  - mkdir build\n  - cd build\n  - cmake -G \"Visual Studio 14 2015 Win64\" -DCMAKE_BUILD_TYPE=Debug ..\n  - cmake --build .\n  - cmake --build . --target install\n  - cd ../..\n\nbefore_build:\n  - cd c:\\projects\\sophus\n  - mkdir build\n  - cd build\n  - cmake -G \"Visual Studio 14 2015 Win64\" -DCMAKE_BUILD_TYPE=Debug -D EIGEN3_INCLUDE_DIR=C:\\projects\\eigen-3.3.4 ..\n\nafter_build:\n  - ctest --output-on-failure\n"
  },
  {
    "path": "Thirdparty/Sophus/cmake_modules/FindEigen3.cmake",
    "content": "# - Try to find Eigen3 lib\n#\n# This module supports requiring a minimum version, e.g. you can do\n#   find_package(Eigen3 3.1.2)\n# to require version 3.1.2 or newer of Eigen3.\n#\n# Once done this will define\n#\n#  EIGEN3_FOUND - system has eigen lib with correct version\n#  EIGEN3_INCLUDE_DIR - the eigen include directory\n#  EIGEN3_VERSION - eigen version\n#\n# and the following imported target:\n#\n#  Eigen3::Eigen - The header-only Eigen library\n#\n# This module reads hints about search locations from \n# the following environment variables:\n#\n# EIGEN3_ROOT\n# EIGEN3_ROOT_DIR\n\n# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>\n# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>\n# Copyright (c) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n# Redistribution and use is allowed according to the terms of the 2-clause BSD license.\n\nif(NOT Eigen3_FIND_VERSION)\n  if(NOT Eigen3_FIND_VERSION_MAJOR)\n    set(Eigen3_FIND_VERSION_MAJOR 2)\n  endif()\n  if(NOT Eigen3_FIND_VERSION_MINOR)\n    set(Eigen3_FIND_VERSION_MINOR 91)\n  endif()\n  if(NOT Eigen3_FIND_VERSION_PATCH)\n    set(Eigen3_FIND_VERSION_PATCH 0)\n  endif()\n\n  set(Eigen3_FIND_VERSION \"${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}\")\nendif()\n\nmacro(_eigen3_check_version)\n  file(READ \"${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h\" _eigen3_version_header)\n\n  string(REGEX MATCH \"define[ \\t]+EIGEN_WORLD_VERSION[ \\t]+([0-9]+)\" _eigen3_world_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_WORLD_VERSION \"${CMAKE_MATCH_1}\")\n  string(REGEX MATCH \"define[ \\t]+EIGEN_MAJOR_VERSION[ \\t]+([0-9]+)\" _eigen3_major_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_MAJOR_VERSION \"${CMAKE_MATCH_1}\")\n  string(REGEX MATCH \"define[ \\t]+EIGEN_MINOR_VERSION[ \\t]+([0-9]+)\" _eigen3_minor_version_match \"${_eigen3_version_header}\")\n  set(EIGEN3_MINOR_VERSION \"${CMAKE_MATCH_1}\")\n\n  
set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})\n  if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})\n    set(EIGEN3_VERSION_OK FALSE)\n  else()\n    set(EIGEN3_VERSION_OK TRUE)\n  endif()\n\n  if(NOT EIGEN3_VERSION_OK)\n\n    message(STATUS \"Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, \"\n                   \"but at least version ${Eigen3_FIND_VERSION} is required\")\n  endif()\nendmacro()\n\nif (EIGEN3_INCLUDE_DIR)\n\n  # in cache already\n  _eigen3_check_version()\n  set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})\n  set(Eigen3_FOUND ${EIGEN3_VERSION_OK})\n\nelse ()\n  \n  # search first if an Eigen3Config.cmake is available in the system,\n  # if successful this would set EIGEN3_INCLUDE_DIR and the rest of\n  # the script will work as usual\n  find_package(Eigen3 ${Eigen3_FIND_VERSION} NO_MODULE QUIET)\n\n  if(NOT EIGEN3_INCLUDE_DIR)\n    find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library\n        HINTS\n        ENV EIGEN3_ROOT \n        ENV EIGEN3_ROOT_DIR\n        PATHS\n        ${CMAKE_INSTALL_PREFIX}/include\n        ${KDE4_INCLUDE_DIR}\n        PATH_SUFFIXES eigen3 eigen\n      )\n  endif()\n\n  if(EIGEN3_INCLUDE_DIR)\n    _eigen3_check_version()\n  endif()\n\n  include(FindPackageHandleStandardArgs)\n  find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK)\n\n  mark_as_advanced(EIGEN3_INCLUDE_DIR)\n\nendif()\n\nif(EIGEN3_FOUND AND NOT TARGET Eigen3::Eigen)\n  add_library(Eigen3::Eigen INTERFACE IMPORTED)\n  set_target_properties(Eigen3::Eigen PROPERTIES\n    INTERFACE_INCLUDE_DIRECTORIES \"${EIGEN3_INCLUDE_DIR}\")\nendif()\n"
  },
  {
    "path": "Thirdparty/Sophus/doxyfile",
    "content": "DOXYFILE_ENCODING      = UTF-8\nPROJECT_NAME           = \"Sophus\"\nINPUT                  = sophus\nEXTRACT_ALL            = YES\nENABLE_PREPROCESSING   = YES\nMACRO_EXPANSION        = YES\nWARN_AS_ERROR          = YES\nEXPAND_ONLY_PREDEF     = NO\nSKIP_FUNCTION_MACROS   = NO\nAUTOLINK_SUPPORT       = YES\nMULTILINE_CPP_IS_BRIEF = YES\nMARKDOWN_SUPPORT       = YES\nINLINE_INHERITED_MEMB  = NO\nEXCLUDE_SYMBOLS        = Eigen::internal Sophus::details Sophus::interp_details Sophus::experimental\nGENERATE_LATEX         = NO\nSTRIP_CODE_COMMENTS    = NO\n\nGENERATE_XML = YES\nGENERATE_HTML = NO\nXML_OUTPUT = xml-dir\nXML_PROGRAMLISTING = NO\nCASE_SENSE_NAMES = NO\nHIDE_UNDOC_RELATIONS = YES\nEXTRACT_ALL = YES"
  },
  {
    "path": "Thirdparty/Sophus/doxyrest-config.lua",
    "content": "FRAME_DIR_LIST = { \"doxyrest_b/doxyrest/frame/cfamily\", \"doxyrest_b/doxyrest/frame/common\" }\nFRAME_FILE = \"index.rst.in\"\nINPUT_FILE = \"xml-dir/index.xml\"\nOUTPUT_FILE = \"rst-dir/index.rst\"\nINTRO_FILE = \"page_index.rst\"\nSORT_GROUPS_BY = \"title\"\nGLOBAL_AUX_COMPOUND_ID = \"group_global\"\nLANGUAGE = cpp\nVERBATIM_TO_CODE_BLOCK = \"cpp\"\nESCAPE_ASTERISKS = true\nESCAPE_PIPES = true\nESCAPE_TRAILING_UNDERSCORES = true\nPROTECTION_FILTER = \"protected\"\nEXCLUDE_EMPTY_DEFINES = true\nEXCLUDE_DEFAULT_CONSTRUCTORS = false\nEXCLUDE_DESTRUCTORS = false\nEXCLUDE_PRIMITIVE_TYPEDEFS = true\nSHOW_DIRECT_DESCENDANTS = true\nTYPEDEF_TO_USING = true\nML_PARAM_LIST_LENGTH_THRESHOLD = 80"
  },
  {
    "path": "Thirdparty/Sophus/examples/CMakeLists.txt",
    "content": "# Tests to run\nSET( EXAMPLE_SOURCES HelloSO3)\n\nFOREACH(example_src ${EXAMPLE_SOURCES})\n  ADD_EXECUTABLE( ${example_src} ${example_src}.cpp)\n  TARGET_LINK_LIBRARIES( ${example_src} sophus)\nENDFOREACH(example_src)\n"
  },
  {
    "path": "Thirdparty/Sophus/examples/HelloSO3.cpp",
    "content": "#include <iostream>\n#include \"sophus/geometry.hpp\"\n\nint main() {\n  // The following demonstrates the group multiplication of rotation matrices\n\n  // Create rotation matrices from rotations around the x and y and z axes:\n  const double kPi = Sophus::Constants<double>::pi();\n  Sophus::SO3d R1 = Sophus::SO3d::rotX(kPi / 4);\n  Sophus::SO3d R2 = Sophus::SO3d::rotY(kPi / 6);\n  Sophus::SO3d R3 = Sophus::SO3d::rotZ(-kPi / 3);\n\n  std::cout << \"The rotation matrices are\" << std::endl;\n  std::cout << \"R1:\\n\" << R1.matrix() << std::endl;\n  std::cout << \"R2:\\n\" << R2.matrix() << std::endl;\n  std::cout << \"R3:\\n\" << R3.matrix() << std::endl;\n  std::cout << \"Their product R1*R2*R3:\\n\"\n            << (R1 * R2 * R3).matrix() << std::endl;\n  std::cout << std::endl;\n\n  // Rotation matrices can act on vectors\n  Eigen::Vector3d x;\n  x << 0.0, 0.0, 1.0;\n  std::cout << \"Rotation matrices can act on vectors\" << std::endl;\n  std::cout << \"x\\n\" << x << std::endl;\n  std::cout << \"R2*x\\n\" << R2 * x << std::endl;\n  std::cout << \"R1*(R2*x)\\n\" << R1 * (R2 * x) << std::endl;\n  std::cout << \"(R1*R2)*x\\n\" << (R1 * R2) * x << std::endl;\n  std::cout << std::endl;\n\n  // SO(3) are internally represented as unit quaternions.\n  std::cout << \"R1 in matrix form:\\n\" << R1.matrix() << std::endl;\n  std::cout << \"R1 in unit quaternion form:\\n\"\n            << R1.unit_quaternion().coeffs() << std::endl;\n  // Note that the order of coefficients of Eigen's quaternion class is\n  // (imag0, imag1, imag2, real)\n  std::cout << std::endl;\n}"
  },
  {
    "path": "Thirdparty/Sophus/generate_stubs.py",
    "content": "import subprocess\n\nsubprocess.run(\n    \"pybind11-stubgen sophus_pybind -o sophus_pybind-stubs/\",\n    shell=True,\n    check=True,\n)\n\nsubprocess.run(\"touch sophus_pybind-stubs/py.typed\", shell=True, check=True)\n"
  },
  {
    "path": "Thirdparty/Sophus/make_docs.sh",
    "content": "doxygen doxyfile\ndoxyrest_b/build/doxyrest/bin/Release/doxyrest -c doxyrest-config.lua\nsphinx-build -b html rst-dir html-dir"
  },
  {
    "path": "Thirdparty/Sophus/package.xml",
    "content": "<?xml version=\"1.0\"?>\n<package format=\"2\">\n  <name>sophus</name>\n  <version>1.22.10</version>\n  <description>\n   C++ implementation of Lie Groups using Eigen.\n  </description>\n  <url type=\"repository\">https://github.com/strasdat/sophus</url>\n  <url type=\"bugtracker\">https://github.com/strasdat/sophus/issues</url>\n  <maintainer email=\"d.stonier@gmail.com\">Daniel Stonier</maintainer>\n  <author>Hauke Strasdat</author>\n  <license>MIT</license>\n\n  <buildtool_depend>cmake</buildtool_depend>\n\n  <build_depend>eigen</build_depend>\n  <build_export_depend>eigen</build_export_depend>\n\n  <export>\n    <build_type>cmake</build_type>\n  </export>\n</package>\n"
  },
  {
    "path": "Thirdparty/Sophus/rst-dir/conf.py",
    "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../sympy'))\n\n\nsys.path.insert(1, os.path.abspath('../doxyrest_b/doxyrest/sphinx'))\nextensions = ['doxyrest', 'cpplexer', 'sphinx.ext.autodoc']\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Sophus'\ncopyright = '2019, Hauke Strasdat'\nauthor = 'Hauke Strasdat'\n\n\n# Tell sphinx what the primary language being documented is.\nprimary_domain = 'cpp'\n\n# Tell sphinx what the pygments highlight language should be.\nhighlight_language = 'cpp'\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n"
  },
  {
    "path": "Thirdparty/Sophus/rst-dir/page_index.rst",
    "content": "Sophus -  Lie groups for 2d/3d Geometry\n=======================================\n\n.. toctree::\n   :maxdepth: 2\n   :caption: Contents:\n\n   GitHub Page <https://github.com/strasdat/Sophus>\n   pysophus"
  },
  {
    "path": "Thirdparty/Sophus/rst-dir/pysophus.rst",
    "content": "Python API\n==========\n\n.. automodule:: sophus.matrix\n   :members:\n\n.. automodule:: sophus.complex\n   :members:\n\n.. automodule:: sophus.quaternion\n   :members:\n\n.. automodule:: sophus.so2\n   :members:\n\n.. automodule:: sophus.so3\n   :members:\n\n.. automodule:: sophus.se2\n   :members:\n\n.. automodule:: sophus.se3\n   :members:"
  },
  {
    "path": "Thirdparty/Sophus/run_format.sh",
    "content": "find . -type d \\( -path ./sympy -o -path ./doxyrest_b -o -path \"./*/CMakeFiles/*\" \\) -prune -o \\( -iname \"*.hpp\" -o -iname \"*.cpp\" \\) -print | xargs clang-format -i\n"
  },
  {
    "path": "Thirdparty/Sophus/scripts/install_docs_deps.sh",
    "content": "#!/bin/bash\n\nset -x # echo on\nset -e # exit on error\n\nsudo apt-get -qq update\nsudo apt-get install doxygen liblua5.3-dev ragel\npip3 install 'sphinx==2.0.1'\npip3 install sphinx_rtd_theme\npip3 install sympy\n\ngit clone https://github.com/vovkos/doxyrest_b\ncd doxyrest_b\ngit reset --hard ad45c064d1199e71b8cae5aa66d4251c4228b958\ngit submodule update --init\nmkdir build\ncd build\ncmake ..\ncmake --build .\n\ncd ../..\n"
  },
  {
    "path": "Thirdparty/Sophus/scripts/install_linux_deps.sh",
    "content": "#!/bin/bash\n\nset -x # echo on\nset -e # exit on error\n\ncmake --version\n\nsudo apt-get -qq update\nsudo apt-get install gfortran libc++-dev libgoogle-glog-dev libatlas-base-dev libsuitesparse-dev libceres-dev ccache\nwget https://gitlab.com/libeigen/eigen/-/archive/3.3.4/eigen-3.3.4.tar.bz2\ntar xvf eigen-3.3.4.tar.bz2\nmkdir build-eigen\ncd build-eigen\ncmake ../eigen-3.3.4 -DEIGEN_DEFAULT_TO_ROW_MAJOR=$ROW_MAJOR_DEFAULT\nsudo make install\ncd ..\n\ngit clone https://ceres-solver.googlesource.com/ceres-solver ceres-solver\ncd ceres-solver\ngit reset --hard b0aef211db734379319c19c030e734d6e23436b0\nmkdir build\ncd build\nccache -s\ncmake -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ..\nmake -j8\nsudo make install\ncd ../..\n"
  },
  {
    "path": "Thirdparty/Sophus/scripts/install_linux_fmt_deps.sh",
    "content": "#!/bin/bash\n\nset -x # echo on\nset -e # exit on error\n\ngit clone https://github.com/fmtlib/fmt.git\ncd fmt\ngit checkout 5.3.0\nmkdir build\ncd build\ncmake -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ..\nmake -j8\nsudo make install\ncd ../..\n"
  },
  {
    "path": "Thirdparty/Sophus/scripts/install_osx_deps.sh",
    "content": "#!/bin/bash\n\nset -x # echo on\nset -e # exit on error\n\nbrew update\nbrew install fmt\nbrew install ccache\n\n# Build a specific version of ceres-solver instead of one shipped over brew\ncurl https://raw.githubusercontent.com/Homebrew/homebrew-core/b0792ccba6e71cd028263ca7621db894afc602d2/Formula/ceres-solver.rb -o ceres-solver.rb\npatch <<EOF\n--- ceres-solver.rb\n+++ ceres-solver.rb\n@@ -1,8 +1,8 @@\n class CeresSolver < Formula\n   desc \"C++ library for large-scale optimization\"\n   homepage \"http://ceres-solver.org/\"\n-  url \"http://ceres-solver.org/ceres-solver-2.0.0.tar.gz\"\n-  sha256 \"10298a1d75ca884aa0507d1abb0e0f04800a92871cd400d4c361b56a777a7603\"\n+  url \"https://github.com/ceres-solver/ceres-solver/archive/refs/tags/2.1.0rc1.tar.gz\"\n+  sha256 \"9138a7d80a3142fe3a98519d58a489da9e204b815cd8c0571b3e643f04eca574\"\n   license \"BSD-3-Clause\"\n   revision 4\n   head \"https://ceres-solver.googlesource.com/ceres-solver.git\", branch: \"master\"\n@@ -31,23 +31,16 @@\n   depends_on \"suite-sparse\"\n   depends_on \"tbb\"\n \n-  # Fix compatibility with TBB 2021.1\n-  # See https://github.com/ceres-solver/ceres-solver/issues/669\n-  # Remove in the next release\n-  patch do\n-    url \"https://github.com/ceres-solver/ceres-solver/commit/941ea13475913ef8322584f7401633de9967ccc8.patch?full_index=1\"\n-    sha256 \"c61ca2ff1e92cc2134ba8e154bd9052717ba3fcae085e8f44957b9c22e6aa4ff\"\n-  end\n \n   def install\n     system \"cmake\", \".\", *std_cmake_args,\n                     \"-DBUILD_SHARED_LIBS=ON\",\n                     \"-DBUILD_EXAMPLES=OFF\",\n-                    \"-DLIB_SUFFIX=''\"\n+                    \"-DLIB_SUFFIX=''\",\n+                    \"-DCMAKE_CXX_COMPILER_LAUNCHER=/usr/local/bin/ccache\"\n     system \"make\"\n     system \"make\", \"install\"\n     pkgshare.install \"examples\", \"data\"\n-    doc.install \"docs/html\" unless build.head?\n   end\nEOF\n\nHOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 brew install 
--build-from-source ./ceres-solver.rb\n"
  },
  {
    "path": "Thirdparty/Sophus/scripts/run_cpp_tests.sh",
    "content": "#!/bin/bash\n\nset -x # echo on\nset -e # exit on error\n\nmkdir build\ncd build\npwd\ncmake -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DUSE_BASIC_LOGGING=$USE_BASIC_LOGGING -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..\n# Ubuntu builds via Github actions run on 2-core virtual machines\nmake -j2\nmake CTEST_OUTPUT_ON_FAILURE=1 test\n"
  },
  {
    "path": "Thirdparty/Sophus/setup.py",
    "content": "import os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_ext import build_ext\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Convert distutils Windows platform specifiers to CMake -A arguments\nPLAT_TO_CMAKE = {\n    \"win32\": \"Win32\",\n    \"win-amd64\": \"x64\",\n    \"win-arm32\": \"ARM\",\n    \"win-arm64\": \"ARM64\",\n}\n\n# A CMakeExtension needs a sourcedir instead of a file list.\n# The name must be the _single_ output extension from the CMake build.\n# If you need multiple extensions, see scikit-build.\nclass CMakeExtension(Extension):\n    def __init__(self, name, sourcedir=\"\"):\n        Extension.__init__(self, name, sources=[])\n        self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n    def build_extension(self, ext):\n        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n\n        # required for auto-detection & inclusion of auxiliary \"native\" libs\n        if not extdir.endswith(os.path.sep):\n            extdir += os.path.sep\n\n        debug = int(os.environ.get(\"DEBUG\", 0)) if self.debug is None else self.debug\n        cfg = \"Debug\" if debug else \"Release\"\n\n        # CMake lets you override the generator - we need to check this.\n        # Can be set with Conda-Build, for example.\n        cmake_generator = os.environ.get(\"CMAKE_GENERATOR\", \"\")\n\n        cmake_args = [\n            \"-DBUILD_PYTHON_BINDINGS=ON\",\n            \"-DBUILD_SOPHUS_EXAMPLES=OFF\",\n            \"-DBUILD_SOPHUS_TESTS=OFF\",\n        ]\n        build_args = []\n        # Adding CMake arguments set as environment variable\n        # (needed e.g. 
to build for ARM OSx on conda-forge)\n        if \"CMAKE_ARGS\" in os.environ:\n            cmake_args += [item for item in os.environ[\"CMAKE_ARGS\"].split(\" \") if item]\n\n        if \"PYTHONPATH\" in os.environ and \"pip-build-env\" in os.environ[\"PYTHONPATH\"]:\n            del os.environ[\"PYTHONPATH\"]\n\n        if self.compiler.compiler_type != \"msvc\":\n            # Using Ninja-build since it a) is available as a wheel and b)\n            # multithreaded automatically. MSVC would require all variables be\n            # exported for Ninja to pick it up, which is a little tricky to do.\n            # Users can override the generator with CMAKE_GENERATOR in CMake\n            # 3.15+.\n            if not cmake_generator:\n                try:\n                    import ninja  # noqa: F401\n\n                    cmake_args += [\"-GNinja\"]\n                except ImportError:\n                    pass\n\n        else:\n\n            # Single config generators are handled \"normally\"\n            single_config = any(x in cmake_generator for x in {\"NMake\", \"Ninja\"})\n\n            # CMake allows an arch-in-generator style for backward compatibility\n            contains_arch = any(x in cmake_generator for x in {\"ARM\", \"Win64\"})\n\n            # Specify the arch if using MSVC generator, but only if it doesn't\n            # contain a backward-compatibility arch spec already in the\n            # generator name.\n            if not single_config and not contains_arch:\n                cmake_args += [\"-A\", PLAT_TO_CMAKE[self.plat_name]]\n\n            # Multi-config generators have a different way to specify configs\n            if not single_config:\n                cmake_args += [\n                    f\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}\"\n                ]\n                build_args += [\"--config\", cfg]\n\n        if sys.platform.startswith(\"darwin\"):\n            # Cross-compile support for macOS - respect ARCHFLAGS 
if set\n            archs = re.findall(r\"-arch (\\S+)\", os.environ.get(\"ARCHFLAGS\", \"\"))\n            if archs:\n                cmake_args += [\"-DCMAKE_OSX_ARCHITECTURES={}\".format(\";\".join(archs))]\n\n        # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level\n        # across all generators.\n        if \"CMAKE_BUILD_PARALLEL_LEVEL\" not in os.environ:\n            # self.parallel is a Python 3 only way to set parallel jobs by hand\n            # using -j in the build_ext call, not supported by pip or PyPA-build.\n            if hasattr(self, \"parallel\") and self.parallel:\n                # CMake 3.12+ only.\n                build_args += [f\"-j{self.parallel}\"]\n\n        if not os.path.exists(self.build_temp):\n            os.makedirs(self.build_temp)\n\n        if not os.path.exists(self.build_lib):\n                os.makedirs(self.build_lib)\n\n        subprocess.check_call([\"cmake\", ext.sourcedir] + cmake_args, cwd=self.build_temp)\n        subprocess.check_call(\n            [\"cmake\", \"--build\", \".\"] + build_args, cwd=self.build_temp\n        )\n\n        # copy stubs files from sophus_pybind-stubs to lib folder to be installed\n        subprocess.run(\n            f\"cp sophus_pybind-stubs/*.pyi {self.build_lib}\", shell=True, check=True,\n        )\n        subprocess.run(\n            f\"cp sophus_pybind-stubs/*.typed {self.build_lib}\",  shell=True, check=True,\n        )\n        # copy .so file to lib\n        subprocess.run(f\"cp {self.build_temp}/*.so {self.build_lib}/\", shell=True, check=True,)\n\n\ndef main():\n    # The information here can also be placed in setup.cfg - better separation of\n    # logic and declaration, and simpler if you include description/version in a file.\n    setup(\n        name=\"sophus_pybind\",\n        version=\"1.22.10\",\n        description=\"Sophus python API\",\n        long_description=\"Python API for sophus library\",\n        
url=\"https://github.com/strasdat/sophus\",\n        ext_modules=[CMakeExtension(\"sophus_pybind\", sourcedir=ROOT_DIR)],\n        author=\"Cheng Peng, David Caruso\",\n        cmdclass={\"build_ext\": CMakeBuild},\n        zip_safe=False,\n        python_requires=\">=3.6\",\n        install_requires=[\"numpy\"],\n        packages=find_packages(),\n        license=\"Apache-2.0\",\n    )\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/average.hpp",
    "content": "/// @file\n/// Calculation of biinvariant means.\n\n#pragma once\n\n#include <complex>\n\n#include \"cartesian.hpp\"\n#include \"common.hpp\"\n#include \"rxso2.hpp\"\n#include \"rxso3.hpp\"\n#include \"se2.hpp\"\n#include \"se3.hpp\"\n#include \"sim2.hpp\"\n#include \"sim3.hpp\"\n#include \"so2.hpp\"\n#include \"so3.hpp\"\n\nnamespace Sophus {\n\n/// Calculates mean iteratively.\n///\n/// Returns ``nullopt`` if it does not converge.\n///\ntemplate <class SequenceContainer>\noptional<typename SequenceContainer::value_type> iterativeMean(\n    SequenceContainer const& foo_Ts_bar, int max_num_iterations) {\n  size_t N = foo_Ts_bar.size();\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n\n  using Group = typename SequenceContainer::value_type;\n  using Scalar = typename Group::Scalar;\n  using Tangent = typename Group::Tangent;\n\n  // This implements the algorithm in the beginning of Sec. 4.2 in\n  // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf.\n  Group foo_T_average = foo_Ts_bar.front();\n  Scalar w = Scalar(1. 
/ N);\n  for (int i = 0; i < max_num_iterations; ++i) {\n    Tangent average;\n    setToZero<Tangent>(average);\n    for (Group const& foo_T_bar : foo_Ts_bar) {\n      average += w * (foo_T_average.inverse() * foo_T_bar).log();\n    }\n    Group foo_T_newaverage = foo_T_average * Group::exp(average);\n    if (squaredNorm<Tangent>(\n            (foo_T_newaverage.inverse() * foo_T_average).log()) <\n        Constants<Scalar>::epsilon()) {\n      return foo_T_newaverage;\n    }\n\n    foo_T_average = foo_T_newaverage;\n  }\n  // LCOV_EXCL_START\n  return nullopt;\n  // LCOV_EXCL_STOP\n}\n\n#ifdef DOXYGEN_SHOULD_SKIP_THIS\n/// Mean implementation for any Lie group.\ntemplate <class SequenceContainer, class Scalar>\noptional<typename SequenceContainer::value_type> average(\n    SequenceContainer const& foo_Ts_bar);\n#else\n\n// Mean implementation for Cartesian.\ntemplate <class SequenceContainer, int Dim = SequenceContainer::value_type::DoF,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<std::is_same<typename SequenceContainer::value_type,\n                         Cartesian<Scalar, Dim> >::value,\n            optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n\n  Sophus::Vector<Scalar, Dim> average;\n  average.setZero();\n  for (Cartesian<Scalar, Dim> const& foo_T_bar : foo_Ts_bar) {\n    average += foo_T_bar.params();\n  }\n  return Cartesian<Scalar, Dim>(average / Scalar(N));\n}\n\n// Mean implementation for SO(2).\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SO2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  // This implements rotational part of 
Proposition 12 from Sec. 6.2 of\n  // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf.\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  SO2<Scalar> foo_T_average = foo_Ts_bar.front();\n  Scalar w = Scalar(1. / N);\n\n  Scalar average(0);\n  for (SO2<Scalar> const& foo_T_bar : foo_Ts_bar) {\n    average += w * (foo_T_average.inverse() * foo_T_bar).log();\n  }\n  return foo_T_average * SO2<Scalar>::exp(average);\n}\n\n// Mean implementation for RxSO(2).\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, RxSO2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  RxSO2<Scalar> foo_T_average = foo_Ts_bar.front();\n  Scalar w = Scalar(1. 
/ N);\n\n  Vector2<Scalar> average(Scalar(0), Scalar(0));\n  for (RxSO2<Scalar> const& foo_T_bar : foo_Ts_bar) {\n    average += w * (foo_T_average.inverse() * foo_T_bar).log();\n  }\n  return foo_T_average * RxSO2<Scalar>::exp(average);\n}\n\nnamespace details {\ntemplate <class T>\nvoid getQuaternion(T const&);\n\ntemplate <class Scalar>\nEigen::Quaternion<Scalar> getUnitQuaternion(SO3<Scalar> const& R) {\n  return R.unit_quaternion();\n}\n\ntemplate <class Scalar>\nEigen::Quaternion<Scalar> getUnitQuaternion(RxSO3<Scalar> const& sR) {\n  return sR.so3().unit_quaternion();\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nEigen::Quaternion<Scalar> averageUnitQuaternion(\n    SequenceContainer const& foo_Ts_bar) {\n  // This:  http://stackoverflow.com/a/27410865/1221742\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  Eigen::Matrix<Scalar, 4, Eigen::Dynamic> Q(4, N);\n  int i = 0;\n  Scalar w = Scalar(1. 
/ N);\n  for (auto const& foo_T_bar : foo_Ts_bar) {\n    Q.col(i) = w * details::getUnitQuaternion(foo_T_bar).coeffs();\n    ++i;\n  }\n\n  Eigen::Matrix<Scalar, 4, 4> QQt = Q * Q.transpose();\n  // TODO: Figure out why we can't use SelfAdjointEigenSolver here.\n  Eigen::EigenSolver<Eigen::Matrix<Scalar, 4, 4> > es(QQt);\n\n  std::complex<Scalar> max_eigenvalue = es.eigenvalues()[0];\n  Eigen::Matrix<std::complex<Scalar>, 4, 1> max_eigenvector =\n      es.eigenvectors().col(0);\n\n  for (int i = 1; i < 4; i++) {\n    if (std::norm(es.eigenvalues()[i]) > std::norm(max_eigenvalue)) {\n      max_eigenvalue = es.eigenvalues()[i];\n      max_eigenvector = es.eigenvectors().col(i);\n    }\n  }\n  Eigen::Quaternion<Scalar> quat;\n  quat.coeffs() <<                //\n      max_eigenvector[0].real(),  //\n      max_eigenvector[1].real(),  //\n      max_eigenvector[2].real(),  //\n      max_eigenvector[3].real();\n  return quat;\n}\n}  // namespace details\n\n// Mean implementation for SO(3).\n//\n// TODO: Detect degenerated cases and return nullopt.\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SO3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  return SO3<Scalar>(details::averageUnitQuaternion(foo_Ts_bar));\n}\n\n// Mean implementation for R x SO(3).\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, RxSO3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar) {\n  size_t N = std::distance(std::begin(foo_Ts_bar), std::end(foo_Ts_bar));\n\n  SOPHUS_ENSURE(N >= 1, \"N must be >= 1.\");\n  Scalar scale_sum = Scalar(0);\n  using std::exp;\n  using std::log;\n  for 
(RxSO3<Scalar> const& foo_T_bar : foo_Ts_bar) {\n    scale_sum += log(foo_T_bar.scale());\n  }\n  return RxSO3<Scalar>(exp(scale_sum / Scalar(N)),\n                       SO3<Scalar>(details::averageUnitQuaternion(foo_Ts_bar)));\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SE2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  // TODO: Implement Proposition 12 from Sec. 6.2 of\n  // ftp://ftp-sop.inria.fr/epidaure/Publications/Arsigny/arsigny_rr_biinvariant_average.pdf.\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, Sim2<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, SE3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\ntemplate <class SequenceContainer,\n          class Scalar = typename SequenceContainer::value_type::Scalar>\nenable_if_t<\n    std::is_same<typename SequenceContainer::value_type, Sim3<Scalar> >::value,\n    optional<typename SequenceContainer::value_type> >\naverage(SequenceContainer const& foo_Ts_bar, int max_num_iterations = 20) {\n  return iterativeMean(foo_Ts_bar, max_num_iterations);\n}\n\n#endif  // 
DOXYGEN_SHOULD_SKIP_THIS\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/cartesian.hpp",
    "content": "/// @file\n/// Cartesian - Euclidean vector space as Lie group\n\n#pragma once\n#include <sophus/types.hpp>\n\nnamespace Sophus {\ntemplate <class Scalar_, int M, int Options = 0>\nclass Cartesian;\n\ntemplate <class Scalar_>\nusing Cartesian2 = Cartesian<Scalar_, 2>;\n\ntemplate <class Scalar_>\nusing Cartesian3 = Cartesian<Scalar_, 3>;\n\nusing Cartesian2d = Cartesian2<double>;\nusing Cartesian3d = Cartesian3<double>;\n\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int M, int Options>\nstruct traits<Sophus::Cartesian<Scalar_, M, Options>> {\n  using Scalar = Scalar_;\n  using ParamsType = Sophus::Vector<Scalar, M, Options>;\n};\n\ntemplate <class Scalar_, int M, int Options>\nstruct traits<Map<Sophus::Cartesian<Scalar_, M>, Options>>\n    : traits<Sophus::Cartesian<Scalar_, M, Options>> {\n  using Scalar = Scalar_;\n  using ParamsType = Map<Sophus::Vector<Scalar, M>, Options>;\n};\n\ntemplate <class Scalar_, int M, int Options>\nstruct traits<Map<Sophus::Cartesian<Scalar_, M> const, Options>>\n    : traits<Sophus::Cartesian<Scalar_, M, Options> const> {\n  using Scalar = Scalar_;\n  using ParamsType = Map<Sophus::Vector<Scalar, M> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// Cartesian base type - implements Cartesian class but is storage agnostic.\n///\n/// Euclidean vector space as Lie group.\n///\n/// Lie groups can be seen as a generalization over the Euclidean vector\n/// space R^M. 
Here an M-dimensional vector ``p`` is represented as a\n/// (M+1) x (M+1) homogeneous matrix:\n///\n///   | I p |\n///   | o 1 |\n///\n/// On the other hand, Cartesian(M) can be seen as a special case of SE(M)\n/// with identity rotation, and hence represents pure translation.\n///\n/// The purpose of this class is two-fold:\n///  - for educational purpose, to highlight how Lie groups generalize over\n///    Euclidean vector spaces.\n///  - to be used in templated/generic algorithms (such as Sophus::Spline)\n///    which are implemented against the Lie group interface.\n///\n/// Obviously, Cartesian(M) can just be represented as an M-tuple.\n///\n/// Cartesian is not compact, but a commutative group. For vector additions it\n/// holds `a+b = b+a`.\n///\n/// See Cartesian class for more details below.\n///\ntemplate <class Derived, int M>\nclass CartesianBase {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using ParamsType = typename Eigen::internal::traits<Derived>::ParamsType;\n  /// Degrees of freedom of manifold, equals to number of Cartesian coordinates.\n  static int constexpr DoF = M;\n  /// Number of internal parameters used, also M.\n  static int constexpr num_parameters = M;\n  /// Group transformations are (M+1)x(M+1) matrices.\n  static int constexpr N = M + 1;\n  static int constexpr Dim = M;\n\n  using Transformation = Sophus::Matrix<Scalar, N, N>;\n  using Point = Sophus::Vector<Scalar, M>;\n  using HomogeneousPoint = Sophus::Vector<Scalar, N>;\n  using Line = ParametrizedLine<Scalar, M>;\n  using Hyperplane = Eigen::Hyperplane<Scalar, M>;\n  using Tangent = Sophus::Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with Cartesian operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using CartesianSum = Cartesian<ReturnScalar<OtherDerived>, M>;\n\n  template <typename PointDerived>\n  using PointProduct = Sophus::Vector<ReturnScalar<PointDerived>, M>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct =\n      Sophus::Vector<ReturnScalar<HPointDerived>, N>;\n\n  /// Adjoint transformation\n  ///\n  /// Always identity of commutative groups.\n  SOPHUS_FUNC Adjoint Adj() const { return Adjoint::Identity(); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC Cartesian<NewScalarType, M> cast() const {\n    return Cartesian<NewScalarType, M>(params().template cast<NewScalarType>());\n  }\n\n  /// Returns derivative of  this * exp(x)  wrt x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Sophus::Matrix<Scalar, num_parameters, DoF> m;\n    m.setIdentity();\n    return m;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_log_this_inv_by_x_at_this()\n      const {\n    Matrix<Scalar, DoF, num_parameters> m;\n    m.setIdentity();\n    return m;\n  }\n\n  /// Returns group inverse.\n  ///\n  /// The additive inverse.\n  ///\n  SOPHUS_FUNC Cartesian<Scalar, M> inverse() const {\n    return Cartesian<Scalar, M>(-params());\n  }\n\n  /// Logarithmic map\n  ///\n  /// For Euclidean vector space, just the identity. 
Or to be more precise\n  /// it just extracts the significant M-vector from the NxN matrix.\n  ///\n  SOPHUS_FUNC Tangent log() const { return params(); }\n\n  /// Returns 4x4 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | I p |\n  ///   | o 1 |\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Sophus::Matrix<Scalar, N, N> matrix;\n    matrix.setIdentity();\n    matrix.col(M).template head<M>() = params();\n    return matrix;\n  }\n\n  /// Group multiplication, are vector additions.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC CartesianBase<Derived, M>& operator=(\n      CartesianBase<OtherDerived, M> const& other) {\n    params() = other.params();\n    return *this;\n  }\n\n  /// Group multiplication, are vector additions.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC CartesianSum<OtherDerived> operator*(\n      CartesianBase<OtherDerived, M> const& other) const {\n    return CartesianSum<OtherDerived>(params() + other.params());\n  }\n\n  /// Group action on points, again just vector addition.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, M>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return PointProduct<PointDerived>(params() + p);\n  }\n\n  /// Group action on homogeneous points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, N>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rp = *this * p.template head<M>();\n    HomogeneousPointProduct<HPointDerived> r;\n    r << rp, p(M);\n    return r;\n  }\n\n  /// Group action on lines.\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), l.direction());\n  }\n\n  /// Group action on planes.\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    return Hyperplane(p.normal(), p.offset() - params().dot(p.normal()));\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this Cartesian's Scalar\n  /// type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC CartesianBase<Derived, M>& operator*=(\n      CartesianBase<OtherDerived, M> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Mutator of params vector.\n  ///\n  SOPHUS_FUNC ParamsType& params() {\n    return static_cast<Derived*>(this)->params();\n  }\n\n  /// Accessor of params vector\n  ///\n  SOPHUS_FUNC ParamsType const& params() const {\n    return static_cast<Derived const*>(this)->params();\n  }\n};\n\n/// Cartesian using default storage; derived from CartesianBase.\ntemplate <class Scalar_, int M, int Options>\nclass Cartesian : public CartesianBase<Cartesian<Scalar_, M, Options>, M> {\n  using Base = CartesianBase<Cartesian<Scalar_, M, Options>, M>;\n\n public:\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n  static int 
constexpr N = Base::N;\n  static int constexpr Dim = Base::Dim;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using ParamsMember = Sophus::Vector<Scalar, M, Options>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC Cartesian& operator=(Cartesian const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes to zero vector.\n  ///\n  SOPHUS_FUNC Cartesian() { params_.setZero(); }\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC Cartesian(Cartesian const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Cartesian(CartesianBase<OtherDerived, M> const& other)\n      : params_(other.params()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Accepts either M-vector or (M+1)x(M+1) matrices.\n  ///\n  template <class D>\n  explicit SOPHUS_FUNC Cartesian(Eigen::MatrixBase<D> const& m) {\n    static_assert(\n        std::is_same<typename Eigen::MatrixBase<D>::Scalar, Scalar>::value, \"\");\n    if (m.rows() == DoF && m.cols() == 1) {\n      // trick so this compiles\n      params_ = m.template block<M, 1>(0, 0);\n    } else if (m.rows() == N && m.cols() == N) {\n      params_ = m.template block<M, 1>(0, M);\n    } else {\n      SOPHUS_ENSURE(false, \"{} {}\", m.rows(), m.cols());\n    }\n  }\n\n  /// This provides unsafe read/write access to internal data.\n  ///\n  SOPHUS_FUNC Scalar* data() { return params_.data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC 
Scalar const* data() const { return params_.data(); }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> m;\n    m.setIdentity();\n    return m;\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const&) {\n    return Dx_exp_x_at_0();\n  }\n\n  /// Returns derivative of exp(x) * p wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, Dim, DoF> Dx_exp_x_times_point_at_0(\n      Point const&) {\n    Sophus::Matrix<Scalar, Dim, DoF> J;\n    J.setIdentity();\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Mutator of params vector\n  ///\n  SOPHUS_FUNC ParamsMember& params() { return params_; }\n\n  /// Accessor of params vector\n  ///\n  SOPHUS_FUNC ParamsMember const& params() const { return params_; }\n\n  /// Returns the ith infinitesimal generators of Cartesian(M).\n  ///\n  /// The infinitesimal generators for e.g. 
the 3-dimensional case:\n  ///\n  /// ```\n  ///         |  0  0  0  1 |\n  ///   G_0 = |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_1 = |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_2 = |  0  0  0  0 |\n  ///         |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, M-1].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i < M, \"i should be in range [0,M-1].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// Group exponential\n  ///\n  /// For Euclidean vector space, just the identity. Or to be more precise\n  /// it just constructs the (M+1xM+1) homogeneous matrix representation\n  /// from the M-vector.\n  ///\n  SOPHUS_FUNC static Cartesian<Scalar, M> exp(Tangent const& a) {\n    return Cartesian<Scalar, M>(a);\n  }\n\n  /// hat-operator\n  ///\n  /// Formally, the hat()-operator of Cartesian(M) is defined as\n  ///\n  ///   ``hat(.): R^M -> R^{M+1xM+1},  hat(a) = sum_i a_i * G_i``\n  ///   (for i=0,...,M-1)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of Cartesian(M).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.setZero();\n    Omega.col(M).template head<M>() = a.template head<M>();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// Always 0 for commutative groups.\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) {\n    return Tangent::Zero();\n  }\n\n  /// Draws uniform samples in the range [-1, 1] per coordinates.\n  ///\n  template <class UniformRandomBitGenerator>\n  static Cartesian sampleUniform(UniformRandomBitGenerator& generator) {\n    
std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    Vector<Scalar, M> v;\n    for (int i = 0; i < M; ++i) {\n      v[i] = uniform(generator);\n    }\n    return Cartesian(v);\n  }\n\n  /// vee-operator\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& m) {\n    return m.col(M).template head<M>();\n  }\n\n protected:\n  ParamsMember params_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``Cartesian``; derived from\n/// CartesianBase.\n///\n/// Allows us to wrap Cartesian objects around POD array.\ntemplate <class Scalar_, int M, int Options>\nclass Map<Sophus::Cartesian<Scalar_, M>, Options>\n    : public Sophus::CartesianBase<Map<Sophus::Cartesian<Scalar_, M>, Options>,\n                                   M> {\n public:\n  using Base =\n      Sophus::CartesianBase<Map<Sophus::Cartesian<Scalar_, M>, Options>, M>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs) : params_(coeffs) {}\n\n  /// Mutator of params vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector<Scalar, M, Options>>& params() {\n    return params_;\n  }\n\n  /// Accessor of params vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector<Scalar, M, Options>> const& params() const {\n    return params_;\n  }\n\n protected:\n  Map<Sophus::Vector<Scalar, M>, Options> params_;\n};\n\n/// Specialization of Eigen::Map for ``Cartesian const``; derived from\n/// CartesianBase.\n///\n/// Allows us to wrap Cartesian objects around POD array.\ntemplate <class Scalar_, int M, int Options>\nclass Map<Sophus::Cartesian<Scalar_, M> const, Options>\n    : public 
Sophus::CartesianBase<\n          Map<Sophus::Cartesian<Scalar_, M> const, Options>, M> {\n public:\n  using Base =\n      Sophus::CartesianBase<Map<Sophus::Cartesian<Scalar_, M> const, Options>,\n                            M>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n\n  using Base::operator*;\n\n  SOPHUS_FUNC Map(Scalar const* coeffs) : params_(coeffs) {}\n\n  /// Accessor of params vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector<Scalar, M> const, Options> const& params()\n      const {\n    return params_;\n  }\n\n protected:\n  Map<Sophus::Vector<Scalar, M> const, Options> const params_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/ceres_local_parameterization.hpp",
    "content": "#pragma once\n\n#include <ceres/local_parameterization.h>\n\n#include <sophus/ceres_typetraits.hpp>\n\nnamespace Sophus {\n\n/// Templated local parameterization for LieGroup [with implemented\n/// LieGroup::Dx_this_mul_exp_x_at_0() ]\ntemplate <template <typename, int = 0> class LieGroup>\nclass LocalParameterization : public ceres::LocalParameterization {\n public:\n  using LieGroupd = LieGroup<double>;\n  using Tangent = typename LieGroupd::Tangent;\n  using TangentMap = typename Sophus::Mapper<Tangent>::ConstMap;\n  static int constexpr DoF = LieGroupd::DoF;\n  static int constexpr num_parameters = LieGroupd::num_parameters;\n\n  /// LieGroup plus operation for Ceres\n  ///\n  ///  T * exp(x)\n  ///\n  bool Plus(double const* T_raw, double const* delta_raw,\n            double* T_plus_delta_raw) const override {\n    Eigen::Map<LieGroupd const> const T(T_raw);\n    TangentMap delta = Sophus::Mapper<Tangent>::map(delta_raw);\n    Eigen::Map<LieGroupd> T_plus_delta(T_plus_delta_raw);\n    T_plus_delta = T * LieGroupd::exp(delta);\n    return true;\n  }\n\n  /// Jacobian of LieGroup plus operation for Ceres\n  ///\n  /// Dx T * exp(x)  with  x=0\n  ///\n  bool ComputeJacobian(double const* T_raw,\n                       double* jacobian_raw) const override {\n    Eigen::Map<LieGroupd const> T(T_raw);\n    Eigen::Map<Eigen::Matrix<double, num_parameters, DoF,\n                             DoF == 1 ? Eigen::ColMajor : Eigen::RowMajor>>\n        jacobian(jacobian_raw);\n    jacobian = T.Dx_this_mul_exp_x_at_0();\n    return true;\n  }\n\n  int GlobalSize() const override { return LieGroupd::num_parameters; }\n\n  int LocalSize() const override { return LieGroupd::DoF; }\n};\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/ceres_manifold.hpp",
    "content": "#pragma once\n\n#include <ceres/manifold.h>\n#include <sophus/ceres_typetraits.hpp>\n\nnamespace Sophus {\n\n/// Templated local parameterization for LieGroup [with implemented\n/// LieGroup::Dx_this_mul_exp_x_at_0() ]\ntemplate <template <typename, int = 0> class LieGroup>\nclass Manifold : public ceres::Manifold {\n public:\n  using LieGroupd = LieGroup<double>;\n  using Tangent = typename LieGroupd::Tangent;\n  using TangentMap = typename Sophus::Mapper<Tangent>::Map;\n  using TangentConstMap = typename Sophus::Mapper<Tangent>::ConstMap;\n  static int constexpr DoF = LieGroupd::DoF;\n  static int constexpr num_parameters = LieGroupd::num_parameters;\n\n  /// LieGroup plus operation for Ceres\n  ///\n  ///  T * exp(x)\n  ///\n  bool Plus(double const* T_raw, double const* delta_raw,\n            double* T_plus_delta_raw) const override {\n    Eigen::Map<LieGroupd const> const T(T_raw);\n    TangentConstMap delta = Sophus::Mapper<Tangent>::map(delta_raw);\n    Eigen::Map<LieGroupd> T_plus_delta(T_plus_delta_raw);\n    T_plus_delta = T * LieGroupd::exp(delta);\n    return true;\n  }\n\n  /// Jacobian of LieGroup plus operation for Ceres\n  ///\n  /// Dx T * exp(x)  with  x=0\n  ///\n  bool PlusJacobian(double const* T_raw, double* jacobian_raw) const override {\n    Eigen::Map<LieGroupd const> T(T_raw);\n    Eigen::Map<Eigen::Matrix<double, num_parameters, DoF,\n                             DoF == 1 ? 
Eigen::ColMajor : Eigen::RowMajor>>\n        jacobian(jacobian_raw);\n    jacobian = T.Dx_this_mul_exp_x_at_0();\n    return true;\n  }\n\n  bool Minus(double const* y_raw, double const* x_raw,\n             double* y_minus_x_raw) const override {\n    Eigen::Map<LieGroupd const> y(y_raw), x(x_raw);\n    TangentMap y_minus_x = Sophus::Mapper<Tangent>::map(y_minus_x_raw);\n\n    y_minus_x = (x.inverse() * y).log();\n    return true;\n  }\n\n  bool MinusJacobian(double const* x_raw, double* jacobian_raw) const override {\n    Eigen::Map<LieGroupd const> x(x_raw);\n    Eigen::Map<Eigen::Matrix<double, DoF, num_parameters, Eigen::RowMajor>>\n        jacobian(jacobian_raw);\n    jacobian = x.Dx_log_this_inv_by_x_at_this();\n    return true;\n  }\n\n  int AmbientSize() const override { return LieGroupd::num_parameters; }\n\n  int TangentSize() const override { return LieGroupd::DoF; }\n};\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/ceres_typetraits.hpp",
    "content": "#ifndef SOPHUS_CERES_TYPETRAITS_HPP\n#define SOPHUS_CERES_TYPETRAITS_HPP\n\nnamespace Sophus {\n\ntemplate <class T, std::size_t = sizeof(T)>\nconstexpr std::true_type complete(T*);\nconstexpr std::false_type complete(...);\n\ntemplate <class T>\nusing IsSpecialized = decltype(complete(std::declval<T*>()));\n\n/// Type trait used to distinguish mappable vector types from scalars\n///\n/// We use this class to distinguish Sophus::Vector<Scalar, N> from Scalar types\n/// in LieGroup<T>::Tangent\n///\n/// Primary use is mapping LieGroup::Tangent over raw data, with 2 options:\n///  - LieGroup::Tangent is \"scalar\" (for SO2), then we just dereference pointer\n///  - LieGroup::Tangent is Sophus::Vector<...>, then we need to use Eigen::Map\n///\n/// Specialization of Eigen::internal::traits<T> for T is crucial for\n/// for constructing Eigen::Map<T>, thus we use that property for distinguishing\n/// between those two options.\n/// At this moment there seem to be no option to check this using only\n/// \"external\" API of Eigen\ntemplate <class T>\nusing IsMappable = IsSpecialized<Eigen::internal::traits<std::decay_t<T>>>;\n\ntemplate <class T>\nconstexpr bool IsMappableV = IsMappable<T>::value;\n\n/// Helper for mapping tangent vectors (scalars) over pointers to data\ntemplate <typename T, typename E = void>\nstruct Mapper {\n  using Scalar = T;\n  using Map = Scalar&;\n  using ConstMap = const Scalar&;\n\n  static Map map(Scalar* ptr) noexcept { return *ptr; }\n  static ConstMap map(const Scalar* ptr) noexcept { return *ptr; }\n};\n\ntemplate <typename T>\nstruct Mapper<T, typename std::enable_if<IsMappableV<T>>::type> {\n  using Scalar = typename T::Scalar;\n  using Map = Eigen::Map<T>;\n  using ConstMap = Eigen::Map<const T>;\n\n  static Map map(Scalar* ptr) noexcept { return Map(ptr); }\n  static ConstMap map(const Scalar* ptr) noexcept { return ConstMap(ptr); }\n};\n\n}  // namespace Sophus\n\n#endif\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/common.hpp",
    "content": "/// @file\n/// Common functionality.\n\n#pragma once\n\n#include <cmath>\n#include <cstdio>\n#include <cstdlib>\n#include <random>\n#include <type_traits>\n\n#include <Eigen/Core>\n\n#undef SOPHUS_COMPILE_TIME_FMT\n\n#ifdef SOPHUS_USE_BASIC_LOGGING\n\n#define SOPHUS_FMT_CSTR(description, ...) description\n#define SOPHUS_FMT_STR(description, ...) std::string(description)\n#define SOPHUS_FMT_PRINT(description, ...) std::printf(\"%s\\n\", description)\n#define SOPHUS_FMT_ARG(arg)\n\n#else  // !SOPHUS_USE_BASIC_LOGGING\n\n#ifdef __linux__\n#define SOPHUS_COMPILE_TIME_FMT\n#endif\n\n#ifdef __APPLE__\n#include \"TargetConditionals.h\"\n#ifdef TARGET_OS_MAC\n#define SOPHUS_COMPILE_TIME_FMT\n#endif\n#endif\n\n#include <fmt/format.h>\n#include <fmt/ostream.h>\n\n#if FMT_VERSION >= 90000\n#define SOPHUS_FMT_ARG(arg) fmt::streamed(arg)\n#else\n#define SOPHUS_FMT_ARG(arg) arg\n#endif\n\n#ifdef SOPHUS_COMPILE_TIME_FMT\n// To keep compatibility with older libfmt versions,\n// disable the compile time check if FMT_STRING is not available.\n#ifdef FMT_STRING\n// compile-time format check on x\n#define SOPHUS_FMT_STRING(x) FMT_STRING(x)\n#else\n// identity, hence no compile-time check on x\n#define SOPHUS_FMT_STRING(x) x\n#endif\n#else  // ! SOPHUS_COMPILE_TIME_FMT\n// identity, hence no compile-time check on x\n#define SOPHUS_FMT_STRING(x) x\n#endif  // ! SOPHUS_COMPILE_TIME_FMT\n\n#define SOPHUS_FMT_CSTR(description, ...) \\\n  fmt::format(SOPHUS_FMT_STRING(description), ##__VA_ARGS__).c_str()\n\n#define SOPHUS_FMT_STR(description, ...) \\\n  fmt::format(SOPHUS_FMT_STRING(description), ##__VA_ARGS__)\n\n#define SOPHUS_FMT_PRINT(description, ...)                   \\\n  fmt::print(SOPHUS_FMT_STRING(description), ##__VA_ARGS__); \\\n  fmt::print(\"\\n\")\n\n#endif  // !SOPHUS_USE_BASIC_LOGGING\n\n// following boost's assert.hpp\n#undef SOPHUS_ENSURE\n\n// ENSURES are similar to ASSERTS, but they are always checked for (including in\n// release builds). 
At the moment there are no ASSERTS in Sophus which should\n// only be used for checks which are performance critical.\n\n#ifdef __GNUC__\n#define SOPHUS_FUNCTION __PRETTY_FUNCTION__\n#elif (_MSC_VER >= 1310)\n#define SOPHUS_FUNCTION __FUNCTION__\n#else\n#define SOPHUS_FUNCTION \"unknown\"\n#endif\n\n// Make sure this compiles with older versions of Eigen which do not have\n// EIGEN_DEVICE_FUNC defined.\n#ifndef EIGEN_DEVICE_FUNC\n#define EIGEN_DEVICE_FUNC\n#endif\n\n// NVCC on windows has issues with defaulting the Map specialization\n// constructors, so special case that specific platform case.\n#if defined(_MSC_VER) && defined(__CUDACC__)\n#define SOPHUS_WINDOW_NVCC_FALLBACK\n#endif\n\n#define SOPHUS_FUNC EIGEN_DEVICE_FUNC\n\n#if defined(SOPHUS_DISABLE_ENSURES)\n\n#define SOPHUS_ENSURE(expr, ...) ((void)0)\n\n#elif defined(SOPHUS_ENABLE_ENSURE_HANDLER)\n\nnamespace Sophus {\nvoid ensureFailed(char const* function, char const* file, int line,\n                  char const* description);\n}\n\n#define SOPHUS_ENSURE(expr, description, ...)                        \\\n  ((expr)                                                            \\\n       ? ((void)0)                                                   \\\n       : ::Sophus::ensureFailed(SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                SOPHUS_FMT_CSTR(description, ##__VA_ARGS__)))\n#else\n\n#define SOPHUS_DEDAULT_ENSURE_FAILURE_IMPL(function, file, line, description, \\\n                                           ...)                               
\\\n  do {                                                                        \\\n    std::printf(                                                              \\\n        \"Sophus ensure failed in function '%s', \"                             \\\n        \"file '%s', line %d.\\n\",                                              \\\n        function, file, line);                                                \\\n    SOPHUS_FMT_PRINT(description, ##__VA_ARGS__);                             \\\n    std::abort();                                                             \\\n  } while (false)\n\n#ifdef __CUDACC__\n#define SOPHUS_ENSURE(expr, description, ...)                                  \\\n  do {                                                                         \\\n    if (!(expr)) {                                                             \\\n      std::printf(                                                             \\\n          \"Sophus ensure failed in function '%s', file '%s', line %d.\\n\",      \\\n          SOPHUS_FUNCTION, __FILE__, __LINE__);                                \\\n      std::printf(\"%s\", description);                                          \\\n      /* there is no std::abort in cuda kernels, hence we just print the error \\\n       * message here*/                                                        \\\n    }                                                                          \\\n  } while (false)\n#else\n#define SOPHUS_ENSURE(expr, ...)                                              
\\\n  do {                                                                        \\\n    if (!(expr)) {                                                            \\\n      SOPHUS_DEDAULT_ENSURE_FAILURE_IMPL(SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                         ##__VA_ARGS__);                      \\\n    }                                                                         \\\n  } while (false)\n#endif\n#endif\n\nnamespace Sophus {\n\ntemplate <class Scalar>\nstruct Constants {\n  SOPHUS_FUNC static Scalar epsilon() { return Scalar(1e-10); }\n\n  SOPHUS_FUNC static Scalar epsilonPlus() {\n    return epsilon() * (Scalar(1.) + epsilon());\n  }\n\n  SOPHUS_FUNC static Scalar epsilonSqrt() {\n    using std::sqrt;\n    return sqrt(epsilon());\n  }\n\n  SOPHUS_FUNC static Scalar pi() {\n    return Scalar(3.141592653589793238462643383279502884);\n  }\n};\n\ntemplate <>\nstruct Constants<float> {\n  SOPHUS_FUNC static float constexpr epsilon() {\n    return static_cast<float>(1e-5);\n  }\n  SOPHUS_FUNC static float epsilonPlus() {\n    return epsilon() * (1.f + epsilon());\n  }\n\n  SOPHUS_FUNC static float epsilonSqrt() { return std::sqrt(epsilon()); }\n\n  SOPHUS_FUNC static float constexpr pi() {\n    return 3.141592653589793238462643383279502884f;\n  }\n};\n\n/// Nullopt type of lightweight optional class.\nstruct nullopt_t {\n  explicit constexpr nullopt_t() {}\n};\n\nconstexpr nullopt_t nullopt{};\n\n/// Lightweight optional implementation which requires ``T`` to have a\n/// default constructor.\n///\n/// TODO: Replace with std::optional once Sophus moves to c++17.\n///\ntemplate <class T>\nclass optional {\n public:\n  optional() : is_valid_(false) {}\n\n  optional(nullopt_t) : is_valid_(false) {}\n\n  optional(T const& type) : type_(type), is_valid_(true) {}\n\n  explicit operator bool() const { return is_valid_; }\n\n  T const* operator->() const {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return &type_;\n  }\n\n  T* 
operator->() {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return &type_;\n  }\n\n  T const& operator*() const {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return type_;\n  }\n\n  T& operator*() {\n    SOPHUS_ENSURE(is_valid_, \"must be valid\");\n    return type_;\n  }\n\n private:\n  T type_;\n  bool is_valid_;\n};\n\ntemplate <bool B, class T = void>\nusing enable_if_t = typename std::enable_if<B, T>::type;\n\ntemplate <class G>\nstruct IsUniformRandomBitGenerator {\n  static const bool value = std::is_unsigned<typename G::result_type>::value &&\n                            std::is_unsigned<decltype(G::min())>::value &&\n                            std::is_unsigned<decltype(G::max())>::value;\n};\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/example_ensure_handler.cpp",
    "content": "#include \"common.hpp\"\n\n#include <cstdio>\n#include <cstdlib>\n\nnamespace Sophus {\nvoid ensureFailed(char const* function, char const* file, int line,\n                  char const* description) {\n  std::printf(\"Sophus ensure failed in function '%s', file '%s', line %d.\\n\",\n              function, file, line);\n  std::printf(\"Description: %s\\n\", description);\n  std::abort();\n}\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/geometry.hpp",
    "content": "/// @file\n/// Transformations between poses and hyperplanes.\n\n#pragma once\n\n#include \"se2.hpp\"\n#include \"se3.hpp\"\n#include \"so2.hpp\"\n#include \"so3.hpp\"\n#include \"types.hpp\"\n\nnamespace Sophus {\n\n/// Takes in a rotation ``R_foo_plane`` and returns the corresponding line\n/// normal along the y-axis (in reference frame ``foo``).\n///\ntemplate <class T>\nVector2<T> normalFromSO2(SO2<T> const& R_foo_line) {\n  return R_foo_line.matrix().col(1);\n}\n\n/// Takes in line normal in reference frame foo and constructs a corresponding\n/// rotation matrix ``R_foo_line``.\n///\n/// Precondition: ``normal_foo`` must not be close to zero.\n///\ntemplate <class T>\nSO2<T> SO2FromNormal(Vector2<T> normal_foo) {\n  SOPHUS_ENSURE(normal_foo.squaredNorm() > Constants<T>::epsilon(), \"{}\",\n                SOPHUS_FMT_ARG(normal_foo.transpose()));\n  normal_foo.normalize();\n  return SO2<T>(normal_foo.y(), -normal_foo.x());\n}\n\n/// Takes in a rotation ``R_foo_plane`` and returns the corresponding plane\n/// normal along the z-axis\n/// (in reference frame ``foo``).\n///\ntemplate <class T>\nVector3<T> normalFromSO3(SO3<T> const& R_foo_plane) {\n  return R_foo_plane.matrix().col(2);\n}\n\n/// Takes in plane normal in reference frame foo and constructs a corresponding\n/// rotation matrix ``R_foo_plane``.\n///\n/// Note: The ``plane`` frame is defined as such that the normal points along\n///       the positive z-axis. One can specify hints for the x-axis and y-axis\n///       of the ``plane`` frame.\n///\n/// Preconditions:\n/// - ``normal_foo``, ``xDirHint_foo``, ``yDirHint_foo`` must not be close to\n///   zero.\n/// - ``xDirHint_foo`` and ``yDirHint_foo`` must be approx. 
perpendicular.\n///\ntemplate <class T>\nMatrix3<T> rotationFromNormal(Vector3<T> const& normal_foo,\n                              Vector3<T> xDirHint_foo = Vector3<T>(T(1), T(0),\n                                                                   T(0)),\n                              Vector3<T> yDirHint_foo = Vector3<T>(T(0), T(1),\n                                                                   T(0))) {\n  SOPHUS_ENSURE(xDirHint_foo.dot(yDirHint_foo) < Constants<T>::epsilon(),\n                \"xDirHint ({}) and yDirHint ({}) must be perpendicular.\",\n                SOPHUS_FMT_ARG(xDirHint_foo.transpose()),\n                SOPHUS_FMT_ARG(yDirHint_foo.transpose()));\n  using std::abs;\n  using std::sqrt;\n  T const xDirHint_foo_sqr_length = xDirHint_foo.squaredNorm();\n  T const yDirHint_foo_sqr_length = yDirHint_foo.squaredNorm();\n  T const normal_foo_sqr_length = normal_foo.squaredNorm();\n  SOPHUS_ENSURE(xDirHint_foo_sqr_length > Constants<T>::epsilon(), \"{}\",\n                SOPHUS_FMT_ARG(xDirHint_foo.transpose()));\n  SOPHUS_ENSURE(yDirHint_foo_sqr_length > Constants<T>::epsilon(), \"{}\",\n                SOPHUS_FMT_ARG(yDirHint_foo.transpose()));\n  SOPHUS_ENSURE(normal_foo_sqr_length > Constants<T>::epsilon(), \"{}\",\n                SOPHUS_FMT_ARG(normal_foo.transpose()));\n\n  Matrix3<T> basis_foo;\n  basis_foo.col(2) = normal_foo;\n\n  if (abs(xDirHint_foo_sqr_length - T(1)) > Constants<T>::epsilon()) {\n    xDirHint_foo.normalize();\n  }\n  if (abs(yDirHint_foo_sqr_length - T(1)) > Constants<T>::epsilon()) {\n    yDirHint_foo.normalize();\n  }\n  if (abs(normal_foo_sqr_length - T(1)) > Constants<T>::epsilon()) {\n    basis_foo.col(2).normalize();\n  }\n\n  T abs_x_dot_z = abs(basis_foo.col(2).dot(xDirHint_foo));\n  T abs_y_dot_z = abs(basis_foo.col(2).dot(yDirHint_foo));\n  if (abs_x_dot_z < abs_y_dot_z) {\n    // basis_foo.z and xDirHint are far from parallel.\n    basis_foo.col(1) = basis_foo.col(2).cross(xDirHint_foo).normalized();\n   
 basis_foo.col(0) = basis_foo.col(1).cross(basis_foo.col(2));\n  } else {\n    // basis_foo.z and yDirHint are far from parallel.\n    basis_foo.col(0) = yDirHint_foo.cross(basis_foo.col(2)).normalized();\n    basis_foo.col(1) = basis_foo.col(2).cross(basis_foo.col(0));\n  }\n  T det = basis_foo.determinant();\n  // sanity check\n  SOPHUS_ENSURE(abs(det - T(1)) < Constants<T>::epsilon(),\n                \"Determinant of basis is not 1, but {}. Basis is \\n{}\\n\",\n                SOPHUS_FMT_ARG(det), SOPHUS_FMT_ARG(basis_foo));\n  return basis_foo;\n}\n\n/// Takes in plane normal in reference frame foo and constructs a corresponding\n/// rotation matrix ``R_foo_plane``.\n///\n/// See ``rotationFromNormal`` for details.\n///\ntemplate <class T>\nSO3<T> SO3FromNormal(Vector3<T> const& normal_foo) {\n  return SO3<T>(rotationFromNormal(normal_foo));\n}\n\n/// Returns a line (wrt. to frame ``foo``), given a pose of the ``line`` in\n/// reference frame ``foo``.\n///\n/// Note: The plane is defined by X-axis of the ``line`` frame.\n///\ntemplate <class T>\nLine2<T> lineFromSE2(SE2<T> const& T_foo_line) {\n  return Line2<T>(normalFromSO2(T_foo_line.so2()), T_foo_line.translation());\n}\n\n/// Returns the pose ``T_foo_line``, given a line in reference frame ``foo``.\n///\n/// Note: The line is defined by X-axis of the frame ``line``.\n///\ntemplate <class T>\nSE2<T> SE2FromLine(Line2<T> const& line_foo) {\n  T const d = line_foo.offset();\n  Vector2<T> const n = line_foo.normal();\n  SO2<T> const R_foo_plane = SO2FromNormal(n);\n  return SE2<T>(R_foo_plane, -d * n);\n}\n\n/// Returns a plane (wrt. 
to frame ``foo``), given a pose of the ``plane`` in\n/// reference frame ``foo``.\n///\n/// Note: The plane is defined by XY-plane of the frame ``plane``.\n///\ntemplate <class T>\nPlane3<T> planeFromSE3(SE3<T> const& T_foo_plane) {\n  return Plane3<T>(normalFromSO3(T_foo_plane.so3()), T_foo_plane.translation());\n}\n\n/// Returns the pose ``T_foo_plane``, given a plane in reference frame ``foo``.\n///\n/// Note: The plane is defined by XY-plane of the frame ``plane``.\n///\ntemplate <class T>\nSE3<T> SE3FromPlane(Plane3<T> const& plane_foo) {\n  T const d = plane_foo.offset();\n  Vector3<T> const n = plane_foo.normal();\n  SO3<T> const R_foo_plane = SO3FromNormal(n);\n  return SE3<T>(R_foo_plane, -d * n);\n}\n\n/// Takes in a hyperplane and returns unique representation by ensuring that the\n/// ``offset`` is not negative.\n///\ntemplate <class T, int N>\nEigen::Hyperplane<T, N> makeHyperplaneUnique(\n    Eigen::Hyperplane<T, N> const& plane) {\n  if (plane.offset() >= 0) {\n    return plane;\n  }\n\n  return Eigen::Hyperplane<T, N>(-plane.normal(), -plane.offset());\n}\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/interpolate.hpp",
    "content": "/// @file\n/// Interpolation for Lie groups.\n\n#pragma once\n\n#include <Eigen/Eigenvalues>\n\n#include \"interpolate_details.hpp\"\n\nnamespace Sophus {\n\n/// This function interpolates between two Lie group elements ``foo_T_bar``\n/// and ``foo_T_baz`` with an interpolation factor of ``alpha`` in [0, 1].\n///\n/// It returns a pose ``foo_T_quiz`` with ``quiz`` being a frame between ``bar``\n/// and ``baz``. If ``alpha=0`` it returns ``foo_T_bar``. If it is 1, it returns\n/// ``foo_T_baz``.\n///\n/// (Since interpolation on Lie groups is inverse-invariant, we can equivalently\n/// think of the input arguments as being ``bar_T_foo``, ``baz_T_foo`` and the\n/// return value being ``quiz_T_foo``.)\n///\n/// Precondition: ``p`` must be in [0, 1].\n///\ntemplate <class G, class Scalar2 = typename G::Scalar>\nenable_if_t<interp_details::Traits<G>::supported, G> interpolate(\n    G const& foo_T_bar, G const& foo_T_baz, Scalar2 p = Scalar2(0.5f)) {\n  using Scalar = typename G::Scalar;\n  Scalar inter_p(p);\n  SOPHUS_ENSURE(inter_p >= Scalar(0) && inter_p <= Scalar(1),\n                \"p ({}) must in [0, 1].\", SOPHUS_FMT_ARG(inter_p));\n  return foo_T_bar * G::exp(inter_p * (foo_T_bar.inverse() * foo_T_baz).log());\n}\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/interpolate_details.hpp",
    "content": "#pragma once\n\n#include \"cartesian.hpp\"\n#include \"rxso2.hpp\"\n#include \"rxso3.hpp\"\n#include \"se2.hpp\"\n#include \"se3.hpp\"\n#include \"sim2.hpp\"\n#include \"sim3.hpp\"\n#include \"so2.hpp\"\n#include \"so3.hpp\"\n\nnamespace Sophus {\nnamespace interp_details {\n\ntemplate <class Group>\nstruct Traits;\n\ntemplate <class Scalar, int Dim>\nstruct Traits<Cartesian<Scalar, Dim>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(Cartesian<Scalar, Dim> const&) {\n    return false;\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SO2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SO2<Scalar> const& foo_T_bar) {\n    using std::abs;\n    Scalar angle = abs(foo_T_bar.log());\n    Scalar const kPi = Constants<Scalar>::pi();\n    return abs(angle - kPi) / (angle + kPi) < Constants<Scalar>::epsilon();\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<RxSO2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(RxSO2<Scalar> const& foo_T_bar) {\n    return Traits<SO2<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so2());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SO3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SO3<Scalar> const& foo_T_bar) {\n    using std::abs;\n    Scalar angle = abs(foo_T_bar.logAndTheta().theta);\n    Scalar const kPi = Constants<Scalar>::pi();\n    return abs(angle - kPi) / (angle + kPi) < Constants<Scalar>::epsilon();\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<RxSO3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(RxSO3<Scalar> const& foo_T_bar) {\n    return Traits<SO3<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so3());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SE2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SE2<Scalar> 
const& foo_T_bar) {\n    return Traits<SO2<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so2());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<SE3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(SE3<Scalar> const& foo_T_bar) {\n    return Traits<SO3<Scalar>>::hasShortestPathAmbiguity(foo_T_bar.so3());\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<Sim2<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(Sim2<Scalar> const& foo_T_bar) {\n    return Traits<SO2<Scalar>>::hasShortestPathAmbiguity(\n        foo_T_bar.rxso2().so2());\n    ;\n  }\n};\n\ntemplate <class Scalar>\nstruct Traits<Sim3<Scalar>> {\n  static bool constexpr supported = true;\n\n  static bool hasShortestPathAmbiguity(Sim3<Scalar> const& foo_T_bar) {\n    return Traits<SO3<Scalar>>::hasShortestPathAmbiguity(\n        foo_T_bar.rxso3().so3());\n    ;\n  }\n};\n\n}  // namespace interp_details\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/num_diff.hpp",
    "content": "/// @file\n/// Numerical differentiation using finite differences\n\n#pragma once\n\n#include <functional>\n#include <type_traits>\n#include <utility>\n\n#include \"types.hpp\"\n\nnamespace Sophus {\n\nnamespace details {\ntemplate <class Scalar>\nclass Curve {\n public:\n  template <class Fn>\n  static auto num_diff(Fn curve, Scalar t, Scalar h) -> decltype(curve(t)) {\n    using ReturnType = decltype(curve(t));\n    static_assert(std::is_floating_point<Scalar>::value,\n                  \"Scalar must be a floating point type.\");\n    static_assert(IsFloatingPoint<ReturnType>::value,\n                  \"ReturnType must be either a floating point scalar, \"\n                  \"vector or matrix.\");\n\n    return (curve(t + h) - curve(t - h)) / (Scalar(2) * h);\n  }\n};\n\ntemplate <class Scalar, int N, int M>\nclass VectorField {\n public:\n  static Eigen::Matrix<Scalar, N, M> num_diff(\n      std::function<Sophus::Vector<Scalar, N>(Sophus::Vector<Scalar, M>)>\n          vector_field,\n      Sophus::Vector<Scalar, M> const& a, Scalar eps) {\n    static_assert(std::is_floating_point<Scalar>::value,\n                  \"Scalar must be a floating point type.\");\n    Eigen::Matrix<Scalar, N, M> J;\n    Sophus::Vector<Scalar, M> h;\n    h.setZero();\n    for (int i = 0; i < M; ++i) {\n      h[i] = eps;\n      J.col(i) =\n          (vector_field(a + h) - vector_field(a - h)) / (Scalar(2) * eps);\n      h[i] = Scalar(0);\n    }\n\n    return J;\n  }\n};\n\ntemplate <class Scalar, int N>\nclass VectorField<Scalar, N, 1> {\n public:\n  static Eigen::Matrix<Scalar, N, 1> num_diff(\n      std::function<Sophus::Vector<Scalar, N>(Scalar)> vector_field,\n      Scalar const& a, Scalar eps) {\n    return details::Curve<Scalar>::num_diff(std::move(vector_field), a, eps);\n  }\n};\n}  // namespace details\n\n/// Calculates the derivative of a curve at a point ``t``.\n///\n/// Here, a curve is a function from a Scalar to a Euclidean space. 
Thus, it\n/// returns either a Scalar, a vector or a matrix.\n///\ntemplate <class Scalar, class Fn>\nauto curveNumDiff(Fn curve, Scalar t,\n                  Scalar h = Constants<Scalar>::epsilonSqrt())\n    -> decltype(details::Curve<Scalar>::num_diff(std::move(curve), t, h)) {\n  return details::Curve<Scalar>::num_diff(std::move(curve), t, h);\n}\n\n/// Calculates the derivative of a vector field at a point ``a``.\n///\n/// Here, a vector field is a function from a vector space to another vector\n/// space.\n///\ntemplate <class Scalar, int N, int M, class ScalarOrVector, class Fn>\nEigen::Matrix<Scalar, N, M> vectorFieldNumDiff(\n    Fn vector_field, ScalarOrVector const& a,\n    Scalar eps = Constants<Scalar>::epsilonSqrt()) {\n  return details::VectorField<Scalar, N, M>::num_diff(std::move(vector_field),\n                                                      a, eps);\n}\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/rotation_matrix.hpp",
    "content": "/// @file\n/// Rotation matrix helper functions.\n\n#pragma once\n\n#include <Eigen/Dense>\n#include <Eigen/SVD>\n\n#include \"types.hpp\"\n\nnamespace Sophus {\n\n/// Takes in arbitrary square matrix and returns true if it is\n/// orthogonal.\ntemplate <class D>\nSOPHUS_FUNC bool isOrthogonal(Eigen::MatrixBase<D> const& R) {\n  using Scalar = typename D::Scalar;\n  static int const N = D::RowsAtCompileTime;\n  static int const M = D::ColsAtCompileTime;\n\n  static_assert(N == M, \"must be a square matrix\");\n  static_assert(N >= 2, \"must have compile time dimension >= 2\");\n\n  return (R * R.transpose() - Matrix<Scalar, N, N>::Identity()).norm() <\n         Constants<Scalar>::epsilon();\n}\n\n/// Takes in arbitrary square matrix and returns true if it is\n/// \"scaled-orthogonal\" with positive determinant.\n///\ntemplate <class D>\nSOPHUS_FUNC bool isScaledOrthogonalAndPositive(Eigen::MatrixBase<D> const& sR) {\n  using Scalar = typename D::Scalar;\n  static int const N = D::RowsAtCompileTime;\n  static int const M = D::ColsAtCompileTime;\n  using std::pow;\n  using std::sqrt;\n\n  Scalar det = sR.determinant();\n\n  if (det <= Scalar(0)) {\n    return false;\n  }\n\n  Scalar scale_sqr = pow(det, Scalar(2. 
/ N));\n\n  static_assert(N == M, \"must be a square matrix\");\n  static_assert(N >= 2, \"must have compile time dimension >= 2\");\n\n  return (sR * sR.transpose() - scale_sqr * Matrix<Scalar, N, N>::Identity())\n             .template lpNorm<Eigen::Infinity>() <\n         sqrt(Constants<Scalar>::epsilon());\n}\n\n/// Takes in arbitrary square matrix (2x2 or larger) and returns closest\n/// orthogonal matrix with positive determinant.\ntemplate <class D>\nSOPHUS_FUNC enable_if_t<\n    std::is_floating_point<typename D::Scalar>::value,\n    Matrix<typename D::Scalar, D::RowsAtCompileTime, D::RowsAtCompileTime>>\nmakeRotationMatrix(Eigen::MatrixBase<D> const& R) {\n  using Scalar = typename D::Scalar;\n  static int const N = D::RowsAtCompileTime;\n  static int const M = D::ColsAtCompileTime;\n\n  static_assert(N == M, \"must be a square matrix\");\n  static_assert(N >= 2, \"must have compile time dimension >= 2\");\n\n  Eigen::JacobiSVD<Matrix<Scalar, N, N>> svd(\n      R, Eigen::ComputeFullU | Eigen::ComputeFullV);\n\n  // Determine determinant of orthogonal matrix U*V'.\n  Scalar d = (svd.matrixU() * svd.matrixV().transpose()).determinant();\n  // Starting from the identity matrix D, set the last entry to d (+1 or\n  // -1),  so that det(U*D*V') = 1.\n  Matrix<Scalar, N, N> Diag = Matrix<Scalar, N, N>::Identity();\n  Diag(N - 1, N - 1) = d;\n  return svd.matrixU() * Diag * svd.matrixV().transpose();\n}\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/rxso2.hpp",
    "content": "/// @file\n/// Direct product R X SO(2) - rotation and scaling in 2d.\n\n#pragma once\n\n#include \"so2.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass RxSO2;\nusing RxSO2d = RxSO2<double>;\nusing RxSO2f = RxSO2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::RxSO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Sophus::Vector2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO2<Scalar_>, Options_>>\n    : traits<Sophus::RxSO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO2<Scalar_> const, Options_>>\n    : traits<Sophus::RxSO2<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// RxSO2 base type - implements RxSO2 class but is storage agnostic\n///\n/// This class implements the group ``R+ x SO(2)``, the direct product of the\n/// group of positive scalar 2x2 matrices (= isomorph to the positive\n/// real numbers) and the two-dimensional special orthogonal group SO(2).\n/// Geometrically, it is the group of rotation and scaling in two dimensions.\n/// As a matrix groups, R+ x SO(2) consists of matrices of the form ``s * R``\n/// where ``R`` is an orthogonal matrix with ``det(R) = 1`` and ``s > 0``\n/// being a positive real number. In particular, it has the following form:\n///\n///     | s * cos(theta)  s * -sin(theta) |\n///     | s * sin(theta)  s *  cos(theta) |\n///\n/// where ``theta`` being the rotation angle. 
Internally, it is represented by\n/// the first column of the rotation matrix, or in other words by a non-zero\n/// complex number.\n///\n/// R+ x SO(2) is not compact, but a commutative group. First it is not compact\n/// since the scale factor is not bound. Second it is commutative since\n/// ``sR(alpha, s1) * sR(beta, s2) = sR(beta, s2) * sR(alpha, s1)``,  simply\n/// because ``alpha + beta = beta + alpha`` and ``s1 * s2 = s2 * s1`` with\n/// ``alpha`` and ``beta`` being rotation angles and ``s1``, ``s2`` being scale\n/// factors.\n///\n/// This class has the explicit class invariant that the scale ``s`` is not\n/// too close to either zero or infinity. Strictly speaking, it must hold that:\n///\n///   ``complex().norm() >= Constants::epsilon()`` and\n///   ``1. / complex().norm() >= Constants::epsilon()``.\n///\n/// In order to obey this condition, group multiplication is implemented with\n/// saturation such that a product always has a scale which is equal or greater\n/// this threshold.\ntemplate <class Derived>\nclass RxSO2Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using ComplexType = typename Eigen::internal::traits<Derived>::ComplexType;\n  using ComplexTemporaryType = Sophus::Vector2<Scalar, Options>;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (one for rotation and one for scaling).\n  static int constexpr DoF = 2;\n  /// Number of internal parameters used (complex number is a tuple).\n  static int constexpr num_parameters = 2;\n  /// Group transformations are 2x2 matrices.\n  static int constexpr N = 2;\n  /// Points are 2-dimensional\n  static int constexpr Dim = 2;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Hyperplane = Hyperplane2<Scalar>;\n  using 
Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with RxSO2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using RxSO2Product = RxSO2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  /// For RxSO(2), it simply returns the identity matrix.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const { return Adjoint::Identity(); }\n\n  /// Returns rotation angle.\n  ///\n  SOPHUS_FUNC Scalar angle() const { return SO2<Scalar>(complex()).log(); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC RxSO2<NewScalarType> cast() const {\n    typename RxSO2<NewScalarType>::ComplexType c =\n        complex().template cast<NewScalarType>();\n    return RxSO2<NewScalarType>(c);\n  }\n\n  /// This provides unsafe read/write access to internal data. RxSO(2) is\n  /// represented by a complex number (two parameters). 
When using direct\n  /// write access, the user needs to take care of that the complex number is\n  /// not set close to zero.\n  ///\n  /// Note: The first parameter represents the real part, while the\n  /// second parameter represent the imaginary part.\n  ///\n  SOPHUS_FUNC Scalar* data() { return complex_nonconst().data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const { return complex().data(); }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC RxSO2<Scalar> inverse() const {\n    Scalar squared_scale = complex().squaredNorm();\n    Vector2<Scalar> xy = complex() / squared_scale;\n    return RxSO2<Scalar>(xy.x(), -xy.y());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (scaled rotation matrices) to elements of the tangent\n  /// space (rotation-vector plus logarithm of scale factor).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of RxSO2.\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    using std::log;\n    Tangent theta_sigma;\n    theta_sigma[1] = log(scale());\n    theta_sigma[0] = SO2<Scalar>(complex()).log();\n    return theta_sigma;\n  }\n\n  /// Returns 2x2 matrix representation of the instance.\n  ///\n  /// For RxSO2, the matrix representation is an scaled orthogonal matrix ``sR``\n  /// with ``det(R)=s^2``, thus a scaled rotation matrix ``R``  with scale\n  /// ``s``.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation sR;\n    // clang-format off\n    sR << complex()[0], -complex()[1],\n          complex()[1],  complex()[0];\n    // clang-format on\n    return sR;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO2Base<Derived>& operator=(\n      RxSO2Base<OtherDerived> const& other) {\n    
complex_nonconst() = other.complex();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation and scale\n  /// multiplication.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC RxSO2Product<OtherDerived> operator*(\n      RxSO2Base<OtherDerived> const& other) const {\n    using ResultT = ReturnScalar<OtherDerived>;\n\n    Scalar lhs_real = complex().x();\n    Scalar lhs_imag = complex().y();\n    typename OtherDerived::Scalar const& rhs_real = other.complex().x();\n    typename OtherDerived::Scalar const& rhs_imag = other.complex().y();\n    /// complex multiplication\n    typename RxSO2Product<OtherDerived>::ComplexType result_complex(\n        lhs_real * rhs_real - lhs_imag * rhs_imag,\n        lhs_real * rhs_imag + lhs_imag * rhs_real);\n\n    const ResultT squared_scale = result_complex.squaredNorm();\n\n    if (squared_scale <\n        Constants<ResultT>::epsilon() * Constants<ResultT>::epsilon()) {\n      /// Saturation to ensure class invariant.\n      result_complex.normalize();\n      result_complex *= Constants<ResultT>::epsilonPlus();\n    }\n    if (squared_scale > Scalar(1.) 
/ (Constants<ResultT>::epsilon() *\n                                      Constants<ResultT>::epsilon())) {\n      /// Saturation to ensure class invariant.\n      result_complex.normalize();\n      result_complex /= Constants<ResultT>::epsilonPlus();\n    }\n    return RxSO2Product<OtherDerived>(result_complex);\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates a 2 dimensional point ``p`` by the SO2 element\n  /// ``bar_R_foo`` (= rotation matrix) and scales it by the scale factor ``s``:\n  ///\n  ///   ``p_bar = s * (bar_R_foo * p_foo)``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return matrix() * p;\n  }\n\n  /// Group action on homogeneous 2-points. See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rsp = *this * p.template head<2>();\n    return HomogeneousPointProduct<HPointDerived>(rsp(0), rsp(1), p(2));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates a parameterized line ``l(t) = o + t * d`` by the SO2\n  /// element and scales it by the scale factor\n  ///\n  /// Origin ``o`` is rotated and scaled\n  /// Direction ``d`` is rotated (preserving it's norm)\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), (*this) * l.direction() / scale());\n  }\n\n  /// Group action on hyper-planes.\n  ///\n  /// This function rotates a hyper-plane ``n.x + d = 0`` by the SO2\n  /// element and scales offset by the scale factor\n  ///\n  /// Normal vector ``n`` is rotated\n  /// 
Offset ``d`` is scaled\n  ///\n  /// Note that in 2d-case hyper-planes are just another parametrization of\n  /// lines\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    const auto this_scale = scale();\n    return Hyperplane((*this) * p.normal() / this_scale,\n                      this_scale * p.offset());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC RxSO2Base<Derived>& operator*=(\n      RxSO2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns derivative of  this * RxSO2::exp(x) wrt. x at x=0\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    J << -complex().y(), complex().x(), complex().x(), complex().y();\n    return J;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    Matrix<Scalar, DoF, num_parameters> J;\n    const Scalar norm_sq_inv = Scalar(1.) 
/ complex().squaredNorm();\n    J << -complex().y(), complex().x(), complex().x(), complex().y();\n    return J * norm_sq_inv;\n  }\n\n  /// Returns internal parameters of RxSO(2).\n  ///\n  /// It returns (c[0], c[1]), with c being the  complex number.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return complex();\n  }\n\n  /// Sets non-zero complex\n  ///\n  /// Precondition: ``z`` must not be close to either zero or infinity.\n  SOPHUS_FUNC void setComplex(Vector2<Scalar> const& z) {\n    SOPHUS_ENSURE(z.squaredNorm() > Constants<Scalar>::epsilon() *\n                                        Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    SOPHUS_ENSURE(z.squaredNorm() < Scalar(1.) / (Constants<Scalar>::epsilon() *\n                                                  Constants<Scalar>::epsilon()),\n                  \"Inverse scale factor must be greater-equal epsilon.\");\n    static_cast<Derived*>(this)->complex_nonconst() = z;\n  }\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC ComplexType const& complex() const {\n    return static_cast<Derived const*>(this)->complex();\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Transformation rotationMatrix() const {\n    ComplexTemporaryType norm_quad = complex();\n    norm_quad.normalize();\n    return SO2<Scalar>(norm_quad).matrix();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC Scalar scale() const {\n    using std::hypot;\n    return hypot(complex().x(), complex().y());\n  }\n\n  /// Setter of rotation angle, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setAngle(Scalar const& theta) { setSO2(SO2<Scalar>(theta)); }\n\n  /// Setter of complex using rotation matrix ``R``, leaves scale as is.\n  ///\n  /// Precondition: ``R`` must be orthogonal with determinant of one.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Transformation const& R) {\n    setSO2(SO2<Scalar>(R));\n  }\n\n  /// Sets scale and leaves 
rotation as is.\n  ///\n  SOPHUS_FUNC void setScale(Scalar const& scale) {\n    using std::sqrt;\n    complex_nonconst().normalize();\n    complex_nonconst() *= scale;\n  }\n\n  /// Setter of complex number using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 2x2 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Transformation const& sR) {\n    SOPHUS_ENSURE(isScaledOrthogonalAndPositive(sR),\n                  \"sR must be scaled orthogonal:\\n {}\", SOPHUS_FMT_ARG(sR));\n    complex_nonconst() = sR.col(0);\n  }\n\n  /// Setter of SO(2) rotations, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setSO2(SO2<Scalar> const& so2) {\n    using std::sqrt;\n    Scalar saved_scale = scale();\n    complex_nonconst() = so2.unit_complex();\n    complex_nonconst() *= saved_scale;\n  }\n\n  SOPHUS_FUNC SO2<Scalar> so2() const { return SO2<Scalar>(complex()); }\n\n private:\n  /// Mutator of complex is private to ensure class invariant.\n  ///\n  SOPHUS_FUNC ComplexType& complex_nonconst() {\n    return static_cast<Derived*>(this)->complex_nonconst();\n  }\n};\n\n/// RxSO2 using storage; derived from RxSO2Base.\ntemplate <class Scalar_, int Options>\nclass RxSO2 : public RxSO2Base<RxSO2<Scalar_, Options>> {\n public:\n  using Base = RxSO2Base<RxSO2<Scalar_, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using ComplexMember = Eigen::Matrix<Scalar, 2, 1, Options>;\n\n  /// ``Base`` is friend so complex_nonconst can be accessed from ``Base``.\n  friend class RxSO2Base<RxSO2<Scalar_, Options>>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. 
The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC RxSO2& operator=(RxSO2 const& other) = default;\n\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes complex number to identity rotation and\n  /// scale to 1.\n  ///\n  SOPHUS_FUNC RxSO2() : complex_(Scalar(1), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC RxSO2(RxSO2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO2(RxSO2Base<OtherDerived> const& other)\n      : complex_(other.complex()) {}\n\n  /// Constructor from scaled rotation matrix\n  ///\n  /// Precondition: rotation matrix need to be scaled orthogonal with\n  /// determinant of ``s^2``.\n  ///\n  SOPHUS_FUNC explicit RxSO2(Transformation const& sR) {\n    this->setScaledRotationMatrix(sR);\n  }\n\n  /// Constructor from scale factor and rotation matrix ``R``.\n  ///\n  /// Precondition: Rotation matrix ``R`` must to be orthogonal with determinant\n  ///               of 1 and ``scale`` must not to be close to either zero or\n  ///               infinity.\n  ///\n  SOPHUS_FUNC RxSO2(Scalar const& scale, Transformation const& R)\n      : RxSO2((scale * SO2<Scalar>(R).unit_complex()).eval()) {}\n\n  /// Constructor from scale factor and SO2\n  ///\n  /// Precondition: ``scale`` must not be close to either zero or infinity.\n  ///\n  SOPHUS_FUNC RxSO2(Scalar const& scale, SO2<Scalar> const& so2)\n      : RxSO2((scale * so2.unit_complex()).eval()) {}\n\n  /// Constructor from complex number.\n  ///\n  /// Precondition: complex number must not be close to either zero or infinity\n  ///\n  SOPHUS_FUNC explicit RxSO2(Vector2<Scalar> const& z) : complex_(z) {\n    SOPHUS_ENSURE(complex_.squaredNorm() >= 
Constants<Scalar>::epsilon() *\n                                                Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon: {} vs {}\",\n                  SOPHUS_FMT_ARG(complex_.squaredNorm()),\n                  SOPHUS_FMT_ARG(Constants<Scalar>::epsilon() *\n                                 Constants<Scalar>::epsilon()));\n    SOPHUS_ENSURE(\n        complex_.squaredNorm() <= Scalar(1.) / (Constants<Scalar>::epsilon() *\n                                                Constants<Scalar>::epsilon()),\n        \"Inverse scale factor must be greater-equal epsilon: {} vs {}\",\n        SOPHUS_FMT_ARG(Scalar(1.) / complex_.squaredNorm()),\n        SOPHUS_FMT_ARG(Constants<Scalar>::epsilon() *\n                       Constants<Scalar>::epsilon()));\n  }\n\n  /// Constructor from complex number.\n  ///\n  /// Precondition: complex number must not be close to either zero or infinity.\n  ///\n  SOPHUS_FUNC explicit RxSO2(Scalar const& real, Scalar const& imag)\n      : RxSO2(Vector2<Scalar>(real, imag)) {}\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC ComplexMember const& complex() const { return complex_; }\n\n  /// Returns derivative of exp(x) wrt. ``x``\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& a) {\n    using std::cos;\n    using std::exp;\n    using std::sin;\n    Scalar const theta = a[0];\n    Scalar const sigma = a[1];\n\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    J << -sin(theta), cos(theta), cos(theta), sin(theta);\n    return J * exp(sigma);\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    static Scalar const i(1.);\n    static Scalar const o(0.);\n    J << o, i, i, o;\n    return J;\n  }\n\n  /// Returns derivative of exp(x) * p wrt. 
x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 2, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    Sophus::Matrix<Scalar, 2, DoF> j;\n    j << Sophus::SO2<Scalar>::Dx_exp_x_times_point_at_0(point), point;\n    return j;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation angle\n  /// plus logarithm of scale) and returns the corresponding element of the\n  /// group RxSO2.\n  ///\n  /// To be more specific, this function computes ``expmat(hat(theta))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of RSO2.\n  ///\n  SOPHUS_FUNC static RxSO2<Scalar> exp(Tangent const& a) {\n    using std::exp;\n    using std::max;\n    using std::min;\n\n    Scalar const theta = a[0];\n    Scalar const sigma = a[1];\n    Scalar s = exp(sigma);\n    // Ensuring proper scale\n    s = max(s, Constants<Scalar>::epsilonPlus());\n    s = min(s, Scalar(1.) 
/ Constants<Scalar>::epsilonPlus());\n    Vector2<Scalar> z = SO2<Scalar>::exp(theta).unit_complex();\n    z *= s;\n    return RxSO2<Scalar>(z);\n  }\n\n  /// Returns the ith infinitesimal generators of ``R+ x SO(2)``.\n  ///\n  /// The infinitesimal generators of RxSO2 are:\n  ///\n  /// ```\n  ///         |  0 -1 |\n  ///   G_0 = |  1  0 |\n  ///\n  ///         |  1  0 |\n  ///   G_1 = |  0  1 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be 0, or 1.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 1, \"i should be 0 or 1.\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 2-vector representation ``a`` (= rotation angle plus\n  /// logarithm of scale) and  returns the corresponding matrix representation\n  /// of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of RxSO2 is defined as\n  ///\n  ///   ``hat(.): R^2 -> R^{2x2},  hat(a) = sum_i a_i * G_i``  (for i=0,1,2)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of RxSO2.\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation A;\n    // clang-format off\n    A << a(1), -a(0),\n         a(0),  a(1);\n    // clang-format on\n    return A;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of RxSO(2). 
To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_rxso2 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of RxSO2.\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) {\n    Vector2<Scalar> res;\n    res.setZero();\n    return res;\n  }\n\n  /// Draw uniform sample from RxSO(2) manifold.\n  ///\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2)].\n  ///\n  template <class UniformRandomBitGenerator>\n  static RxSO2 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    using std::exp2;\n    return RxSO2(exp2(uniform(generator)),\n                 SO2<Scalar>::sampleUniform(generator));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 2x2-matrix representation ``Omega`` and maps it to the\n  /// corresponding vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  d -x |\n  ///                |  x  d |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    using std::abs;\n    return Tangent(Omega(1, 0), Omega(0, 0));\n  }\n\n protected:\n  SOPHUS_FUNC ComplexMember& complex_nonconst() { return complex_; }\n\n  ComplexMember complex_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``RxSO2``; derived from  RxSO2Base.\n///\n/// Allows us to wrap RxSO2 objects around POD array (e.g. 
external z style\n/// complex).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO2<Scalar_>, Options>\n    : public Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_>, Options>> {\n  using Base = Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_>, Options>>;\n\n public:\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so complex_nonconst can be accessed from ``Base``.\n  friend class Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_>, Options>>;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs) : complex_(coeffs) {}\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar>, Options> const& complex() const {\n    return complex_;\n  }\n\n protected:\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& complex_nonconst() {\n    return complex_;\n  }\n\n  Map<Sophus::Vector2<Scalar>, Options> complex_;\n};\n\n/// Specialization of Eigen::Map for ``RxSO2 const``; derived from  RxSO2Base.\n///\n/// Allows us to wrap RxSO2 objects around POD array (e.g. 
external z style\n/// complex).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO2<Scalar_> const, Options>\n    : public Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::RxSO2Base<Map<Sophus::RxSO2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  explicit Map(Scalar const* coeffs) : complex_(coeffs) {}\n\n  /// Accessor of complex.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar> const, Options> const& complex() const {\n    return complex_;\n  }\n\n protected:\n  Map<Sophus::Vector2<Scalar> const, Options> const complex_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/rxso3.hpp",
    "content": "/// @file\n/// Direct product R X SO(3) - rotation and scaling in 3d.\n\n#pragma once\n\n#include \"so3.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass RxSO3;\nusing RxSO3d = RxSO3<double>;\nusing RxSO3f = RxSO3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::RxSO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Eigen::Quaternion<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO3<Scalar_>, Options_>>\n    : traits<Sophus::RxSO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::RxSO3<Scalar_> const, Options_>>\n    : traits<Sophus::RxSO3<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// RxSO3 base type - implements RxSO3 class but is storage agnostic\n///\n/// This class implements the group ``R+ x SO(3)``, the direct product of the\n/// group of positive scalar 3x3 matrices (= isomorph to the positive\n/// real numbers) and the three-dimensional special orthogonal group SO(3).\n/// Geometrically, it is the group of rotation and scaling in three dimensions.\n/// As a matrix groups, RxSO3 consists of matrices of the form ``s * R``\n/// where ``R`` is an orthogonal matrix with ``det(R)=1`` and ``s > 0``\n/// being a positive real number.\n///\n/// Internally, RxSO3 is represented by the group of non-zero quaternions.\n/// In particular, the scale equals the squared(!) norm of the quaternion ``q``,\n/// ``s = |q|^2``. 
This is a most compact representation since the degrees of\n/// freedom (DoF) of RxSO3 (=4) equals the number of internal parameters (=4).\n///\n/// This class has the explicit class invariant that the scale ``s`` is not\n/// too close to either zero or infinity. Strictly speaking, it must hold that:\n///\n///   ``quaternion().squaredNorm() >= Constants::epsilon()`` and\n///   ``1. / quaternion().squaredNorm() >= Constants::epsilon()``.\n///\n/// In order to obey this condition, group multiplication is implemented with\n/// saturation such that a product always has a scale which is equal or greater\n/// this threshold.\ntemplate <class Derived>\nclass RxSO3Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using QuaternionType =\n      typename Eigen::internal::traits<Derived>::QuaternionType;\n  using QuaternionTemporaryType = Eigen::Quaternion<Scalar, Options>;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (three for rotation and one for scaling).\n  static int constexpr DoF = 4;\n  /// Number of internal parameters used (quaternion is a 4-tuple).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  /// Points are 3-dimensional\n  static int constexpr Dim = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Hyperplane = Hyperplane3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  struct TangentAndTheta {\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n    Tangent tangent;\n    Scalar theta;\n  };\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with RxSO3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using RxSO3Product = RxSO3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  /// For RxSO(3), it simply returns the rotation matrix corresponding to ``A``.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Adjoint res;\n    res.setIdentity();\n    res.template topLeftCorner<3, 3>() = rotationMatrix();\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC RxSO3<NewScalarType> cast() const {\n    return RxSO3<NewScalarType>(quaternion().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. RxSO(3) is\n  /// represented by an Eigen::Quaternion (four parameters). 
When using direct\n  /// write access, the user needs to take care of that the quaternion is not\n  /// set close to zero.\n  ///\n  /// Note: The first three Scalars represent the imaginary parts, while the\n  /// forth Scalar represent the real part.\n  ///\n  SOPHUS_FUNC Scalar* data() { return quaternion_nonconst().coeffs().data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    return quaternion().coeffs().data();\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC RxSO3<Scalar> inverse() const {\n    return RxSO3<Scalar>(quaternion().inverse());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (scaled rotation matrices) to elements of the tangent\n  /// space (rotation-vector plus logarithm of scale factor).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of RxSO3.\n  ///\n  SOPHUS_FUNC Tangent log() const { return logAndTheta().tangent; }\n\n  /// As above, but also returns ``theta = |omega|``.\n  ///\n  SOPHUS_FUNC TangentAndTheta logAndTheta() const {\n    using std::log;\n\n    Scalar scale = quaternion().squaredNorm();\n    TangentAndTheta result;\n    result.tangent[3] = log(scale);\n    auto omega_and_theta = SO3<Scalar>(quaternion()).logAndTheta();\n    result.tangent.template head<3>() = omega_and_theta.tangent;\n    result.theta = omega_and_theta.theta;\n    return result;\n  }\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// For RxSO3, the matrix representation is an scaled orthogonal matrix ``sR``\n  /// with ``det(R)=s^3``, thus a scaled rotation matrix ``R``  with scale\n  /// ``s``.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation sR;\n\n    Scalar const vx_sq = quaternion().vec().x() * quaternion().vec().x();\n    Scalar const vy_sq 
= quaternion().vec().y() * quaternion().vec().y();\n    Scalar const vz_sq = quaternion().vec().z() * quaternion().vec().z();\n    Scalar const w_sq = quaternion().w() * quaternion().w();\n    Scalar const two_vx = Scalar(2) * quaternion().vec().x();\n    Scalar const two_vy = Scalar(2) * quaternion().vec().y();\n    Scalar const two_vz = Scalar(2) * quaternion().vec().z();\n    Scalar const two_vx_vy = two_vx * quaternion().vec().y();\n    Scalar const two_vx_vz = two_vx * quaternion().vec().z();\n    Scalar const two_vx_w = two_vx * quaternion().w();\n    Scalar const two_vy_vz = two_vy * quaternion().vec().z();\n    Scalar const two_vy_w = two_vy * quaternion().w();\n    Scalar const two_vz_w = two_vz * quaternion().w();\n\n    sR(0, 0) = vx_sq - vy_sq - vz_sq + w_sq;\n    sR(1, 0) = two_vx_vy + two_vz_w;\n    sR(2, 0) = two_vx_vz - two_vy_w;\n\n    sR(0, 1) = two_vx_vy - two_vz_w;\n    sR(1, 1) = -vx_sq + vy_sq - vz_sq + w_sq;\n    sR(2, 1) = two_vx_w + two_vy_vz;\n\n    sR(0, 2) = two_vx_vz + two_vy_w;\n    sR(1, 2) = -two_vx_w + two_vy_vz;\n    sR(2, 2) = -vx_sq - vy_sq + vz_sq + w_sq;\n    return sR;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO3Base<Derived>& operator=(\n      RxSO3Base<OtherDerived> const& other) {\n    quaternion_nonconst() = other.quaternion();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation and scale\n  /// multiplication.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC RxSO3Product<OtherDerived> operator*(\n      RxSO3Base<OtherDerived> const& other) const {\n    using std::sqrt;\n    using ResultT = ReturnScalar<OtherDerived>;\n    using QuaternionProductType =\n        typename RxSO3Product<OtherDerived>::QuaternionType;\n\n    QuaternionProductType result_quaternion(\n        
Sophus::SO3<double>::QuaternionProduct<QuaternionProductType>(\n            quaternion(), other.quaternion()));\n\n    ResultT scale = result_quaternion.squaredNorm();\n    if (scale < Constants<ResultT>::epsilon()) {\n      SOPHUS_ENSURE(scale > ResultT(0), \"Scale must be greater zero.\");\n      /// Saturation to ensure class invariant.\n      result_quaternion.normalize();\n      result_quaternion.coeffs() *= sqrt(Constants<ResultT>::epsilonPlus());\n    }\n    if (scale > ResultT(1.) / Constants<ResultT>::epsilon()) {\n      result_quaternion.normalize();\n      result_quaternion.coeffs() /= sqrt(Constants<ResultT>::epsilonPlus());\n    }\n    return RxSO3Product<OtherDerived>(result_quaternion);\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates a 3 dimensional point ``p`` by the SO3 element\n  ///  ``bar_R_foo`` (= rotation matrix) and scales it by the scale factor\n  ///  ``s``:\n  ///\n  ///   ``p_bar = s * (bar_R_foo * p_foo)``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    // Follows http:///eigen.tuxfamily.org/bz/show_bug.cgi?id=459\n    Scalar scale = quaternion().squaredNorm();\n    PointProduct<PointDerived> two_vec_cross_p = quaternion().vec().cross(p);\n    two_vec_cross_p += two_vec_cross_p;\n    return scale * p + (quaternion().w() * two_vec_cross_p +\n                        quaternion().vec().cross(two_vec_cross_p));\n  }\n\n  /// Group action on homogeneous 3-points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rsp = *this * p.template head<3>();\n    return HomogeneousPointProduct<HPointDerived>(rsp(0), rsp(1), rsp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO3\n  /// element and scales it by the scale factor:\n  ///\n  /// Origin ``o`` is rotated and scaled\n  /// Direction ``d`` is rotated (preserving it's norm)\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(),\n                (*this) * l.direction() / quaternion().squaredNorm());\n  }\n\n  /// Group action on planes.\n  ///\n  /// This function rotates parametrized plane\n  /// ``n.x + d = 0`` by the SO3 element and scales it by the scale factor:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is scaled\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    const auto this_scale = scale();\n    return Hyperplane((*this) * p.normal() / this_scale,\n                      this_scale * p.offset());\n  }\n\n  /// In-place group multiplication. 
This method is only valid if the return\n  /// type of the multiplication is compatible with this SO3's Scalar type.\n  ///\n  /// Note: This function performs saturation for products close to zero in\n  /// order to ensure the class invariant.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC RxSO3Base<Derived>& operator*=(\n      RxSO3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns internal parameters of RxSO(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real), with q being the\n  /// quaternion.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return quaternion().coeffs();\n  }\n\n  /// Sets non-zero quaternion\n  ///\n  /// Precondition: ``quat`` must not be close to either zero or infinity\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quat) {\n    SOPHUS_ENSURE(quat.squaredNorm() > Constants<Scalar>::epsilon() *\n                                           Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    SOPHUS_ENSURE(\n        quat.squaredNorm() < Scalar(1.) 
/ (Constants<Scalar>::epsilon() *\n                                           Constants<Scalar>::epsilon()),\n        \"Inverse scale factor must be greater-equal epsilon.\");\n    static_cast<Derived*>(this)->quaternion_nonconst() = quat;\n  }\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& quaternion() const {\n    return static_cast<Derived const*>(this)->quaternion();\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Transformation rotationMatrix() const {\n    QuaternionTemporaryType norm_quad = quaternion();\n    norm_quad.normalize();\n    return norm_quad.toRotationMatrix();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC\n  Scalar scale() const { return quaternion().squaredNorm(); }\n\n  /// Setter of quaternion using rotation matrix ``R``, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Transformation const& R) {\n    using std::sqrt;\n    Scalar saved_scale = scale();\n    quaternion_nonconst() = R;\n    quaternion_nonconst().coeffs() *= sqrt(saved_scale);\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  /// Note: This function as a significant computational cost, since it has to\n  /// call the square root twice.\n  ///\n  SOPHUS_FUNC\n  void setScale(Scalar const& scale) {\n    using std::sqrt;\n    quaternion_nonconst().normalize();\n    quaternion_nonconst().coeffs() *= sqrt(scale);\n  }\n\n  /// Setter of quaternion using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 3x3 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Transformation const& sR) {\n    Transformation squared_sR = sR * sR.transpose();\n    Scalar squared_scale =\n        Scalar(1. / 3.) 
*\n        (squared_sR(0, 0) + squared_sR(1, 1) + squared_sR(2, 2));\n    SOPHUS_ENSURE(squared_scale >= Constants<Scalar>::epsilon() *\n                                       Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    SOPHUS_ENSURE(squared_scale < Scalar(1.) / (Constants<Scalar>::epsilon() *\n                                                Constants<Scalar>::epsilon()),\n                  \"Inverse scale factor must be greater-equal epsilon.\");\n    Scalar scale = sqrt(squared_scale);\n    quaternion_nonconst() = sR / scale;\n    quaternion_nonconst().coeffs() *= sqrt(scale);\n  }\n\n  /// Setter of SO(3) rotations, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setSO3(SO3<Scalar> const& so3) {\n    using std::sqrt;\n    Scalar saved_scale = scale();\n    quaternion_nonconst() = so3.unit_quaternion();\n    quaternion_nonconst().coeffs() *= sqrt(saved_scale);\n  }\n\n  SOPHUS_FUNC SO3<Scalar> so3() const { return SO3<Scalar>(quaternion()); }\n\n  /// Returns derivative of  this * RxSO3::exp(x) wrt. 
x at x=0\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    Eigen::Quaternion<Scalar> const q = quaternion();\n    J.col(3) = q.coeffs() * Scalar(0.5);\n    Scalar const c0 = Scalar(0.5) * q.w();\n    Scalar const c1 = Scalar(0.5) * q.z();\n    Scalar const c2 = -c1;\n    Scalar const c3 = Scalar(0.5) * q.y();\n    Scalar const c4 = Scalar(0.5) * q.x();\n    Scalar const c5 = -c4;\n    Scalar const c6 = -c3;\n    J(0, 0) = c0;\n    J(0, 1) = c2;\n    J(0, 2) = c3;\n    J(1, 0) = c1;\n    J(1, 1) = c0;\n    J(1, 2) = c5;\n    J(2, 0) = c6;\n    J(2, 1) = c4;\n    J(2, 2) = c0;\n    J(3, 0) = c5;\n    J(3, 1) = c6;\n    J(3, 2) = c2;\n    return J;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    auto& q = quaternion();\n    Matrix<Scalar, DoF, num_parameters> J;\n    // clang-format off\n    J << q.w(),  q.z(), -q.y(), -q.x(),\n        -q.z(),  q.w(),  q.x(), -q.y(),\n         q.y(), -q.x(),  q.w(), -q.z(),\n         q.x(),  q.y(),  q.z(),  q.w();\n    // clang-format on\n    const Scalar scaler = Scalar(2.) 
/ q.squaredNorm();\n    return J * scaler;\n  }\n\n private:\n  /// Mutator of quaternion is private to ensure class invariant.\n  ///\n  SOPHUS_FUNC QuaternionType& quaternion_nonconst() {\n    return static_cast<Derived*>(this)->quaternion_nonconst();\n  }\n};\n\n/// RxSO3 using storage; derived from RxSO3Base.\ntemplate <class Scalar_, int Options>\nclass RxSO3 : public RxSO3Base<RxSO3<Scalar_, Options>> {\n public:\n  using Base = RxSO3Base<RxSO3<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using QuaternionMember = Eigen::Quaternion<Scalar, Options>;\n\n  /// ``Base`` is friend so quaternion_nonconst can be accessed from ``Base``.\n  friend class RxSO3Base<RxSO3<Scalar_, Options>>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. 
The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC RxSO3& operator=(RxSO3 const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes quaternion to identity rotation and scale\n  /// to 1.\n  ///\n  SOPHUS_FUNC RxSO3()\n      : quaternion_(Scalar(1), Scalar(0), Scalar(0), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC RxSO3(RxSO3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC RxSO3(RxSO3Base<OtherDerived> const& other)\n      : quaternion_(other.quaternion()) {}\n\n  /// Constructor from scaled rotation matrix\n  ///\n  /// Precondition: rotation matrix need to be scaled orthogonal with\n  /// determinant of ``s^3``.\n  ///\n  SOPHUS_FUNC explicit RxSO3(Transformation const& sR) {\n    this->setScaledRotationMatrix(sR);\n  }\n\n  /// Constructor from scale factor and rotation matrix ``R``.\n  ///\n  /// Precondition: Rotation matrix ``R`` must to be orthogonal with determinant\n  ///               of 1 and ``scale`` must not be close to either zero or\n  ///               infinity.\n  ///\n  SOPHUS_FUNC RxSO3(Scalar const& scale, Transformation const& R)\n      : quaternion_(R) {\n    SOPHUS_ENSURE(scale >= Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    SOPHUS_ENSURE(scale < Scalar(1.) 
/ Constants<Scalar>::epsilon(),\n                  \"Inverse scale factor must be greater-equal epsilon.\");\n    using std::sqrt;\n    quaternion_.coeffs() *= sqrt(scale);\n  }\n\n  /// Constructor from scale factor and SO3\n  ///\n  /// Precondition: ``scale`` must not to be close to either zero or infinity.\n  ///\n  SOPHUS_FUNC RxSO3(Scalar const& scale, SO3<Scalar> const& so3)\n      : quaternion_(so3.unit_quaternion()) {\n    SOPHUS_ENSURE(scale >= Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    SOPHUS_ENSURE(scale < Scalar(1.) / Constants<Scalar>::epsilon(),\n                  \"Inverse scale factor must be greater-equal epsilon.\");\n    using std::sqrt;\n    quaternion_.coeffs() *= sqrt(scale);\n  }\n\n  /// Constructor from quaternion\n  ///\n  /// Precondition: quaternion must not be close to either zero or infinity.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit RxSO3(Eigen::QuaternionBase<D> const& quat)\n      : quaternion_(quat) {\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type.\");\n    SOPHUS_ENSURE(quaternion_.squaredNorm() >= Constants<Scalar>::epsilon(),\n                  \"Scale factor must be greater-equal epsilon.\");\n    SOPHUS_ENSURE(\n        quat.squaredNorm() < Scalar(1.) / Constants<Scalar>::epsilon(),\n        \"Inverse scale factor must be greater-equal epsilon.\");\n  }\n\n  /// Constructor from scale factor and unit quaternion\n  ///\n  /// Precondition: quaternion must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit RxSO3(Scalar const& scale,\n                             Eigen::QuaternionBase<D> const& unit_quat)\n      : RxSO3(scale, SO3<Scalar>(unit_quat)) {}\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC QuaternionMember const& quaternion() const { return quaternion_; }\n\n  /// Returns derivative of exp(x) wrt. 
x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    static Scalar const h(0.5);\n    return h * Sophus::Matrix<Scalar, num_parameters, DoF>::Identity();\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      const Tangent& a) {\n    using std::exp;\n    using std::sqrt;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Vector3<Scalar> const omega = a.template head<3>();\n    Scalar const sigma = a[3];\n    Eigen::Quaternion<Scalar> quat = SO3<Scalar>::exp(omega).unit_quaternion();\n    Scalar const scale = sqrt(exp(sigma));\n    Scalar const scale_half = scale * Scalar(0.5);\n\n    J.template block<4, 3>(0, 0) = SO3<Scalar>::Dx_exp_x(omega) * scale;\n    J.col(3) = quat.coeffs() * scale_half;\n    return J;\n  }\n\n  /// Returns derivative of exp(x) * p wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 3, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    Sophus::Matrix<Scalar, 3, DoF> j;\n    j << Sophus::SO3<Scalar>::hat(-point), point;\n    return j;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. 
``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation 3-vector\n  /// plus logarithm of scale) and returns the corresponding element of the\n  /// group RxSO3.\n  ///\n  /// To be more specific, this function computes ``expmat(hat(omega))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of RSO3.\n  ///\n  SOPHUS_FUNC static RxSO3<Scalar> exp(Tangent const& a) {\n    Scalar theta;\n    return expAndTheta(a, &theta);\n  }\n\n  /// As above, but also returns ``theta = |omega|`` as out-parameter.\n  ///\n  /// Precondition: ``theta`` must not be ``nullptr``.\n  ///\n  SOPHUS_FUNC static RxSO3<Scalar> expAndTheta(Tangent const& a,\n                                               Scalar* theta) {\n    SOPHUS_ENSURE(theta != nullptr, \"must not be nullptr.\");\n    using std::exp;\n    using std::max;\n    using std::min;\n    using std::sqrt;\n\n    Vector3<Scalar> const omega = a.template head<3>();\n    Scalar sigma = a[3];\n    Scalar scale = exp(sigma);\n    // Ensure that scale-factor constraint is always valid\n    scale = max(scale, Constants<Scalar>::epsilonPlus());\n    scale = min(scale, Scalar(1.) 
/ Constants<Scalar>::epsilonPlus());\n    Scalar sqrt_scale = sqrt(scale);\n    Eigen::Quaternion<Scalar> quat =\n        SO3<Scalar>::expAndTheta(omega, theta).unit_quaternion();\n    quat.coeffs() *= sqrt_scale;\n    return RxSO3<Scalar>(quat);\n  }\n\n  /// Returns the ith infinitesimal generators of ``R+ x SO(3)``.\n  ///\n  /// The infinitesimal generators of RxSO3 are:\n  ///\n  /// ```\n  ///         |  0  0  0 |\n  ///   G_0 = |  0  0 -1 |\n  ///         |  0  1  0 |\n  ///\n  ///         |  0  0  1 |\n  ///   G_1 = |  0  0  0 |\n  ///         | -1  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  1  0  0 |\n  ///   G_3 = |  0  1  0 |\n  ///         |  0  0  1 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be 0, 1, 2 or 3.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 3, \"i should be in range [0,3].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 4-vector representation ``a`` (= rotation vector plus\n  /// logarithm of scale) and  returns the corresponding matrix representation\n  /// of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of RxSO3 is defined as\n  ///\n  ///   ``hat(.): R^4 -> R^{3x3},  hat(a) = sum_i a_i * G_i``  (for i=0,1,2,3)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of RxSO3.\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation A;\n    // clang-format off\n    A <<  a(3), -a(2),  a(1),\n          a(2),  a(3), -a(0),\n         -a(1),  a(0),  a(3);\n    // clang-format on\n    return A;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of RxSO(3). 
To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_rxso3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of RxSO3.\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector3<Scalar> const omega1 = a.template head<3>();\n    Vector3<Scalar> const omega2 = b.template head<3>();\n    Vector4<Scalar> res;\n    res.template head<3>() = omega1.cross(omega2);\n    res[3] = Scalar(0);\n    return res;\n  }\n\n  /// Draw uniform sample from RxSO(3) manifold.\n  ///\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2].\n  ///\n  template <class UniformRandomBitGenerator>\n  static RxSO3 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    using std::exp2;\n    return RxSO3(exp2(uniform(generator)),\n                 SO3<Scalar>::sampleUniform(generator));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  d -c  b |\n  ///                |  c  d -a |\n  ///                | -b  a  d |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    using std::abs;\n    return Tangent(Omega(2, 1), Omega(0, 2), Omega(1, 0), Omega(0, 0));\n  }\n\n protected:\n  SOPHUS_FUNC QuaternionMember& quaternion_nonconst() { return quaternion_; }\n\n  QuaternionMember quaternion_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``RxSO3``; derived from RxSO3Base\n///\n/// Allows us to wrap RxSO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO3<Scalar_>, Options>\n    : public Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so quaternion_nonconst can be accessed from ``Base``.\n  friend class Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_>, Options>>;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs) : quaternion_(coeffs) {}\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC\n  Map<Eigen::Quaternion<Scalar>, Options> const& quaternion() const {\n    return quaternion_;\n  }\n\n protected:\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar>, Options>& quaternion_nonconst() {\n    return quaternion_;\n  }\n\n  Map<Eigen::Quaternion<Scalar>, Options> quaternion_;\n};\n\n/// Specialization of Eigen::Map for ``RxSO3 const``; derived from RxSO3Base.\n///\n/// Allows us to wrap RxSO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::RxSO3<Scalar_> const, Options>\n    : public Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::RxSO3Base<Map<Sophus::RxSO3<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  explicit Map(Scalar const* coeffs) : quaternion_(coeffs) {}\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC\n  Map<Eigen::Quaternion<Scalar> const, Options> const& quaternion() const {\n    return quaternion_;\n  }\n\n protected:\n  Map<Eigen::Quaternion<Scalar> const, Options> const quaternion_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/se2.hpp",
    "content": "/// @file\n/// Special Euclidean group SE(2) - rotation and translation in 2d.\n\n#pragma once\n\n#include \"so2.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SE2;\nusing SE2d = SE2<double>;\nusing SE2f = SE2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::SE2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector2<Scalar, Options>;\n  using SO2Type = Sophus::SO2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE2<Scalar_>, Options>>\n    : traits<Sophus::SE2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar>, Options>;\n  using SO2Type = Map<Sophus::SO2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE2<Scalar_> const, Options>>\n    : traits<Sophus::SE2<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar> const, Options>;\n  using SO2Type = Map<Sophus::SO2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SE2 base type - implements SE2 class but is storage agnostic.\n///\n/// SE(2) is the group of rotations  and translation in 2d. It is the\n/// semi-direct product of SO(2) and the 2d Euclidean vector space.  
The class\n/// is represented using a composition of SO2Group  for rotation and a 2-vector\n/// for translation.\n///\n/// SE(2) is neither compact, nor a commutative group.\n///\n/// See SO2Group for more details of the rotation representation in 2d.\n///\ntemplate <class Derived>\nclass SE2Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using SO2Type = typename Eigen::internal::traits<Derived>::SO2Type;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (two for translation, three for rotation).\n  static int constexpr DoF = 3;\n  /// Number of internal parameters used (tuple for complex, two for\n  /// translation).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  /// Points are 2-dimensional\n  static int constexpr Dim = 2;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Hyperplane = Hyperplane2<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SE2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SE2Product = SE2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Matrix<Scalar, 2, 2> const& R = so2().matrix();\n    Transformation res;\n    res.setIdentity();\n    res.template topLeftCorner<2, 2>() = R;\n    res(0, 2) = translation()[1];\n    res(1, 2) = -translation()[0];\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SE2<NewScalarType> cast() const {\n    return SE2<NewScalarType>(so2().template cast<NewScalarType>(),\n                              translation().template cast<NewScalarType>());\n  }\n\n  /// Returns derivative of  this * exp(x)  wrt x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    Sophus::Vector2<Scalar> const c = unit_complex();\n    Scalar o(0);\n    J(0, 0) = o;\n    J(0, 1) = o;\n    J(0, 2) = -c[1];\n    J(1, 0) = o;\n    J(1, 1) = o;\n    J(1, 2) = c[0];\n    J(2, 0) = c[0];\n    J(2, 1) = -c[1];\n    J(2, 2) = o;\n    J(3, 0) = c[1];\n    J(3, 1) = c[0];\n    J(3, 2) = o;\n    return J;\n  }\n\n  /// Returns 
derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    Matrix<Scalar, DoF, num_parameters> J;\n    J.template block<2, 2>(0, 0).setZero();\n    J.template block<2, 2>(0, 2) = so2().inverse().matrix();\n    J.template block<1, 2>(2, 0) = so2().Dx_log_this_inv_by_x_at_this();\n    J.template block<1, 2>(2, 2).setZero();\n    return J;\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SE2<Scalar> inverse() const {\n    SO2<Scalar> const invR = so2().inverse();\n    return SE2<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent space (twist).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SE(2).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    using std::abs;\n\n    Tangent upsilon_theta;\n    Scalar theta = so2().log();\n    upsilon_theta[2] = theta;\n    Scalar halftheta = Scalar(0.5) * theta;\n    Scalar halftheta_by_tan_of_halftheta;\n\n    Vector2<Scalar> z = so2().unit_complex();\n    Scalar real_minus_one = z.x() - Scalar(1.);\n    if (abs(real_minus_one) < Constants<Scalar>::epsilon()) {\n      halftheta_by_tan_of_halftheta =\n          Scalar(1.) - Scalar(1. 
/ 12) * theta * theta;\n    } else {\n      halftheta_by_tan_of_halftheta = -(halftheta * z.y()) / (real_minus_one);\n    }\n    Matrix<Scalar, 2, 2> V_inv;\n    V_inv << halftheta_by_tan_of_halftheta, halftheta, -halftheta,\n        halftheta_by_tan_of_halftheta;\n    upsilon_theta.template head<2>() = V_inv * translation();\n    return upsilon_theta;\n  }\n\n  /// Normalize SO2 element\n  ///\n  /// It re-normalizes the SO2 element.\n  ///\n  SOPHUS_FUNC void normalize() { so2().normalize(); }\n\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | R t |\n  ///   | o 1 |\n  ///\n  /// where ``R`` is a 2x2 rotation matrix, ``t`` a translation 2-vector and\n  /// ``o`` a 2-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogeneous_matrix;\n    homogeneous_matrix.template topLeftCorner<2, 3>() = matrix2x3();\n    homogeneous_matrix.row(2) =\n        Matrix<Scalar, 1, 3>(Scalar(0), Scalar(0), Scalar(1));\n    return homogeneous_matrix;\n  }\n\n  /// Returns the significant first two rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 2, 3> matrix2x3() const {\n    Matrix<Scalar, 2, 3> matrix;\n    matrix.template topLeftCorner<2, 2>() = rotationMatrix();\n    matrix.col(2) = translation();\n    return matrix;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE2Base<Derived>& operator=(SE2Base<OtherDerived> const& other) {\n    so2() = other.so2();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SE2Product<OtherDerived> operator*(\n      SE2Base<OtherDerived> const& other) const {\n    return SE2Product<OtherDerived>(\n        so2() * other.so2(), translation() + so2() * other.translation());\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// 
This function rotates and translates a two dimensional point ``p`` by the\n  /// SE(2) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_R_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return so2() * p + translation();\n  }\n\n  /// Group action on homogeneous 2-points. See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        so2() * p.template head<2>() + p(2) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), p(2));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the SE(2) element:\n  ///\n  /// Origin ``o`` is rotated and translated using SE(2) action\n  /// Direction ``d`` is rotated using SO(2) action\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), so2() * l.direction());\n  }\n\n  /// Group action on hyper-planes.\n  ///\n  /// This function rotates a hyper-plane ``n.x + d = 0`` by the SE2\n  /// element:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is adjusted for translation\n  ///\n  /// Note that in 2d-case hyper-planes are just another parametrization of\n  /// lines\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    Hyperplane const rotated = so2() * p;\n    return Hyperplane(rotated.normal(),\n                      
rotated.offset() - translation().dot(rotated.normal()));\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SE2Base<Derived>& operator*=(SE2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns internal parameters of SE(2).\n  ///\n  /// It returns (c[0], c[1], t[0], t[1]),\n  /// with c being the unit complex number, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << so2().params(), translation();\n    return p;\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 2, 2> rotationMatrix() const {\n    return so2().matrix();\n  }\n\n  /// Takes in complex number, and normalizes it.\n  ///\n  /// Precondition: The complex number must not be close to zero.\n  ///\n  SOPHUS_FUNC void setComplex(Sophus::Vector2<Scalar> const& complex) {\n    return so2().setComplex(complex);\n  }\n\n  /// Sets ``so3`` using ``rotation_matrix``.\n  ///\n  /// Precondition: ``R`` must be orthogonal and ``det(R)=1``.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix<Scalar, 2, 2> const& R) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n {}\",\n                  SOPHUS_FMT_ARG(R));\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: {}\",\n                  SOPHUS_FMT_ARG(R.determinant()));\n    typename SO2Type::ComplexTemporaryType const complex(\n        Scalar(0.5) * (R(0, 0) + R(1, 1)), Scalar(0.5) * (R(1, 0) - R(0, 1)));\n    so2().setComplex(complex);\n  }\n\n  /// Mutator of SO3 group.\n  ///\n  SOPHUS_FUNC\n  SO2Type& so2() { return 
static_cast<Derived*>(this)->so2(); }\n\n  /// Accessor of SO3 group.\n  ///\n  SOPHUS_FUNC\n  SO2Type const& so2() const {\n    return static_cast<Derived const*>(this)->so2();\n  }\n\n  /// Mutator of translation vector.\n  ///\n  SOPHUS_FUNC\n  TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC\n  TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n\n  /// Accessor of unit complex number.\n  ///\n  SOPHUS_FUNC\n  typename Eigen::internal::traits<Derived>::SO2Type::ComplexT const&\n  unit_complex() const {\n    return so2().unit_complex();\n  }\n};\n\n/// SE2 using default storage; derived from SE2Base.\ntemplate <class Scalar_, int Options>\nclass SE2 : public SE2Base<SE2<Scalar_, Options>> {\n public:\n  using Base = SE2Base<SE2<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using SO2Member = SO2<Scalar, Options>;\n  using TranslationMember = Vector2<Scalar, Options>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. 
The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC SE2& operator=(SE2 const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes rigid body motion to the identity.\n  ///\n  SOPHUS_FUNC SE2();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SE2(SE2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE2(SE2Base<OtherDerived> const& other)\n      : so2_(other.so2()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from SO3 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC SE2(SO2Base<OtherDerived> const& so2,\n                  Eigen::MatrixBase<D> const& translation)\n      : so2_(so2), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from rotation matrix and translation vector\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  /// of 1.\n  ///\n  SOPHUS_FUNC\n  SE2(typename SO2<Scalar>::Transformation const& rotation_matrix,\n      Point const& translation)\n      : so2_(rotation_matrix), translation_(translation) {}\n\n  /// Constructor from rotation angle and translation vector.\n  ///\n  SOPHUS_FUNC SE2(Scalar const& theta, Point const& translation)\n      : so2_(theta), translation_(translation) {}\n\n  /// Constructor from complex number and translation vector\n  ///\n  /// Precondition: ``complex`` must not be close to zero.\n  SOPHUS_FUNC 
SE2(Vector2<Scalar> const& complex, Point const& translation)\n      : so2_(complex), translation_(translation) {}\n\n  /// Constructor from 3x3 matrix\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  /// of 1. The last row must be ``(0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit SE2(Transformation const& T)\n      : so2_(T.template topLeftCorner<2, 2>().eval()),\n        translation_(T.template block<2, 1>(0, 2)) {}\n\n  /// This provides unsafe read/write access to internal data. SO(2) is\n  /// represented by a complex number (two parameters). When using direct write\n  /// access, the user needs to take care of that the complex number stays\n  /// normalized.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // so2_ and translation_ are lay out sequentially with no padding\n    return so2_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    /// so2_ and translation_ are lay out sequentially with no padding\n    return so2_.data();\n  }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC SO2Member& so2() { return so2_; }\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC SO2Member const& so2() const { return so2_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x) wrt. 
x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& upsilon_theta) {\n    using std::abs;\n    using std::cos;\n    using std::pow;\n    using std::sin;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Sophus::Vector<Scalar, 2> upsilon = upsilon_theta.template head<2>();\n    Scalar theta = upsilon_theta[2];\n\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Scalar const o(0);\n      Scalar const i(1);\n\n      // clang-format off\n      J << o, o, o,\n           o, o, i,\n           i, o, -Scalar(0.5) * upsilon[1],\n           o, i,  Scalar(0.5) * upsilon[0];\n      // clang-format on\n      return J;\n    }\n\n    Scalar const c0 = sin(theta);\n    Scalar const c1 = cos(theta);\n    Scalar const c2 = 1.0 / theta;\n    Scalar const c3 = c0 * c2;\n    Scalar const c4 = -c1 + Scalar(1);\n    Scalar const c5 = c2 * c4;\n    Scalar const c6 = c1 * c2;\n    Scalar const c7 = pow(theta, -2);\n    Scalar const c8 = c0 * c7;\n    Scalar const c9 = c4 * c7;\n\n    Scalar const o = Scalar(0);\n    J(0, 0) = o;\n    J(0, 1) = o;\n    J(0, 2) = -c0;\n    J(1, 0) = o;\n    J(1, 1) = o;\n    J(1, 2) = c1;\n    J(2, 0) = c3;\n    J(2, 1) = -c5;\n    J(2, 2) =\n        -c3 * upsilon[1] + c6 * upsilon[0] - c8 * upsilon[0] + c9 * upsilon[1];\n    J(3, 0) = c5;\n    J(3, 1) = c3;\n    J(3, 2) =\n        c3 * upsilon[0] + c6 * upsilon[1] - c8 * upsilon[1] - c9 * upsilon[0];\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Scalar const o(0);\n    Scalar const i(1);\n\n    // clang-format off\n    J << o, o, o,\n         o, o, i,\n         i, o, o,\n         o, i, o;\n    // clang-format on\n    return J;\n  }\n\n  /// Returns derivative of exp(x) * p wrt. 
x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 2, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    Sophus::Matrix<Scalar, 2, DoF> J;\n    J << Sophus::Matrix2<Scalar>::Identity(),\n        Sophus::SO2<Scalar>::Dx_exp_x_times_point_at_0(point);\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= twist ``a``) and\n  /// returns the corresponding element of the group SE(2).\n  ///\n  /// The first two components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of SE(2), while the last three components\n  /// of ``a`` represents the rotation vector ``omega``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of SE(2), see below.\n  ///\n  SOPHUS_FUNC static SE2<Scalar> exp(Tangent const& a) {\n    Scalar theta = a[2];\n    SO2<Scalar> so2 = SO2<Scalar>::exp(theta);\n    Scalar sin_theta_by_theta;\n    Scalar one_minus_cos_theta_by_theta;\n    using std::abs;\n\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Scalar theta_sq = theta * theta;\n      sin_theta_by_theta = Scalar(1.) - Scalar(1. / 6.) * theta_sq;\n      one_minus_cos_theta_by_theta =\n          Scalar(0.5) * theta - Scalar(1. / 24.) * theta * theta_sq;\n    } else {\n      sin_theta_by_theta = so2.unit_complex().y() / theta;\n      one_minus_cos_theta_by_theta =\n          (Scalar(1.) 
- so2.unit_complex().x()) / theta;\n    }\n    Vector2<Scalar> trans(\n        sin_theta_by_theta * a[0] - one_minus_cos_theta_by_theta * a[1],\n        one_minus_cos_theta_by_theta * a[0] + sin_theta_by_theta * a[1]);\n    return SE2<Scalar>(so2, trans);\n  }\n\n  /// Returns closest SE3 given arbitrary 4x4 matrix.\n  ///\n  template <class S = Scalar>\n  static SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, SE2>\n  fitToSE2(Matrix3<Scalar> const& T) {\n    return SE2(SO2<Scalar>::fitToSO2(T.template block<2, 2>(0, 0)),\n               T.template block<2, 1>(0, 2));\n  }\n\n  /// Returns the ith infinitesimal generators of SE(2).\n  ///\n  /// The infinitesimal generators of SE(2) are:\n  ///\n  /// ```\n  ///         |  0  0  1 |\n  ///   G_0 = |  0  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0  0  0 |\n  ///   G_1 = |  0  0  1 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in 0, 1 or 2.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 || i <= 2, \"i should be in range [0,2].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 3-vector representation (= twist) and returns the\n  /// corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SE(3) is defined as\n  ///\n  ///   ``hat(.): R^3 -> R^{3x33},  hat(a) = sum_i a_i * G_i``  (for i=0,1,2)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of SE(2).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.setZero();\n    Omega.template topLeftCorner<2, 2>() = SO2<Scalar>::hat(a[2]);\n    Omega.col(2).template head<2>() = a.template head<2>();\n    
return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of SE(2). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_se2 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of SE(2).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector2<Scalar> upsilon1 = a.template head<2>();\n    Vector2<Scalar> upsilon2 = b.template head<2>();\n    Scalar theta1 = a[2];\n    Scalar theta2 = b[2];\n\n    return Tangent(-theta1 * upsilon2[1] + theta2 * upsilon1[1],\n                   theta1 * upsilon2[0] - theta2 * upsilon1[0], Scalar(0));\n  }\n\n  /// Construct pure rotation.\n  ///\n  static SOPHUS_FUNC SE2 rot(Scalar const& x) {\n    return SE2(SO2<Scalar>(x), Sophus::Vector2<Scalar>::Zero());\n  }\n\n  /// Draw uniform sample from SE(2) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  ///\n  template <class UniformRandomBitGenerator>\n  static SE2 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    return SE2(SO2<Scalar>::sampleUniform(generator),\n               Vector2<Scalar>(uniform(generator), uniform(generator)));\n  }\n\n  /// Construct a translation only SE(2) instance.\n  ///\n  template <class T0, class T1>\n  static SOPHUS_FUNC SE2 trans(T0 const& x, T1 const& y) {\n    return SE2(SO2<Scalar>(), Vector2<Scalar>(x, y));\n  }\n\n  static SOPHUS_FUNC SE2 trans(Vector2<Scalar> const& xy) {\n    return SE2(SO2<Scalar>(), xy);\n  }\n\n  /// Construct x-axis translation.\n  ///\n  static SOPHUS_FUNC SE2 transX(Scalar const& x) {\n    return SE2::trans(x, Scalar(0));\n  }\n\n  /// Construct y-axis translation.\n  ///\n  static SOPHUS_FUNC SE2 transY(Scalar const& y) {\n    return SE2::trans(Scalar(0), y);\n  }\n\n  /// vee-operator\n  ///\n 
 /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding 3-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -d  a |\n  ///                |  d  0  b |\n  ///                |  0  0  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    SOPHUS_ENSURE(\n        Omega.row(2).template lpNorm<1>() < Constants<Scalar>::epsilon(),\n        \"Omega: \\n{}\", SOPHUS_FMT_ARG(Omega));\n    Tangent upsilon_omega;\n    upsilon_omega.template head<2>() = Omega.col(2).template head<2>();\n    upsilon_omega[2] = SO2<Scalar>::vee(Omega.template topLeftCorner<2, 2>());\n    return upsilon_omega;\n  }\n\n protected:\n  SO2Member so2_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSOPHUS_FUNC SE2<Scalar, Options>::SE2()\n    : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<SE2>::value,\n                \"Assume standard layout for the use of offset of check below.\");\n  static_assert(\n      offsetof(SE2, so2_) + sizeof(Scalar) * SO2<Scalar>::num_parameters ==\n          offsetof(SE2, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``SE2``; derived from SE2Base.\n///\n/// Allows us to wrap SE2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE2<Scalar_>, Options>\n    : public Sophus::SE2Base<Map<Sophus::SE2<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SE2Base<Map<Sophus::SE2<Scalar_>, Options>>;\n  using Scalar = 
Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  explicit Map(Scalar* coeffs)\n      : so2_(coeffs),\n        translation_(coeffs + Sophus::SO2<Scalar>::num_parameters) {}\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO2<Scalar>, Options>& so2() { return so2_; }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO2<Scalar>, Options> const& so2() const {\n    return so2_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO2<Scalar>, Options> so2_;\n  Map<Sophus::Vector2<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``SE2 const``; derived from SE2Base.\n///\n/// Allows us to wrap SE2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE2<Scalar_> const, Options>\n    : public Sophus::SE2Base<Map<Sophus::SE2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SE2Base<Map<Sophus::SE2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar const* coeffs)\n      : so2_(coeffs),\n        translation_(coeffs + Sophus::SO2<Scalar>::num_parameters) {}\n\n  /// Accessor 
of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO2<Scalar> const, Options> const& so2() const {\n    return so2_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO2<Scalar> const, Options> const so2_;\n  Map<Sophus::Vector2<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/se3.hpp",
    "content": "/// @file\n/// Special Euclidean group SE(3) - rotation and translation in 3d.\n\n#pragma once\n\n#include \"so3.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SE3;\nusing SE3d = SE3<double>;\nusing SE3f = SE3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::SE3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector3<Scalar, Options>;\n  using SO3Type = Sophus::SO3<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE3<Scalar_>, Options>>\n    : traits<Sophus::SE3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar>, Options>;\n  using SO3Type = Map<Sophus::SO3<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::SE3<Scalar_> const, Options>>\n    : traits<Sophus::SE3<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar> const, Options>;\n  using SO3Type = Map<Sophus::SO3<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SE3 base type - implements SE3 class but is storage agnostic.\n///\n/// SE(3) is the group of rotations  and translation in 3d. It is the\n/// semi-direct product of SO(3) and the 3d Euclidean vector space.  
The class\n/// is represented using a composition of SO3  for rotation and a one 3-vector\n/// for translation.\n///\n/// SE(3) is neither compact, nor a commutative group.\n///\n/// See SO3 for more details of the rotation representation in 3d.\n///\ntemplate <class Derived>\nclass SE3Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using SO3Type = typename Eigen::internal::traits<Derived>::SO3Type;\n  using QuaternionType = typename SO3Type::QuaternionType;\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (three for translation, three for rotation).\n  static int constexpr DoF = 6;\n  /// Number of internal parameters used (4-tuple for quaternion, three for\n  /// translation).\n  static int constexpr num_parameters = 7;\n  /// Group transformations are 4x4 matrices.\n  static int constexpr N = 4;\n  /// Points are 3-dimensional\n  static int constexpr Dim = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Hyperplane = Hyperplane3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SE3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SE3Product = SE3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Sophus::Matrix3<Scalar> const R = so3().matrix();\n    Adjoint res;\n    res.block(0, 0, 3, 3) = R;\n    res.block(3, 3, 3, 3) = R;\n    res.block(0, 3, 3, 3) = SO3<Scalar>::hat(translation()) * R;\n    res.block(3, 0, 3, 3) = Matrix3<Scalar>::Zero(3, 3);\n    return res;\n  }\n\n  /// Extract rotation angle about canonical X-axis\n  ///\n  Scalar angleX() const { return so3().angleX(); }\n\n  /// Extract rotation angle about canonical Y-axis\n  ///\n  Scalar angleY() const { return so3().angleY(); }\n\n  /// Extract rotation angle about canonical Z-axis\n  ///\n  Scalar angleZ() const { return so3().angleZ(); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SE3<NewScalarType> cast() const {\n    return SE3<NewScalarType>(so3().template cast<NewScalarType>(),\n                              translation().template cast<NewScalarType>());\n  }\n\n  /// Returns derivative of  this * exp(x)  wrt x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    
Matrix<Scalar, num_parameters, DoF> J;\n    Eigen::Quaternion<Scalar> const q = unit_quaternion();\n    Scalar const c0 = Scalar(0.5) * q.w();\n    Scalar const c1 = Scalar(0.5) * q.z();\n    Scalar const c2 = -c1;\n    Scalar const c3 = Scalar(0.5) * q.y();\n    Scalar const c4 = Scalar(0.5) * q.x();\n    Scalar const c5 = -c4;\n    Scalar const c6 = -c3;\n    Scalar const c7 = q.w() * q.w();\n    Scalar const c8 = q.x() * q.x();\n    Scalar const c9 = q.y() * q.y();\n    Scalar const c10 = -c9;\n    Scalar const c11 = q.z() * q.z();\n    Scalar const c12 = -c11;\n    Scalar const c13 = Scalar(2) * q.w();\n    Scalar const c14 = c13 * q.z();\n    Scalar const c15 = Scalar(2) * q.x();\n    Scalar const c16 = c15 * q.y();\n    Scalar const c17 = c13 * q.y();\n    Scalar const c18 = c15 * q.z();\n    Scalar const c19 = c7 - c8;\n    Scalar const c20 = c13 * q.x();\n    Scalar const c21 = Scalar(2) * q.y() * q.z();\n    J(0, 0) = 0;\n    J(0, 1) = 0;\n    J(0, 2) = 0;\n    J(0, 3) = c0;\n    J(0, 4) = c2;\n    J(0, 5) = c3;\n    J(1, 0) = 0;\n    J(1, 1) = 0;\n    J(1, 2) = 0;\n    J(1, 3) = c1;\n    J(1, 4) = c0;\n    J(1, 5) = c5;\n    J(2, 0) = 0;\n    J(2, 1) = 0;\n    J(2, 2) = 0;\n    J(2, 3) = c6;\n    J(2, 4) = c4;\n    J(2, 5) = c0;\n    J(3, 0) = 0;\n    J(3, 1) = 0;\n    J(3, 2) = 0;\n    J(3, 3) = c5;\n    J(3, 4) = c6;\n    J(3, 5) = c2;\n    J(4, 0) = c10 + c12 + c7 + c8;\n    J(4, 1) = -c14 + c16;\n    J(4, 2) = c17 + c18;\n    J(4, 3) = 0;\n    J(4, 4) = 0;\n    J(4, 5) = 0;\n    J(5, 0) = c14 + c16;\n    J(5, 1) = c12 + c19 + c9;\n    J(5, 2) = -c20 + c21;\n    J(5, 3) = 0;\n    J(5, 4) = 0;\n    J(5, 5) = 0;\n    J(6, 0) = -c17 + c18;\n    J(6, 1) = c20 + c21;\n    J(6, 2) = c10 + c11 + c19;\n    J(6, 3) = 0;\n    J(6, 4) = 0;\n    J(6, 5) = 0;\n    return J;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    Matrix<Scalar, 
DoF, num_parameters> J;\n    J.template block<3, 4>(0, 0).setZero();\n    J.template block<3, 3>(0, 4) = so3().inverse().matrix();\n    J.template block<3, 4>(3, 0) = so3().Dx_log_this_inv_by_x_at_this();\n    J.template block<3, 3>(3, 4).setZero();\n    return J;\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SE3<Scalar> inverse() const {\n    SO3<Scalar> invR = so3().inverse();\n    return SE3<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent space (twist).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SE(3).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    // For the derivation of the logarithm of SE(3), see\n    // J. Gallier, D. Xu, \"Computing exponentials of skew symmetric matrices\n    // and logarithms of orthogonal matrices\", IJRA 2002.\n    // https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf\n    // (Sec. 6., pp. 
8)\n    using std::abs;\n    using std::cos;\n    using std::sin;\n    Tangent upsilon_omega;\n    auto omega_and_theta = so3().logAndTheta();\n    Scalar theta = omega_and_theta.theta;\n    Vector3<Scalar> const& omega = omega_and_theta.tangent;\n    upsilon_omega.template tail<3>() = omega;\n    Matrix3<Scalar> V_inv = SO3<Scalar>::leftJacobianInverse(omega, theta);\n    upsilon_omega.template head<3>() = V_inv * translation();\n    return upsilon_omega;\n  }\n\n  /// It re-normalizes the SO3 element.\n  ///\n  /// Note: Because of the class invariant of SO3, there is typically no need to\n  /// call this function directly.\n  ///\n  SOPHUS_FUNC void normalize() { so3().normalize(); }\n\n  /// Returns 4x4 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | R t |\n  ///   | o 1 |\n  ///\n  /// where ``R`` is a 3x3 rotation matrix, ``t`` a translation 3-vector and\n  /// ``o`` a 3-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogeneous_matrix;\n    homogeneous_matrix.template topLeftCorner<3, 4>() = matrix3x4();\n    homogeneous_matrix.row(3) =\n        Matrix<Scalar, 1, 4>(Scalar(0), Scalar(0), Scalar(0), Scalar(1));\n    return homogeneous_matrix;\n  }\n\n  /// Returns the significant first three rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 3, 4> matrix3x4() const {\n    Matrix<Scalar, 3, 4> matrix;\n    matrix.template topLeftCorner<3, 3>() = rotationMatrix();\n    matrix.col(3) = translation();\n    return matrix;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE3Base<Derived>& operator=(SE3Base<OtherDerived> const& other) {\n    so3() = other.so3();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SE3Product<OtherDerived> operator*(\n      
SE3Base<OtherDerived> const& other) const {\n    return SE3Product<OtherDerived>(\n        so3() * other.so3(), translation() + so3() * other.translation());\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates and translates a three dimensional point ``p`` by\n  /// the SE(3) element ``bar_T_foo = (bar_R_foo, t_bar)`` (= rigid body\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_R_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return so3() * p + translation();\n  }\n\n  /// Group action on homogeneous 3-points. See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        so3() * p.template head<3>() + p(3) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), tp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the SE(3) element:\n  ///\n  /// Origin is transformed using SE(3) action\n  /// Direction is transformed using rotation part\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), so3() * l.direction());\n  }\n\n  /// Group action on planes.\n  ///\n  /// This function rotates and translates a plane\n  /// ``n.x + d = 0`` by the SE(3) element:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is adjusted for translation\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    
Hyperplane const rotated = so3() * p;\n    return Hyperplane(rotated.normal(),\n                      rotated.offset() - translation().dot(rotated.normal()));\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SE3's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SE3Base<Derived>& operator*=(SE3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns rotation matrix.\n  ///\n  SOPHUS_FUNC Matrix3<Scalar> rotationMatrix() const { return so3().matrix(); }\n\n  /// Mutator of SO3 group.\n  ///\n  SOPHUS_FUNC SO3Type& so3() { return static_cast<Derived*>(this)->so3(); }\n\n  /// Accessor of SO3 group.\n  ///\n  SOPHUS_FUNC SO3Type const& so3() const {\n    return static_cast<const Derived*>(this)->so3();\n  }\n\n  /// Takes in quaternion, and normalizes it.\n  ///\n  /// Precondition: The quaternion must not be close to zero.\n  ///\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quat) {\n    so3().setQuaternion(quat);\n  }\n\n  /// Sets ``so3`` using ``rotation_matrix``.\n  ///\n  /// Precondition: ``R`` must be orthogonal and ``det(R)=1``.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix3<Scalar> const& R) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n {}\",\n                  SOPHUS_FMT_ARG(R));\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: {}\",\n                  SOPHUS_FMT_ARG(R.determinant()));\n    so3().setQuaternion(Eigen::Quaternion<Scalar>(R));\n  }\n\n  /// Returns internal parameters of SE(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real, t[0], t[1], t[2]),\n  /// with q being the unit quaternion, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC 
Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << so3().params(), translation();\n    return p;\n  }\n\n  /// Mutator of translation vector.\n  ///\n  SOPHUS_FUNC TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& unit_quaternion() const {\n    return this->so3().unit_quaternion();\n  }\n};\n\n/// SE3 using default storage; derived from SE3Base.\ntemplate <class Scalar_, int Options>\nclass SE3 : public SE3Base<SE3<Scalar_, Options>> {\n  using Base = SE3Base<SE3<Scalar_, Options>>;\n\n public:\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using SO3Member = SO3<Scalar, Options>;\n  using TranslationMember = Vector3<Scalar, Options>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. 
The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC SE3& operator=(SE3 const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes rigid body motion to the identity.\n  ///\n  SOPHUS_FUNC SE3();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SE3(SE3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SE3(SE3Base<OtherDerived> const& other)\n      : so3_(other.so3()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from SO3 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC SE3(SO3Base<OtherDerived> const& so3,\n                  Eigen::MatrixBase<D> const& translation)\n      : so3_(so3), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from rotation matrix and translation vector\n  ///\n  /// Precondition: Rotation matrix needs to be orthogonal with determinant\n  ///               of 1.\n  ///\n  SOPHUS_FUNC\n  SE3(Matrix3<Scalar> const& rotation_matrix, Point const& translation)\n      : so3_(rotation_matrix), translation_(translation) {}\n\n  /// Constructor from quaternion and translation vector.\n  ///\n  /// Precondition: ``quaternion`` must not be close to zero.\n  ///\n  SOPHUS_FUNC SE3(Eigen::Quaternion<Scalar> const& quaternion,\n                  Point const& translation)\n      : so3_(quaternion), translation_(translation) {}\n\n  /// Constructor from 4x4 matrix\n  ///\n  /// 
Precondition: Rotation matrix needs to be orthogonal with determinant\n  ///               of 1. The last row must be ``(0, 0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit SE3(Matrix4<Scalar> const& T)\n      : so3_(T.template topLeftCorner<3, 3>()),\n        translation_(T.template block<3, 1>(0, 3)) {\n    SOPHUS_ENSURE((T.row(3) - Matrix<Scalar, 1, 4>(Scalar(0), Scalar(0),\n                                                   Scalar(0), Scalar(1)))\n                          .squaredNorm() < Constants<Scalar>::epsilon(),\n                  \"Last row is not (0,0,0,1), but ({}).\",\n                  SOPHUS_FMT_ARG(T.row(3)));\n  }\n\n  /// This provides unsafe read/write access to internal data. SO(3) is\n  /// represented by an Eigen::Quaternion (four parameters). When using direct\n  /// write access, the user needs to take care of that the quaternion stays\n  /// normalized.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // so3_ and translation_ are laid out sequentially with no padding\n    return so3_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    // so3_ and translation_ are laid out sequentially with no padding\n    return so3_.data();\n  }\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC SO3Member& so3() { return so3_; }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC SO3Member const& so3() const { return so3_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  SOPHUS_FUNC static Matrix3<Scalar> jacobianUpperRightBlock(\n      Vector3<Scalar> const& upsilon, Vector3<Scalar> const& omega) {\n    using std::cos;\n    using std::sin;\n    using std::sqrt;\n\n    Scalar const k1By2(0.5);\n\n    Scalar const theta_sq = omega.squaredNorm();\n    Matrix3<Scalar> const Upsilon = 
SO3<Scalar>::hat(upsilon);\n\n    Matrix3<Scalar> Q;\n    if (theta_sq <\n        Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      Q = k1By2 * Upsilon;\n\n    } else {\n      Scalar const theta = sqrt(theta_sq);\n      Scalar const i_theta = Scalar(1) / theta;\n      Scalar const i_theta_sq = i_theta * i_theta;\n      Scalar const i_theta_po4 = i_theta_sq * i_theta_sq;\n      Scalar const st = sin(theta);\n      Scalar const ct = cos(theta);\n      Scalar const c1 = i_theta_sq - st * i_theta_sq * i_theta;\n      Scalar const c2 = k1By2 * i_theta_sq + ct * i_theta_po4 - i_theta_po4;\n      Scalar const c3 = i_theta_po4 + k1By2 * ct * i_theta_po4 -\n                        Scalar(1.5) * st * i_theta * i_theta_po4;\n\n      Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n      Matrix3<Scalar> const OmegaUpsilon = Omega * Upsilon;\n      Matrix3<Scalar> const OmegaUpsilonOmega = OmegaUpsilon * Omega;\n      Q = k1By2 * Upsilon +\n          c1 * (OmegaUpsilon + Upsilon * Omega + OmegaUpsilonOmega) -\n          c2 * (theta_sq * Upsilon + Scalar(2) * OmegaUpsilonOmega) +\n          c3 * (OmegaUpsilonOmega * Omega + Omega * OmegaUpsilonOmega);\n    }\n    return Q;\n  }\n\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, DoF, DoF> leftJacobian(\n      Tangent const& upsilon_omega) {\n    Vector3<Scalar> const upsilon = upsilon_omega.template head<3>();\n    Vector3<Scalar> const omega = upsilon_omega.template tail<3>();\n    Matrix3<Scalar> const J = SO3<Scalar>::leftJacobian(omega);\n    Matrix3<Scalar> Q = jacobianUpperRightBlock(upsilon, omega);\n    Matrix6<Scalar> U;\n    U << J, Q, Matrix3<Scalar>::Zero(), J;\n    return U;\n  }\n\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, DoF, DoF> leftJacobianInverse(\n      Tangent const& upsilon_omega) {\n    Vector3<Scalar> const upsilon = upsilon_omega.template head<3>();\n    Vector3<Scalar> const omega = upsilon_omega.template tail<3>();\n    Matrix3<Scalar> const J_inv = 
SO3<Scalar>::leftJacobianInverse(omega);\n    Matrix3<Scalar> Q = jacobianUpperRightBlock(upsilon, omega);\n    Matrix6<Scalar> U;\n    U << J_inv, -J_inv * Q * J_inv, Matrix3<Scalar>::Zero(), J_inv;\n    return U;\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& upsilon_omega) {\n    using std::cos;\n    using std::pow;\n    using std::sin;\n    using std::sqrt;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Sophus::Vector<Scalar, 3> upsilon = upsilon_omega.template head<3>();\n    Sophus::Vector<Scalar, 3> omega = upsilon_omega.template tail<3>();\n\n    Scalar const c0 = omega[0] * omega[0];\n    Scalar const c1 = omega[1] * omega[1];\n    Scalar const c2 = omega[2] * omega[2];\n    Scalar const c3 = c0 + c1 + c2;\n    Scalar const o(0);\n    Scalar const h(0.5);\n    Scalar const i(1);\n\n    if (c3 < Constants<Scalar>::epsilon()) {\n      Scalar const ux = Scalar(0.5) * upsilon[0];\n      Scalar const uy = Scalar(0.5) * upsilon[1];\n      Scalar const uz = Scalar(0.5) * upsilon[2];\n\n      /// clang-format off\n      J << o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o, o, h, o, o, o, o, o,\n          o, i, o, o, o, uz, -uy, o, i, o, -uz, o, ux, o, o, i, uy, -ux, o;\n      /// clang-format on\n      return J;\n    }\n\n    Scalar const c4 = sqrt(c3);\n    Scalar const c5 = Scalar(1.0) / c4;\n    Scalar const c6 = Scalar(0.5) * c4;\n    Scalar const c7 = sin(c6);\n    Scalar const c8 = c5 * c7;\n    Scalar const c9 = pow(c3, -3.0L / 2.0L);\n    Scalar const c10 = c7 * c9;\n    Scalar const c11 = Scalar(1.0) / c3;\n    Scalar const c12 = cos(c6);\n    Scalar const c13 = Scalar(0.5) * c11 * c12;\n    Scalar const c14 = c7 * c9 * omega[0];\n    Scalar const c15 = Scalar(0.5) * c11 * c12 * omega[0];\n    Scalar const c16 = -c14 * omega[1] + c15 * omega[1];\n    Scalar const c17 = -c14 * omega[2] + c15 * omega[2];\n    Scalar const c18 = omega[1] * 
omega[2];\n    Scalar const c19 = -c10 * c18 + c13 * c18;\n    Scalar const c20 = c5 * omega[0];\n    Scalar const c21 = Scalar(0.5) * c7;\n    Scalar const c22 = c5 * omega[1];\n    Scalar const c23 = c5 * omega[2];\n    Scalar const c24 = -c1;\n    Scalar const c25 = -c2;\n    Scalar const c26 = c24 + c25;\n    Scalar const c27 = sin(c4);\n    Scalar const c28 = -c27 + c4;\n    Scalar const c29 = c28 * c9;\n    Scalar const c30 = cos(c4);\n    Scalar const c31 = -c30 + Scalar(1);\n    Scalar const c32 = c11 * c31;\n    Scalar const c33 = c32 * omega[2];\n    Scalar const c34 = c29 * omega[0];\n    Scalar const c35 = c34 * omega[1];\n    Scalar const c36 = c32 * omega[1];\n    Scalar const c37 = c34 * omega[2];\n    Scalar const c38 = pow(c3, -5.0L / 2.0L);\n    Scalar const c39 = Scalar(3) * c28 * c38 * omega[0];\n    Scalar const c40 = c26 * c9;\n    Scalar const c41 = -c20 * c30 + c20;\n    Scalar const c42 = c27 * c9 * omega[0];\n    Scalar const c43 = c42 * omega[1];\n    Scalar const c44 = pow(c3, -2);\n    Scalar const c45 = Scalar(2) * c31 * c44 * omega[0];\n    Scalar const c46 = c45 * omega[1];\n    Scalar const c47 = c29 * omega[2];\n    Scalar const c48 = c43 - c46 + c47;\n    Scalar const c49 = Scalar(3) * c0 * c28 * c38;\n    Scalar const c50 = c9 * omega[0] * omega[2];\n    Scalar const c51 = c41 * c50 - c49 * omega[2];\n    Scalar const c52 = c9 * omega[0] * omega[1];\n    Scalar const c53 = c41 * c52 - c49 * omega[1];\n    Scalar const c54 = c42 * omega[2];\n    Scalar const c55 = c45 * omega[2];\n    Scalar const c56 = c29 * omega[1];\n    Scalar const c57 = -c54 + c55 + c56;\n    Scalar const c58 = Scalar(-2) * c56;\n    Scalar const c59 = Scalar(3) * c28 * c38 * omega[1];\n    Scalar const c60 = -c22 * c30 + c22;\n    Scalar const c61 = -c18 * c39;\n    Scalar const c62 = c32 + c61;\n    Scalar const c63 = c27 * c9;\n    Scalar const c64 = c1 * c63;\n    Scalar const c65 = Scalar(2) * c31 * c44;\n    Scalar const c66 = c1 * c65;\n    Scalar 
const c67 = c50 * c60;\n    Scalar const c68 = -c1 * c39 + c52 * c60;\n    Scalar const c69 = c18 * c63;\n    Scalar const c70 = c18 * c65;\n    Scalar const c71 = c34 - c69 + c70;\n    Scalar const c72 = Scalar(-2) * c47;\n    Scalar const c73 = Scalar(3) * c28 * c38 * omega[2];\n    Scalar const c74 = -c23 * c30 + c23;\n    Scalar const c75 = -c32 + c61;\n    Scalar const c76 = c2 * c63;\n    Scalar const c77 = c2 * c65;\n    Scalar const c78 = c52 * c74;\n    Scalar const c79 = c34 + c69 - c70;\n    Scalar const c80 = -c2 * c39 + c50 * c74;\n    Scalar const c81 = -c0;\n    Scalar const c82 = c25 + c81;\n    Scalar const c83 = c32 * omega[0];\n    Scalar const c84 = c18 * c29;\n    Scalar const c85 = Scalar(-2) * c34;\n    Scalar const c86 = c82 * c9;\n    Scalar const c87 = c0 * c63;\n    Scalar const c88 = c0 * c65;\n    Scalar const c89 = c9 * omega[1] * omega[2];\n    Scalar const c90 = c41 * c89;\n    Scalar const c91 = c54 - c55 + c56;\n    Scalar const c92 = -c1 * c73 + c60 * c89;\n    Scalar const c93 = -c43 + c46 + c47;\n    Scalar const c94 = -c2 * c59 + c74 * c89;\n    Scalar const c95 = c24 + c81;\n    Scalar const c96 = c9 * c95;\n    J(0, 0) = o;\n    J(0, 1) = o;\n    J(0, 2) = o;\n    J(0, 3) = -c0 * c10 + c0 * c13 + c8;\n    J(0, 4) = c16;\n    J(0, 5) = c17;\n    J(1, 0) = o;\n    J(1, 1) = o;\n    J(1, 2) = o;\n    J(1, 3) = c16;\n    J(1, 4) = -c1 * c10 + c1 * c13 + c8;\n    J(1, 5) = c19;\n    J(2, 0) = o;\n    J(2, 1) = o;\n    J(2, 2) = o;\n    J(2, 3) = c17;\n    J(2, 4) = c19;\n    J(2, 5) = -c10 * c2 + c13 * c2 + c8;\n    J(3, 0) = o;\n    J(3, 1) = o;\n    J(3, 2) = o;\n    J(3, 3) = -c20 * c21;\n    J(3, 4) = -c21 * c22;\n    J(3, 5) = -c21 * c23;\n    J(4, 0) = c26 * c29 + Scalar(1);\n    J(4, 1) = -c33 + c35;\n    J(4, 2) = c36 + c37;\n    J(4, 3) = upsilon[0] * (-c26 * c39 + c40 * c41) + upsilon[1] * (c53 + c57) +\n              upsilon[2] * (c48 + c51);\n    J(4, 4) = upsilon[0] * (-c26 * c59 + c40 * c60 + c58) +\n              
upsilon[1] * (c68 + c71) + upsilon[2] * (c62 + c64 - c66 + c67);\n    J(4, 5) = upsilon[0] * (-c26 * c73 + c40 * c74 + c72) +\n              upsilon[1] * (c75 - c76 + c77 + c78) + upsilon[2] * (c79 + c80);\n    J(5, 0) = c33 + c35;\n    J(5, 1) = c29 * c82 + Scalar(1);\n    J(5, 2) = -c83 + c84;\n    J(5, 3) = upsilon[0] * (c53 + c91) +\n              upsilon[1] * (-c39 * c82 + c41 * c86 + c85) +\n              upsilon[2] * (c75 - c87 + c88 + c90);\n    J(5, 4) = upsilon[0] * (c68 + c79) + upsilon[1] * (-c59 * c82 + c60 * c86) +\n              upsilon[2] * (c92 + c93);\n    J(5, 5) = upsilon[0] * (c62 + c76 - c77 + c78) +\n              upsilon[1] * (c72 - c73 * c82 + c74 * c86) +\n              upsilon[2] * (c57 + c94);\n    J(6, 0) = -c36 + c37;\n    J(6, 1) = c83 + c84;\n    J(6, 2) = c29 * c95 + Scalar(1);\n    J(6, 3) = upsilon[0] * (c51 + c93) + upsilon[1] * (c62 + c87 - c88 + c90) +\n              upsilon[2] * (-c39 * c95 + c41 * c96 + c85);\n    J(6, 4) = upsilon[0] * (-c64 + c66 + c67 + c75) + upsilon[1] * (c48 + c92) +\n              upsilon[2] * (c58 - c59 * c95 + c60 * c96);\n    J(6, 5) = upsilon[0] * (c71 + c80) + upsilon[1] * (c91 + c94) +\n              upsilon[2] * (-c73 * c95 + c74 * c96);\n\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    Scalar const o(0);\n    Scalar const h(0.5);\n    Scalar const i(1);\n\n    // clang-format off\n    J << o, o, o, h, o, o,\n         o, o, o, o, h, o,\n\t o, o, o, o, o, h,\n\t o, o, o, o, o, o,\n\t i, o, o, o, o, o,\n\t o, i, o, o, o, o,\n\t o, o, i, o, o, o;\n    // clang-format on\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Returns derivative of exp(x) * p wrt. 
x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 3, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    Sophus::Matrix<Scalar, 3, DoF> J;\n    J << Sophus::Matrix3<Scalar>::Identity(),\n        Sophus::SO3<Scalar>::Dx_exp_x_times_point_at_0(point);\n    return J;\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= twist ``a``) and\n  /// returns the corresponding element of the group SE(3).\n  ///\n  /// The first three components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of SE(3), while the last three components\n  /// of ``a`` represents the rotation vector ``omega``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of SE(3), see below.\n  ///\n  SOPHUS_FUNC static SE3<Scalar> exp(Tangent const& a) {\n    using std::cos;\n    using std::sin;\n    Vector3<Scalar> const omega = a.template tail<3>();\n\n    Scalar theta;\n    SO3<Scalar> const so3 = SO3<Scalar>::expAndTheta(omega, &theta);\n    Matrix3<Scalar> const V = SO3<Scalar>::leftJacobian(omega, theta);\n    return SE3<Scalar>(so3, V * a.template head<3>());\n  }\n\n  /// Returns closest SE3 given arbitrary 4x4 matrix.\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC static enable_if_t<std::is_floating_point<S>::value, SE3>\n  fitToSE3(Matrix4<Scalar> const& T) {\n    return SE3(SO3<Scalar>::fitToSO3(T.template block<3, 3>(0, 0)),\n               T.template block<3, 1>(0, 3));\n  }\n\n  /// Returns the ith infinitesimal generators of SE(3).\n  ///\n  /// The infinitesimal generators of SE(3) are:\n  ///\n  /// ```\n  ///         |  0  0  0  1 |\n  ///   G_0 = |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_1 = |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  
///         |  0  0  0  0 |\n  ///   G_2 = |  0  0  0  0 |\n  ///         |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_3 = |  0  0 -1  0 |\n  ///         |  0  1  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  1  0 |\n  ///   G_4 = |  0  0  0  0 |\n  ///         | -1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0 -1  0  0 |\n  ///   G_5 = |  1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, 5].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 5, \"i should be in range [0,5].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 6-vector representation (= twist) and returns the\n  /// corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SE(3) is defined as\n  ///\n  ///   ``hat(.): R^6 -> R^{4x4},  hat(a) = sum_i a_i * G_i``  (for i=0,...,5)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of SE(3).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.setZero();\n    Omega.template topLeftCorner<3, 3>() =\n        SO3<Scalar>::hat(a.template tail<3>());\n    Omega.col(3).template head<3>() = a.template head<3>();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of SE(3). 
To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_se3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of SE(3).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector3<Scalar> const upsilon1 = a.template head<3>();\n    Vector3<Scalar> const upsilon2 = b.template head<3>();\n    Vector3<Scalar> const omega1 = a.template tail<3>();\n    Vector3<Scalar> const omega2 = b.template tail<3>();\n\n    Tangent res;\n    res.template head<3>() = omega1.cross(upsilon2) + upsilon1.cross(omega2);\n    res.template tail<3>() = omega1.cross(omega2);\n\n    return res;\n  }\n\n  /// Construct x-axis rotation.\n  ///\n  static SOPHUS_FUNC SE3 rotX(Scalar const& x) {\n    return SE3(SO3<Scalar>::rotX(x), Sophus::Vector3<Scalar>::Zero());\n  }\n\n  /// Construct y-axis rotation.\n  ///\n  static SOPHUS_FUNC SE3 rotY(Scalar const& y) {\n    return SE3(SO3<Scalar>::rotY(y), Sophus::Vector3<Scalar>::Zero());\n  }\n\n  /// Construct z-axis rotation.\n  ///\n  static SOPHUS_FUNC SE3 rotZ(Scalar const& z) {\n    return SE3(SO3<Scalar>::rotZ(z), Sophus::Vector3<Scalar>::Zero());\n  }\n\n  /// Draw uniform sample from SE(3) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  ///\n  template <class UniformRandomBitGenerator>\n  static SE3 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    return SE3(SO3<Scalar>::sampleUniform(generator),\n               Vector3<Scalar>(uniform(generator), uniform(generator),\n                               uniform(generator)));\n  }\n\n  /// Construct a translation only SE3 instance.\n  ///\n  template <class T0, class T1, class T2>\n  static SOPHUS_FUNC SE3 trans(T0 const& x, T1 const& y, T2 const& z) {\n    return SE3(SO3<Scalar>(), 
Vector3<Scalar>(x, y, z));\n  }\n\n  static SOPHUS_FUNC SE3 trans(Vector3<Scalar> const& xyz) {\n    return SE3(SO3<Scalar>(), xyz);\n  }\n\n  /// Construct x-axis translation.\n  ///\n  static SOPHUS_FUNC SE3 transX(Scalar const& x) {\n    return SE3::trans(x, Scalar(0), Scalar(0));\n  }\n\n  /// Construct y-axis translation.\n  ///\n  static SOPHUS_FUNC SE3 transY(Scalar const& y) {\n    return SE3::trans(Scalar(0), y, Scalar(0));\n  }\n\n  /// Construct z-axis translation.\n  ///\n  static SOPHUS_FUNC SE3 transZ(Scalar const& z) {\n    return SE3::trans(Scalar(0), Scalar(0), z);\n  }\n\n  /// vee-operator\n  ///\n  /// It takes 4x4-matrix representation ``Omega`` and maps it to the\n  /// corresponding 6-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -f  e  a |\n  ///                |  f  0 -d  b |\n  ///                | -e  d  0  c\n  ///                |  0  0  0  0 | .\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    Tangent upsilon_omega;\n    upsilon_omega.template head<3>() = Omega.col(3).template head<3>();\n    upsilon_omega.template tail<3>() =\n        SO3<Scalar>::vee(Omega.template topLeftCorner<3, 3>());\n    return upsilon_omega;\n  }\n\n protected:\n  SO3Member so3_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSOPHUS_FUNC SE3<Scalar, Options>::SE3()\n    : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<SE3>::value,\n                \"Assume standard layout for the use of offsetof check below.\");\n  static_assert(\n      offsetof(SE3, so3_) + sizeof(Scalar) * SO3<Scalar>::num_parameters ==\n          offsetof(SE3, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when 
using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``SE3``; derived from SE3Base.\n///\n/// Allows us to wrap SE3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE3<Scalar_>, Options>\n    : public Sophus::SE3Base<Map<Sophus::SE3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SE3Base<Map<Sophus::SE3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs)\n      : so3_(coeffs),\n        translation_(coeffs + Sophus::SO3<Scalar>::num_parameters) {}\n\n  /// Mutator of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO3<Scalar>, Options>& so3() { return so3_; }\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO3<Scalar>, Options> const& so3() const {\n    return so3_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar, Options>>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar, Options>> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO3<Scalar>, Options> so3_;\n  Map<Sophus::Vector3<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``SE3 const``; derived from SE3Base.\n///\n/// Allows us to wrap SE3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SE3<Scalar_> const, Options>\n    : public Sophus::SE3Base<Map<Sophus::SE3<Scalar_> const, Options>> {\n public:\n  using Base = 
Sophus::SE3Base<Map<Sophus::SE3<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar const* coeffs)\n      : so3_(coeffs),\n        translation_(coeffs + Sophus::SO3<Scalar>::num_parameters) {}\n\n  /// Accessor of SO3\n  ///\n  SOPHUS_FUNC Map<Sophus::SO3<Scalar> const, Options> const& so3() const {\n    return so3_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::SO3<Scalar> const, Options> const so3_;\n  Map<Sophus::Vector3<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/sim2.hpp",
    "content": "/// @file\n/// Similarity group Sim(2) - scaling, rotation and translation in 2d.\n\n#pragma once\n\n#include \"rxso2.hpp\"\n#include \"sim_details.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass Sim2;\nusing Sim2d = Sim2<double>;\nusing Sim2f = Sim2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::Sim2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector2<Scalar, Options>;\n  using RxSO2Type = Sophus::RxSO2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim2<Scalar_>, Options>>\n    : traits<Sophus::Sim2<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar>, Options>;\n  using RxSO2Type = Map<Sophus::RxSO2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim2<Scalar_> const, Options>>\n    : traits<Sophus::Sim2<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector2<Scalar> const, Options>;\n  using RxSO2Type = Map<Sophus::RxSO2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// Sim2 base type - implements Sim2 class but is storage agnostic.\n///\n/// Sim(2) is the group of rotations  and translation and scaling in 2d. It is\n/// the semi-direct product of R+xSO(2) and the 2d Euclidean vector space. 
The\n/// class is represented using a composition of RxSO2  for scaling plus\n/// rotation and a 2-vector for translation.\n///\n/// Sim(2) is neither compact, nor a commutative group.\n///\n/// See RxSO2 for more details of the scaling + rotation representation in 2d.\n///\ntemplate <class Derived>\nclass Sim2Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using RxSO2Type = typename Eigen::internal::traits<Derived>::RxSO2Type;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (two for translation, one for rotation and one for scaling).\n  static int constexpr DoF = 4;\n  /// Number of internal parameters used (2-tuple for complex number, two for\n  /// translation).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  /// Points are 2-dimensional\n  static int constexpr Dim = 2;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Hyperplane = Hyperplane2<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SIM2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using Sim2Product = Sim2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Adjoint res;\n    res.setZero();\n    res.template block<2, 2>(0, 0) = rxso2().matrix();\n    res(0, 2) = translation()[1];\n    res(1, 2) = -translation()[0];\n    res.template block<2, 1>(0, 3) = -translation();\n\n    res(2, 2) = Scalar(1);\n\n    res(3, 3) = Scalar(1);\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC Sim2<NewScalarType> cast() const {\n    return Sim2<NewScalarType>(rxso2().template cast<NewScalarType>(),\n                               translation().template cast<NewScalarType>());\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC Sim2<Scalar> inverse() const {\n    RxSO2<Scalar> invR = rxso2().inverse();\n    return Sim2<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent space (twist).\n  ///\n  /// To be specific, this function computes 
``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of Sim(2).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    /// The derivation of the closed-form Sim(2) logarithm for is done\n    /// analogously to the closed-form solution of the SE(2) logarithm, see\n    /// J. Gallier, D. Xu, \"Computing exponentials of skew symmetric matrices\n    /// and logarithms of orthogonal matrices\", IJRA 2002.\n    /// https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf\n    /// (Sec. 6., pp. 8)\n    Tangent res;\n    Vector2<Scalar> const theta_sigma = rxso2().log();\n    Scalar const theta = theta_sigma[0];\n    Scalar const sigma = theta_sigma[1];\n    Matrix2<Scalar> const Omega = SO2<Scalar>::hat(theta);\n    Matrix2<Scalar> const W_inv =\n        details::calcWInv<Scalar, 2>(Omega, theta, sigma, scale());\n\n    res.segment(0, 2) = W_inv * translation();\n    res[2] = theta;\n    res[3] = sigma;\n    return res;\n  }\n\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///   | s*R t |\n  ///   |  o  1 |\n  ///\n  /// where ``R`` is a 2x2 rotation matrix, ``s`` a scale factor, ``t`` a\n  /// translation 2-vector and ``o`` a 2-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogeneous_matrix;\n    homogeneous_matrix.template topLeftCorner<2, 3>() = matrix2x3();\n    homogeneous_matrix.row(2) =\n        Matrix<Scalar, 3, 1>(Scalar(0), Scalar(0), Scalar(1));\n    return homogeneous_matrix;\n  }\n\n  /// Returns the significant first two rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 2, 3> matrix2x3() const {\n    Matrix<Scalar, 2, 3> matrix;\n    matrix.template topLeftCorner<2, 2>() = rxso2().matrix();\n    matrix.col(2) = translation();\n    return matrix;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC 
Sim2Base<Derived>& operator=(\n      Sim2Base<OtherDerived> const& other) {\n    rxso2() = other.rxso2();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation plus scaling concatenation.\n  ///\n  /// Note: That scaling is calculated with saturation. See RxSO2 for\n  /// details.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC Sim2Product<OtherDerived> operator*(\n      Sim2Base<OtherDerived> const& other) const {\n    return Sim2Product<OtherDerived>(\n        rxso2() * other.rxso2(), translation() + rxso2() * other.translation());\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates, scales and translates a two dimensional point\n  /// ``p`` by the Sim(2) element ``(bar_sR_foo, t_bar)`` (= similarity\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_sR_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return rxso2() * p + translation();\n  }\n\n  /// Group action on homogeneous 2-points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        rxso2() * p.template head<2>() + p(2) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), p(2));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates, scales and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the Sim(2) element:\n  ///\n  /// Origin ``o`` is rotated, scaled and translated\n  /// Direction ``d`` is rotated\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    Line rotatedLine = rxso2() * l;\n    return Line(rotatedLine.origin() + translation(), rotatedLine.direction());\n  }\n\n  /// Group action on hyper-planes.\n  ///\n  /// This function rotates a hyper-plane ``n.x + d = 0`` by the Sim2\n  /// element:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is scaled and adjusted for translation\n  ///\n  /// Note that in 2d-case hyper-planes are just another parametrization of\n  /// lines\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    Hyperplane const rotated = rxso2() * p;\n    return Hyperplane(rotated.normal(),\n                      rotated.offset() - translation().dot(rotated.normal()));\n  }\n\n  /// Returns internal parameters of Sim(2).\n  ///\n  /// It returns (c[0], c[1], t[0], t[1]),\n  /// with c being the complex number, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << rxso2().params(), translation();\n    return p;\n  }\n\n  /// In-place group multiplication. 
This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC Sim2Base<Derived>& operator*=(\n      Sim2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns derivative of  this * Sim2::exp(x)  wrt. x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    J.template block<2, 2>(0, 0).setZero();\n    J.template block<2, 2>(0, 2) = rxso2().Dx_this_mul_exp_x_at_0();\n    J.template block<2, 2>(2, 2).setZero();\n    J.template block<2, 2>(2, 0) = rxso2().matrix();\n    return J;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    J.template block<2, 2>(0, 0).setZero();\n    J.template block<2, 2>(0, 2) = rxso2().inverse().matrix();\n    J.template block<2, 2>(2, 0) = rxso2().Dx_log_this_inv_by_x_at_this();\n    J.template block<2, 2>(2, 2).setZero();\n    return J;\n  }\n\n  /// Setter of non-zero complex number.\n  ///\n  /// Precondition: ``z`` must not be close to zero.\n  ///\n  SOPHUS_FUNC void setComplex(Vector2<Scalar> const& z) {\n    rxso2().setComplex(z);\n  }\n\n  /// Accessor of complex number.\n  ///\n  SOPHUS_FUNC\n  typename Eigen::internal::traits<Derived>::RxSO2Type::ComplexType const&\n  complex() const {\n    return rxso2().complex();\n  }\n\n  /// Returns Rotation matrix\n  ///\n  SOPHUS_FUNC Matrix2<Scalar> rotationMatrix() const {\n    return rxso2().rotationMatrix();\n  }\n\n  /// Mutator of SO2 group.\n  ///\n  SOPHUS_FUNC RxSO2Type& rxso2() {\n    return 
static_cast<Derived*>(this)->rxso2();\n  }\n\n  /// Accessor of SO2 group.\n  ///\n  SOPHUS_FUNC RxSO2Type const& rxso2() const {\n    return static_cast<Derived const*>(this)->rxso2();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC Scalar scale() const { return rxso2().scale(); }\n\n  /// Setter of complex number using rotation matrix ``R``, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix2<Scalar>& R) {\n    rxso2().setRotationMatrix(R);\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  /// Note: This function as a significant computational cost, since it has to\n  /// call the square root twice.\n  ///\n  SOPHUS_FUNC void setScale(Scalar const& scale) { rxso2().setScale(scale); }\n\n  /// Setter of complex number using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 2x2 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Matrix2<Scalar> const& sR) {\n    rxso2().setScaledRotationMatrix(sR);\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n};\n\n/// Sim2 using default storage; derived from Sim2Base.\ntemplate <class Scalar_, int Options>\nclass Sim2 : public Sim2Base<Sim2<Scalar_, Options>> {\n public:\n  using Base = Sim2Base<Sim2<Scalar_, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using RxSo2Member = RxSO2<Scalar, Options>;\n  using TranslationMember = Vector2<Scalar, Options>;\n\n  using 
Base::operator=;\n\n  /// Define copy-assignment operator explicitly. The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC Sim2& operator=(Sim2 const& other) = default;\n\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes similarity transform to the identity.\n  ///\n  SOPHUS_FUNC Sim2();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC Sim2(Sim2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim2(Sim2Base<OtherDerived> const& other)\n      : rxso2_(other.rxso2()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from RxSO2 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC Sim2(RxSO2Base<OtherDerived> const& rxso2,\n                   Eigen::MatrixBase<D> const& translation)\n      : rxso2_(rxso2), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from complex number and translation vector.\n  ///\n  /// Precondition: complex number must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC Sim2(Vector2<Scalar> const& complex_number,\n                   Eigen::MatrixBase<D> const& translation)\n      : rxso2_(complex_number), translation_(translation) {\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from 
3x3 matrix\n  ///\n  /// Precondition: Top-left 2x2 matrix needs to be \"scaled-orthogonal\" with\n  ///               positive determinant. The last row must be ``(0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit Sim2(Matrix<Scalar, 3, 3> const& T)\n      : rxso2_((T.template topLeftCorner<2, 2>()).eval()),\n        translation_(T.template block<2, 1>(0, 2)) {}\n\n  /// This provides unsafe read/write access to internal data. Sim(2) is\n  /// represented by a complex number (two parameters) and a 2-vector. When\n  /// using direct write access, the user needs to take care of that the\n  /// complex number is not set close to zero.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // rxso2_ and translation_ are laid out sequentially with no padding\n    return rxso2_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    // rxso2_ and translation_ are laid out sequentially with no padding\n    return rxso2_.data();\n  }\n\n  /// Accessor of RxSO2\n  ///\n  SOPHUS_FUNC RxSo2Member& rxso2() { return rxso2_; }\n\n  /// Mutator of RxSO2\n  ///\n  SOPHUS_FUNC RxSo2Member const& rxso2() const { return rxso2_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    J.template block<2, 2>(0, 0).setZero();\n    J.template block<2, 2>(0, 2) = RxSO2<Scalar>::Dx_exp_x_at_0();\n    J.template block<2, 2>(2, 0).setIdentity();\n    J.template block<2, 2>(2, 2).setZero();\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. 
x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      const Tangent& a) {\n    static Matrix2<Scalar> const I = Matrix2<Scalar>::Identity();\n    static Scalar const one(1.0);\n\n    Scalar const theta = a[2];\n    Scalar const sigma = a[3];\n\n    Matrix2<Scalar> const Omega = SO2<Scalar>::hat(theta);\n    Matrix2<Scalar> const Omega_dtheta = SO2<Scalar>::hat(one);\n    Matrix2<Scalar> const Omega2 = Omega * Omega;\n    Matrix2<Scalar> const Omega2_dtheta =\n        Omega_dtheta * Omega + Omega * Omega_dtheta;\n    Matrix2<Scalar> const W = details::calcW<Scalar, 2>(Omega, theta, sigma);\n    Vector2<Scalar> const upsilon = a.segment(0, 2);\n\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    J.template block<2, 2>(0, 0).setZero();\n    J.template block<2, 2>(0, 2) =\n        RxSO2<Scalar>::Dx_exp_x(a.template tail<2>());\n    J.template block<2, 2>(2, 0) = W;\n\n    Scalar A, B, C, A_dtheta, B_dtheta, A_dsigma, B_dsigma, C_dsigma;\n    details::calcW_derivatives(theta, sigma, A, B, C, A_dsigma, B_dsigma,\n                               C_dsigma, A_dtheta, B_dtheta);\n\n    J.template block<2, 1>(2, 2) = (A_dtheta * Omega + A * Omega_dtheta +\n                                    B_dtheta * Omega2 + B * Omega2_dtheta) *\n                                   upsilon;\n    J.template block<2, 1>(2, 3) =\n        (A_dsigma * Omega + B_dsigma * Omega2 + C_dsigma * I) * upsilon;\n\n    return J;\n  }\n\n  /// Returns derivative of exp(x) * p wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 2, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    Sophus::Matrix<Scalar, 2, DoF> J;\n    J << Sophus::Matrix2<Scalar>::Identity(),\n        Sophus::RxSO2<Scalar>::Dx_exp_x_times_point_at_0(point);\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. 
``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Derivative of Lie bracket with respect to first element.\n  ///\n  /// This function returns ``D_a [a, b]`` with ``D_a`` being the\n  /// differential operator with respect to ``a``, ``[a, b]`` being the lie\n  /// bracket of the Lie algebra sim(2).\n  /// See ``lieBracket()`` below.\n  ///\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space and returns the\n  /// corresponding element of the group Sim(2).\n  ///\n  /// The first two components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of Sim(2), the following two components\n  /// of ``a`` represents the rotation ``theta`` and the final component\n  /// represents the logarithm of the scaling factor ``sigma``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of Sim(2), see below.\n  ///\n  SOPHUS_FUNC static Sim2<Scalar> exp(Tangent const& a) {\n    // For the derivation of the exponential map of Sim(N) see\n    // H. Strasdat, \"Local Accuracy and Global Consistency for Efficient Visual\n    // SLAM\", PhD thesis, 2012.\n    // http:///hauke.strasdat.net/files/strasdat_thesis_2012.pdf (A.5, pp. 
186)\n    Vector2<Scalar> const upsilon = a.segment(0, 2);\n    Scalar const theta = a[2];\n    Scalar const sigma = a[3];\n    RxSO2<Scalar> rxso2 = RxSO2<Scalar>::exp(a.template tail<2>());\n    Matrix2<Scalar> const Omega = SO2<Scalar>::hat(theta);\n    Matrix2<Scalar> const W = details::calcW<Scalar, 2>(Omega, theta, sigma);\n    return Sim2<Scalar>(rxso2, W * upsilon);\n  }\n\n  /// Returns the ith infinitesimal generators of Sim(2).\n  ///\n  /// The infinitesimal generators of Sim(2) are:\n  ///\n  /// ```\n  ///         |  0  0  1 |\n  ///   G_0 = |  0  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0  0  0 |\n  ///   G_1 = |  0  0  1 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  ///\n  ///         |  1  0  0 |\n  ///   G_3 = |  0  1  0 |\n  ///         |  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, 3].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 || i <= 3, \"i should be in range [0,3].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It takes in the 4-vector representation and returns the corresponding\n  /// matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of Sim(2) is defined as\n  ///\n  ///   ``hat(.): R^4 -> R^{3x3},  hat(a) = sum_i a_i * G_i``  (for i=0,...,6)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of Sim(2).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.template topLeftCorner<2, 2>() =\n        RxSO2<Scalar>::hat(a.template tail<2>());\n    Omega.col(2).template head<2>() = a.template head<2>();\n    Omega.row(2).setZero();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of Sim(2). 
To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_sim2 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of Sim(2).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector2<Scalar> const upsilon1 = a.template head<2>();\n    Vector2<Scalar> const upsilon2 = b.template head<2>();\n    Scalar const theta1 = a[2];\n    Scalar const theta2 = b[2];\n    Scalar const sigma1 = a[3];\n    Scalar const sigma2 = b[3];\n\n    Tangent res;\n    res[0] = -theta1 * upsilon2[1] + theta2 * upsilon1[1] +\n             sigma1 * upsilon2[0] - sigma2 * upsilon1[0];\n    res[1] = theta1 * upsilon2[0] - theta2 * upsilon1[0] +\n             sigma1 * upsilon2[1] - sigma2 * upsilon1[1];\n    res[2] = Scalar(0);\n    res[3] = Scalar(0);\n\n    return res;\n  }\n\n  /// Draw uniform sample from Sim(2) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale is in [0.5, 2].\n  ///\n  template <class UniformRandomBitGenerator>\n  static Sim2 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    return Sim2(RxSO2<Scalar>::sampleUniform(generator),\n                Vector2<Scalar>(uniform(generator), uniform(generator)));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding 4-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  d -c  a |\n  ///                |  c  d  b |\n  ///                |  0  0  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n  
  Tangent upsilon_omega_sigma;\n    upsilon_omega_sigma.template head<2>() = Omega.col(2).template head<2>();\n    upsilon_omega_sigma.template tail<2>() =\n        RxSO2<Scalar>::vee(Omega.template topLeftCorner<2, 2>());\n    return upsilon_omega_sigma;\n  }\n\n protected:\n  RxSo2Member rxso2_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSOPHUS_FUNC Sim2<Scalar, Options>::Sim2()\n    : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<Sim2>::value,\n                \"Assume standard layout for the use of offset of check below.\");\n  static_assert(\n      offsetof(Sim2, rxso2_) + sizeof(Scalar) * RxSO2<Scalar>::num_parameters ==\n          offsetof(Sim2, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``Sim2``; derived from Sim2Base.\n///\n/// Allows us to wrap Sim2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim2<Scalar_>, Options>\n    : public Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_>, Options>> {\n public:\n  using Base = Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs)\n      : rxso2_(coeffs),\n        translation_(coeffs + Sophus::RxSO2<Scalar>::num_parameters) {}\n\n  /// Mutator of RxSO2\n  ///\n  SOPHUS_FUNC 
Map<Sophus::RxSO2<Scalar>, Options>& rxso2() { return rxso2_; }\n\n  /// Accessor of RxSO2\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO2<Scalar>, Options> const& rxso2() const {\n    return rxso2_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar>, Options> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO2<Scalar>, Options> rxso2_;\n  Map<Sophus::Vector2<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``Sim2 const``; derived from Sim2Base.\n///\n/// Allows us to wrap RxSO2 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim2<Scalar_> const, Options>\n    : public Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::Sim2Base<Map<Sophus::Sim2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar const* coeffs)\n      : rxso2_(coeffs),\n        translation_(coeffs + Sophus::RxSO2<Scalar>::num_parameters) {}\n\n  /// Accessor of RxSO2\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO2<Scalar> const, Options> const& rxso2() const {\n    return rxso2_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO2<Scalar> const, Options> const rxso2_;\n  Map<Sophus::Vector2<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/sim3.hpp",
    "content": "/// @file\n/// Similarity group Sim(3) - scaling, rotation and translation in 3d.\n\n#pragma once\n\n#include \"rxso3.hpp\"\n#include \"sim_details.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass Sim3;\nusing Sim3d = Sim3<double>;\nusing Sim3f = Sim3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options>\nstruct traits<Sophus::Sim3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Sophus::Vector3<Scalar, Options>;\n  using RxSO3Type = Sophus::RxSO3<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim3<Scalar_>, Options>>\n    : traits<Sophus::Sim3<Scalar_, Options>> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar>, Options>;\n  using RxSO3Type = Map<Sophus::RxSO3<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options>\nstruct traits<Map<Sophus::Sim3<Scalar_> const, Options>>\n    : traits<Sophus::Sim3<Scalar_, Options> const> {\n  using Scalar = Scalar_;\n  using TranslationType = Map<Sophus::Vector3<Scalar> const, Options>;\n  using RxSO3Type = Map<Sophus::RxSO3<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// Sim3 base type - implements Sim3 class but is storage agnostic.\n///\n/// Sim(3) is the group of rotations  and translation and scaling in 3d. It is\n/// the semi-direct product of R+xSO(3) and the 3d Euclidean vector space.  
The\n/// class is represented using a composition of RxSO3  for scaling plus\n/// rotation and a 3-vector for translation.\n///\n/// Sim(3) is neither compact, nor a commutative group.\n///\n/// See RxSO3 for more details of the scaling + rotation representation in 3d.\n///\ntemplate <class Derived>\nclass Sim3Base {\n public:\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using TranslationType =\n      typename Eigen::internal::traits<Derived>::TranslationType;\n  using RxSO3Type = typename Eigen::internal::traits<Derived>::RxSO3Type;\n  using QuaternionType = typename RxSO3Type::QuaternionType;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space\n  /// (three for translation, three for rotation and one for scaling).\n  static int constexpr DoF = 7;\n  /// Number of internal parameters used (4-tuple for quaternion, three for\n  /// translation).\n  static int constexpr num_parameters = 7;\n  /// Group transformations are 4x4 matrices.\n  static int constexpr N = 4;\n  /// Points are 3-dimensional\n  static int constexpr Dim = 3;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Hyperplane = Hyperplane3<Scalar>;\n  using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. 
This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with Sim3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using Sim3Product = Sim3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const {\n    Matrix3<Scalar> const R = rxso3().rotationMatrix();\n    Adjoint res;\n    res.setZero();\n    res.template block<3, 3>(0, 0) = rxso3().matrix();\n    res.template block<3, 3>(0, 3) = SO3<Scalar>::hat(translation()) * R;\n    res.template block<3, 1>(0, 6) = -translation();\n\n    res.template block<3, 3>(3, 3) = R;\n\n    res(6, 6) = Scalar(1);\n    return res;\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC Sim3<NewScalarType> cast() const {\n    return Sim3<NewScalarType>(rxso3().template cast<NewScalarType>(),\n                               translation().template cast<NewScalarType>());\n  }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC Sim3<Scalar> inverse() const {\n    RxSO3<Scalar> invR = rxso3().inverse();\n    return Sim3<Scalar>(invR, invR * (translation() * Scalar(-1)));\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rigid body transformations) to elements of the\n  /// tangent 
space (twist).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of Sim(3).\n  ///\n  SOPHUS_FUNC Tangent log() const {\n    // The derivation of the closed-form Sim(3) logarithm for is done\n    // analogously to the closed-form solution of the SE(3) logarithm, see\n    // J. Gallier, D. Xu, \"Computing exponentials of skew symmetric matrices\n    // and logarithms of orthogonal matrices\", IJRA 2002.\n    // https:///pdfs.semanticscholar.org/cfe3/e4b39de63c8cabd89bf3feff7f5449fc981d.pdf\n    // (Sec. 6., pp. 8)\n    Tangent res;\n    auto omega_sigma_and_theta = rxso3().logAndTheta();\n    Vector3<Scalar> const omega =\n        omega_sigma_and_theta.tangent.template head<3>();\n    Scalar sigma = omega_sigma_and_theta.tangent[3];\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n    Matrix3<Scalar> const W_inv = details::calcWInv<Scalar, 3>(\n        Omega, omega_sigma_and_theta.theta, sigma, scale());\n\n    res.segment(0, 3) = W_inv * translation();\n    res.segment(3, 3) = omega;\n    res[6] = sigma;\n    return res;\n  }\n\n  /// Returns 4x4 matrix representation of the instance.\n  ///\n  /// It has the following form:\n  ///\n  ///     | s*R t |\n  ///     |  o  1 |\n  ///\n  /// where ``R`` is a 3x3 rotation matrix, ``s`` a scale factor, ``t`` a\n  /// translation 3-vector and ``o`` a 3-column vector of zeros.\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Transformation homogeneous_matrix;\n    homogeneous_matrix.template topLeftCorner<3, 4>() = matrix3x4();\n    homogeneous_matrix.row(3) =\n        Matrix<Scalar, 4, 1>(Scalar(0), Scalar(0), Scalar(0), Scalar(1));\n    return homogeneous_matrix;\n  }\n\n  /// Returns the significant first three rows of the matrix above.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, 3, 4> matrix3x4() const {\n    Matrix<Scalar, 3, 4> matrix;\n    matrix.template topLeftCorner<3, 3>() = 
rxso3().matrix();\n    matrix.col(3) = translation();\n    return matrix;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim3Base<Derived>& operator=(\n      Sim3Base<OtherDerived> const& other) {\n    rxso3() = other.rxso3();\n    translation() = other.translation();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation plus scaling concatenation.\n  ///\n  /// Note: That scaling is calculated with saturation. See RxSO3 for\n  /// details.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC Sim3Product<OtherDerived> operator*(\n      Sim3Base<OtherDerived> const& other) const {\n    return Sim3Product<OtherDerived>(\n        rxso3() * other.rxso3(), translation() + rxso3() * other.translation());\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates, scales and translates a three dimensional point\n  /// ``p`` by the Sim(3) element ``(bar_sR_foo, t_bar)`` (= similarity\n  /// transformation):\n  ///\n  ///   ``p_bar = bar_sR_foo * p_foo + t_bar``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    return rxso3() * p + translation();\n  }\n\n  /// Group action on homogeneous 3-points. 
See above for more details.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const PointProduct<HPointDerived> tp =\n        rxso3() * p.template head<3>() + p(3) * translation();\n    return HomogeneousPointProduct<HPointDerived>(tp(0), tp(1), tp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates, scales and translates a parametrized line\n  /// ``l(t) = o + t * d`` by the Sim(3) element:\n  ///\n  /// Origin ``o`` is rotated, scaled and translated\n  /// Direction ``d`` is rotated\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    Line rotatedLine = rxso3() * l;\n    return Line(rotatedLine.origin() + translation(), rotatedLine.direction());\n  }\n\n  /// Group action on planes.\n  ///\n  /// This function rotates and translates a plane\n  /// ``n.x + d = 0`` by the Sim(3) element:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is adjusted for scale and translation\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    Hyperplane const rotated = rxso3() * p;\n    return Hyperplane(rotated.normal(),\n                      rotated.offset() - translation().dot(rotated.normal()));\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO3's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC Sim3Base<Derived>& operator*=(\n      Sim3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns derivative of  this * Sim3::exp(x) w.r.t. 
x at x = 0\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    J.template block<4, 3>(0, 0).setZero();\n    J.template block<4, 4>(0, 3) = rxso3().Dx_this_mul_exp_x_at_0();\n    J.template block<3, 3>(4, 0) = rxso3().matrix();\n    J.template block<3, 4>(4, 3).setZero();\n\n    return J;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    Matrix<Scalar, DoF, num_parameters> J;\n    J.template block<3, 4>(0, 0).setZero();\n    J.template block<3, 3>(0, 4) = rxso3().inverse().matrix();\n    J.template block<4, 4>(3, 0) = rxso3().Dx_log_this_inv_by_x_at_this();\n    J.template block<4, 3>(3, 4).setZero();\n    return J;\n  }\n\n  /// Returns internal parameters of Sim(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real, t[0], t[1], t[2]),\n  /// with q being the quaternion, t the translation 3-vector.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    Sophus::Vector<Scalar, num_parameters> p;\n    p << rxso3().params(), translation();\n    return p;\n  }\n\n  /// Setter of non-zero quaternion.\n  ///\n  /// Precondition: ``quat`` must not be close to zero.\n  ///\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quat) {\n    rxso3().setQuaternion(quat);\n  }\n\n  /// Accessor of quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& quaternion() const {\n    return rxso3().quaternion();\n  }\n\n  /// Returns Rotation matrix\n  ///\n  SOPHUS_FUNC Matrix3<Scalar> rotationMatrix() const {\n    return rxso3().rotationMatrix();\n  }\n\n  /// Mutator of SO3 group.\n  ///\n  SOPHUS_FUNC RxSO3Type& rxso3() {\n    return static_cast<Derived*>(this)->rxso3();\n  }\n\n  /// Accessor of SO3 group.\n  ///\n  SOPHUS_FUNC RxSO3Type const& rxso3() const {\n    return static_cast<Derived 
const*>(this)->rxso3();\n  }\n\n  /// Returns scale.\n  ///\n  SOPHUS_FUNC Scalar scale() const { return rxso3().scale(); }\n\n  /// Setter of quaternion using rotation matrix ``R``, leaves scale as is.\n  ///\n  SOPHUS_FUNC void setRotationMatrix(Matrix3<Scalar>& R) {\n    rxso3().setRotationMatrix(R);\n  }\n\n  /// Sets scale and leaves rotation as is.\n  ///\n  /// Note: This function as a significant computational cost, since it has to\n  /// call the square root twice.\n  ///\n  SOPHUS_FUNC void setScale(Scalar const& scale) { rxso3().setScale(scale); }\n\n  /// Setter of quaternion using scaled rotation matrix ``sR``.\n  ///\n  /// Precondition: The 3x3 matrix must be \"scaled orthogonal\"\n  ///               and have a positive determinant.\n  ///\n  SOPHUS_FUNC void setScaledRotationMatrix(Matrix3<Scalar> const& sR) {\n    rxso3().setScaledRotationMatrix(sR);\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationType& translation() {\n    return static_cast<Derived*>(this)->translation();\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationType const& translation() const {\n    return static_cast<Derived const*>(this)->translation();\n  }\n};\n\n/// Sim3 using default storage; derived from Sim3Base.\ntemplate <class Scalar_, int Options>\nclass Sim3 : public Sim3Base<Sim3<Scalar_, Options>> {\n public:\n  using Base = Sim3Base<Sim3<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using RxSo3Member = RxSO3<Scalar, Options>;\n  using TranslationMember = Vector3<Scalar, Options>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. 
The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC Sim3& operator=(Sim3 const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes similarity transform to the identity.\n  ///\n  SOPHUS_FUNC Sim3();\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC Sim3(Sim3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC Sim3(Sim3Base<OtherDerived> const& other)\n      : rxso3_(other.rxso3()), translation_(other.translation()) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from RxSO3 and translation vector\n  ///\n  template <class OtherDerived, class D>\n  SOPHUS_FUNC explicit Sim3(RxSO3Base<OtherDerived> const& rxso3,\n                            Eigen::MatrixBase<D> const& translation)\n      : rxso3_(rxso3), translation_(translation) {\n    static_assert(std::is_same<typename OtherDerived::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from quaternion and translation vector.\n  ///\n  /// Precondition: quaternion must not be close to zero.\n  ///\n  template <class D1, class D2>\n  SOPHUS_FUNC explicit Sim3(Eigen::QuaternionBase<D1> const& quaternion,\n                            Eigen::MatrixBase<D2> const& translation)\n      : rxso3_(quaternion), translation_(translation) {\n    static_assert(std::is_same<typename D1::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    static_assert(std::is_same<typename D2::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n  }\n\n  /// Constructor from scale factor, 
unit quaternion, and translation vector.\n  ///\n  /// Precondition: quaternion must not be close to zero.\n  ///\n  template <class D1, class D2>\n  SOPHUS_FUNC explicit Sim3(Scalar const& scale,\n                            Eigen::QuaternionBase<D1> const& unit_quaternion,\n                            Eigen::MatrixBase<D2> const& translation)\n      : Sim3(RxSO3<Scalar>(scale, unit_quaternion), translation) {}\n\n  /// Constructor from 4x4 matrix\n  ///\n  /// Precondition: Top-left 3x3 matrix needs to be \"scaled-orthogonal\" with\n  ///               positive determinant. The last row must be ``(0, 0, 0, 1)``.\n  ///\n  SOPHUS_FUNC explicit Sim3(Matrix<Scalar, 4, 4> const& T)\n      : rxso3_(T.template topLeftCorner<3, 3>()),\n        translation_(T.template block<3, 1>(0, 3)) {}\n\n  /// This provides unsafe read/write access to internal data. Sim(3) is\n  /// represented by an Eigen::Quaternion (four parameters) and a 3-vector. When\n  /// using direct write access, the user needs to take care of that the\n  /// quaternion is not set close to zero.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    // rxso3_ and translation_ are laid out sequentially with no padding\n    return rxso3_.data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    // rxso3_ and translation_ are laid out sequentially with no padding\n    return rxso3_.data();\n  }\n\n  /// Accessor of RxSO3\n  ///\n  SOPHUS_FUNC RxSo3Member& rxso3() { return rxso3_; }\n\n  /// Mutator of RxSO3\n  ///\n  SOPHUS_FUNC RxSo3Member const& rxso3() const { return rxso3_; }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember& translation() { return translation_; }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC TranslationMember const& translation() const {\n    return translation_;\n  }\n\n  /// Returns derivative of exp(x) wrt. 
x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    J.template block<4, 3>(0, 0).setZero();\n    J.template block<4, 4>(0, 3) = RxSO3<Scalar>::Dx_exp_x_at_0();\n    J.template block<3, 3>(4, 0).setIdentity();\n    J.template block<3, 4>(4, 3).setZero();\n    return J;\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      const Tangent& a) {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n\n    static Matrix3<Scalar> const I = Matrix3<Scalar>::Identity();\n    Vector3<Scalar> const omega = a.template segment<3>(3);\n    Vector3<Scalar> const upsilon = a.template head<3>();\n    Scalar const sigma = a[6];\n    Scalar const theta = omega.norm();\n\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n    Matrix3<Scalar> const Omega2 = Omega * Omega;\n    Vector3<Scalar> theta_domega;\n    if (theta < Constants<Scalar>::epsilon()) {\n      theta_domega = Vector3<Scalar>::Zero();\n    } else {\n      theta_domega = omega / theta;\n    }\n    static Matrix3<Scalar> const Omega_domega[3] = {\n        SO3<Scalar>::hat(Vector3<Scalar>::Unit(0)),\n        SO3<Scalar>::hat(Vector3<Scalar>::Unit(1)),\n        SO3<Scalar>::hat(Vector3<Scalar>::Unit(2))};\n\n    Matrix3<Scalar> const Omega2_domega[3] = {\n        Omega_domega[0] * Omega + Omega * Omega_domega[0],\n        Omega_domega[1] * Omega + Omega * Omega_domega[1],\n        Omega_domega[2] * Omega + Omega * Omega_domega[2]};\n\n    Matrix3<Scalar> const W = details::calcW<Scalar, 3>(Omega, theta, sigma);\n\n    J.template block<4, 3>(0, 0).setZero();\n    J.template block<4, 4>(0, 3) =\n        RxSO3<Scalar>::Dx_exp_x(a.template tail<4>());\n    J.template block<3, 4>(4, 3).setZero();\n    J.template block<3, 3>(4, 0) = W;\n\n    Scalar A, B, C, A_dtheta, B_dtheta, A_dsigma, B_dsigma, C_dsigma;\n    
details::calcW_derivatives(theta, sigma, A, B, C, A_dsigma, B_dsigma,\n                               C_dsigma, A_dtheta, B_dtheta);\n\n    for (int i = 0; i < 3; ++i) {\n      J.template block<3, 1>(4, 3 + i) =\n          (A_dtheta * theta_domega[i] * Omega + A * Omega_domega[i] +\n           B_dtheta * theta_domega[i] * Omega2 + B * Omega2_domega[i]) *\n          upsilon;\n    }\n\n    J.template block<3, 1>(4, 6) =\n        (A_dsigma * Omega + B_dsigma * Omega2 + C_dsigma * I) * upsilon;\n\n    return J;\n  }\n\n  /// Returns derivative of exp(x) * p wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 3, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    Sophus::Matrix<Scalar, 3, DoF> J;\n    J << Sophus::Matrix3<Scalar>::Identity(),\n        Sophus::RxSO3<Scalar>::Dx_exp_x_times_point_at_0(point);\n    return J;\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space and returns the\n  /// corresponding element of the group Sim(3).\n  ///\n  /// The first three components of ``a`` represent the translational part\n  /// ``upsilon`` in the tangent space of Sim(3), the following three components\n  /// of ``a`` represents the rotation vector ``omega`` and the final component\n  /// represents the logarithm of the scaling factor ``sigma``.\n  /// To be more specific, this function computes ``expmat(hat(a))`` with\n  /// ``expmat(.)`` being the matrix exponential and ``hat(.)`` the hat-operator\n  /// of Sim(3), see below.\n  ///\n  SOPHUS_FUNC static Sim3<Scalar> exp(Tangent const& a) {\n    // For the derivation of the exponential map of Sim(3) see\n    // H. 
Strasdat, \"Local Accuracy and Global Consistency for Efficient Visual\n    // SLAM\", PhD thesis, 2012.\n    // http:///hauke.strasdat.net/files/strasdat_thesis_2012.pdf (A.5, pp. 186)\n    Vector3<Scalar> const upsilon = a.segment(0, 3);\n    Vector3<Scalar> const omega = a.segment(3, 3);\n    Scalar const sigma = a[6];\n    Scalar theta;\n    RxSO3<Scalar> rxso3 =\n        RxSO3<Scalar>::expAndTheta(a.template tail<4>(), &theta);\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n\n    Matrix3<Scalar> const W = details::calcW<Scalar, 3>(Omega, theta, sigma);\n    return Sim3<Scalar>(rxso3, W * upsilon);\n  }\n\n  /// Returns the ith infinitesimal generators of Sim(3).\n  ///\n  /// The infinitesimal generators of Sim(3) are:\n  ///\n  /// ```\n  ///         |  0  0  0  1 |\n  ///   G_0 = |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_1 = |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_2 = |  0  0  0  0 |\n  ///         |  0  0  0  1 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  0  0 |\n  ///   G_3 = |  0  0 -1  0 |\n  ///         |  0  1  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0  0  1  0 |\n  ///   G_4 = |  0  0  0  0 |\n  ///         | -1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  0 -1  0  0 |\n  ///   G_5 = |  1  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///         |  0  0  0  0 |\n  ///\n  ///         |  1  0  0  0 |\n  ///   G_6 = |  0  1  0  0 |\n  ///         |  0  0  1  0 |\n  ///         |  0  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be in [0, 6].\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 || i <= 6, \"i should be in range [0,6].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  }\n\n  /// hat-operator\n  ///\n  /// It 
takes in the 7-vector representation and returns the corresponding\n  /// matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of Sim(3) is defined as\n  ///\n  ///   ``hat(.): R^7 -> R^{4x4},  hat(a) = sum_i a_i * G_i``  (for i=0,...,6)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of Sim(3).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& a) {\n    Transformation Omega;\n    Omega.template topLeftCorner<3, 3>() =\n        RxSO3<Scalar>::hat(a.template tail<4>());\n    Omega.col(3).template head<3>() = a.template head<3>();\n    Omega.row(3).setZero();\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of Sim(3). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_sim3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of Sim(3).\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& a, Tangent const& b) {\n    Vector3<Scalar> const upsilon1 = a.template head<3>();\n    Vector3<Scalar> const upsilon2 = b.template head<3>();\n    Vector3<Scalar> const omega1 = a.template segment<3>(3);\n    Vector3<Scalar> const omega2 = b.template segment<3>(3);\n    Scalar sigma1 = a[6];\n    Scalar sigma2 = b[6];\n\n    Tangent res;\n    res.template head<3>() = SO3<Scalar>::hat(omega1) * upsilon2 +\n                             SO3<Scalar>::hat(upsilon1) * omega2 +\n                             sigma1 * upsilon2 - sigma2 * upsilon1;\n    res.template segment<3>(3) = omega1.cross(omega2);\n    res[6] = Scalar(0);\n\n    return res;\n  }\n\n  /// Draw uniform sample from Sim(3) manifold.\n  ///\n  /// Translations are drawn component-wise from the range [-1, 1].\n  /// The scale factor is drawn uniformly in log2-space from [-1, 1],\n  /// hence the scale 
is in [0.5, 2].\n  ///\n  template <class UniformRandomBitGenerator>\n  static Sim3 sampleUniform(UniformRandomBitGenerator& generator) {\n    std::uniform_real_distribution<Scalar> uniform(Scalar(-1), Scalar(1));\n    return Sim3(RxSO3<Scalar>::sampleUniform(generator),\n                Vector3<Scalar>(uniform(generator), uniform(generator),\n                                uniform(generator)));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 4x4-matrix representation ``Omega`` and maps it to the\n  /// corresponding 7-vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  g -f  e  a |\n  ///                |  f  g -d  b |\n  ///                | -e  d  g  c |\n  ///                |  0  0  0  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    Tangent upsilon_omega_sigma;\n    upsilon_omega_sigma.template head<3>() = Omega.col(3).template head<3>();\n    upsilon_omega_sigma.template tail<4>() =\n        RxSO3<Scalar>::vee(Omega.template topLeftCorner<3, 3>());\n    return upsilon_omega_sigma;\n  }\n\n protected:\n  RxSo3Member rxso3_;\n  TranslationMember translation_;\n};\n\ntemplate <class Scalar, int Options>\nSOPHUS_FUNC Sim3<Scalar, Options>::Sim3()\n    : translation_(TranslationMember::Zero()) {\n  static_assert(std::is_standard_layout<Sim3>::value,\n                \"Assume standard layout for the use of offset of check below.\");\n  static_assert(\n      offsetof(Sim3, rxso3_) + sizeof(Scalar) * RxSO3<Scalar>::num_parameters ==\n          offsetof(Sim3, translation_),\n      \"This class assumes packed storage and hence will only work \"\n      \"correctly depending on the compiler (options) - in \"\n      \"particular when using [this->data(), this-data() + \"\n      \"num_parameters] to access the raw data in a contiguous fashion.\");\n}\n\n}  // namespace 
Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``Sim3``; derived from Sim3Base.\n///\n/// Allows us to wrap Sim3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim3<Scalar_>, Options>\n    : public Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs)\n      : rxso3_(coeffs),\n        translation_(coeffs + Sophus::RxSO3<Scalar>::num_parameters) {}\n\n  /// Mutator of RxSO3\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO3<Scalar>, Options>& rxso3() { return rxso3_; }\n\n  /// Accessor of RxSO3\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO3<Scalar>, Options> const& rxso3() const {\n    return rxso3_;\n  }\n\n  /// Mutator of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar>, Options>& translation() {\n    return translation_;\n  }\n\n  /// Accessor of translation vector\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar>, Options> const& translation() const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO3<Scalar>, Options> rxso3_;\n  Map<Sophus::Vector3<Scalar>, Options> translation_;\n};\n\n/// Specialization of Eigen::Map for ``Sim3 const``; derived from Sim3Base.\n///\n/// Allows us to wrap RxSO3 objects around POD array.\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::Sim3<Scalar_> const, Options>\n    : public Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_> const, Options>> {\n  using Base = Sophus::Sim3Base<Map<Sophus::Sim3<Scalar_> const, Options>>;\n\n public:\n  using Scalar = Scalar_;\n  using Transformation = 
typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar const* coeffs)\n      : rxso3_(coeffs),\n        translation_(coeffs + Sophus::RxSO3<Scalar>::num_parameters) {}\n\n  /// Accessor of RxSO3\n  ///\n  SOPHUS_FUNC Map<Sophus::RxSO3<Scalar> const, Options> const& rxso3() const {\n    return rxso3_;\n  }\n\n  /// Accessor of translation vector\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector3<Scalar> const, Options> const& translation()\n      const {\n    return translation_;\n  }\n\n protected:\n  Map<Sophus::RxSO3<Scalar> const, Options> const rxso3_;\n  Map<Sophus::Vector3<Scalar> const, Options> const translation_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/sim_details.hpp",
    "content": "#pragma once\n\n#include \"types.hpp\"\n\nnamespace Sophus {\nnamespace details {\n\ntemplate <class Scalar, int N>\nMatrix<Scalar, N, N> calcW(Matrix<Scalar, N, N> const &Omega,\n                           Scalar const theta, Scalar const sigma) {\n  using std::abs;\n  using std::cos;\n  using std::exp;\n  using std::sin;\n  static Matrix<Scalar, N, N> const I = Matrix<Scalar, N, N>::Identity();\n  static Scalar const one(1);\n  static Scalar const half(0.5);\n  Matrix<Scalar, N, N> const Omega2 = Omega * Omega;\n  Scalar const scale = exp(sigma);\n  Scalar A, B, C;\n  if (abs(sigma) < Constants<Scalar>::epsilon()) {\n    C = one;\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      A = half;\n      B = Scalar(1. / 6.);\n    } else {\n      Scalar theta_sq = theta * theta;\n      A = (one - cos(theta)) / theta_sq;\n      B = (theta - sin(theta)) / (theta_sq * theta);\n    }\n  } else {\n    C = (scale - one) / sigma;\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      Scalar sigma_sq = sigma * sigma;\n      A = ((sigma - one) * scale + one) / sigma_sq;\n      B = (scale * half * sigma_sq + scale - one - sigma * scale) /\n          (sigma_sq * sigma);\n    } else {\n      Scalar theta_sq = theta * theta;\n      Scalar a = scale * sin(theta);\n      Scalar b = scale * cos(theta);\n      Scalar c = theta_sq + sigma * sigma;\n      A = (a * sigma + (one - b) * theta) / (theta * c);\n      B = (C - ((b - one) * sigma + a * theta) / (c)) * one / (theta_sq);\n    }\n  }\n  return A * Omega + B * Omega2 + C * I;\n}\n\ntemplate <class Scalar>\nvoid calcW_derivatives(Scalar const theta, Scalar const sigma, Scalar &A,\n                       Scalar &B, Scalar &C, Scalar &A_dsigma, Scalar &B_dsigma,\n                       Scalar &C_dsigma, Scalar &A_dtheta, Scalar &B_dtheta) {\n  using std::abs;\n  using std::cos;\n  using std::exp;\n  using std::sin;\n  static Scalar const zero(0.0);\n  static Scalar const one(1.0);\n  static Scalar const 
half(0.5);\n  static Scalar const two(2.0);\n  static Scalar const three(3.0);\n  Scalar const theta_sq = theta * theta;\n  Scalar const theta_c = theta * theta_sq;\n  Scalar const sin_theta = sin(theta);\n  Scalar const cos_theta = cos(theta);\n\n  Scalar const scale = exp(sigma);\n  Scalar const sigma_sq = sigma * sigma;\n  Scalar const sigma_c = sigma * sigma_sq;\n\n  if (abs(sigma) < Constants<Scalar>::epsilon()) {\n    C = one;\n    C_dsigma = half;\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      A = half;\n      B = Scalar(1. / 6.);\n      A_dtheta = A_dsigma = zero;\n      B_dtheta = B_dsigma = zero;\n    } else {\n      A = (one - cos_theta) / theta_sq;\n      B = (theta - sin_theta) / theta_c;\n      A_dtheta = (theta * sin_theta + two * cos_theta - two) / theta_c;\n      B_dtheta = -(two * theta - three * sin_theta + theta * cos_theta) /\n                 (theta_c * theta);\n      A_dsigma = (sin_theta - theta * cos_theta) / theta_c;\n      B_dsigma =\n          (half - (cos_theta + theta * sin_theta - one) / theta_sq) / theta_sq;\n    }\n  } else {\n    C = (scale - one) / sigma;\n    C_dsigma = (scale * (sigma - one) + one) / sigma_sq;\n    if (abs(theta) < Constants<Scalar>::epsilon()) {\n      A = ((sigma - one) * scale + one) / sigma_sq;\n      B = (scale * half * sigma_sq + scale - one - sigma * scale) / sigma_c;\n      A_dsigma = (scale * (sigma_sq - two * sigma + two) - two) / sigma_c;\n      B_dsigma = (scale * (half * sigma_c - (one + half) * sigma_sq +\n                           three * sigma - three) +\n                  three) /\n                 (sigma_c * sigma);\n      A_dtheta = B_dtheta = zero;\n    } else {\n      Scalar const a = scale * sin_theta;\n      Scalar const b = scale * cos_theta;\n      Scalar const b_one = b - one;\n      Scalar const theta_b_one = theta * b_one;\n      Scalar const c = theta_sq + sigma_sq;\n      Scalar const c_sq = c * c;\n      Scalar const theta_sq_c = theta_sq * c;\n      Scalar const 
a_theta = theta * a;\n      Scalar const b_theta = theta * b;\n      Scalar const c_theta = theta * c;\n      Scalar const a_sigma = sigma * a;\n      Scalar const b_sigma = sigma * b;\n      Scalar const two_sigma = sigma * two;\n      Scalar const two_theta = theta * two;\n      Scalar const sigma_b_one = sigma * b_one;\n\n      A = (a_sigma - theta_b_one) / c_theta;\n      A_dtheta = (two * (theta_b_one - a_sigma)) / c_sq +\n                 (b_sigma - b + a_theta + one) / c_theta +\n                 (theta_b_one - a_sigma) / theta_sq_c;\n      A_dsigma = (a - b_theta + a_sigma) / c_theta -\n                 (two_sigma * (theta - b_theta + a_sigma)) / (theta * c_sq);\n\n      B = (C - (sigma_b_one + a_theta) / (c)) * one / (theta_sq);\n      B_dtheta =\n          ((two_theta * (b_sigma - sigma + a_theta)) / c_sq -\n           ((a + b_theta - a_sigma)) / c) /\n              theta_sq -\n          (two * ((scale - one) / sigma - (b_sigma - sigma + a_theta) / c)) /\n              theta_c;\n      B_dsigma =\n          -((b_sigma + a_theta + b_one) / c + (scale - one) / sigma_sq -\n            (two_sigma * (sigma_b_one + a_theta)) / c_sq - scale / sigma) /\n          theta_sq;\n    }\n  }\n}\n\ntemplate <class Scalar, int N>\nMatrix<Scalar, N, N> calcWInv(Matrix<Scalar, N, N> const &Omega,\n                              Scalar const theta, Scalar const sigma,\n                              Scalar const scale) {\n  using std::abs;\n  using std::cos;\n  using std::sin;\n  static Matrix<Scalar, N, N> const I = Matrix<Scalar, N, N>::Identity();\n  static Scalar const half(0.5);\n  static Scalar const one(1);\n  static Scalar const two(2);\n  Matrix<Scalar, N, N> const Omega2 = Omega * Omega;\n  Scalar const scale_sq = scale * scale;\n  Scalar const theta_sq = theta * theta;\n  Scalar const sin_theta = sin(theta);\n  Scalar const cos_theta = cos(theta);\n\n  Scalar a, b, c;\n  if (abs(sigma * sigma) < Constants<Scalar>::epsilon()) {\n    c = one - half * sigma;\n    a = 
-half;\n    if (abs(theta_sq) < Constants<Scalar>::epsilon()) {\n      b = Scalar(1. / 12.);\n    } else {\n      b = (theta * sin_theta + two * cos_theta - two) /\n          (two * theta_sq * (cos_theta - one));\n    }\n  } else {\n    Scalar const scale_cu = scale_sq * scale;\n    c = sigma / (scale - one);\n    if (abs(theta_sq) < Constants<Scalar>::epsilon()) {\n      a = (-sigma * scale + scale - one) / ((scale - one) * (scale - one));\n      b = (scale_sq * sigma - two * scale_sq + scale * sigma + two * scale) /\n          (two * scale_cu - Scalar(6) * scale_sq + Scalar(6) * scale - two);\n    } else {\n      Scalar const s_sin_theta = scale * sin_theta;\n      Scalar const s_cos_theta = scale * cos_theta;\n      a = (theta * s_cos_theta - theta - sigma * s_sin_theta) /\n          (theta * (scale_sq - two * s_cos_theta + one));\n      b = -scale *\n          (theta * s_sin_theta - theta * sin_theta + sigma * s_cos_theta -\n           scale * sigma + sigma * cos_theta - sigma) /\n          (theta_sq * (scale_cu - two * scale * s_cos_theta - scale_sq +\n                       two * s_cos_theta + scale - one));\n    }\n  }\n  return a * Omega + b * Omega2 + c * I;\n}\n\n}  // namespace details\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/so2.hpp",
    "content": "/// @file\n/// Special orthogonal group SO(2) - rotation in 2d.\n\n#pragma once\n\n#include <type_traits>\n\n// Include only the selective set of Eigen headers that we need.\n// This helps when using Sophus with unusual compilers, like nvcc.\n#include <Eigen/LU>\n\n#include \"rotation_matrix.hpp\"\n#include \"types.hpp\"\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SO2;\nusing SO2d = SO2<double>;\nusing SO2f = SO2<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::SO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Sophus::Vector2<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO2<Scalar_>, Options_>>\n    : traits<Sophus::SO2<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO2<Scalar_> const, Options_>>\n    : traits<Sophus::SO2<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using ComplexType = Map<Sophus::Vector2<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SO2 base type - implements SO2 class but is storage agnostic.\n///\n/// SO(2) is the group of rotations in 2d. As a matrix group, it is the set of\n/// matrices which are orthogonal such that ``R * R' = I`` (with ``R'`` being\n/// the transpose of ``R``) and have a positive determinant. In particular, the\n/// determinant is 1. 
Let ``theta`` be the rotation angle, the rotation matrix\n/// can be written in close form:\n///\n///      | cos(theta) -sin(theta) |\n///      | sin(theta)  cos(theta) |\n///\n/// As a matter of fact, the first column of those matrices is isomorph to the\n/// set of unit complex numbers U(1). Thus, internally, SO2 is represented as\n/// complex number with length 1.\n///\n/// SO(2) is a compact and commutative group. First it is compact since the set\n/// of rotation matrices is a closed and bounded set. Second it is commutative\n/// since ``R(alpha) * R(beta) = R(beta) * R(alpha)``,  simply because ``alpha +\n/// beta = beta + alpha`` with ``alpha`` and ``beta`` being rotation angles\n/// (about the same axis).\n///\n/// Class invariant: The 2-norm of ``unit_complex`` must be close to 1.\n/// Technically speaking, it must hold that:\n///\n///   ``|unit_complex().squaredNorm() - 1| <= Constants::epsilon()``.\ntemplate <class Derived>\nclass SO2Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using ComplexT = typename Eigen::internal::traits<Derived>::ComplexType;\n  using ComplexTemporaryType = Sophus::Vector2<Scalar, Options>;\n\n  /// Degrees of freedom of manifold, number of dimensions in tangent space (one\n  /// since we only have in-plane rotations).\n  static int constexpr DoF = 1;\n  /// Number of internal parameters used (complex numbers are a tuples).\n  static int constexpr num_parameters = 2;\n  /// Group transformations are 2x2 matrices.\n  static int constexpr N = 2;\n  /// Points are 3-dimensional\n  static int constexpr Dim = 2;\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector2<Scalar>;\n  using HomogeneousPoint = Vector3<Scalar>;\n  using Line = ParametrizedLine2<Scalar>;\n  using Hyperplane = Hyperplane2<Scalar>;\n  using Tangent = Scalar;\n  using Adjoint = Scalar;\n\n  /// For binary operations the return 
type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SO2 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SO2Product = SO2<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector2<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector3<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  ///\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. See hat-operator below.\n  ///\n  /// It simply ``1``, since ``SO(2)`` is a commutative group.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const { return Scalar(1); }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SO2<NewScalarType> cast() const {\n    return SO2<NewScalarType>(unit_complex().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. SO(2) is\n  /// represented by a unit complex number (two parameters). 
When using direct\n  /// write access, the user needs to take care of that the complex number stays\n  /// normalized.\n  ///\n  SOPHUS_FUNC Scalar* data() { return unit_complex_nonconst().data(); }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const { return unit_complex().data(); }\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SO2<Scalar> inverse() const {\n    return SO2<Scalar>(unit_complex().x(), -unit_complex().y());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rotation matrices) to elements of the tangent space\n  /// (rotation angles).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SO(2).\n  ///\n  SOPHUS_FUNC Scalar log() const {\n    using std::atan2;\n    return atan2(unit_complex().y(), unit_complex().x());\n  }\n\n  /// It re-normalizes ``unit_complex`` to unit length.\n  ///\n  /// Note: Because of the class invariant, there is typically no need to call\n  /// this function directly.\n  ///\n  SOPHUS_FUNC void normalize() {\n    using std::hypot;\n    // Avoid under/overflows for higher precision\n    Scalar length = hypot(unit_complex().x(), unit_complex().y());\n    SOPHUS_ENSURE(length >= Constants<Scalar>::epsilon(),\n                  \"Complex number should not be close to zero!\");\n    unit_complex_nonconst() /= length;\n  }\n\n  /// Returns 2x2 matrix representation of the instance.\n  ///\n  /// For SO(2), the matrix representation is an orthogonal matrix ``R`` with\n  /// ``det(R)=1``, thus the so-called \"rotation matrix\".\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    Scalar const& real = unit_complex().x();\n    Scalar const& imag = unit_complex().y();\n    Transformation R;\n    // clang-format off\n    R <<\n      real, -imag,\n      imag,  real;\n    // 
clang-format on\n    return R;\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO2Base<Derived>& operator=(SO2Base<OtherDerived> const& other) {\n    unit_complex_nonconst() = other.unit_complex();\n    return *this;\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template <typename OtherDerived>\n  SOPHUS_FUNC SO2Product<OtherDerived> operator*(\n      SO2Base<OtherDerived> const& other) const {\n    using ResultT = ReturnScalar<OtherDerived>;\n    Scalar const lhs_real = unit_complex().x();\n    Scalar const lhs_imag = unit_complex().y();\n    typename OtherDerived::Scalar const& rhs_real = other.unit_complex().x();\n    typename OtherDerived::Scalar const& rhs_imag = other.unit_complex().y();\n    // complex multiplication\n    ResultT const result_real = lhs_real * rhs_real - lhs_imag * rhs_imag;\n    ResultT const result_imag = lhs_real * rhs_imag + lhs_imag * rhs_real;\n\n    ResultT const squared_norm =\n        result_real * result_real + result_imag * result_imag;\n    // We can assume that the squared-norm is close to 1 since we deal with a\n    // unit complex number. Due to numerical precision issues, there might\n    // be a small drift after pose concatenation. 
Hence, we need to renormalizes\n    // the complex number here.\n    // Since squared-norm is close to 1, we do not need to calculate the costly\n    // square-root, but can use an approximation around 1 (see\n    // http://stackoverflow.com/a/12934750 for details).\n    if (squared_norm != ResultT(1.0)) {\n      ResultT const scale = ResultT(2.0) / (ResultT(1.0) + squared_norm);\n      return SO2Product<OtherDerived>(result_real * scale, result_imag * scale);\n    }\n    return SO2Product<OtherDerived>(result_real, result_imag);\n  }\n\n  /// Group action on 2-points.\n  ///\n  /// This function rotates a 2 dimensional point ``p`` by the SO2 element\n  ///  ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 2>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    Scalar const& real = unit_complex().x();\n    Scalar const& imag = unit_complex().y();\n    return PointProduct<PointDerived>(real * p[0] - imag * p[1],\n                                      imag * p[0] + real * p[1]);\n  }\n\n  /// Group action on homogeneous 2-points.\n  ///\n  /// This function rotates a homogeneous 2 dimensional point ``p`` by the SO2\n  /// element ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``.\n  ///\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 3>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    Scalar const& real = unit_complex().x();\n    Scalar const& imag = unit_complex().y();\n    return HomogeneousPointProduct<HPointDerived>(\n        real * p[0] - imag * p[1], imag * p[0] + real * p[1], p[2]);\n  }\n\n  /// Group action on lines.\n  ///\n  /// 
This function rotates a parametrized line ``l(t) = o + t * d`` by the SO2\n  /// element:\n  ///\n  /// Both direction ``d`` and origin ``o`` are rotated as a 2 dimensional point\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), (*this) * l.direction());\n  }\n\n  /// Group action on hyper-planes.\n  ///\n  /// This function rotates a hyper-plane ``n.x + d = 0`` by the SO2\n  /// element:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is left unchanged\n  ///\n  /// Note that in 2d-case hyper-planes are just another parametrization of\n  /// lines\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    return Hyperplane((*this) * p.normal(), p.offset());\n  }\n\n  /// In-place group multiplication. This method is only valid if the return\n  /// type of the multiplication is compatible with this SO2's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SO2Base<Derived> operator*=(SO2Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Returns derivative of  this * SO2::exp(x)  wrt. 
x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    return Matrix<Scalar, num_parameters, DoF>(-unit_complex()[1],\n                                               unit_complex()[0]);\n  }\n\n  /// Returns internal parameters of SO(2).\n  ///\n  /// It returns (c[0], c[1]), with c being the unit complex number.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return unit_complex();\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    return Matrix<Scalar, DoF, num_parameters>(-unit_complex()[1],\n                                               unit_complex()[0]);\n  }\n\n  /// Takes in complex number / tuple and normalizes it.\n  ///\n  /// Precondition: The complex number must not be close to zero.\n  ///\n  SOPHUS_FUNC void setComplex(Point const& complex) {\n    unit_complex_nonconst() = complex;\n    normalize();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC\n  ComplexT const& unit_complex() const {\n    return static_cast<Derived const*>(this)->unit_complex();\n  }\n\n private:\n  /// Mutator of unit_complex is private to ensure class invariant. 
That is\n  /// the complex number must stay close to unit length.\n  ///\n  SOPHUS_FUNC\n  ComplexT& unit_complex_nonconst() {\n    return static_cast<Derived*>(this)->unit_complex_nonconst();\n  }\n};\n\n/// SO2 using  default storage; derived from SO2Base.\ntemplate <class Scalar_, int Options>\nclass SO2 : public SO2Base<SO2<Scalar_, Options>> {\n public:\n  using Base = SO2Base<SO2<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using ComplexMember = Vector2<Scalar, Options>;\n\n  /// ``Base`` is friend so unit_complex_nonconst can be accessed from ``Base``.\n  friend class SO2Base<SO2<Scalar, Options>>;\n\n  using Base::operator=;\n\n  /// Define copy-assignment operator explicitly. 
The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC SO2& operator=(SO2 const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes unit complex number to identity rotation.\n  ///\n  SOPHUS_FUNC SO2() : unit_complex_(Scalar(1), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SO2(SO2 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO2(SO2Base<OtherDerived> const& other)\n      : unit_complex_(other.unit_complex()) {}\n\n  /// Constructor from rotation matrix\n  ///\n  /// Precondition: rotation matrix need to be orthogonal with determinant of 1.\n  ///\n  SOPHUS_FUNC explicit SO2(Transformation const& R)\n      : unit_complex_(Scalar(0.5) * (R(0, 0) + R(1, 1)),\n                      Scalar(0.5) * (R(1, 0) - R(0, 1))) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n {}\",\n                  SOPHUS_FMT_ARG(R));\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: {}\",\n                  SOPHUS_FMT_ARG(R.determinant()));\n  }\n\n  /// Constructor from pair of real and imaginary number.\n  ///\n  /// Precondition: The pair must not be close to zero.\n  ///\n  SOPHUS_FUNC SO2(Scalar const& real, Scalar const& imag)\n      : unit_complex_(real, imag) {\n    Base::normalize();\n  }\n\n  /// Constructor from 2-vector.\n  ///\n  /// Precondition: The vector must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit SO2(Eigen::MatrixBase<D> const& complex)\n      : unit_complex_(complex) {\n    static_assert(std::is_same<typename D::Scalar, Scalar>::value,\n                  \"must be same Scalar type\");\n    Base::normalize();\n  }\n\n  /// Constructor from an rotation angle.\n  ///\n  SOPHUS_FUNC explicit SO2(Scalar theta) {\n    unit_complex_nonconst() 
= SO2<Scalar>::exp(theta).unit_complex();\n  }\n\n  /// Accessor of unit complex number\n  ///\n  SOPHUS_FUNC ComplexMember const& unit_complex() const {\n    return unit_complex_;\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation angle\n  /// ``theta``) and returns the corresponding element of the group SO(2).\n  ///\n  /// To be more specific, this function computes ``expmat(hat(omega))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of SO(2).\n  ///\n  SOPHUS_FUNC static SO2<Scalar> exp(Tangent const& theta) {\n    using std::cos;\n    using std::sin;\n    return SO2<Scalar>(cos(theta), sin(theta));\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& theta) {\n    using std::cos;\n    using std::sin;\n    return Sophus::Matrix<Scalar, num_parameters, DoF>(-sin(theta), cos(theta));\n  }\n\n  /// Returns derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    return Sophus::Matrix<Scalar, num_parameters, DoF>(Scalar(0), Scalar(1));\n  }\n\n  /// Returns derivative of exp(x) * p wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 2, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    return Point(-point.y(), point.x());\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. 
``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int) {\n    return generator();\n  }\n\n  /// Returns the infinitesimal generators of SO(2).\n  ///\n  /// The infinitesimal generators of SO(2) is:\n  ///\n  ///     |  0 -1 |\n  ///     |  1  0 |\n  ///\n  SOPHUS_FUNC static Transformation generator() { return hat(Scalar(1)); }\n\n  /// hat-operator\n  ///\n  /// It takes in the scalar representation ``theta`` (= rotation angle) and\n  /// returns the corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SO(2) is defined as\n  ///\n  ///   ``hat(.): R^2 -> R^{2x2},  hat(theta) = theta * G``\n  ///\n  /// with ``G`` being the infinitesimal generator of SO(2).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& theta) {\n    Transformation Omega;\n    // clang-format off\n    Omega <<\n        Scalar(0),   -theta,\n            theta, Scalar(0);\n    // clang-format on\n    return Omega;\n  }\n\n  /// Returns closed SO2 given arbitrary 2x2 matrix.\n  ///\n  template <class S = Scalar>\n  static SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, SO2>\n  fitToSO2(Transformation const& R) {\n    return SO2(makeRotationMatrix(R));\n  }\n\n  /// Lie bracket\n  ///\n  /// It returns the Lie bracket of SO(2). 
Since SO(2) is a commutative group,\n  /// the Lie bracket is simple ``0``.\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const&, Tangent const&) {\n    return Scalar(0);\n  }\n\n  /// Draw uniform sample from SO(2) manifold.\n  ///\n  template <class UniformRandomBitGenerator>\n  static SO2 sampleUniform(UniformRandomBitGenerator& generator) {\n    static_assert(IsUniformRandomBitGenerator<UniformRandomBitGenerator>::value,\n                  \"generator must meet the UniformRandomBitGenerator concept\");\n    std::uniform_real_distribution<Scalar> uniform(-Constants<Scalar>::pi(),\n                                                   Constants<Scalar>::pi());\n    return SO2(uniform(generator));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 2x2-matrix representation ``Omega`` and maps it to the\n  /// corresponding scalar representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -a |\n  ///                |  a  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    using std::abs;\n    return Omega(1, 0);\n  }\n\n protected:\n  /// Mutator of complex number is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC ComplexMember& unit_complex_nonconst() { return unit_complex_; }\n\n  ComplexMember unit_complex_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n\n/// Specialization of Eigen::Map for ``SO2``; derived from SO2Base.\n///\n/// Allows us to wrap SO2 objects around POD array (e.g. 
external c style\n/// complex number / tuple).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO2<Scalar_>, Options>\n    : public Sophus::SO2Base<Map<Sophus::SO2<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SO2Base<Map<Sophus::SO2<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so unit_complex_nonconst can be accessed from ``Base``.\n  friend class Sophus::SO2Base<Map<Sophus::SO2<Scalar_>, Options>>;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC\n  explicit Map(Scalar* coeffs) : unit_complex_(coeffs) {}\n\n  /// Accessor of unit complex number.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar>, Options> const& unit_complex() const {\n    return unit_complex_;\n  }\n\n protected:\n  /// Mutator of unit_complex is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC\n  Map<Sophus::Vector2<Scalar>, Options>& unit_complex_nonconst() {\n    return unit_complex_;\n  }\n\n  Map<Matrix<Scalar, 2, 1>, Options> unit_complex_;\n};\n\n/// Specialization of Eigen::Map for ``SO2 const``; derived from SO2Base.\n///\n/// Allows us to wrap SO2 objects around POD array (e.g. 
external c style\n/// complex number / tuple).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO2<Scalar_> const, Options>\n    : public Sophus::SO2Base<Map<Sophus::SO2<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SO2Base<Map<Sophus::SO2<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar const* coeffs) : unit_complex_(coeffs) {}\n\n  /// Accessor of unit complex number.\n  ///\n  SOPHUS_FUNC Map<Sophus::Vector2<Scalar> const, Options> const& unit_complex()\n      const {\n    return unit_complex_;\n  }\n\n protected:\n  /// Mutator of unit_complex is protected to ensure class invariant.\n  ///\n  Map<Matrix<Scalar, 2, 1> const, Options> const unit_complex_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/so3.hpp",
    "content": "/// @file\n/// Special orthogonal group SO(3) - rotation in 3d.\n\n#pragma once\n\n#include \"rotation_matrix.hpp\"\n#include \"so2.hpp\"\n#include \"types.hpp\"\n\n// Include only the selective set of Eigen headers that we need.\n// This helps when using Sophus with unusual compilers, like nvcc.\n#include <Eigen/src/Geometry/OrthoMethods.h>\n#include <Eigen/src/Geometry/Quaternion.h>\n#include <Eigen/src/Geometry/RotationBase.h>\n\nnamespace Sophus {\ntemplate <class Scalar_, int Options = 0>\nclass SO3;\nusing SO3d = SO3<double>;\nusing SO3f = SO3<float>;\n}  // namespace Sophus\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Sophus::SO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Eigen::Quaternion<Scalar, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO3<Scalar_>, Options_>>\n    : traits<Sophus::SO3<Scalar_, Options_>> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar>, Options>;\n};\n\ntemplate <class Scalar_, int Options_>\nstruct traits<Map<Sophus::SO3<Scalar_> const, Options_>>\n    : traits<Sophus::SO3<Scalar_, Options_> const> {\n  static constexpr int Options = Options_;\n  using Scalar = Scalar_;\n  using QuaternionType = Map<Eigen::Quaternion<Scalar> const, Options>;\n};\n}  // namespace internal\n}  // namespace Eigen\n\nnamespace Sophus {\n\n/// SO3 base type - implements SO3 class but is storage agnostic.\n///\n/// SO(3) is the group of rotations in 3d. As a matrix group, it is the set of\n/// matrices which are orthogonal such that ``R * R' = I`` (with ``R'`` being\n/// the transpose of ``R``) and have a positive determinant. In particular, the\n/// determinant is 1. 
Internally, the group is represented as a unit quaternion.\n/// Unit quaternion can be seen as members of the special unitary group SU(2).\n/// SU(2) is a double cover of SO(3). Hence, for every rotation matrix ``R``,\n/// there exist two unit quaternions: ``(r, v)`` and ``(-r, -v)``, with ``r``\n/// the real part and ``v`` being the imaginary 3-vector part of the quaternion.\n///\n/// SO(3) is a compact, but non-commutative group. First it is compact since the\n/// set of rotation matrices is a closed and bounded set. Second it is\n/// non-commutative since the equation ``R_1 * R_2 = R_2 * R_1`` does not hold\n/// in general. For example rotating an object by some degrees about its\n/// ``x``-axis and then by some degrees about its y axis, does not lead to the\n/// same orientation when rotation first about ``y`` and then about ``x``.\n///\n/// Class invariant: The 2-norm of ``unit_quaternion`` must be close to 1.\n/// Technically speaking, it must hold that:\n///\n///   ``|unit_quaternion().squaredNorm() - 1| <= Constants::epsilon()``.\ntemplate <class Derived>\nclass SO3Base {\n public:\n  static constexpr int Options = Eigen::internal::traits<Derived>::Options;\n  using Scalar = typename Eigen::internal::traits<Derived>::Scalar;\n  using QuaternionType =\n      typename Eigen::internal::traits<Derived>::QuaternionType;\n  using QuaternionTemporaryType = Eigen::Quaternion<Scalar, Options>;\n\n  /// Degrees of freedom of group, number of dimensions in tangent space.\n  static int constexpr DoF = 3;\n  /// Number of internal parameters used (quaternion is a 4-tuple).\n  static int constexpr num_parameters = 4;\n  /// Group transformations are 3x3 matrices.\n  static int constexpr N = 3;\n  /// Points are 3-dimensional\n  static int constexpr Dim = 3;\n\n  using Transformation = Matrix<Scalar, N, N>;\n  using Point = Vector3<Scalar>;\n  using HomogeneousPoint = Vector4<Scalar>;\n  using Line = ParametrizedLine3<Scalar>;\n  using Hyperplane = Hyperplane3<Scalar>;\n  
using Tangent = Vector<Scalar, DoF>;\n  using Adjoint = Matrix<Scalar, DoF, DoF>;\n\n  struct TangentAndTheta {\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n    Tangent tangent;\n    Scalar theta;\n  };\n\n  /// For binary operations the return type is determined with the\n  /// ScalarBinaryOpTraits feature of Eigen. This allows mixing concrete and Map\n  /// types, as well as other compatible scalar types such as Ceres::Jet and\n  /// double scalars with SO3 operations.\n  template <typename OtherDerived>\n  using ReturnScalar = typename Eigen::ScalarBinaryOpTraits<\n      Scalar, typename OtherDerived::Scalar>::ReturnType;\n\n  template <typename OtherDerived>\n  using SO3Product = SO3<ReturnScalar<OtherDerived>>;\n\n  template <typename PointDerived>\n  using PointProduct = Vector3<ReturnScalar<PointDerived>>;\n\n  template <typename HPointDerived>\n  using HomogeneousPointProduct = Vector4<ReturnScalar<HPointDerived>>;\n\n  /// Adjoint transformation\n  //\n  /// This function return the adjoint transformation ``Ad`` of the group\n  /// element ``A`` such that for all ``x`` it holds that\n  /// ``hat(Ad_A * x) = A * hat(x) A^{-1}``. 
See hat-operator below.\n  //\n  /// For SO(3), it simply returns the rotation matrix corresponding to ``A``.\n  ///\n  SOPHUS_FUNC Adjoint Adj() const { return matrix(); }\n\n  /// Extract rotation angle about canonical X-axis\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, S> angleX() const {\n    Sophus::Matrix3<Scalar> R = matrix();\n    Sophus::Matrix2<Scalar> Rx = R.template block<2, 2>(1, 1);\n    return SO2<Scalar>(makeRotationMatrix(Rx)).log();\n  }\n\n  /// Extract rotation angle about canonical Y-axis\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, S> angleY() const {\n    Sophus::Matrix3<Scalar> R = matrix();\n    Sophus::Matrix2<Scalar> Ry;\n    // clang-format off\n    Ry <<\n      R(0, 0), R(2, 0),\n      R(0, 2), R(2, 2);\n    // clang-format on\n    return SO2<Scalar>(makeRotationMatrix(Ry)).log();\n  }\n\n  /// Extract rotation angle about canonical Z-axis\n  ///\n  template <class S = Scalar>\n  SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, S> angleZ() const {\n    Sophus::Matrix3<Scalar> R = matrix();\n    Sophus::Matrix2<Scalar> Rz = R.template block<2, 2>(0, 0);\n    return SO2<Scalar>(makeRotationMatrix(Rz)).log();\n  }\n\n  /// Returns copy of instance casted to NewScalarType.\n  ///\n  template <class NewScalarType>\n  SOPHUS_FUNC SO3<NewScalarType> cast() const {\n    return SO3<NewScalarType>(unit_quaternion().template cast<NewScalarType>());\n  }\n\n  /// This provides unsafe read/write access to internal data. SO(3) is\n  /// represented by an Eigen::Quaternion (four parameters). 
When using direct\n  /// write access, the user needs to take care of that the quaternion stays\n  /// normalized.\n  ///\n  /// Note: The first three Scalars represent the imaginary parts, while the\n  /// forth Scalar represent the real part.\n  ///\n  SOPHUS_FUNC Scalar* data() {\n    return unit_quaternion_nonconst().coeffs().data();\n  }\n\n  /// Const version of data() above.\n  ///\n  SOPHUS_FUNC Scalar const* data() const {\n    return unit_quaternion().coeffs().data();\n  }\n\n  /// Returns derivative of  this * SO3::exp(x)  wrt. x at x=0.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, num_parameters, DoF> Dx_this_mul_exp_x_at_0()\n      const {\n    Matrix<Scalar, num_parameters, DoF> J;\n    Eigen::Quaternion<Scalar> const q = unit_quaternion();\n    Scalar const c0 = Scalar(0.5) * q.w();\n    Scalar const c1 = Scalar(0.5) * q.z();\n    Scalar const c2 = -c1;\n    Scalar const c3 = Scalar(0.5) * q.y();\n    Scalar const c4 = Scalar(0.5) * q.x();\n    Scalar const c5 = -c4;\n    Scalar const c6 = -c3;\n    J(0, 0) = c0;\n    J(0, 1) = c2;\n    J(0, 2) = c3;\n    J(1, 0) = c1;\n    J(1, 1) = c0;\n    J(1, 2) = c5;\n    J(2, 0) = c6;\n    J(2, 1) = c4;\n    J(2, 2) = c0;\n    J(3, 0) = c5;\n    J(3, 1) = c6;\n    J(3, 2) = c2;\n\n    return J;\n  }\n\n  /// Returns derivative of log(this^{-1} * x) by x at x=this.\n  ///\n  SOPHUS_FUNC Matrix<Scalar, DoF, num_parameters> Dx_log_this_inv_by_x_at_this()\n      const {\n    auto& q = unit_quaternion();\n    Matrix<Scalar, DoF, num_parameters> J;\n    // clang-format off\n    J << q.w(),  q.z(), -q.y(), -q.x(),\n        -q.z(),  q.w(),  q.x(), -q.y(),\n         q.y(), -q.x(),  q.w(), -q.z();\n    // clang-format on\n    return J * Scalar(2.);\n  }\n\n  /// Returns internal parameters of SO(3).\n  ///\n  /// It returns (q.imag[0], q.imag[1], q.imag[2], q.real), with q being the\n  /// unit quaternion.\n  ///\n  SOPHUS_FUNC Sophus::Vector<Scalar, num_parameters> params() const {\n    return unit_quaternion().coeffs();\n  
}\n\n  /// Returns group inverse.\n  ///\n  SOPHUS_FUNC SO3<Scalar> inverse() const {\n    return SO3<Scalar>(unit_quaternion().conjugate());\n  }\n\n  /// Logarithmic map\n  ///\n  /// Computes the logarithm, the inverse of the group exponential which maps\n  /// element of the group (rotation matrices) to elements of the tangent space\n  /// (rotation-vector).\n  ///\n  /// To be specific, this function computes ``vee(logmat(.))`` with\n  /// ``logmat(.)`` being the matrix logarithm and ``vee(.)`` the vee-operator\n  /// of SO(3).\n  ///\n  SOPHUS_FUNC Tangent log() const { return logAndTheta().tangent; }\n\n  /// As above, but also returns ``theta = |omega|``.\n  ///\n  SOPHUS_FUNC TangentAndTheta logAndTheta() const {\n    TangentAndTheta J;\n    using std::abs;\n    using std::atan2;\n    using std::sqrt;\n    Scalar squared_n = unit_quaternion().vec().squaredNorm();\n    Scalar w = unit_quaternion().w();\n\n    Scalar two_atan_nbyw_by_n;\n\n    /// Atan-based log thanks to\n    ///\n    /// C. 
Hertzberg et al.:\n    /// \"Integrating Generic Sensor Fusion Algorithms with Sound State\n    /// Representation through Encapsulation of Manifolds\"\n    /// Information Fusion, 2011\n\n    if (squared_n <\n        Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      // If quaternion is normalized and n=0, then w should be 1;\n      // w=0 should never happen here!\n      SOPHUS_ENSURE(abs(w) >= Constants<Scalar>::epsilon(),\n                    \"Quaternion ({}) should be normalized!\",\n                    SOPHUS_FMT_ARG(unit_quaternion().coeffs().transpose()));\n      Scalar squared_w = w * w;\n      two_atan_nbyw_by_n =\n          Scalar(2) / w - Scalar(2.0 / 3.0) * (squared_n) / (w * squared_w);\n      J.theta = Scalar(2) * squared_n / w;\n    } else {\n      Scalar n = sqrt(squared_n);\n\n      // w < 0 ==> cos(theta/2) < 0 ==> theta > pi\n      //\n      // By convention, the condition |theta| < pi is imposed by wrapping theta\n      // to pi; The wrap operation can be folded inside evaluation of atan2\n      //\n      // theta - pi = atan(sin(theta - pi), cos(theta - pi))\n      //            = atan(-sin(theta), -cos(theta))\n      //\n      Scalar atan_nbyw =\n          (w < Scalar(0)) ? 
Scalar(atan2(-n, -w)) : Scalar(atan2(n, w));\n      two_atan_nbyw_by_n = Scalar(2) * atan_nbyw / n;\n      J.theta = two_atan_nbyw_by_n * n;\n    }\n\n    J.tangent = two_atan_nbyw_by_n * unit_quaternion().vec();\n    return J;\n  }\n\n  /// It re-normalizes ``unit_quaternion`` to unit length.\n  ///\n  /// Note: Because of the class invariant, there is typically no need to call\n  /// this function directly.\n  ///\n  SOPHUS_FUNC void normalize() {\n    Scalar length = unit_quaternion_nonconst().norm();\n    SOPHUS_ENSURE(\n        length >= Constants<Scalar>::epsilon(),\n        \"Quaternion ({}) should not be close to zero!\",\n        SOPHUS_FMT_ARG(unit_quaternion_nonconst().coeffs().transpose()));\n    unit_quaternion_nonconst().coeffs() /= length;\n  }\n\n  /// Returns 3x3 matrix representation of the instance.\n  ///\n  /// For SO(3), the matrix representation is an orthogonal matrix ``R`` with\n  /// ``det(R)=1``, thus the so-called \"rotation matrix\".\n  ///\n  SOPHUS_FUNC Transformation matrix() const {\n    return unit_quaternion().toRotationMatrix();\n  }\n\n  /// Assignment-like operator from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO3Base<Derived>& operator=(SO3Base<OtherDerived> const& other) {\n    unit_quaternion_nonconst() = other.unit_quaternion();\n    return *this;\n  }\n\n  template <typename QuaternionProductType, typename QuaternionTypeA,\n            typename QuaternionTypeB>\n  SOPHUS_FUNC static QuaternionProductType QuaternionProduct(\n      const QuaternionTypeA& a, const QuaternionTypeB& b) {\n    return QuaternionProductType(\n        a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),\n        a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),\n        a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),\n        a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x());\n  }\n\n  /// Group multiplication, which is rotation concatenation.\n  ///\n  template 
<typename OtherDerived>\n  SOPHUS_FUNC SO3Product<OtherDerived> operator*(\n      SO3Base<OtherDerived> const& other) const {\n    using QuaternionProductType =\n        typename SO3Product<OtherDerived>::QuaternionType;\n    const QuaternionType& a = unit_quaternion();\n    const typename OtherDerived::QuaternionType& b = other.unit_quaternion();\n    /// NOTE: We cannot use Eigen's Quaternion multiplication because it always\n    /// returns a Quaternion of the same Scalar as this object, so it is not\n    /// able to multiple Jets and doubles correctly.\n    return SO3Product<OtherDerived>(\n        QuaternionProduct<QuaternionProductType>(a, b));\n  }\n\n  /// Group action on 3-points.\n  ///\n  /// This function rotates a 3 dimensional point ``p`` by the SO3 element\n  ///  ``bar_R_foo`` (= rotation matrix): ``p_bar = bar_R_foo * p_foo``.\n  ///\n  /// Since SO3 is internally represented by a unit quaternion ``q``, it is\n  /// implemented as ``p_bar = q * p_foo * q^{*}``\n  /// with ``q^{*}`` being the quaternion conjugate of ``q``.\n  ///\n  /// Geometrically, ``p``  is rotated by angle ``|omega|`` around the\n  /// axis ``omega/|omega|`` with ``omega := vee(log(bar_R_foo))``.\n  ///\n  /// For ``vee``-operator, see below.\n  ///\n  template <typename PointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<PointDerived, 3>::value>::type>\n  SOPHUS_FUNC PointProduct<PointDerived> operator*(\n      Eigen::MatrixBase<PointDerived> const& p) const {\n    /// NOTE: We cannot use Eigen's Quaternion transformVector because it always\n    /// returns a Vector3 of the same Scalar as this quaternion, so it is not\n    /// able to be applied to Jets and doubles correctly.\n    const QuaternionType& q = unit_quaternion();\n    PointProduct<PointDerived> uv = q.vec().cross(p);\n    uv += uv;\n    return p + q.w() * uv + q.vec().cross(uv);\n  }\n\n  /// Group action on homogeneous 3-points. 
See above for more details.\n  template <typename HPointDerived,\n            typename = typename std::enable_if<\n                IsFixedSizeVector<HPointDerived, 4>::value>::type>\n  SOPHUS_FUNC HomogeneousPointProduct<HPointDerived> operator*(\n      Eigen::MatrixBase<HPointDerived> const& p) const {\n    const auto rp = *this * p.template head<3>();\n    return HomogeneousPointProduct<HPointDerived>(rp(0), rp(1), rp(2), p(3));\n  }\n\n  /// Group action on lines.\n  ///\n  /// This function rotates a parametrized line ``l(t) = o + t * d`` by the SO3\n  /// element:\n  ///\n  /// Both direction ``d`` and origin ``o`` are rotated as a 3 dimensional point\n  ///\n  SOPHUS_FUNC Line operator*(Line const& l) const {\n    return Line((*this) * l.origin(), (*this) * l.direction());\n  }\n\n  /// Group action on planes.\n  ///\n  /// This function rotates a plane\n  /// ``n.x + d = 0`` by the SO3 element:\n  ///\n  /// Normal vector ``n`` is rotated\n  /// Offset ``d`` is left unchanged\n  ///\n  SOPHUS_FUNC Hyperplane operator*(Hyperplane const& p) const {\n    return Hyperplane((*this) * p.normal(), p.offset());\n  }\n\n  /// In-place group multiplication. 
This method is only valid if the return\n  /// type of the multiplication is compatible with this SO3's Scalar type.\n  ///\n  template <typename OtherDerived,\n            typename = typename std::enable_if<\n                std::is_same<Scalar, ReturnScalar<OtherDerived>>::value>::type>\n  SOPHUS_FUNC SO3Base<Derived>& operator*=(SO3Base<OtherDerived> const& other) {\n    *static_cast<Derived*>(this) = *this * other;\n    return *this;\n  }\n\n  /// Takes in quaternion, and normalizes it.\n  ///\n  /// Precondition: The quaternion must not be close to zero.\n  ///\n  SOPHUS_FUNC void setQuaternion(Eigen::Quaternion<Scalar> const& quaternion) {\n    unit_quaternion_nonconst() = quaternion;\n    normalize();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC QuaternionType const& unit_quaternion() const {\n    return static_cast<Derived const*>(this)->unit_quaternion();\n  }\n\n private:\n  /// Mutator of unit_quaternion is private to ensure class invariant. That is\n  /// the quaternion must stay close to unit length.\n  ///\n  SOPHUS_FUNC QuaternionType& unit_quaternion_nonconst() {\n    return static_cast<Derived*>(this)->unit_quaternion_nonconst();\n  }\n};\n\n/// SO3 using default storage; derived from SO3Base.\ntemplate <class Scalar_, int Options>\nclass SO3 : public SO3Base<SO3<Scalar_, Options>> {\n public:\n  using Base = SO3Base<SO3<Scalar_, Options>>;\n  static int constexpr DoF = Base::DoF;\n  static int constexpr num_parameters = Base::num_parameters;\n\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n  using QuaternionMember = Eigen::Quaternion<Scalar, Options>;\n\n  /// ``Base`` is friend so unit_quaternion_nonconst can be accessed from\n  /// ``Base``.\n  friend class SO3Base<SO3<Scalar, Options>>;\n\n  using 
Base::operator=;\n\n  /// Define copy-assignment operator explicitly. The definition of\n  /// implicit copy assignment operator is deprecated in presence of a\n  /// user-declared copy constructor (-Wdeprecated-copy in clang >= 13).\n  SOPHUS_FUNC SO3& operator=(SO3 const& other) = default;\n\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /// Default constructor initializes unit quaternion to identity rotation.\n  ///\n  SOPHUS_FUNC SO3()\n      : unit_quaternion_(Scalar(1), Scalar(0), Scalar(0), Scalar(0)) {}\n\n  /// Copy constructor\n  ///\n  SOPHUS_FUNC SO3(SO3 const& other) = default;\n\n  /// Copy-like constructor from OtherDerived.\n  ///\n  template <class OtherDerived>\n  SOPHUS_FUNC SO3(SO3Base<OtherDerived> const& other)\n      : unit_quaternion_(other.unit_quaternion()) {}\n\n  /// Constructor from rotation matrix\n  ///\n  /// Precondition: rotation matrix needs to be orthogonal with determinant\n  /// of 1.\n  ///\n  SOPHUS_FUNC SO3(Transformation const& R) : unit_quaternion_(R) {\n    SOPHUS_ENSURE(isOrthogonal(R), \"R is not orthogonal:\\n {}\",\n                  SOPHUS_FMT_ARG(R * R.transpose()));\n    SOPHUS_ENSURE(R.determinant() > Scalar(0), \"det(R) is not positive: {}\",\n                  SOPHUS_FMT_ARG(R.determinant()));\n  }\n\n  /// Constructor from quaternion\n  ///\n  /// Precondition: quaternion must not be close to zero.\n  ///\n  template <class D>\n  SOPHUS_FUNC explicit SO3(Eigen::QuaternionBase<D> const& quat)\n      : unit_quaternion_(quat) {\n    static_assert(\n        std::is_same<typename Eigen::QuaternionBase<D>::Scalar, Scalar>::value,\n        \"Input must be of same scalar type\");\n    Base::normalize();\n  }\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC QuaternionMember const& unit_quaternion() const {\n    return unit_quaternion_;\n  }\n\n  /// Returns the left Jacobian on lie group. 
See 1st entry in rightmost column\n  /// in: http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17_identities.pdf\n  ///\n  /// A precomputed `theta` can be optionally passed in\n  ///\n  /// Warning: Not to be confused with Dx_exp_x(), which is derivative of the\n  ///          internal quaternion representation of SO3 wrt the tangent vector\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, DoF, DoF> leftJacobian(\n      Tangent const& omega, optional<Scalar> const& theta_o = nullopt) {\n    using std::cos;\n    using std::sin;\n    using std::sqrt;\n\n    Scalar const theta_sq = theta_o ? *theta_o * *theta_o : omega.squaredNorm();\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n    Matrix3<Scalar> const Omega_sq = Omega * Omega;\n    Matrix3<Scalar> V;\n\n    if (theta_sq <\n        Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      V = Matrix3<Scalar>::Identity() + Scalar(0.5) * Omega;\n    } else {\n      Scalar theta = theta_o ? *theta_o : sqrt(theta_sq);\n      V = Matrix3<Scalar>::Identity() +\n          (Scalar(1) - cos(theta)) / theta_sq * Omega +\n          (theta - sin(theta)) / (theta_sq * theta) * Omega_sq;\n    }\n    return V;\n  }\n\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, DoF, DoF> leftJacobianInverse(\n      Tangent const& omega, optional<Scalar> const& theta_o = nullopt) {\n    using std::cos;\n    using std::sin;\n    using std::sqrt;\n    Scalar const theta_sq = theta_o ? *theta_o * *theta_o : omega.squaredNorm();\n    Matrix3<Scalar> const Omega = SO3<Scalar>::hat(omega);\n\n    Matrix3<Scalar> V_inv;\n    if (theta_sq <\n        Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      V_inv = Matrix3<Scalar>::Identity() - Scalar(0.5) * Omega +\n              Scalar(1. / 12.) * (Omega * Omega);\n\n    } else {\n      Scalar const theta = theta_o ? 
*theta_o : sqrt(theta_sq);\n      Scalar const half_theta = Scalar(0.5) * theta;\n\n      V_inv = Matrix3<Scalar>::Identity() - Scalar(0.5) * Omega +\n              (Scalar(1) -\n               Scalar(0.5) * theta * cos(half_theta) / sin(half_theta)) /\n                  (theta * theta) * (Omega * Omega);\n    }\n    return V_inv;\n  }\n\n  /// Returns derivative of exp(x) wrt. x.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF> Dx_exp_x(\n      Tangent const& omega) {\n    using std::cos;\n    using std::exp;\n    using std::sin;\n    using std::sqrt;\n    Scalar const c0 = omega[0] * omega[0];\n    Scalar const c1 = omega[1] * omega[1];\n    Scalar const c2 = omega[2] * omega[2];\n    Scalar const c3 = c0 + c1 + c2;\n\n    if (c3 < Constants<Scalar>::epsilon()) {\n      return Dx_exp_x_at_0();\n    }\n\n    Scalar const c4 = sqrt(c3);\n    Scalar const c5 = 1.0 / c4;\n    Scalar const c6 = 0.5 * c4;\n    Scalar const c7 = sin(c6);\n    Scalar const c8 = c5 * c7;\n    Scalar const c9 = pow(c3, -3.0L / 2.0L);\n    Scalar const c10 = c7 * c9;\n    Scalar const c11 = Scalar(1.0) / c3;\n    Scalar const c12 = cos(c6);\n    Scalar const c13 = Scalar(0.5) * c11 * c12;\n    Scalar const c14 = c7 * c9 * omega[0];\n    Scalar const c15 = Scalar(0.5) * c11 * c12 * omega[0];\n    Scalar const c16 = -c14 * omega[1] + c15 * omega[1];\n    Scalar const c17 = -c14 * omega[2] + c15 * omega[2];\n    Scalar const c18 = omega[1] * omega[2];\n    Scalar const c19 = -c10 * c18 + c13 * c18;\n    Scalar const c20 = Scalar(0.5) * c5 * c7;\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    J(0, 0) = -c0 * c10 + c0 * c13 + c8;\n    J(0, 1) = c16;\n    J(0, 2) = c17;\n    J(1, 0) = c16;\n    J(1, 1) = -c1 * c10 + c1 * c13 + c8;\n    J(1, 2) = c19;\n    J(2, 0) = c17;\n    J(2, 1) = c19;\n    J(2, 2) = -c10 * c2 + c13 * c2 + c8;\n    J(3, 0) = -c20 * omega[0];\n    J(3, 1) = -c20 * omega[1];\n    J(3, 2) = -c20 * omega[2];\n    return J;\n  }\n\n  /// Returns 
derivative of exp(x) wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, num_parameters, DoF>\n  Dx_exp_x_at_0() {\n    Sophus::Matrix<Scalar, num_parameters, DoF> J;\n    // clang-format off\n    J <<  Scalar(0.5),   Scalar(0),   Scalar(0),\n            Scalar(0), Scalar(0.5),   Scalar(0),\n            Scalar(0),   Scalar(0), Scalar(0.5),\n            Scalar(0),   Scalar(0),   Scalar(0);\n    // clang-format on\n    return J;\n  }\n\n  /// Returns derivative of exp(x) * p wrt. x_i at x=0.\n  ///\n  SOPHUS_FUNC static Sophus::Matrix<Scalar, 3, DoF> Dx_exp_x_times_point_at_0(\n      Point const& point) {\n    return hat(-point);\n  }\n\n  /// Returns derivative of exp(x).matrix() wrt. ``x_i at x=0``.\n  ///\n  SOPHUS_FUNC static Transformation Dxi_exp_x_matrix_at_0(int i) {\n    return generator(i);\n  }\n\n  /// Group exponential\n  ///\n  /// This functions takes in an element of tangent space (= rotation vector\n  /// ``omega``) and returns the corresponding element of the group SO(3).\n  ///\n  /// To be more specific, this function computes ``expmat(hat(omega))``\n  /// with ``expmat(.)`` being the matrix exponential and ``hat(.)`` being the\n  /// hat()-operator of SO(3).\n  ///\n  SOPHUS_FUNC static SO3<Scalar> exp(Tangent const& omega) {\n    Scalar theta;\n    return expAndTheta(omega, &theta);\n  }\n\n  /// As above, but also returns ``theta = |omega|`` as out-parameter.\n  ///\n  /// Precondition: ``theta`` must not be ``nullptr``.\n  ///\n  SOPHUS_FUNC static SO3<Scalar> expAndTheta(Tangent const& omega,\n                                             Scalar* theta) {\n    SOPHUS_ENSURE(theta != nullptr, \"must not be nullptr.\");\n    using std::abs;\n    using std::cos;\n    using std::sin;\n    using std::sqrt;\n    Scalar theta_sq = omega.squaredNorm();\n\n    Scalar imag_factor;\n    Scalar real_factor;\n    if (theta_sq <\n        Constants<Scalar>::epsilon() * Constants<Scalar>::epsilon()) {\n      *theta = Scalar(0);\n      Scalar 
theta_po4 = theta_sq * theta_sq;\n      imag_factor = Scalar(0.5) - Scalar(1.0 / 48.0) * theta_sq +\n                    Scalar(1.0 / 3840.0) * theta_po4;\n      real_factor = Scalar(1) - Scalar(1.0 / 8.0) * theta_sq +\n                    Scalar(1.0 / 384.0) * theta_po4;\n    } else {\n      *theta = sqrt(theta_sq);\n      Scalar half_theta = Scalar(0.5) * (*theta);\n      Scalar sin_half_theta = sin(half_theta);\n      imag_factor = sin_half_theta / (*theta);\n      real_factor = cos(half_theta);\n    }\n\n    SO3 q;\n    q.unit_quaternion_nonconst() =\n        QuaternionMember(real_factor, imag_factor * omega.x(),\n                         imag_factor * omega.y(), imag_factor * omega.z());\n    SOPHUS_ENSURE(abs(q.unit_quaternion().squaredNorm() - Scalar(1)) <\n                      Sophus::Constants<Scalar>::epsilon(),\n                  \"SO3::exp failed! omega: {}, real: {}, img: {}\",\n                  SOPHUS_FMT_ARG(omega.transpose()),\n                  SOPHUS_FMT_ARG(real_factor), SOPHUS_FMT_ARG(imag_factor));\n    return q;\n  }\n\n  /// Returns closest SO3 given arbitrary 3x3 matrix.\n  ///\n  template <class S = Scalar>\n  static SOPHUS_FUNC enable_if_t<std::is_floating_point<S>::value, SO3>\n  fitToSO3(Transformation const& R) {\n    return SO3(::Sophus::makeRotationMatrix(R));\n  }\n\n  /// Returns the ith infinitesimal generators of SO(3).\n  ///\n  /// The infinitesimal generators of SO(3) are:\n  ///\n  /// ```\n  ///         |  0  0  0 |\n  ///   G_0 = |  0  0 -1 |\n  ///         |  0  1  0 |\n  ///\n  ///         |  0  0  1 |\n  ///   G_1 = |  0  0  0 |\n  ///         | -1  0  0 |\n  ///\n  ///         |  0 -1  0 |\n  ///   G_2 = |  1  0  0 |\n  ///         |  0  0  0 |\n  /// ```\n  ///\n  /// Precondition: ``i`` must be 0, 1 or 2.\n  ///\n  SOPHUS_FUNC static Transformation generator(int i) {\n    SOPHUS_ENSURE(i >= 0 && i <= 2, \"i should be in range [0,2].\");\n    Tangent e;\n    e.setZero();\n    e[i] = Scalar(1);\n    return hat(e);\n  
}\n\n  /// hat-operator\n  ///\n  /// It takes in the 3-vector representation ``omega`` (= rotation vector) and\n  /// returns the corresponding matrix representation of Lie algebra element.\n  ///\n  /// Formally, the hat()-operator of SO(3) is defined as\n  ///\n  ///   ``hat(.): R^3 -> R^{3x3},  hat(omega) = sum_i omega_i * G_i``\n  ///   (for i=0,1,2)\n  ///\n  /// with ``G_i`` being the ith infinitesimal generator of SO(3).\n  ///\n  /// The corresponding inverse is the vee()-operator, see below.\n  ///\n  SOPHUS_FUNC static Transformation hat(Tangent const& omega) {\n    Transformation Omega;\n    // clang-format off\n    Omega <<\n        Scalar(0), -omega(2),  omega(1),\n         omega(2), Scalar(0), -omega(0),\n        -omega(1),  omega(0), Scalar(0);\n    // clang-format on\n    return Omega;\n  }\n\n  /// Lie bracket\n  ///\n  /// It computes the Lie bracket of SO(3). To be more specific, it computes\n  ///\n  ///   ``[omega_1, omega_2]_so3 := vee([hat(omega_1), hat(omega_2)])``\n  ///\n  /// with ``[A,B] := AB-BA`` being the matrix commutator, ``hat(.)`` the\n  /// hat()-operator and ``vee(.)`` the vee()-operator of SO3.\n  ///\n  /// For the Lie algebra so3, the Lie bracket is simply the cross product:\n  ///\n  /// ``[omega_1, omega_2]_so3 = omega_1 x omega_2.``\n  ///\n  SOPHUS_FUNC static Tangent lieBracket(Tangent const& omega1,\n                                        Tangent const& omega2) {\n    return omega1.cross(omega2);\n  }\n\n  /// Construct x-axis rotation.\n  ///\n  static SOPHUS_FUNC SO3 rotX(Scalar const& x) {\n    return SO3::exp(Sophus::Vector3<Scalar>(x, Scalar(0), Scalar(0)));\n  }\n\n  /// Construct y-axis rotation.\n  ///\n  static SOPHUS_FUNC SO3 rotY(Scalar const& y) {\n    return SO3::exp(Sophus::Vector3<Scalar>(Scalar(0), y, Scalar(0)));\n  }\n\n  /// Construct z-axis rotation.\n  ///\n  static SOPHUS_FUNC SO3 rotZ(Scalar const& z) {\n    return SO3::exp(Sophus::Vector3<Scalar>(Scalar(0), Scalar(0), z));\n  }\n\n  /// Draw 
uniform sample from SO(3) manifold.\n  /// Based on: http://planning.cs.uiuc.edu/node198.html\n  ///\n  template <class UniformRandomBitGenerator>\n  static SO3 sampleUniform(UniformRandomBitGenerator& generator) {\n    static_assert(IsUniformRandomBitGenerator<UniformRandomBitGenerator>::value,\n                  \"generator must meet the UniformRandomBitGenerator concept\");\n\n    std::uniform_real_distribution<Scalar> uniform(Scalar(0), Scalar(1));\n    std::uniform_real_distribution<Scalar> uniform_twopi(\n        Scalar(0), 2 * Constants<Scalar>::pi());\n\n    const Scalar u1 = uniform(generator);\n    const Scalar u2 = uniform_twopi(generator);\n    const Scalar u3 = uniform_twopi(generator);\n\n    const Scalar a = sqrt(1 - u1);\n    const Scalar b = sqrt(u1);\n\n    return SO3(\n        QuaternionMember(a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3)));\n  }\n\n  /// vee-operator\n  ///\n  /// It takes the 3x3-matrix representation ``Omega`` and maps it to the\n  /// corresponding vector representation of Lie algebra.\n  ///\n  /// This is the inverse of the hat()-operator, see above.\n  ///\n  /// Precondition: ``Omega`` must have the following structure:\n  ///\n  ///                |  0 -c  b |\n  ///                |  c  0 -a |\n  ///                | -b  a  0 |\n  ///\n  SOPHUS_FUNC static Tangent vee(Transformation const& Omega) {\n    return Tangent(Omega(2, 1), Omega(0, 2), Omega(1, 0));\n  }\n\n protected:\n  /// Mutator of unit_quaternion is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC QuaternionMember& unit_quaternion_nonconst() {\n    return unit_quaternion_;\n  }\n\n  QuaternionMember unit_quaternion_;\n};\n\n}  // namespace Sophus\n\nnamespace Eigen {\n/// Specialization of Eigen::Map for ``SO3``; derived from SO3Base.\n///\n/// Allows us to wrap SO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO3<Scalar_>, Options>\n    : public Sophus::SO3Base<Map<Sophus::SO3<Scalar_>, Options>> {\n public:\n  using Base = Sophus::SO3Base<Map<Sophus::SO3<Scalar_>, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  /// ``Base`` is friend so unit_quaternion_nonconst can be accessed from\n  /// ``Base``.\n  friend class Sophus::SO3Base<Map<Sophus::SO3<Scalar_>, Options>>;\n\n  using Base::operator=;\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar* coeffs) : unit_quaternion_(coeffs) {}\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar>, Options> const& unit_quaternion()\n      const {\n    return unit_quaternion_;\n  }\n\n protected:\n  /// Mutator of unit_quaternion is protected to ensure class invariant.\n  ///\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar>, Options>&\n  unit_quaternion_nonconst() {\n    return unit_quaternion_;\n  }\n\n  Map<Eigen::Quaternion<Scalar>, Options> unit_quaternion_;\n};\n\n/// Specialization of Eigen::Map for ``SO3 const``; derived from SO3Base.\n///\n/// Allows us to wrap SO3 objects around POD array (e.g. 
external c style\n/// quaternion).\ntemplate <class Scalar_, int Options>\nclass Map<Sophus::SO3<Scalar_> const, Options>\n    : public Sophus::SO3Base<Map<Sophus::SO3<Scalar_> const, Options>> {\n public:\n  using Base = Sophus::SO3Base<Map<Sophus::SO3<Scalar_> const, Options>>;\n  using Scalar = Scalar_;\n  using Transformation = typename Base::Transformation;\n  using Point = typename Base::Point;\n  using HomogeneousPoint = typename Base::HomogeneousPoint;\n  using Tangent = typename Base::Tangent;\n  using Adjoint = typename Base::Adjoint;\n\n  using Base::operator*=;\n  using Base::operator*;\n\n  SOPHUS_FUNC explicit Map(Scalar const* coeffs) : unit_quaternion_(coeffs) {}\n\n  /// Accessor of unit quaternion.\n  ///\n  SOPHUS_FUNC Map<Eigen::Quaternion<Scalar> const, Options> const&\n  unit_quaternion() const {\n    return unit_quaternion_;\n  }\n\n protected:\n  /// Mutator of unit_quaternion is protected to ensure class invariant.\n  ///\n  Map<Eigen::Quaternion<Scalar> const, Options> const unit_quaternion_;\n};\n}  // namespace Eigen\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/spline.hpp",
    "content": "/// @file\n// Basis spline implementation on Lie Group following:\n// S. Lovegrove, A. Patron-Perez, G. Sibley, BMVC 2013\n// http://www.bmva.org/bmvc/2013/Papers/paper0093/paper0093.pdf\n\n#pragma once\n\n#include \"types.hpp\"\n\nnamespace Sophus {\n\ntemplate <class Scalar>\nclass SplineBasisFunction {\n public:\n  static SOPHUS_FUNC Eigen::Matrix<Scalar, 3, 4> C() {\n    Eigen::Matrix<Scalar, 3, 4> C;\n    Scalar const o(0);\n\n    // clang-format off\n    C << Scalar(5./6),  Scalar(3./6), -Scalar(3./6),  Scalar(1./6),\n         Scalar(1./6),  Scalar(3./6),  Scalar(3./6), -Scalar(2./6),\n                    o,             o,             o,  Scalar(1./6);\n    // clang-format on\n    return C;\n  }\n\n  static SOPHUS_FUNC Vector3<Scalar> B(Scalar const& u) {\n    // SOPHUS_ENSURE(u >= Scalar(0), \"but %\", u);\n    // SOPHUS_ENSURE(u < Scalar(1), \"but %\", u);\n    Scalar u_square(u * u);\n    return C() * Vector4<Scalar>(Scalar(1), u, u_square, u * u_square);\n  }\n\n  static SOPHUS_FUNC Vector3<Scalar> Dt_B(Scalar const& u,\n                                          Scalar const& delta_t) {\n    // SOPHUS_ENSURE(u >= Scalar(0), \"but %\", u);\n    // SOPHUS_ENSURE(u < Scalar(1), \"but %\", u);\n    return (Scalar(1) / delta_t) * C() *\n           Vector4<Scalar>(Scalar(0), Scalar(1), Scalar(2) * u,\n                           Scalar(3) * u * u);\n  }\n\n  static SOPHUS_FUNC Vector3<Scalar> Dt2_B(Scalar const& u,\n                                           Scalar const& delta_t) {\n    // SOPHUS_ENSURE(u >= Scalar(0), \"but %\", u);\n    // SOPHUS_ENSURE(u < Scalar(1), \"but %\", u);\n    return (Scalar(1) / (delta_t * delta_t)) * C() *\n           Vector4<Scalar>(Scalar(0), Scalar(0), Scalar(2), Scalar(6) * u);\n  }\n};\n\ntemplate <class LieGroup_>\nclass BasisSplineFn {\n public:\n  using LieGroup = LieGroup_;\n  using Scalar = typename LieGroup::Scalar;\n  using Transformation = typename LieGroup::Transformation;\n  using Tangent = typename 
LieGroup::Tangent;\n\n  static LieGroup parent_T_spline(\n      const LieGroup& parent_Ts_control_point,\n      std::tuple<Tangent, Tangent, Tangent> const& control_tagent_vectors,\n      double u) {\n    auto AA = A(control_tagent_vectors, u);\n    return parent_Ts_control_point * std::get<0>(AA) * std::get<1>(AA) *\n           std::get<2>(AA);\n  }\n\n  static Transformation Dt_parent_T_spline(\n      const LieGroup& parent_Ts_control_point,\n      std::tuple<Tangent, Tangent, Tangent> const& control_tagent_vectors,\n      double u, double delta_t) {\n    auto AA = A(control_tagent_vectors, u);\n    auto Dt_AA = Dt_A(AA, control_tagent_vectors, u, delta_t);\n    return parent_Ts_control_point.matrix() *\n           ((std::get<0>(Dt_AA) * std::get<1>(AA).matrix() *\n             std::get<2>(AA).matrix()) +\n            (std::get<0>(AA).matrix() * std::get<1>(Dt_AA) *\n             std::get<2>(AA).matrix()) +\n            (std::get<0>(AA).matrix() * std::get<1>(AA).matrix() *\n             std::get<2>(Dt_AA)));\n  }\n\n  static Transformation Dt2_parent_T_spline(\n      const LieGroup& parent_Ts_control_point,\n      std::tuple<Tangent, Tangent, Tangent> const& control_tagent_vectors,\n      double u, double delta_t) {\n    using Scalar = typename LieGroup::Scalar;\n    auto AA = A(control_tagent_vectors, u);\n    auto Dt_AA = Dt_A(AA, control_tagent_vectors, u, delta_t);\n    auto Dt2_AA = Dt2_A(AA, Dt_AA, control_tagent_vectors, u, delta_t);\n\n    return parent_Ts_control_point.matrix() *\n           ((std::get<0>(Dt2_AA) * std::get<1>(AA).matrix() *\n             std::get<2>(AA).matrix()) +\n            (std::get<0>(AA).matrix() * std::get<1>(Dt2_AA) *\n             std::get<2>(AA).matrix()) +\n            (std::get<0>(AA).matrix() * std::get<1>(AA).matrix() *\n             std::get<2>(Dt2_AA)) +\n            Scalar(2) * ((std::get<0>(Dt_AA) * std::get<1>(Dt_AA) *\n                          std::get<2>(AA).matrix()) +\n                         
(std::get<0>(Dt_AA) * std::get<1>(AA).matrix() *\n                          std::get<2>(Dt_AA)) +\n                         (std::get<0>(AA).matrix() * std::get<1>(Dt_AA) *\n                          std::get<2>(Dt_AA))));\n  }\n\n private:\n  static std::tuple<LieGroup, LieGroup, LieGroup> A(\n      std::tuple<Tangent, Tangent, Tangent> const& control_tagent_vectors,\n      double u) {\n    Eigen::Vector3d B = SplineBasisFunction<double>::B(u);\n    return std::make_tuple(\n        LieGroup::exp(B[0] * std::get<0>(control_tagent_vectors)),\n        LieGroup::exp(B[1] * std::get<1>(control_tagent_vectors)),\n        LieGroup::exp(B[2] * std::get<2>(control_tagent_vectors)));\n  }\n\n  static std::tuple<Transformation, Transformation, Transformation> Dt_A(\n      std::tuple<LieGroup, LieGroup, LieGroup> const& AA,\n      const std::tuple<Tangent, Tangent, Tangent>& control_tagent_vectors,\n      double u, double delta_t) {\n    Eigen::Vector3d Dt_B = SplineBasisFunction<double>::Dt_B(u, delta_t);\n    return std::make_tuple(\n        Dt_B[0] * std::get<0>(AA).matrix() *\n            LieGroup::hat(std::get<0>(control_tagent_vectors)),\n        Dt_B[1] * std::get<1>(AA).matrix() *\n            LieGroup::hat(std::get<1>(control_tagent_vectors)),\n        Dt_B[2] * std::get<2>(AA).matrix() *\n            LieGroup::hat(std::get<2>(control_tagent_vectors)));\n  }\n\n  static std::tuple<Transformation, Transformation, Transformation> Dt2_A(\n      std::tuple<LieGroup, LieGroup, LieGroup> const& AA,\n      std::tuple<Transformation, Transformation, Transformation> const& Dt_AA,\n      std::tuple<Tangent, Tangent, Tangent> const& control_tagent_vectors,\n      double u, double delta_t) {\n    Eigen::Vector3d Dt_B = SplineBasisFunction<double>::Dt_B(u, delta_t);\n    Eigen::Vector3d Dt2_B = SplineBasisFunction<double>::Dt2_B(u, delta_t);\n\n    return std::make_tuple(\n        (Dt_B[0] * std::get<0>(Dt_AA).matrix() +\n         Dt2_B[0] * std::get<0>(AA).matrix()) *\n          
  LieGroup::hat(std::get<0>(control_tagent_vectors)),\n        (Dt_B[1] * std::get<1>(Dt_AA).matrix() +\n         Dt2_B[1] * std::get<1>(AA).matrix()) *\n            LieGroup::hat(std::get<1>(control_tagent_vectors)),\n        (Dt_B[2] * std::get<2>(Dt_AA).matrix() +\n         Dt2_B[2] * std::get<2>(AA).matrix()) *\n            LieGroup::hat(std::get<2>(control_tagent_vectors)));\n  }\n};\n\nenum class SegmentCase { first, normal, last };\n\ntemplate <class LieGroup>\nstruct BasisSplineSegment {\n public:\n  using T = typename LieGroup::Scalar;\n  using Transformation = typename LieGroup::Transformation;\n\n  BasisSplineSegment(SegmentCase segment_case, T const* const raw_ptr0,\n                     T const* const raw_ptr1, T const* const raw_ptr2,\n                     T const* const raw_ptr3)\n      : segment_case_(segment_case),\n        raw_params0_(raw_ptr0),\n        raw_params1_(raw_ptr1),\n        raw_params2_(raw_ptr2),\n        raw_params3_(raw_ptr3) {}\n\n  Eigen::Map<LieGroup const> const world_pose_foo_prev() const {\n    return Eigen::Map<LieGroup const>(raw_params0_);\n  }\n  Eigen::Map<LieGroup const> const world_pose_foo_0() const {\n    return Eigen::Map<LieGroup const>(raw_params1_);\n  }\n\n  Eigen::Map<LieGroup const> const world_pose_foo_1() const {\n    return Eigen::Map<LieGroup const>(raw_params2_);\n  }\n\n  Eigen::Map<LieGroup const> const world_pose_foo_2() const {\n    return Eigen::Map<LieGroup const>(raw_params3_);\n  }\n  LieGroup parent_T_spline(double u) {\n    switch (segment_case_) {\n      case SegmentCase::first:\n        return BasisSplineFn<LieGroup>::parent_T_spline(\n            world_pose_foo_0(),\n            std::make_tuple(\n                (world_pose_foo_0().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_2()).log()),\n            u);\n      case SegmentCase::normal:\n        return 
BasisSplineFn<LieGroup>::parent_T_spline(\n            world_pose_foo_prev(),\n            std::make_tuple(\n                (world_pose_foo_prev().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_2()).log()),\n            u);\n      case SegmentCase::last:\n        return BasisSplineFn<LieGroup>::parent_T_spline(\n            world_pose_foo_prev(),\n            std::make_tuple(\n                (world_pose_foo_prev().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_1()).log()),\n            u);\n    }\n    SOPHUS_ENSURE(false, \"logic error\");\n  }\n\n  Transformation Dt_parent_T_spline(double u, double delta_t) {\n    switch (segment_case_) {\n      case SegmentCase::first:\n        return BasisSplineFn<LieGroup>::Dt_parent_T_spline(\n            world_pose_foo_0(),\n            std::make_tuple(\n                (world_pose_foo_0().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_2()).log()),\n            u, delta_t);\n      case SegmentCase::normal:\n        return BasisSplineFn<LieGroup>::Dt_parent_T_spline(\n            world_pose_foo_prev(),\n            std::make_tuple(\n                (world_pose_foo_prev().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_2()).log()),\n            u, delta_t);\n      case SegmentCase::last:\n        return BasisSplineFn<LieGroup>::Dt_parent_T_spline(\n            world_pose_foo_prev(),\n            std::make_tuple(\n                (world_pose_foo_prev().inverse() * world_pose_foo_0()).log(),\n                
(world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_1()).log()),\n            u, delta_t);\n    }\n    SOPHUS_ENSURE(false, \"logic error\");\n  }\n\n  Transformation Dt2_parent_T_spline(double u, double delta_t) {\n    switch (segment_case_) {\n      case SegmentCase::first:\n        return BasisSplineFn<LieGroup>::Dt2_parent_T_spline(\n            world_pose_foo_0(),\n            std::make_tuple(\n                (world_pose_foo_0().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_2()).log()),\n            u, delta_t);\n      case SegmentCase::normal:\n        return BasisSplineFn<LieGroup>::Dt2_parent_T_spline(\n            world_pose_foo_prev(),\n            std::make_tuple(\n                (world_pose_foo_prev().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_2()).log()),\n            u, delta_t);\n      case SegmentCase::last:\n        return BasisSplineFn<LieGroup>::Dt2_parent_T_spline(\n            world_pose_foo_prev(),\n            std::make_tuple(\n                (world_pose_foo_prev().inverse() * world_pose_foo_0()).log(),\n                (world_pose_foo_0().inverse() * world_pose_foo_1()).log(),\n                (world_pose_foo_1().inverse() * world_pose_foo_1()).log()),\n            u, delta_t);\n    }\n    SOPHUS_ENSURE(false, \"logic error\");\n  }\n\n private:\n  SegmentCase segment_case_;\n  T const* raw_params0_;\n  T const* raw_params1_;\n  T const* raw_params2_;\n  T const* raw_params3_;\n};\n\ntemplate <class LieGroup_>\nclass BasisSplineImpl {\n public:\n  using LieGroup = LieGroup_;\n  using Scalar = typename LieGroup::Scalar;\n  using Transformation = typename LieGroup::Transformation;\n  using Tangent = 
typename LieGroup::Tangent;\n\n  BasisSplineImpl(const std::vector<LieGroup>& parent_Ts_control_point,\n                  double delta_t)\n      : parent_Ts_control_point_(parent_Ts_control_point), delta_t_(delta_t) {\n    SOPHUS_ENSURE(parent_Ts_control_point_.size() >= 2u, \", but {}\",\n                  parent_Ts_control_point_.size());\n  }\n\n  LieGroup parent_T_spline(int i, double u) const {\n    SOPHUS_ENSURE(i >= 0, \"i = {}\", i);\n    SOPHUS_ENSURE(i < this->getNumSegments(),\n                  \"i = {};  this->getNumSegments() = {};  \"\n                  \"parent_Ts_control_point_.size() = {}\",\n                  i, this->getNumSegments(), parent_Ts_control_point_.size());\n\n    SegmentCase segment_case =\n        i == 0 ? SegmentCase::first\n               : (i == this->getNumSegments() - 1 ? SegmentCase::last\n                                                  : SegmentCase::normal);\n\n    int idx_prev = std::max(0, i - 1);\n    int idx_0 = i;\n    int idx_1 = i + 1;\n    int idx_2 = std::min(i + 2, int(this->parent_Ts_control_point_.size()) - 1);\n\n    return BasisSplineSegment<LieGroup>(\n               segment_case, parent_Ts_control_point_[idx_prev].data(),\n               parent_Ts_control_point_[idx_0].data(),\n               parent_Ts_control_point_[idx_1].data(),\n               parent_Ts_control_point_[idx_2].data())\n        .parent_T_spline(u);\n  }\n\n  Transformation Dt_parent_T_spline(int i, double u) const {\n    SOPHUS_ENSURE(i >= 0, \"i = {}\", i);\n    SOPHUS_ENSURE(i < this->getNumSegments(),\n                  \"i = {};  this->getNumSegments() = {};  \"\n                  \"parent_Ts_control_point_.size() = {}\",\n                  i, this->getNumSegments(), parent_Ts_control_point_.size());\n\n    SegmentCase segment_case =\n        i == 0 ? SegmentCase::first\n               : (i == this->getNumSegments() - 1 ? 
SegmentCase::last\n                                                  : SegmentCase::normal);\n\n    int idx_prev = std::max(0, i - 1);\n    int idx_0 = i;\n    int idx_1 = i + 1;\n    int idx_2 = std::min(i + 2, int(this->parent_Ts_control_point_.size()) - 1);\n\n    return BasisSplineSegment<LieGroup>(\n               segment_case, parent_Ts_control_point_[idx_prev].data(),\n               parent_Ts_control_point_[idx_0].data(),\n               parent_Ts_control_point_[idx_1].data(),\n               parent_Ts_control_point_[idx_2].data())\n        .Dt_parent_T_spline(u, delta_t_);\n  }\n\n  Transformation Dt2_parent_T_spline(int i, double u) const {\n    SOPHUS_ENSURE(i >= 0, \"i = {}\", i);\n    SOPHUS_ENSURE(i < this->getNumSegments(),\n                  \"i = {};  this->getNumSegments() = {};  \"\n                  \"parent_Ts_control_point_.size() = {}\",\n                  i, this->getNumSegments(), parent_Ts_control_point_.size());\n\n    SegmentCase segment_case =\n        i == 0 ? SegmentCase::first\n               : (i == this->getNumSegments() - 1 ? 
SegmentCase::last\n                                                  : SegmentCase::normal);\n\n    int idx_prev = std::max(0, i - 1);\n    int idx_0 = i;\n    int idx_1 = i + 1;\n    int idx_2 = std::min(i + 2, int(this->parent_Ts_control_point_.size()) - 1);\n\n    return BasisSplineSegment<LieGroup>(\n               segment_case, parent_Ts_control_point_[idx_prev].data(),\n               parent_Ts_control_point_[idx_0].data(),\n               parent_Ts_control_point_[idx_1].data(),\n               parent_Ts_control_point_[idx_2].data())\n        .Dt2_parent_T_spline(u, delta_t_);\n  }\n\n  const std::vector<LieGroup>& parent_Ts_control_point() const {\n    return parent_Ts_control_point_;\n  }\n\n  std::vector<LieGroup>& parent_Ts_control_point() {\n    return parent_Ts_control_point_;\n  }\n\n  int getNumSegments() const {\n    return int(parent_Ts_control_point_.size()) - 1;\n  }\n\n  double delta_t() const { return delta_t_; }\n\n private:\n  std::vector<LieGroup> parent_Ts_control_point_;\n  double delta_t_;\n};\n\nstruct IndexAndU {\n  int i;\n  double u;\n};\n\ntemplate <class LieGroup_>\nclass BasisSpline {\n public:\n  using LieGroup = LieGroup_;\n  using Scalar = typename LieGroup::Scalar;\n  using Transformation = typename LieGroup::Transformation;\n  using Tangent = typename LieGroup::Tangent;\n\n  BasisSpline(std::vector<LieGroup> parent_Ts_control_point, double t0,\n              double delta_t)\n      : impl_(std::move(parent_Ts_control_point), delta_t), t0_(t0) {}\n\n  LieGroup parent_T_spline(double t) const {\n    IndexAndU index_and_u = this->index_and_u(t);\n\n    return impl_.parent_T_spline(index_and_u.i, index_and_u.u);\n  }\n\n  Transformation Dt_parent_T_spline(double t) const {\n    IndexAndU index_and_u = this->index_and_u(t);\n    return impl_.Dt_parent_T_spline(index_and_u.i, index_and_u.u);\n  }\n\n  Transformation Dt2_parent_T_spline(double t) const {\n    IndexAndU index_and_u = this->index_and_u(t);\n    return 
impl_.Dt2_parent_T_spline(index_and_u.i, index_and_u.u);\n  }\n\n  double t0() const { return t0_; }\n\n  double tmax() const { return t0_ + impl_.delta_t() * getNumSegments(); }\n\n  const std::vector<LieGroup>& parent_Ts_control_point() const {\n    return impl_.parent_Ts_control_point();\n  }\n\n  std::vector<LieGroup>& parent_Ts_control_point() {\n    return impl_.parent_Ts_control_point();\n  }\n\n  int getNumSegments() const { return impl_.getNumSegments(); }\n\n  double s(double t) const { return (t - t0_) / impl_.delta_t(); }\n\n  double delta_t() const { return impl_.delta_t(); }\n\n  IndexAndU index_and_u(double t) const {\n    SOPHUS_ENSURE(t >= t0_, \"{} vs. {}\", t, t0_);\n    SOPHUS_ENSURE(t <= this->tmax(), \"{} vs. {}\", t, this->tmax());\n\n    double s = this->s(t);\n    double i;\n    IndexAndU index_and_u;\n    index_and_u.u = std::modf(s, &i);\n    index_and_u.i = int(i);\n    if (index_and_u.u > Sophus::Constants<double>::epsilon()) {\n      return index_and_u;\n    }\n\n    // u ~=~ 0.0\n    if (t < 0.5 * this->tmax()) {\n      // First half of spline, keep as is (i, 0.0).\n      return index_and_u;\n    }\n    // Second half of spline, use (i-1, 1.0) instead. This way we can represent\n    // t == tmax (and not just t<tmax).\n    index_and_u.u += 1.0;\n    --index_and_u.i;\n\n    return index_and_u;\n  }\n\n private:\n  BasisSplineImpl<LieGroup> impl_;\n\n  double t0_;\n};\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/test_macros.hpp",
    "content": "#pragma once\n\n#include <iostream>\n#include <sstream>\n\n#include <sophus/types.hpp>\n\nnamespace Sophus {\nnamespace details {\n\ntemplate <class Scalar, class Enable = void>\nclass Pretty {\n public:\n  static std::string impl(Scalar s) {\n    std::stringstream sstr;\n    sstr << s;\n    return sstr.str();\n  }\n};\n\ntemplate <class Ptr>\nclass Pretty<Ptr, enable_if_t<std::is_pointer<Ptr>::value>> {\n public:\n  static std::string impl(Ptr ptr) {\n    std::stringstream sstr;\n    sstr << std::intptr_t(ptr);\n    return sstr.str();\n  }\n};\n\ntemplate <class Scalar, int M, int N>\nclass Pretty<Eigen::Matrix<Scalar, M, N>, void> {\n public:\n  static std::string impl(Matrix<Scalar, M, N> const& v) {\n    std::stringstream sstr;\n    sstr << \"\\n\" << v << \"\\n\";\n    return sstr.str();\n  }\n};\n\ntemplate <class T>\nstd::string pretty(T const& v) {\n  return Pretty<T>::impl(v);\n}\n\ntemplate <class... Args>\nvoid testFailed(bool& passed, char const* func, char const* file, int line,\n                std::string const& msg) {\n  std::fprintf(stderr,\n               \"Test failed in function '%s', \"\n               \"file '%s', line %d.\\n\",\n               func, file, line);\n  std::cerr << msg << \"\\n\\n\";\n  passed = false;\n}\n\n}  // namespace details\n\nvoid processTestResult(bool passed) {\n  if (!passed) {\n    // LCOV_EXCL_START\n    std::cerr << \"failed!\" << std::endl << std::endl;\n    exit(-1);\n    // LCOV_EXCL_STOP\n  }\n  std::cerr << \"passed.\" << std::endl << std::endl;\n}\n}  // namespace Sophus\n\n#define SOPHUS_STRINGIFY(x) #x\n\n/// Tests whether condition is true.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST(passed, condition, descr, ...)                             
\\\n  do {                                                                         \\\n    if (!(condition)) {                                                        \\\n      std::string msg = SOPHUS_FMT_STR(\"condition ``{}`` is false\\n\",          \\\n                                       SOPHUS_STRINGIFY(condition));           \\\n      msg += SOPHUS_FMT_STR(descr, ##__VA_ARGS__);                             \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// Tests whether left is equal to right given a threshold.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_EQUAL(passed, left, right, descr, ...)                     \\\n  do {                                                                         \\\n    if (left != right) {                                                       \\\n      std::string msg = SOPHUS_FMT_STR(                                        \\\n          \"{} (={}) is not equal to {} (={})\\n\", SOPHUS_STRINGIFY(left),       \\\n          Sophus::details::pretty(left), SOPHUS_STRINGIFY(right),              \\\n          Sophus::details::pretty(right));                                     \\\n      msg += SOPHUS_FMT_STR(descr, ##__VA_ARGS__);                             \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// Tests whether left is equal to right given a threshold.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_NEQ(passed, left, right, descr, ...)                       
\\\n  do {                                                                         \\\n    if (left == right) {                                                       \\\n      std::string msg = SOPHUS_FMT_STR(                                        \\\n          \"{} (={}) should not be equal to {} (={})\\n\",                        \\\n          SOPHUS_STRINGIFY(left), Sophus::details::pretty(left),               \\\n          SOPHUS_STRINGIFY(right), Sophus::details::pretty(right));            \\\n      msg += SOPHUS_FMT_STR(descr, ##__VA_ARGS__);                             \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// Tests whether left is approximately equal to right given a threshold.\n/// The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_APPROX(passed, left, right, thr, descr, ...)               
\\\n  do {                                                                         \\\n    auto nrm = Sophus::maxMetric((left), (right));                             \\\n    if (!(nrm < (thr))) {                                                      \\\n      std::string msg = SOPHUS_FMT_STR(                                        \\\n          \"{} (={}) is not approx {} (={}); {} is {}; nrm is {}\\n\",            \\\n          SOPHUS_STRINGIFY(left), Sophus::details::pretty(left),               \\\n          SOPHUS_STRINGIFY(right), Sophus::details::pretty(right),             \\\n          SOPHUS_STRINGIFY(thr), Sophus::details::pretty(thr),                 \\\n          Sophus::details::pretty(nrm));                                       \\\n      msg += SOPHUS_FMT_STR(descr, ##__VA_ARGS__);                             \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n\n/// Tests whether left is NOT approximately equal to right given a\n/// threshold. The in-out parameter passed will be set to false if test fails.\n#define SOPHUS_TEST_NOT_APPROX(passed, left, right, thr, descr, ...)           
\\\n  do {                                                                         \\\n    auto nrm = Sophus::maxMetric((left), (right));                             \\\n    if (nrm < (thr)) {                                                         \\\n      std::string msg = SOPHUS_FMT_STR(                                        \\\n          \"{} (={}) is approx {} (={}), but it should not!\\n {} is {}; nrm \"   \\\n          \"is {}\\n\",                                                           \\\n          SOPHUS_STRINGIFY(left), Sophus::details::pretty(left),               \\\n          SOPHUS_STRINGIFY(right), Sophus::details::pretty(right),             \\\n          SOPHUS_STRINGIFY(thr), Sophus::details::pretty(thr),                 \\\n          Sophus::details::pretty(nrm));                                       \\\n      msg += SOPHUS_FMT_STR(descr, ##__VA_ARGS__);                             \\\n      Sophus::details::testFailed(passed, SOPHUS_FUNCTION, __FILE__, __LINE__, \\\n                                  msg);                                        \\\n    }                                                                          \\\n  } while (false)\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/types.hpp",
    "content": "/// @file\n/// Common type aliases.\n\n#pragma once\n\n#include <type_traits>\n#include \"common.hpp\"\n\nnamespace Sophus {\n\ntemplate <class Scalar, int M, int Options = 0>\nusing Vector = Eigen::Matrix<Scalar, M, 1, Options>;\n\ntemplate <class Scalar, int Options = 0>\nusing Vector2 = Vector<Scalar, 2, Options>;\nusing Vector2f = Vector2<float>;\nusing Vector2d = Vector2<double>;\n\ntemplate <class Scalar, int Options = 0>\nusing Vector3 = Vector<Scalar, 3, Options>;\nusing Vector3f = Vector3<float>;\nusing Vector3d = Vector3<double>;\n\ntemplate <class Scalar>\nusing Vector4 = Vector<Scalar, 4>;\nusing Vector4f = Vector4<float>;\nusing Vector4d = Vector4<double>;\n\ntemplate <class Scalar>\nusing Vector6 = Vector<Scalar, 6>;\nusing Vector6f = Vector6<float>;\nusing Vector6d = Vector6<double>;\n\ntemplate <class Scalar>\nusing Vector7 = Vector<Scalar, 7>;\nusing Vector7f = Vector7<float>;\nusing Vector7d = Vector7<double>;\n\ntemplate <class Scalar, int M, int N>\nusing Matrix = Eigen::Matrix<Scalar, M, N>;\n\ntemplate <class Scalar>\nusing Matrix2 = Matrix<Scalar, 2, 2>;\nusing Matrix2f = Matrix2<float>;\nusing Matrix2d = Matrix2<double>;\n\ntemplate <class Scalar>\nusing Matrix3 = Matrix<Scalar, 3, 3>;\nusing Matrix3f = Matrix3<float>;\nusing Matrix3d = Matrix3<double>;\n\ntemplate <class Scalar>\nusing Matrix4 = Matrix<Scalar, 4, 4>;\nusing Matrix4f = Matrix4<float>;\nusing Matrix4d = Matrix4<double>;\n\ntemplate <class Scalar>\nusing Matrix6 = Matrix<Scalar, 6, 6>;\nusing Matrix6f = Matrix6<float>;\nusing Matrix6d = Matrix6<double>;\n\ntemplate <class Scalar>\nusing Matrix7 = Matrix<Scalar, 7, 7>;\nusing Matrix7f = Matrix7<float>;\nusing Matrix7d = Matrix7<double>;\n\ntemplate <class Scalar, int N, int Options = 0>\nusing ParametrizedLine = Eigen::ParametrizedLine<Scalar, N, Options>;\n\ntemplate <class Scalar, int Options = 0>\nusing ParametrizedLine3 = ParametrizedLine<Scalar, 3, Options>;\nusing ParametrizedLine3f = 
ParametrizedLine3<float>;\nusing ParametrizedLine3d = ParametrizedLine3<double>;\n\ntemplate <class Scalar, int Options = 0>\nusing ParametrizedLine2 = ParametrizedLine<Scalar, 2, Options>;\nusing ParametrizedLine2f = ParametrizedLine2<float>;\nusing ParametrizedLine2d = ParametrizedLine2<double>;\n\ntemplate <class Scalar, int N, int Options = 0>\nusing Hyperplane = Eigen::Hyperplane<Scalar, N, Options>;\n\ntemplate <class Scalar, int Options = 0>\nusing Hyperplane3 = Eigen::Hyperplane<Scalar, 3, Options>;\nusing Hyperplane3f = Hyperplane3<float>;\nusing Hyperplane3d = Hyperplane3<double>;\n\ntemplate <class Scalar, int Options = 0>\nusing Hyperplane2 = Eigen::Hyperplane<Scalar, 2, Options>;\nusing Hyperplane2f = Hyperplane2<float>;\nusing Hyperplane2d = Hyperplane2<double>;\n\nnamespace details {\ntemplate <class Scalar>\nclass MaxMetric {\n public:\n  static Scalar impl(Scalar s0, Scalar s1) {\n    using std::abs;\n    return abs(s0 - s1);\n  }\n};\n\ntemplate <class Scalar, int M, int N>\nclass MaxMetric<Matrix<Scalar, M, N>> {\n public:\n  static Scalar impl(Matrix<Scalar, M, N> const& p0,\n                     Matrix<Scalar, M, N> const& p1) {\n    return (p0 - p1).template lpNorm<Eigen::Infinity>();\n  }\n};\n\ntemplate <class Scalar>\nclass SetToZero {\n public:\n  static void impl(Scalar& s) { s = Scalar(0); }\n};\n\ntemplate <class Scalar, int M, int N>\nclass SetToZero<Matrix<Scalar, M, N>> {\n public:\n  static void impl(Matrix<Scalar, M, N>& v) { v.setZero(); }\n};\n\ntemplate <class T1, class Scalar>\nclass SetElementAt;\n\ntemplate <class Scalar>\nclass SetElementAt<Scalar, Scalar> {\n public:\n  static void impl(Scalar& s, Scalar value, int at) {\n    SOPHUS_ENSURE(at == 0, \"is {}\", SOPHUS_FMT_ARG(at));\n    s = value;\n  }\n};\n\ntemplate <class Scalar, int N>\nclass SetElementAt<Vector<Scalar, N>, Scalar> {\n public:\n  static void impl(Vector<Scalar, N>& v, Scalar value, int at) {\n    SOPHUS_ENSURE(at >= 0 && at < N, \"is {}\", 
SOPHUS_FMT_ARG(at));\n    v[at] = value;\n  }\n};\n\ntemplate <class Scalar>\nclass SquaredNorm {\n public:\n  static Scalar impl(Scalar const& s) { return s * s; }\n};\n\ntemplate <class Scalar, int N>\nclass SquaredNorm<Matrix<Scalar, N, 1>> {\n public:\n  static Scalar impl(Matrix<Scalar, N, 1> const& s) { return s.squaredNorm(); }\n};\n\ntemplate <class Scalar>\nclass Transpose {\n public:\n  static Scalar impl(Scalar const& s) { return s; }\n};\n\ntemplate <class Scalar, int M, int N>\nclass Transpose<Matrix<Scalar, M, N>> {\n public:\n  static Matrix<Scalar, M, N> impl(Matrix<Scalar, M, N> const& s) {\n    return s.transpose();\n  }\n};\n}  // namespace details\n\n/// Returns maximum metric between two points ``p0`` and ``p1``, with ``p0, p1``\n/// being matrices or a scalars.\n///\ntemplate <class T>\nauto maxMetric(T const& p0, T const& p1)\n    -> decltype(details::MaxMetric<T>::impl(p0, p1)) {\n  return details::MaxMetric<T>::impl(p0, p1);\n}\n\n/// Sets point ``p`` to zero, with ``p`` being a matrix or a scalar.\n///\ntemplate <class T>\nvoid setToZero(T& p) {\n  return details::SetToZero<T>::impl(p);\n}\n\n/// Sets ``i``th component of ``p`` to ``value``, with ``p`` being a\n/// matrix or a scalar. 
If ``p`` is a scalar, ``i`` must be ``0``.\n///\ntemplate <class T, class Scalar>\nvoid setElementAt(T& p, Scalar value, int i) {\n  return details::SetElementAt<T, Scalar>::impl(p, value, i);\n}\n\n/// Returns the squared 2-norm of ``p``, with ``p`` being a vector or a scalar.\n///\ntemplate <class T>\nauto squaredNorm(T const& p) -> decltype(details::SquaredNorm<T>::impl(p)) {\n  return details::SquaredNorm<T>::impl(p);\n}\n\n/// Returns ``p.transpose()`` if ``p`` is a matrix, and simply ``p`` if m is a\n/// scalar.\n///\ntemplate <class T>\nauto transpose(T const& p) -> decltype(details::Transpose<T>::impl(T())) {\n  return details::Transpose<T>::impl(p);\n}\n\ntemplate <class Scalar>\nstruct IsFloatingPoint {\n  static bool const value = std::is_floating_point<Scalar>::value;\n};\n\ntemplate <class Scalar, int M, int N>\nstruct IsFloatingPoint<Matrix<Scalar, M, N>> {\n  static bool const value = std::is_floating_point<Scalar>::value;\n};\n\ntemplate <class Scalar_>\nstruct GetScalar {\n  using Scalar = Scalar_;\n};\n\ntemplate <class Scalar_, int M, int N>\nstruct GetScalar<Matrix<Scalar_, M, N>> {\n  using Scalar = Scalar_;\n};\n\n/// If the Vector type is of fixed size, then IsFixedSizeVector::value will be\n/// true.\ntemplate <typename Vector, int NumDimensions,\n          typename = typename std::enable_if<\n              Vector::RowsAtCompileTime == NumDimensions &&\n              Vector::ColsAtCompileTime == 1>::type>\nstruct IsFixedSizeVector : std::true_type {};\n\n/// Planes in 3d are hyperplanes.\ntemplate <class T>\nusing Plane3 = Eigen::Hyperplane<T, 3>;\nusing Plane3d = Plane3<double>;\nusing Plane3f = Plane3<float>;\n\n/// Lines in 2d are hyperplanes.\ntemplate <class T>\nusing Line2 = Eigen::Hyperplane<T, 2>;\nusing Line2d = Line2<double>;\nusing Line2f = Line2<float>;\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus/velocities.hpp",
    "content": "#pragma once\n\n#include <functional>\n\n#include \"num_diff.hpp\"\n#include \"se3.hpp\"\n\nnamespace Sophus {\nnamespace experimental {\n// Experimental since the API will certainly change drastically in the future.\n\n// Transforms velocity vector by rotation ``foo_R_bar``.\n//\n// Note: vel_bar can be either a linear or a rotational velocity vector.\n//\ntemplate <class Scalar>\nVector3<Scalar> transformVelocity(SO3<Scalar> const& foo_R_bar,\n                                  Vector3<Scalar> const& vel_bar) {\n  // For rotational velocities note that:\n  //\n  //   vel_bar = vee(foo_R_bar * hat(vel_bar) * foo_R_bar^T)\n  //           = vee(hat(Adj(foo_R_bar) * vel_bar))\n  //           = Adj(foo_R_bar) * vel_bar\n  //           = foo_R_bar * vel_bar.\n  //\n  return foo_R_bar * vel_bar;\n}\n\n// Transforms velocity vector by pose ``foo_T_bar``.\n//\n// Note: vel_bar can be either a linear or a rotational velocity vector.\n//\ntemplate <class Scalar>\nVector3<Scalar> transformVelocity(SE3<Scalar> const& foo_T_bar,\n                                  Vector3<Scalar> const& vel_bar) {\n  return transformVelocity(foo_T_bar.so3(), vel_bar);\n}\n\n// finite difference approximation of instantaneous velocity in frame foo\n//\ntemplate <class Scalar>\nVector3<Scalar> finiteDifferenceRotationalVelocity(\n    std::function<SO3<Scalar>(Scalar)> const& foo_R_bar, Scalar t,\n    Scalar h = Constants<Scalar>::epsilon()) {\n  // https://en.wikipedia.org/w/index.php?title=Angular_velocity&oldid=791867792#Angular_velocity_tensor\n  //\n  // W = dR(t)/dt * R^{-1}(t)\n  Matrix3<Scalar> dR_dt_in_frame_foo = curveNumDiff(\n      [&foo_R_bar](Scalar t0) -> Matrix3<Scalar> {\n        return foo_R_bar(t0).matrix();\n      },\n      t, h);\n  // velocity tensor\n  Matrix3<Scalar> W_in_frame_foo =\n      dR_dt_in_frame_foo * (foo_R_bar(t)).inverse().matrix();\n  return SO3<Scalar>::vee(W_in_frame_foo);\n}\n\n// finite difference approximation of instantaneous velocity in 
frame foo\n//\ntemplate <class Scalar>\nVector3<Scalar> finiteDifferenceRotationalVelocity(\n    std::function<SE3<Scalar>(Scalar)> const& foo_T_bar, Scalar t,\n    Scalar h = Constants<Scalar>::epsilon()) {\n  return finiteDifferenceRotationalVelocity<Scalar>(\n      [&foo_T_bar](Scalar t) -> SO3<Scalar> { return foo_T_bar(t).so3(); }, t,\n      h);\n}\n\n}  // namespace experimental\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/README",
    "content": "# Python binding for Sophus library: sophus-pybind \nsophus-pybind implements a Python binding for Sophus that provides access to SO3, SE3, interpolate and iterativeMean features.\nThe user interface is inspired by scipy.spatial.transform.Rotation. \nHere is a specific list of features.\n\n## Feature list\n* SO3\n  * Initialize with from_quat(), from_matrix(), exp()\n  * Convert to functions: to_quat(), to_matrix(), log()\n  * Multiplication with SO3 or 3D points\n  * Operator [] for setting/getting items with index or slices\n  * Inverse, copy, print, and len\n  * Function vectorization\n* SE3\n  * Initialize with from_quat_and_translation(), from_matrix(), from_matrix3x4(), exp()\n  * Convert to functions to_quat_and_translation(), to_matrix(), to_matrix3x4(), log()\n  * Multiplication with SE3 or 3D points\n  * Operator [] for setting/getting items with index or slices\n  * Function vectorization\n  * Inverse, copy, print, and len\n  * Interpolate between two SE3\n  * Iterative mean of a group of SE3 \n\n## Python module (pysophus) installation steps \n```\n# Create virtual environment\npython3 -m venv ~/sophus_venv\nsource ~/sophus_venv/bin/activate\n\n# Install package \ngit clone <sophus-package>\ncd Sophus\npip install .\n```\n\n## Example code\nExample code is provided in `sophus_pybind/examples/sophus_quickstart_tutorial.ipynb`\n```\ncd Sophus\npython3 -m jupyter notebook sophus_pybind/examples/sophus_quickstart_tutorial.ipynb\n```\n`python3 -m jupyter` ensures that the jupyter comes from the virtual environment that contains the sophus-pybind module.\n\n\n## Re-generate stub files (.pyi) for annotation\n```\ncd Sophus\n# install pybind11-stubgen that will create stub file for a python module\npip3 install pybind11-stubgen\n\n# Create stub files (Requires sophus-pybind to be installed beforehand)\npython3 generate_stubs.py\n\n# Re-install the sophus-pybind project with the stub file\npip install .\n```\n\n## Vectorization detail\nIn python, we 
choose to export our Sophus::SO3 as a vector of SO3 objects by binding the cpp object `SO3Group` defined below. This is because numerical code in python tends to work with arrays of values to get efficient programs. This approach is inspired by scipy.spatial.transform.Rotation.\n```\nclass SO3Group : public std::vector<Sophus::SO3<Scalar>> \nclass SE3Group : public std::vector<Sophus::SE3<Scalar>> \n```\n\n### Passing a single SO3/SE3 object to c++ code in python binding\nTo allow other python binding c++ code to take in a single SO3/SE3 object, we also build a caster so that, even if we wrap SO3Group/SE3Group in python, those can be implicitly converted to the c++ Sophus::SO3/SE3 object at boundaries between languages.\nThis is so we can pass python SO3/SE3 objects to c++ functions as if they were regular 1-element Sophus::SO3/SE3 objects. This simplifies binding the rest of c++ code. The implicit cast fails if the python object is not a 1-element object."
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/SE3PyBind.h",
    "content": "#pragma once\n\n#include \"SO3PyBind.h\"\n\n#include <sophus/average.hpp>\n#include <sophus/common.hpp>\n#include <sophus/interpolate.hpp>\n#include <sophus/se3.hpp>\n\n#include <pybind11/eigen.h>\n#include <pybind11/numpy.h>\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n\nnamespace Sophus {\n\n// In python, we choose to export our Sophus::SE3 as a vector of SE3 objects by binding the cpp\n// object `SE3Group` defined below.\ntemplate <typename Scalar>\nclass SE3Group : public std::vector<Sophus::SE3<Scalar>> {\n public:\n  // The empty constructor is not accessible from python.\n  // Python always create at least one identity element (like c++ sophus)\n  SE3Group() = default;\n  // implicit copy conversion from a Sophus::SE3 value\n  SE3Group(const Sophus::SE3<Scalar>& in) {\n    this->push_back(in);\n  }\n};\n} // namespace Sophus\n\n// The following caster makes so that, even if we wrap SE3Group in python, those can be\n// implicitly converted to the c++ Sophus::SE3 object at boundaries between languages.\n// This is so we can pass python SE3 object to c++ function as if they were regular 1-element\n// Sophus::SE3 object. This simplifies binding the rest of c++ code. This implicit cast fails if the\n// python object is not a 1-element SE3 object.\n// NOTE: this caster makes a copy, so can't not be used for passing a reference of a SE3 element to\n// a c++ function.\nnamespace pybind11 {\nnamespace detail {\ntemplate <>\nstruct type_caster<Sophus::SE3<double>> {\n public:\n  PYBIND11_TYPE_CASTER(Sophus::SE3<double>, _(\"SE3\"));\n\n  // converting from python -> c++ type\n  bool load(handle src, bool convert) {\n    try {\n      Sophus::SE3Group<double>& ref = src.cast<Sophus::SE3Group<double>&>();\n      if (ref.size() != 1) {\n        throw std::domain_error(fmt::format(\n            \"A element of size 1 is required here. 
Input has {} elements.\", ref.size()));\n      }\n      value = ref[0];\n      return true;\n    } catch (const pybind11::cast_error&) {\n      return false; // Conversion failed\n    }\n  }\n\n  // converting from c++ -> python type\n  static handle cast(Sophus::SE3<double> src, return_value_policy policy, handle parent) {\n    return type_caster_base<Sophus::SE3Group<double>>::cast(Sophus::SE3Group<double>(src), policy, parent);\n  }\n};\n} // namespace detail\n} // namespace pybind11\n\nnamespace Sophus {\n/*SE3*/\ntemplate <typename Scalar>\nusing PybindSE3Type = pybind11::class_<SE3Group<Scalar>>;\n\ntemplate <typename Scalar>\nPybindSE3Type<Scalar> exportSE3Transformation(\n    pybind11::module& module,\n    const std::string& name = \"SE3\") {\n  PybindSE3Type<Scalar> type(module, name.c_str());\n\n  type.def(\n      pybind11::init([]() {\n        SE3Group<Scalar> ret;\n        ret.push_back({});\n        return ret;\n      }),\n      \" Default Constructor initializing a group containing 1 identity element\");\n  type.def(pybind11::init<const Sophus::SE3<Scalar>&>(), \"Copy constructor from single element\");\n\n  type.def_static(\"from_matrix\", [](const Eigen::Matrix<Scalar, 4, 4>& matrix) -> SE3Group<Scalar> {\n    return SE3Group<Scalar>{Sophus::SE3<Scalar>::fitToSE3(matrix)};\n  });\n  type.def_static(\"from_matrix\", [](const pybind11::array_t<Scalar>& matrices) -> SE3Group<Scalar> {\n    if (matrices.ndim() != 3 || matrices.shape(1) != 4 || matrices.shape(2) != 4) {\n      throw std::runtime_error(\n          fmt::format(\"The size of the input matrix should be Nx4x4 dimensions.\"));\n    }\n\n    SE3Group<Scalar> output;\n    output.reserve(matrices.shape(0));\n    for (size_t i = 0; i < matrices.shape(0); ++i) {\n      Eigen::Map<const Eigen::Matrix<Scalar, 4, 4, Eigen::RowMajor>> mat(matrices.data(i, 0, 0));\n      output.push_back(Sophus::SE3<Scalar>::fitToSE3(mat));\n    }\n    return output;\n  });\n\n  type.def_static(\n      
\"from_matrix3x4\", [](const Eigen::Matrix<Scalar, 3, 4>& matrix) -> SE3Group<Scalar> {\n        return SE3Group<Scalar>{Sophus::SE3<Scalar>(\n            Sophus::SO3<Scalar>::fitToSO3(matrix.template block<3, 3>(0, 0)),\n            matrix.template block<3, 1>(0, 3))};\n      });\n  type.def_static(\n      \"from_matrix3x4\", [](const pybind11::array_t<Scalar>& matrices) -> SE3Group<Scalar> {\n        if (matrices.ndim() != 3 || matrices.shape(1) != 3 || matrices.shape(2) != 4) {\n          throw std::runtime_error(\n              fmt::format(\"The size of the input matrix should be Nx3x4 dimensions.\"));\n        }\n\n        SE3Group<Scalar> output;\n        output.reserve(matrices.shape(0));\n        for (size_t i = 0; i < matrices.shape(0); ++i) {\n          Eigen::Map<const Eigen::Matrix<Scalar, 3, 4, Eigen::RowMajor>> mat(\n              matrices.data(i, 0, 0));\n          output.push_back(Sophus::SE3<Scalar>(\n              Sophus::SO3<Scalar>::fitToSO3(mat.template block<3, 3>(0, 0)),\n              mat.template block<3, 1>(0, 3)));\n        }\n        return output;\n      });\n\n  type.def_static(\n      \"exp\",\n      [](const Eigen::Matrix<Scalar, 3, 1>& translational_part,\n         const Eigen::Matrix<Scalar, 3, 1>& rotvec) -> SE3Group<Scalar> {\n        auto tangentVec = Eigen::Matrix<Scalar, 6, 1>{\n            translational_part[0],\n            translational_part[1],\n            translational_part[2],\n            rotvec[0],\n            rotvec[1],\n            rotvec[2]};\n        return {Sophus::SE3<Scalar>::exp(tangentVec)};\n      },\n      \"Create SE3 from a translational_part (3x1) and a rotation vector (3x1) of magnitude in rad. 
NOTE: translational_part is not translation vector in SE3\");\n  type.def_static(\n      \"exp\",\n      [](const Eigen::Matrix<Scalar, Eigen::Dynamic, 3>& translational_parts,\n         const Eigen::Matrix<Scalar, Eigen::Dynamic, 3>& rotvecs) -> SE3Group<Scalar> {\n        SE3Group<Scalar> output;\n        output.reserve(rotvecs.rows());\n        for (size_t i = 0; i < rotvecs.rows(); ++i) {\n          auto tangentVec = Eigen::Matrix<Scalar, 6, 1>{\n              translational_parts(i, 0),\n              translational_parts(i, 1),\n              translational_parts(i, 2),\n              rotvecs(i, 0),\n              rotvecs(i, 1),\n              rotvecs(i, 2)};\n          output.emplace_back(Sophus::SE3<Scalar>::exp(tangentVec));\n        }\n        return output;\n      },\n      \"Create a set of SE3 from translational_parts (Nx3) and rotation vectors (Nx3) of magnitude in rad. NOTE: translational_part is not translation vector in SE3\");\n\n  type.def(\n      \"from_quat_and_translation\",\n      [](const Scalar& w,\n         const Eigen::Matrix<Scalar, 3, 1>& xyz,\n         const Eigen::Matrix<Scalar, 3, 1>& translation) -> SE3Group<Scalar> {\n        Eigen::Quaternion<Scalar> quat(w, xyz[0], xyz[1], xyz[2]);\n        quat.normalize();\n        return {Sophus::SE3<Scalar>(quat, translation)};\n      },\n      \"Create SE3 from a quaternion as w, [x, y, z], and translation vector\");\n\n  type.def(\n      \"from_quat_and_translation\",\n      [](const std::vector<Scalar>& x_vec,\n         const Eigen::Matrix<Scalar, -1, 3>& xyz_vec,\n         const Eigen::Matrix<Scalar, -1, 3>& translations) -> SE3Group<Scalar> {\n        if (x_vec.size() != xyz_vec.rows() || x_vec.size() != translations.rows()) {\n          throw std::domain_error(fmt::format(\n              \"Size of the input variables are not the same: x_vec = {}, xyz_vec = {}, translation = {}\",\n              x_vec.size(),\n              xyz_vec.rows(),\n              translations.rows()));\n        }\n  
      SE3Group<Scalar> output;\n        output.reserve(x_vec.size());\n        for (size_t i = 0; i < x_vec.size(); ++i) {\n          Eigen::Quaternion<Scalar> quat(x_vec[i], xyz_vec(i, 0), xyz_vec(i, 1), xyz_vec(i, 2));\n          quat.normalize();\n          output.push_back(Sophus::SE3<Scalar>(quat, translations.row(i)));\n        }\n        return output;\n      },\n      \"Create SE3 from a list of quaternion as w_vec: Nx1, xyz_vec: Nx3, and a list of translation vectors: Nx3\");\n\n  type.def(\n      \"to_matrix3x4\",\n      [](const SE3Group<Scalar>& transformations) -> pybind11::array_t<Scalar> {\n        pybind11::array_t<Scalar> result(\n            std::vector<long>{long(transformations.size()), 3, 4},\n            std::vector<long>{12 * sizeof(Scalar), 4 * sizeof(Scalar), sizeof(Scalar)});\n\n        for (size_t i = 0; i < transformations.size(); i++) {\n          Eigen::Map<Eigen::Matrix<Scalar, 3, 4, Eigen::RowMajor>> map(\n              result.mutable_data(i, 0, 0));\n          map = transformations[i].matrix3x4();\n        }\n        return result.squeeze();\n      },\n      \"Convert an array of SE3 into an array of transformation matrices of size 3x4\");\n\n  type.def(\n      \"to_matrix\",\n      [](const SE3Group<Scalar>& transformations) -> pybind11::array_t<Scalar> {\n        pybind11::array_t<Scalar> result(\n            std::vector<long>{long(transformations.size()), 4, 4},\n            std::vector<long>{16 * sizeof(Scalar), 4 * sizeof(Scalar), sizeof(Scalar)});\n\n        for (size_t i = 0; i < transformations.size(); i++) {\n          Eigen::Map<Eigen::Matrix<Scalar, 4, 4, Eigen::RowMajor>> map(\n              result.mutable_data(i, 0, 0));\n          map = transformations[i].matrix();\n        }\n        return result.squeeze();\n      },\n      \"Convert an array of SE3 into an array of transformation matrices of size 4x4\");\n\n  type.def(\n      \"to_quat_and_translation\",\n      [](const SE3Group<Scalar>& transformations) -> 
Eigen::Matrix<Scalar, Eigen::Dynamic, 7> {\n        auto output = Eigen::Matrix<Scalar, Eigen::Dynamic, 7>(transformations.size(), 7);\n        for (size_t i = 0; i < transformations.size(); ++i) {\n          output.row(i) = Eigen::Matrix<Scalar, 1, 7>{\n              transformations[i].so3().unit_quaternion().w(),\n              transformations[i].so3().unit_quaternion().x(),\n              transformations[i].so3().unit_quaternion().y(),\n              transformations[i].so3().unit_quaternion().z(),\n              transformations[i].translation()[0],\n              transformations[i].translation()[1],\n              transformations[i].translation()[2]};\n        }\n        return output;\n      },\n      \"Return quaternion and translation as Nx7 vectors of [quat (w, x, y, z), translation]\");\n\n  type.def(\n      \"log\",\n      [](const SE3Group<Scalar>& transformations) -> Eigen::Matrix<Scalar, Eigen::Dynamic, 6> {\n        auto output = Eigen::Matrix<Scalar, Eigen::Dynamic, 6>(transformations.size(), 6);\n        for (size_t i = 0; i < transformations.size(); ++i) {\n          output.row(i) = transformations[i].log();\n        }\n        return output;\n      },\n      \"Return the log of SE3 as [translational_part, rotation_vector] of dimension Nx6.\");\n\n  type.def(\n      \"inverse\",\n      [](const SE3Group<Scalar>& transformations) -> SE3Group<Scalar> {\n        SE3Group<Scalar> results;\n        results.reserve(transformations.size());\n        for (size_t i = 0; i < transformations.size(); ++i) {\n          results.push_back(transformations[i].inverse());\n        }\n        return results;\n      },\n      \"Compute the inverse of the transformations.\");\n\n  type.def(\n      \"rotation\",\n      [](const SE3Group<Scalar>& transformations) -> SO3Group<Scalar> {\n        SO3Group<Scalar> rotations;\n        rotations.reserve(transformations.size());\n        for (size_t i = 0; i < transformations.size(); ++i) {\n          
rotations.push_back(transformations[i].so3());\n        }\n        return rotations;\n      },\n      \"Get the rotation component of the transformation.\");\n  type.def(\n      \"translation\",\n      [](const SE3Group<Scalar>& transformations) -> Eigen::Matrix<Scalar, Eigen::Dynamic, 3> {\n        auto translations = Eigen::Matrix<Scalar, Eigen::Dynamic, 3>(transformations.size(), 3);\n        for (size_t i = 0; i < transformations.size(); ++i) {\n          translations.row(i) = transformations[i].translation();\n        }\n        return translations;\n      },\n      \"Get the translation component of the transformation.\");\n\n  type.def(\"__copy__\", [](const SE3Group<Scalar>& transformations) -> SE3Group<Scalar> {\n    return transformations; // copy is done with the std::vector copy constructor\n  });\n  type.def(\"__repr__\", [](const SE3Group<Scalar>& transformation) -> std::string {\n    std::stringstream stream;\n    stream << fmt::format(\n        \"SE3 (quaternion(w,x,y,z), translation (x,y,z)) (x{})\\n[\", transformation.size());\n    for (const auto& se3 : transformation) {\n      stream << fmt::format(\n          \"[{}, {}, {}, {}, {}, {}, {}],\\n\",\n          se3.unit_quaternion().w(),\n          se3.unit_quaternion().x(),\n          se3.unit_quaternion().y(),\n          se3.unit_quaternion().z(),\n          se3.translation().x(),\n          se3.translation().y(),\n          se3.translation().z());\n    }\n    // replace last to previous characters\n    stream.seekp(-2, stream.cur);\n    stream << \"]\";\n    return stream.str();\n  });\n  type.def(\n      \"__len__\", [](const SE3Group<Scalar>& transformations) { return transformations.size(); });\n\n  type.def(\"__str__\", [](const SE3Group<Scalar>& transformations) -> std::string {\n    return fmt::format(\"Sophus.SE3 (x{})\", transformations.size());\n  });\n\n  type.def(\n      \"__matmul__\",\n      [](const SE3Group<Scalar>& transformations,\n         const SE3Group<Scalar>& other) -> 
SE3Group<Scalar> {\n        if (other.size() == 0 || transformations.size() == 0) {\n          throw std::domain_error(\"Both operand should have size greater than 0\");\n        }\n        SE3Group<Scalar> result;\n        if (other.size() == 1) {\n          result.reserve(transformations.size());\n          for (size_t i = 0; i < transformations.size(); ++i) {\n            result.push_back(transformations[i] * other[0]);\n          }\n        } else if (transformations.size() == 1) {\n          result.reserve(other.size());\n          for (size_t i = 0; i < other.size(); ++i) {\n            result.push_back(transformations[0] * other[i]);\n          }\n        } else {\n          throw std::domain_error(\n              \"Only allows transformations of size 1 to N (or N to 1) multiplication.\");\n        }\n        return result;\n      });\n\n  type.def(\"__imatmul__\", [](SE3Group<Scalar>& transformations, const SE3Group<Scalar>& other) {\n    if (transformations.size() == 0 || other.size() == 0) {\n      throw std::domain_error(\"Both operand should have size greater than 0\");\n    }\n\n    if (transformations.size() == 1) {\n      for (size_t i = 0; i < other.size(); ++i) {\n        transformations[0] = transformations[0] * other[i];\n      }\n    } else if (other.size() == 1) {\n      for (size_t i = 0; i < transformations.size(); ++i) {\n        transformations[i] = transformations[i] * other[0];\n      }\n    } else {\n      throw std::domain_error(\n          \"Only allows transformations of size 1 to N (or N to 1) multiplication.\");\n    }\n\n    return transformations;\n  });\n\n  type.def(\n      \"__matmul__\",\n      [](const SE3Group<Scalar>& transformations,\n         const Eigen::Matrix<Scalar, 3, Eigen::Dynamic>& matrix)\n          -> Eigen::Matrix<Scalar, 3, Eigen::Dynamic> {\n        if (matrix.cols() == 0 || transformations.size() == 0) {\n          throw std::domain_error(\"Both operand should have size greater than 0\");\n        }\n        
if (transformations.size() != 1) {\n          throw std::domain_error(\"Number of transformations must be 1.\");\n        }\n\n        Eigen::Matrix<Scalar, 3, Eigen::Dynamic> result(3, matrix.cols());\n        for (size_t i = 0; i < matrix.cols(); ++i) {\n          result.col(i) = transformations[0] * matrix.col(i);\n        }\n        return result;\n      });\n\n  type.def(\n      \"__getitem__\",\n      [](const SE3Group<Scalar>& se3Vec,\n         pybind11::object index_or_slice_or_list) -> SE3Group<Scalar> {\n        if (pybind11::isinstance<pybind11::slice>(index_or_slice_or_list)) {\n          pybind11::slice slice = index_or_slice_or_list.cast<pybind11::slice>();\n          size_t start, stop, step, slicelength;\n          if (slice.compute(se3Vec.size(), &start, &stop, &step, &slicelength)) {\n            SE3Group<Scalar> result;\n            for (size_t i = 0; i < slicelength; ++i) {\n              result.push_back(se3Vec[start + i * step]);\n            }\n            return result;\n          }\n        } else if (pybind11::isinstance<pybind11::list>(index_or_slice_or_list)) {\n          pybind11::list index_list = index_or_slice_or_list.cast<pybind11::list>();\n          SE3Group<Scalar> result;\n          for (const auto index : index_list) {\n            const auto intIndex = pybind11::cast<int>(index);\n            if (intIndex < 0 || intIndex >= se3Vec.size()) {\n              throw std::out_of_range(\"Index out of range\");\n            }\n            result.push_back(se3Vec[intIndex]);\n          }\n          return result;\n        } else if (pybind11::isinstance<pybind11::int_>(index_or_slice_or_list)) {\n          int index = index_or_slice_or_list.cast<int>();\n          if (index < 0 || index >= se3Vec.size()) {\n            throw std::out_of_range(\"Index out of range\");\n          }\n          return se3Vec[index];\n        }\n        throw pybind11::type_error(\"Invalid index or list or slice\");\n      });\n  // slice version\n  
type.def(\n      \"__setitem__\",\n      [](SE3Group<Scalar>& se3Vec,\n         pybind11::object index_or_slice_or_list,\n         const SE3Group<Scalar>& value) {\n        if (pybind11::isinstance<pybind11::slice>(index_or_slice_or_list)) {\n          pybind11::slice slice(index_or_slice_or_list);\n          size_t start, stop, step, slicelength;\n          if (slice.compute(se3Vec.size(), &start, &stop, &step, &slicelength)) {\n            if (value.size() == slicelength) {\n              for (size_t i = 0; i < slicelength; ++i) {\n                se3Vec[start + i * step] = value[i];\n              }\n            } else if (value.size() == 1) {\n              for (size_t i = 0; i < slicelength; ++i) {\n                se3Vec[start + i * step] = value[0];\n              }\n            } else {\n              throw std::out_of_range(\n                  \"The value to assigned should be of size 1 or equal to the size of the slide to be assigned.\");\n            }\n          } else {\n            throw std::out_of_range(\"The slide is invalid.\");\n          }\n        } else if (pybind11::isinstance<pybind11::list>(index_or_slice_or_list)) {\n          pybind11::list list(index_or_slice_or_list);\n          if (value.size() == list.size()) {\n            for (size_t i = 0; i < list.size(); ++i) {\n              se3Vec[i] = value[i];\n            }\n          } else if (value.size() == 1) {\n            for (size_t i = 0; i < list.size(); ++i) {\n              se3Vec[i] = value[0];\n            }\n          } else {\n            throw std::out_of_range(\n                \"The value to assigned should be of size 1 or equal to the size of the list to be assigned.\");\n          }\n        } else if (pybind11::isinstance<pybind11::int_>(index_or_slice_or_list)) {\n          int index = index_or_slice_or_list.cast<int>();\n          if (index < 0 || index >= se3Vec.size()) {\n            throw std::out_of_range(\"Index out of range\");\n          }\n          if 
(value.size() != 1) {\n            throw std::out_of_range(\"The value to assigned should be of size 1.\");\n          }\n          se3Vec[index] = value[0];\n        } else {\n          throw pybind11::type_error(\"Invalid index or list or slice\");\n        }\n      });\n  return type;\n}\n\nconstexpr int kMaxAverageIteration = 10000;\ntemplate <typename Scalar>\nvoid exportSE3Average(pybind11::module& module) {\n  module.def(\n      \"iterativeMean\",\n      [](const SE3Group<Scalar>& transformations) -> SE3Group<Scalar> {\n        return *(Sophus::iterativeMean<SE3Group<Scalar>>(transformations, kMaxAverageIteration));\n      },\n      \"Compute the iterative mean of a sequence.\");\n}\n\ntemplate <typename Scalar>\nvoid exportSE3Interpolate(pybind11::module& module) {\n  module.def(\n      \"interpolate\",\n      [](const SE3Group<Scalar>& a, const SE3Group<Scalar>& b, double t) -> Sophus::SE3<Scalar> {\n        if (a.size() != b.size() && a.size() != 1) {\n          throw std::domain_error(\"Should have SE3 of size 1.\");\n        }\n        return Sophus::interpolate<Sophus::SE3<double>>(a[0], b[0], t);\n      },\n      \"Interpolate two SE3s of size 1.\");\n}\n\n} // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/SO3PyBind.h",
    "content": "#pragma once\n\n#include <fmt/format.h>\n#include <sophus/common.hpp>\n#include <sophus/so3.hpp>\n\n#include <pybind11/eigen.h>\n#include <pybind11/numpy.h>\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n\nnamespace Sophus {\n\n// In python, we choose to export our Sophus::SO3 as a vector of SO3 objects by binding the cpp\n// object `SO3Group` defined below. This is because numerical code in python tends to work with\n// array of values to get efficient program. This approach is inspired by\n// scipy.spatial.transform.Rotation.\ntemplate <typename Scalar>\nclass SO3Group : public std::vector<Sophus::SO3<Scalar>> {\n public:\n  // The empty constructor is not accessible from python.\n  // Python always create at least one identity element (like c++ sophus)\n  SO3Group() = default;\n  // implicit copy conversion from a Sophus::SO3 value\n  /* implicit */ SO3Group(const Sophus::SO3<Scalar>& in) {\n    this->push_back(in);\n  }\n};\n} // namespace Sophus\n\n// The following caster makes so that, even if we wrap SO3Group in python, those can be\n// implicitly converted to the c++ Sophus::SO3 object at boundaries between languages.\n// This is so we can pass python SO3 object to c++ function as if they were regular 1-element\n// Sophus::SO3 object. This simplifies binding the rest of c++ code. 
This implicit cast fails if the\n// python object is not a 1-element SO3 object.\n// NOTE: this caster makes a copy, so can't not be used for passing a reference of a SO3 element to\n// a c++ function.\nnamespace pybind11 {\nnamespace detail {\ntemplate <>\nstruct type_caster<Sophus::SO3<double>> {\n public:\n  PYBIND11_TYPE_CASTER(Sophus::SO3<double>, _(\"SO3\"));\n\n  // converting from python -> c++ type\n  bool load(handle src, bool /*convert*/) {\n    try {\n      Sophus::SO3Group<double>& ref = src.cast<Sophus::SO3Group<double>&>();\n      if (ref.size() != 1) {\n        throw std::domain_error(fmt::format(\n            \"A element of size 1 is required here. Input has {} elements.\", ref.size()));\n      }\n      value = ref[0];\n      return true;\n    } catch (const pybind11::cast_error&) {\n      return false; // Conversion failed\n    }\n  }\n\n  // converting from c++ -> python type\n  static handle cast(Sophus::SO3<double> src, return_value_policy policy, handle parent) {\n    return type_caster_base<Sophus::SO3Group<double>>::cast(Sophus::SO3Group<double>(src), policy, parent);\n  }\n};\n} // namespace detail\n} // namespace pybind11\n\nnamespace Sophus {\ntemplate <typename Scalar>\nusing PybindSO3Group = pybind11::class_<SO3Group<Scalar>>;\n\ntemplate <typename Scalar>\nPybindSO3Group<Scalar> exportSO3Group(pybind11::module& module, const std::string& name) {\n  PybindSO3Group<Scalar> type(module, name.c_str());\n  type.def(\n      pybind11::init([]() {\n        SO3Group<Scalar> ret;\n        ret.push_back({});\n        return ret;\n      }),\n      \" Default Constructor initializing a group containing 1 identity element\");\n  type.def(pybind11::init<const Sophus::SO3<Scalar>&>(), \"Copy constructor from single element\");\n\n  type.def_static(\n      \"exp\",\n      [](const Eigen::Matrix<Scalar, Eigen::Dynamic, 3>& rotvecs) -> SO3Group<Scalar> {\n        SO3Group<Scalar> output;\n        output.reserve(rotvecs.rows());\n        for (size_t i = 
0; i < rotvecs.rows(); ++i) {\n          output.emplace_back(Sophus::SO3<Scalar>::exp(rotvecs.row(i)));\n        }\n        return output;\n      },\n      \"Create rotations from rotations vectors of size Nx3 in rad\");\n\n  type.def_static(\n      \"from_quat\",\n      [](const Scalar& w, const Eigen::Matrix<Scalar, 3, 1>& xyz) -> SO3Group<Scalar> {\n        Eigen::Quaternion<Scalar> quat(w, xyz[0], xyz[1], xyz[2]);\n        quat.normalize();\n        return {Sophus::SO3<Scalar>(quat)};\n      },\n      \"Create a rotation from a quaternion as w, [x, y, z]\");\n\n  type.def_static(\n      \"from_quat\",\n      [](const std::vector<Scalar>& x_vec,\n         const Eigen::Matrix<Scalar, -1, 3>& xyz_vec) -> SO3Group<Scalar> {\n        if (x_vec.size() != xyz_vec.rows()) {\n          throw std::runtime_error(fmt::format(\n              \"Size of the real and imagery part is not the same: {} {}\",\n              x_vec.size(),\n              xyz_vec.rows()));\n        }\n        SO3Group<Scalar> output;\n        output.reserve(x_vec.size());\n        for (size_t i = 0; i < x_vec.size(); ++i) {\n          Eigen::Quaternion<Scalar> quat(x_vec[i], xyz_vec(i, 0), xyz_vec(i, 1), xyz_vec(i, 2));\n          quat.normalize();\n          output.push_back(Sophus::SO3<Scalar>(quat));\n        }\n        return output;\n      },\n      \"Create rotations from a list of quaternions as w_vec: Nx1, xyz_vec: Nx3\");\n\n  type.def_static(\"from_matrix\", [](const Eigen::Matrix<Scalar, 3, 3>& matrix) -> SO3Group<Scalar> {\n    return Sophus::SO3<Scalar>::fitToSO3(matrix);\n  });\n  type.def_static(\"from_matrix\", [](const pybind11::array_t<Scalar>& matrices) -> SO3Group<Scalar> {\n    if (matrices.ndim() != 3 || matrices.shape(1) != 3 || matrices.shape(2) != 3) {\n      throw std::runtime_error(\n          fmt::format(\"The size of the input matrix should be Nx3x3 dimensions.\"));\n    }\n\n    SO3Group<Scalar> output;\n    output.reserve(matrices.shape(0));\n    for (size_t i = 0; i < 
matrices.shape(0); ++i) {\n      Eigen::Map<const Eigen::Matrix<Scalar, 3, 3, Eigen::RowMajor>> mat(matrices.data(i, 0, 0));\n      output.push_back(Sophus::SO3<Scalar>::fitToSO3(mat));\n    }\n    return output;\n  });\n\n  type.def(\n      \"to_quat\",\n      [](const SO3Group<Scalar>& rotations) -> Eigen::Matrix<Scalar, Eigen::Dynamic, 4> {\n        auto quaternions = Eigen::Matrix<Scalar, Eigen::Dynamic, 4>(rotations.size(), 4);\n        for (size_t i = 0; i < rotations.size(); ++i) {\n          quaternions.row(i) = Eigen::Matrix<Scalar, 1, 4>{\n              rotations[i].unit_quaternion().w(),\n              rotations[i].unit_quaternion().x(),\n              rotations[i].unit_quaternion().y(),\n              rotations[i].unit_quaternion().z(),\n          };\n        }\n        return quaternions;\n      },\n      \"Return quaternion as Nx4 vectors with the order [w x y z].\");\n\n  type.def(\n      \"to_matrix\",\n      [](const SO3Group<Scalar>& rotations) -> pybind11::array_t<Scalar> {\n        pybind11::array_t<Scalar> result(\n            std::vector<long>{long(rotations.size()), 3, 3},\n            std::vector<long>{9 * sizeof(Scalar), 3 * sizeof(Scalar), sizeof(Scalar)});\n\n        for (size_t i = 0; i < rotations.size(); i++) {\n          Eigen::Map<Eigen::Matrix<Scalar, 3, 3, Eigen::RowMajor>> map(\n              result.mutable_data(i, 0, 0));\n          map = rotations[i].matrix();\n        }\n        return result.squeeze();\n      },\n      \"Convert an array of SO3 into an array of rotation matrices\");\n\n  type.def(\n      \"log\",\n      [](const SO3Group<Scalar>& rotations) -> Eigen::Matrix<Scalar, Eigen::Dynamic, 3> {\n        auto output = Eigen::Matrix<Scalar, Eigen::Dynamic, 3>(rotations.size(), 3);\n        for (size_t i = 0; i < rotations.size(); ++i) {\n          output.row(i) = rotations[i].log();\n        }\n        return output;\n      },\n      \"Return the rotations vector representation by taking the log operator.\");\n\n  
type.def(\n      \"inverse\",\n      [](const SO3Group<Scalar>& rotations) -> SO3Group<Scalar> {\n        SO3Group<Scalar> results;\n        results.reserve(rotations.size());\n        for (size_t i = 0; i < rotations.size(); ++i) {\n          results.push_back(rotations[i].inverse());\n        }\n        return results;\n      },\n      \"Compute the inverse of the rotations.\");\n\n  type.def(\"__copy__\", [](const SO3Group<Scalar>& rotations) -> SO3Group<Scalar> {\n    return rotations; // copy is done with the std::vector copy constructor\n  });\n  type.def(\"__str__\", [](const SO3Group<Scalar>& rotations) -> std::string {\n    return fmt::format(\"Sophus.SO3 (x{})\", rotations.size());\n  });\n  type.def(\"__len__\", [](const SO3Group<Scalar>& rotations) { return rotations.size(); });\n  type.def(\"__repr__\", [](const SO3Group<Scalar>& rotations) -> std::string {\n    std::stringstream stream;\n    stream << fmt::format(\"SO3 (wxyz) (x{})\\n[\", rotations.size());\n    for (const auto& r : rotations) {\n      stream << fmt::format(\n          \"[{}, {}, {}, {}],\\n\",\n          r.unit_quaternion().w(),\n          r.unit_quaternion().x(),\n          r.unit_quaternion().y(),\n          r.unit_quaternion().z());\n    }\n    // replace last to previous characters\n    stream.seekp(-2, stream.cur);\n    stream << \"]\";\n    return stream.str();\n  });\n\n  type.def(\n      \"__matmul__\",\n      [](const SO3Group<Scalar>& rotations, const SO3Group<Scalar>& other) -> SO3Group<Scalar> {\n        if (other.size() == 0 || rotations.size() == 0) {\n          throw std::domain_error(\"Both operand should have size greater than 0\");\n        }\n        SO3Group<Scalar> result;\n        if (other.size() == 1) {\n          result.reserve(rotations.size());\n          for (size_t i = 0; i < rotations.size(); ++i) {\n            result.push_back(rotations[i] * other[0]);\n          }\n        } else if (rotations.size() == 1) {\n          result.reserve(other.size());\n  
        for (size_t i = 0; i < other.size(); ++i) {\n            result.push_back(rotations[0] * other[i]);\n          }\n        } else {\n          throw std::domain_error(\n              \"Only allows rotations of size 1 to N (or N to 1) multiplication.\");\n        }\n        return result;\n      });\n  type.def(\"__imatmul__\", [](SO3Group<Scalar>& rotations, const SO3Group<Scalar>& other) {\n    if (rotations.size() == 0 || other.size() == 0) {\n      throw std::domain_error(\"Both operand should have size greater than 0\");\n    }\n\n    if (rotations.size() == 1) {\n      for (size_t i = 0; i < other.size(); ++i) {\n        rotations[0] = rotations[0] * other[i];\n      }\n    } else if (other.size() == 1) {\n      for (size_t i = 0; i < rotations.size(); ++i) {\n        rotations[i] = rotations[i] * other[0];\n      }\n    } else {\n      throw std::domain_error(\"Only allows rotations of size 1 to N (or N to 1) multiplication.\");\n    }\n\n    return rotations;\n  });\n\n  type.def(\n      \"__matmul__\",\n      [](const SO3Group<Scalar>& rotations, const Eigen::Matrix<Scalar, 3, Eigen::Dynamic>& matrix)\n          -> Eigen::Matrix<Scalar, 3, Eigen::Dynamic> {\n        if (matrix.cols() == 0 || rotations.size() == 0) {\n          throw std::domain_error(\"Both operand should have size greater than 0\");\n        }\n        if (rotations.size() != 1) {\n          throw std::domain_error(\"Number of rotations must be 1.\");\n        }\n\n        Eigen::Matrix<Scalar, 3, Eigen::Dynamic> result(3, matrix.cols());\n        for (size_t i = 0; i < matrix.cols(); ++i) {\n          result.col(i) = rotations[0] * matrix.col(i);\n        }\n        return result;\n      });\n\n  type.def(\n      \"__getitem__\",\n      [](const SO3Group<Scalar>& so3Vec,\n         pybind11::object index_or_slice_or_list) -> SO3Group<Scalar> {\n        if (pybind11::isinstance<pybind11::slice>(index_or_slice_or_list)) {\n          pybind11::slice slice = 
index_or_slice_or_list.cast<pybind11::slice>();\n          size_t start, stop, step, slicelength;\n          if (slice.compute(so3Vec.size(), &start, &stop, &step, &slicelength)) {\n            SO3Group<Scalar> result;\n            for (size_t i = 0; i < slicelength; ++i) {\n              result.push_back(so3Vec[start + i * step]);\n            }\n            return result;\n          }\n        } else if (pybind11::isinstance<pybind11::list>(index_or_slice_or_list)) {\n          pybind11::list index_list = index_or_slice_or_list.cast<pybind11::list>();\n          SO3Group<Scalar> result;\n          for (const auto index : index_list) {\n            const auto intIndex = pybind11::cast<int>(index);\n            if (intIndex < 0 || intIndex >= so3Vec.size()) {\n              throw std::out_of_range(\"Index out of range\");\n            }\n            result.push_back(so3Vec[intIndex]);\n          }\n          return result;\n        } else if (pybind11::isinstance<pybind11::int_>(index_or_slice_or_list)) {\n          int index = index_or_slice_or_list.cast<int>();\n          if (index < 0 || index >= so3Vec.size()) {\n            throw std::out_of_range(\"Index out of range\");\n          }\n          return so3Vec[index];\n        }\n        throw pybind11::type_error(\"Invalid index or list or slice\");\n      });\n  // slice version\n  type.def(\n      \"__setitem__\",\n      [](SO3Group<Scalar>& so3Vec,\n         pybind11::object index_or_slice_or_list,\n         const SO3Group<Scalar>& value) {\n        if (pybind11::isinstance<pybind11::slice>(index_or_slice_or_list)) {\n          pybind11::slice slice(index_or_slice_or_list);\n          size_t start, stop, step, slicelength;\n          if (slice.compute(so3Vec.size(), &start, &stop, &step, &slicelength)) {\n            if (value.size() == slicelength) {\n              for (size_t i = 0; i < slicelength; ++i) {\n                so3Vec[start + i * step] = value[i];\n              }\n            } else if 
(value.size() == 1) {\n              for (size_t i = 0; i < slicelength; ++i) {\n                so3Vec[start + i * step] = value[0];\n              }\n            } else {\n              throw std::out_of_range(\n                  \"The value to assigned should be of size 1 or equal to the size of the slide to be assigned.\");\n            }\n          } else {\n            throw std::out_of_range(\"The slide is invalid.\");\n          }\n        } else if (pybind11::isinstance<pybind11::list>(index_or_slice_or_list)) {\n          pybind11::list list(index_or_slice_or_list);\n          if (value.size() == list.size()) {\n            for (size_t i = 0; i < list.size(); ++i) {\n              so3Vec[i] = value[i];\n            }\n          } else if (value.size() == 1) {\n            for (size_t i = 0; i < list.size(); ++i) {\n              so3Vec[i] = value[0];\n            }\n          } else {\n            throw std::out_of_range(\n                \"The value to assigned should be of size 1 or equal to the size of the list to be assigned.\");\n          }\n        } else if (pybind11::isinstance<pybind11::int_>(index_or_slice_or_list)) {\n          int index = index_or_slice_or_list.cast<int>();\n          if (index < 0 || index >= so3Vec.size()) {\n            throw std::out_of_range(\"Index out of range\");\n          }\n          if (value.size() != 1) {\n            throw std::out_of_range(\n                \"The value to assigned should be of size 1 or equal to the size of the slide to be assigned.\");\n          }\n          so3Vec[index] = value[0];\n        } else {\n          throw pybind11::type_error(\"Invalid index or list or slice\");\n        }\n      });\n  return type;\n}\n\n} // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/SophusPyBind.h",
    "content": "#pragma once\n\n#include \"SE3PyBind.h\"\n#include \"SO3PyBind.h\"\n\n#include <sstream>\n// By default, Sophus calls std::abort when a pre-condition fails. Register a handler that raises\n// an exception so we don't crash the Python process.\n#ifdef SOPHUS_DISABLE_ENSURES\n#undef SOPHUS_DISABLE_ENSURES\n#endif\n#ifndef SOPHUS_ENABLE_ENSURE_HANDLER\n#define SOPHUS_ENABLE_ENSURE_HANDLER\n#endif\n\nnamespace Sophus {\ninline void\nensureFailed(char const* function, char const* file, int line, char const* description) {\n  std::stringstream message;\n  message << \"'SOPHUS_ENSURE' failed in function '\" << function << \"', on line '\" << line\n          << \"' of file '\" << file << \"'. Full description:\" << std::endl\n          << description;\n  throw std::domain_error(message.str());\n}\n\ninline void exportSophus(pybind11::module& module) {\n  exportSO3Group<double>(module, \"SO3\");\n  exportSE3Transformation<double>(module, \"SE3\");\n\n  exportSE3Average<double>(module);\n  exportSE3Interpolate<double>(module);\n}\n\n} // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/bindings.cpp",
    "content": "#include \"SophusPyBind.h\"\n\nPYBIND11_MODULE(sophus_pybind, m) { Sophus::exportSophus(m); }\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/examples/sophus_quickstart_tutorial.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"attachments\": {},\n   \"cell_type\": \"markdown\",\n   \"id\": \"85e0ca79-32de-4fb2-b92f-a9177d7d5652\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Sophus tutorials\\n\",\n    \"### Terminology\\n\",\n    \"* SO3: rotation group in 3D\\n\",\n    \"* SE3: rotation and translation group in 3D\\n\",\n    \"\\n\",\n    \"### Supported features\\n\",\n    \"#### SO3: Rotation group in 3D\\n\",\n    \"* Initialize from rotational vector, quaternion, rotation matrix\\n\",\n    \"* Convert to rotational vector, quaternion, rotation matrix\\n\",\n    \"* Multiplication with SO3 or 3D points\\n\",\n    \"* Operator [] for setting/getting items with index or slices\\n\",\n    \"* Inverse, copy, print, and len\\n\",\n    \"* Function vectorization\\n\",\n    \"\\n\",\n    \"#### SE3\\n\",\n    \"* Initialize from twist vector (rotation vector and translational vector), quaternion and translation, transformation matrix\\n\",\n    \"* Convert to twist vector, quaternion and translation, transformation matrix\\n\",\n    \"* Multiplication with SE3 or 3D points\\n\",\n    \"* Operator [] for setting/getting items with index or slices\\n\",\n    \"* Function vectorization\\n\",\n    \"* Inverse, copy, print, and len\\n\",\n    \"* Interpolate between two SE3\\n\",\n    \"* Iterative mean of a group of SE3 \\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"50dc0645-59ca-48d1-bb45-0fd32b8c0057\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from sophus_pybind import interpolate, iterativeMean, SE3, SO3\\n\",\n    \"import numpy as np\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"09084939-7ee9-42fc-9cf1-41f4583c30dd\",\n   \"metadata\": {},\n   \"source\": [\n    \"# SO3: Rotation group in 3D\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"00f04460-2843-4468-9a76-6a398a546c3c\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 
From/to rotation vector\\n\",\n    \"* rotation_vector: 3x1 or Nx3\\n\",\n    \"* The magnitude of the vector is the rotation angle in rad, the rotation direction is the norm of the rotation_vector\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"c4a1e41a-eec7-4125-8432-6c0fe854c963\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"rotation_vector = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 1]])\\n\",\n    \"so3vec_from_rotvec = SO3.exp(rotation_vector)\\n\",\n    \"print(f\\\"Initialize from rotation vector {rotation_vector}: \\\\n resulting log: \\\\n {so3vec_from_rotvec.log()}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"62e11068-bc23-4096-9e81-fc2624f28701\",\n   \"metadata\": {},\n   \"source\": [\n    \"### From/to quaternion\\n\",\n    \"* Quaternion in put is represented as w, xyz\\n\",\n    \"  * Vectorized input is w_vec: Nx1 and xyz_vec: Nx3\\n\",\n    \"* Quaternion is the **Hamilton** convention\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"290c007b-59a3-4457-89e4-19327be0c274\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"q_w = 1; q_xyz = np.array([0,0,0])\\n\",\n    \"so3_from_quaternion = SO3.from_quat(q_w, q_xyz)\\n\",\n    \"print(f\\\"Initialize from quaternion [{q_w}, {q_xyz}]: \\\\n resulting quaternion: \\\\n {so3_from_quaternion.to_quat()}\\\")\\n\",\n    \"\\n\",\n    \"# Vectorized initialization\\n\",\n    \"q_w_vec = np.array([1,0])\\n\",\n    \"q_xyz_vec = np.array([[0,0,0], [0.0, 1.0, 0.0]])\\n\",\n    \"so3vec_from_quaterion = SO3.from_quat(q_w_vec, q_xyz_vec)\\n\",\n    \"for i in range(0,len(q_w_vec)):\\n\",\n    \"    print(f\\\"Initialize from quaternion [{q_w_vec[i]}, {q_xyz_vec[i]}]: \\\\n resulting quaternion: \\\\n{so3vec_from_quaterion.to_quat()[i]}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"5a00f27e-882c-48a7-a1dc-56268d79df0a\",\n 
  \"metadata\": {},\n   \"source\": [\n    \"### From/to rotation matrix\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"20820ff6-2c75-4430-8727-02173e3a9e1c\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"rotation_matrix = [[-1,0,0], [0, 1,0 ], [0,0,-1]]\\n\",\n    \"so3_from_matrix = SO3.from_matrix(rotation_matrix)\\n\",\n    \"print(f\\\"Initialize from matrix {rotation_matrix}: \\\\n resulting matrix: \\\\n{so3_from_matrix.to_matrix()}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"33006cb6-5b3f-4f8b-8cb0-f2d5e1e56792\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Multiplication\\n\",\n    \"* Matrix multiplication uses **@** operator\\n\",\n    \"* Supports\\n\",\n    \"  * Multiple SO3 @ Single SO3\\n\",\n    \"  * Single SO3 @ Multiple SO3\\n\",\n    \"  * Single SO3 @ Multiple 3D points\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"4e75e570-c99d-47d3-8245-4b79df8aa36a\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"so3_result_1 = so3_from_matrix @ so3vec_from_rotvec[0]\\n\",\n    \"so3_result_2 = so3_from_matrix[0] @ so3vec_from_rotvec\\n\",\n    \"\\n\",\n    \"original_points = np.random.rand(3,10)\\n\",\n    \"points_from_a_single_so3 = so3vec_from_rotvec[0] @ original_points\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"7fab0a78-6a5e-4ab0-b9fb-905829b92b8c\",\n   \"metadata\": {},\n   \"source\": [\n    \"### MISC\\n\",\n    \"* Inverse\\n\",\n    \"* copy, str, len, repr\\n\",\n    \"* Operator [] to get/set from indices and slice\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"012c03c4-3ef6-4515-a08a-3e2b720d3590\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"so3_inverse = so3vec_from_rotvec.inverse()\\n\",\n    \"so3_slice = so3_inverse[1:3]\\n\",\n    \"\\n\",\n    \"import copy\\n\",\n    
\"print(f\\\"A copy of the SO3 is {copy.copy(so3_inverse)}\\\\n\\\")\\n\",\n    \"print(f\\\"The str of the SO3 is {so3_inverse}\\\\n\\\")\\n\",\n    \"print(f\\\"The len of the SO3 is {len(so3_inverse)}\\\\n\\\")\\n\",\n    \"print(f\\\"The repr of the SO3 is {repr(so3_inverse)}\\\\n\\\")\\n\",\n    \"print(f\\\"A SO3 slice is {so3_slice}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"14a0c1a3-9db5-4c17-98fb-9dd200c9d0a6\",\n   \"metadata\": {},\n   \"source\": [\n    \"# SE3: transformation group in 3D\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1075eb08-c604-44a3-aa89-ad055a11874d\",\n   \"metadata\": {},\n   \"source\": [\n    \"### From/to translational vector and rotation vector(twist)\\n\",\n    \"* SE3.exp(translational_part, rotation_vector)\\n\",\n    \"* [translational_part, rotation_vector] = SE3.log()\\n\",\n    \"  \\n\",\n    \"NOTE: translational_vector is not translation in SE3, it is off by a left jacobian matrix (ref: https://github.com/strasdat/Sophus/blob/main-1.x/sophus/se3.hpp#L859-L860)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"8383f639-cbc6-49ac-b715-141db0f5f9cf\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"translational_part = [[1,2,3],[15,2,9],[10,4,3],[3,2,3]]\\n\",\n    \"rotation_vector = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 1]]\\n\",\n    \"se3_from_rotvec_and_translational = SE3.exp(translational_part, rotation_vector)\\n\",\n    \"twist = se3_from_rotvec_and_translational.log()\\n\",\n    \"print(f\\\"Initialize from rotation vector {rotation_vector} and translational_part {translational_part}: \\\\n \\\\\\n\",\n    \"resulting twist: \\\\n{twist}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"297730dc-0665-44f3-9596-a4886f6fc37b\",\n   \"metadata\": {},\n   \"source\": [\n    \"### From/to quaternion and translation\\n\",\n    \"* SE3.from_quat_and_translation(w, xyz, 
translation)\\n\",\n    \"* [w, xyz, translation] = SE3.to_quat_and_translation()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3bf0df5e-718d-40b5-b0c0-7e55b8d549ea\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"translation_vector = [[1,2,3],[15,2,9],[10,4,3],[3,2,3]]\\n\",\n    \"w_vec = [1,0,0,1]\\n\",\n    \"xyz_vec = [[0,0,0], [0,1,0], [0,0,1], [0,0,0]]\\n\",\n    \"se3 = SE3.from_quat_and_translation(w_vec, xyz_vec, translation_vector)\\n\",\n    \"quaternion_and_translation = se3.to_quat_and_translation()\\n\",\n    \"print(f\\\"Initialize from quaternion w {w_vec} and xyz {xyz_vec} \\\\nwith translational_part {translational_part}: \\\\n\\\")\\n\",\n    \"print(f\\\"resulting quat and translation output : \\\\n{quaternion_and_translation}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"b6fc4e56-b744-44a1-a0a5-05149199ebfb\",\n   \"metadata\": {},\n   \"source\": [\n    \"### From/to transformation matrix of size 3x4 or 4x4\\n\",\n    \"* SE3.from_matrix(matrix4x4)\\n\",\n    \"* SE3.from_matrix3x4(matrix3x4)\\n\",\n    \"* matrix4x4 = SE3.to_matrix()\\n\",\n    \"* matrix3x4 = SE3.to_matrix3x4()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"f0fab990-2d27-46bb-a836-5c92c4c089a1\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"transformation_matrix = np.array([[[ 1.,  0.,  0.,  1.],\\n\",\n    \"        [ 0.,  1.,  0.,  2.],\\n\",\n    \"        [ 0.,  0.,  1.,  3.],\\n\",\n    \"        [ 0.,  0.,  0.,  1.]],\\n\",\n    \"\\n\",\n    \"       [[-1.,  0.,  0., 15.],\\n\",\n    \"        [ 0.,  1.,  0.,  2.],\\n\",\n    \"        [ 0.,  0., -1.,  9.],\\n\",\n    \"        [ 0.,  0.,  0.,  1.]],\\n\",\n    \"\\n\",\n    \"       [[-1.,  0.,  0., 10.],\\n\",\n    \"        [ 0., -1.,  0.,  4.],\\n\",\n    \"        [ 0.,  0.,  1.,  3.],\\n\",\n    \"        [ 0.,  0.,  0.,  1.]],\\n\",\n    \"\\n\",\n   
 \"       [[ 1.,  0.,  0.,  3.],\\n\",\n    \"        [ 0.,  1.,  0.,  2.],\\n\",\n    \"        [ 0.,  0.,  1.,  3.],\\n\",\n    \"        [ 0.,  0.,  0.,  1.]]])\\n\",\n    \"se3_from_matrix = SE3.from_matrix(transformation_matrix)\\n\",\n    \"se3_from_matrix3x4 = SE3.from_matrix3x4(transformation_matrix[:, 0:3,:])\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"7a8d25b7-177b-4eda-b6ae-f904a931f696\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Multiplication\\n\",\n    \"* Matrix multiplication uses **@** operator\\n\",\n    \"* Supports\\n\",\n    \"  * Multiple SE3 @ Single SE3\\n\",\n    \"  * Single SE3 @ Multiple SE3\\n\",\n    \"  * Single SE3 @ Multiple 3D points\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"930f1747-f773-464e-bcd4-d69b7579fdcf\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"se3_result_1 = se3_from_matrix @ se3_from_rotvec_and_translational[0]\\n\",\n    \"se3_result_2 = se3_from_matrix[0] @ se3_from_rotvec_and_translational\\n\",\n    \"\\n\",\n    \"original_points = np.random.rand(3,10)\\n\",\n    \"points_from_a_single_se3 = se3_from_matrix[0] @ original_points\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"c5fdcac8-499a-425d-8f06-f1a8ffd0b7fc\",\n   \"metadata\": {},\n   \"source\": [\n    \"### MISC\\n\",\n    \"* Inverse\\n\",\n    \"* copy, str, len, repr\\n\",\n    \"* Operator [] to get/set from indices and slice\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"306380b4-ec6c-4672-aa28-e396e9cd8eac\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"se3_inverse = se3_from_matrix.inverse()\\n\",\n    \"se3_slice = se3_inverse[0:2]\\n\",\n    \"\\n\",\n    \"print(f\\\"A copy of the SE3 is {copy.copy(se3_inverse)}\\\\n\\\")\\n\",\n    \"print(f\\\"The str of the SE3 is {se3_inverse}\\\\n\\\")\\n\",\n    \"print(f\\\"The len of the SE3 is 
{len(se3_inverse)}\\\\n\\\")\\n\",\n    \"print(f\\\"The repr of the SE3 is {repr(se3_inverse)}\\\\n\\\")\\n\",\n    \"print(f\\\"A SE3 slice is {se3_slice}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"d61cf4ed-8abe-4aec-a11a-377c4ae5d3c6\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": []\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.11.5\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind/tests/sophusPybindTests.py",
    "content": "import unittest\n\nimport numpy as np\nfrom sophus_pybind import interpolate, iterativeMean, SE3, SO3\n\n\nclass SophusPybindTest(unittest.TestCase):\n    def test_sophus(self) -> None:\n        rotvec = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 1]])\n        translational_part = np.array(\n            [[2.13, 4.19, 8.5], [0.87, 1.89, 19.45], [10000, 0.01, 0], [1, 1, 1]]\n        )\n        quat_and_translation = np.array(\n            [\n                [1, 0, 0, 0, 2.13, 4.19, 8.5],\n                [\n                    0.8775825618903728,\n                    0.479425538604203,\n                    0,\n                    0,\n                    0.87,\n                    -7.350739989577756,\n                    17.2354392964228,\n                ],\n                [\n                    0.8775825618903728,\n                    0,\n                    0.479425538604203,\n                    0,\n                    8414.709848078965,\n                    0.01,\n                    -4596.976941318602,\n                ],\n                [\n                    0.647859344852457,\n                    0.4398023303285789,\n                    0.4398023303285789,\n                    0.4398023303285789,\n                    1,\n                    1,\n                    1,\n                ],\n            ]\n        )\n\n        ## exp\n        so3vec = SO3.exp(rotvec)\n        se3vec = SE3.exp(translational_part, rotvec)\n\n        ## from quat and translation\n        se3vec_from_quat = SE3.from_quat_and_translation(\n            quat_and_translation[:, 0],\n            quat_and_translation[:, 1:4],\n            quat_and_translation[:, 4:7],\n        )\n\n        self.assertIsNone(np.testing.assert_equal(len(so3vec), 4))\n        self.assertIsNone(np.testing.assert_equal(len(se3vec), 4))\n        self.assertIsNone(np.testing.assert_equal(len(se3vec_from_quat), 4))\n\n        for i in range(0, 4):\n            ## log\n            
self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    so3vec[i].log()[0], se3vec[i].log()[0, 3:6]\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(so3vec[i].log()[0], rotvec[i])\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    se3vec[i].log()[0, 0:3], translational_part[i]\n                )\n            )\n\n            ## matrix\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    so3vec[i].to_matrix(), se3vec[i].to_matrix3x4()[:, 0:3]\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    so3vec[i].to_matrix(), se3vec[i].to_matrix()[0:3, 0:3]\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    SO3.from_matrix(so3vec[i].to_matrix()).to_matrix(),\n                    so3vec[i].to_matrix(),\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    SE3.from_matrix(se3vec[i].to_matrix()).to_matrix(),\n                    se3vec[i].to_matrix(),\n                )\n            )\n\n            ## rotation\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    so3vec[i].log(), se3vec[i].rotation().log()\n                )\n            )\n\n            ## translation\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    se3vec[i].translation()[0], se3vec[i].to_matrix()[0:3, 3]\n                )\n            )\n\n            ## quaternion\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    
se3vec_from_quat[i].to_quat_and_translation()[0],\n                    quat_and_translation[i],\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    se3vec_from_quat[i].to_quat_and_translation()[0, 0:4],\n                    so3vec[i].to_quat()[0],\n                )\n            )\n\n            ## inverse\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    se3vec[i].inverse().inverse().log(), se3vec[i].log()\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    so3vec[i].inverse().inverse().log(), so3vec[i].log()\n                )\n            )\n\n            # operator\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    np.linalg.norm((so3vec[i] @ so3vec[i].inverse()).log()), 0\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    (se3vec[i] @ se3vec[i].inverse()).log(),\n                    np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),\n                )\n            )\n            self.assertIsNone(\n                np.testing.assert_array_almost_equal(\n                    (se3vec[i] @ se3vec[i].inverse()).log(),\n                    np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),\n                )\n            )\n\n        # set item\n        so3vec[3] = SO3()\n        se3vec[3] = SE3()\n        self.assertIsNone(\n            np.testing.assert_array_almost_equal(so3vec[3].log()[0], SO3().log()[0])\n        )\n        self.assertIsNone(\n            np.testing.assert_array_almost_equal(se3vec[3].log()[0], SE3().log()[0])\n        )\n\n        # interpolate\n        inter0 = interpolate(se3vec[0], se3vec[1], 0)\n        inter1 = interpolate(se3vec[0], se3vec[1], 1)\n        
self.assertIsNone(\n            np.testing.assert_array_almost_equal(se3vec[0].log()[0], inter0.log()[0])\n        )\n        self.assertIsNone(\n            np.testing.assert_array_almost_equal(se3vec[1].log()[0], inter1.log()[0])\n        )\n\n        # average\n        inter_half = interpolate(se3vec[0], se3vec[1], 0.5)\n        average01 = iterativeMean(se3vec[0:2])\n        self.assertIsNone(\n            np.testing.assert_array_almost_equal(\n                average01.log()[0], inter_half.log()[0]\n            )\n        )\n"
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind-stubs/py.typed",
    "content": ""
  },
  {
    "path": "Thirdparty/Sophus/sophus_pybind-stubs/sophus_pybind.pyi",
    "content": "from __future__ import annotations\nimport numpy\nimport typing\n__all__ = ['SE3', 'SO3', 'interpolate', 'iterativeMean']\nclass SE3:\n    @staticmethod\n    @typing.overload\n    def exp(arg0: numpy.ndarray[numpy.float64[3, 1]], arg1: numpy.ndarray[numpy.float64[3, 1]]) -> SE3:\n        \"\"\"\n        Create SE3 from a translational_part (3x1) and a rotation vector (3x1) of magnitude in rad. NOTE: translational_part is not translation vector in SE3\n        \"\"\"\n    @staticmethod\n    @typing.overload\n    def exp(arg0: numpy.ndarray[numpy.float64[m, 3]], arg1: numpy.ndarray[numpy.float64[m, 3]]) -> SE3:\n        \"\"\"\n        Create a set of SE3 from translational_parts (Nx3) and rotation vectors (Nx3) of magnitude in rad. NOTE: translational_part is not translation vector in SE3\n        \"\"\"\n    @staticmethod\n    @typing.overload\n    def from_matrix(arg0: numpy.ndarray[numpy.float64[4, 4]]) -> SE3:\n        ...\n    @staticmethod\n    @typing.overload\n    def from_matrix(arg0: numpy.ndarray[numpy.float64]) -> SE3:\n        ...\n    @staticmethod\n    @typing.overload\n    def from_matrix3x4(arg0: numpy.ndarray[numpy.float64[3, 4]]) -> SE3:\n        ...\n    @staticmethod\n    @typing.overload\n    def from_matrix3x4(arg0: numpy.ndarray[numpy.float64]) -> SE3:\n        ...\n    def __copy__(self) -> SE3:\n        ...\n    def __getitem__(self, arg0: typing.Any) -> SE3:\n        ...\n    def __imatmul__(self, arg0: SE3) -> SE3:\n        ...\n    @typing.overload\n    def __init__(self) -> None:\n        \"\"\"\n         Default Constructor initializing a group containing 1 identity element\n        \"\"\"\n    @typing.overload\n    def __init__(self, arg0: SE3) -> None:\n        \"\"\"\n        Copy constructor from single element\n        \"\"\"\n    def __len__(self) -> int:\n        ...\n    @typing.overload\n    def __matmul__(self, arg0: SE3) -> SE3:\n        ...\n    @typing.overload\n    def __matmul__(self, arg0: 
numpy.ndarray[numpy.float64[3, n]]) -> numpy.ndarray[numpy.float64[3, n]]:\n        ...\n    def __repr__(self) -> str:\n        ...\n    def __setitem__(self, arg0: typing.Any, arg1: SE3) -> None:\n        ...\n    def __str__(self) -> str:\n        ...\n    @staticmethod\n    @typing.overload\n    def from_quat_and_translation(arg0: float, arg1: numpy.ndarray[numpy.float64[3, 1]], arg2: numpy.ndarray[numpy.float64[3, 1]]) -> SE3:\n        \"\"\"\n        Create SE3 from a quaternion as w, [x, y, z], and translation vector\n        \"\"\"\n    @staticmethod\n    @typing.overload\n    def from_quat_and_translation(arg0: list[float], arg1: numpy.ndarray[numpy.float64[m, 3]], arg2: numpy.ndarray[numpy.float64[m, 3]]) -> SE3:\n        \"\"\"\n        Create SE3 from a list of quaternion as w_vec: Nx1, xyz_vec: Nx3, and a list of translation vectors: Nx3\n        \"\"\"\n    def inverse(self) -> SE3:\n        \"\"\"\n        Compute the inverse of the transformations.\n        \"\"\"\n    def log(self) -> numpy.ndarray[numpy.float64[m, 6]]:\n        \"\"\"\n        Return the log of SE3 as [translational_part, rotation_vector] of dimension Nx6.\n        \"\"\"\n    def rotation(self) -> SO3:\n        \"\"\"\n        Get the rotation component of the transformation.\n        \"\"\"\n    def to_matrix(self) -> numpy.ndarray[numpy.float64]:\n        \"\"\"\n        Convert an array of SE3 into an array of transformation matrices of size 4x4\n        \"\"\"\n    def to_matrix3x4(self) -> numpy.ndarray[numpy.float64]:\n        \"\"\"\n        Convert an array of SE3 into an array of transformation matrices of size 3x4\n        \"\"\"\n    def to_quat_and_translation(self) -> numpy.ndarray[numpy.float64[m, 7]]:\n        \"\"\"\n        Return quaternion and translation as Nx7 vectors of [quat (w, x, y, z), translation]\n        \"\"\"\n    def translation(self) -> numpy.ndarray[numpy.float64[m, 3]]:\n        \"\"\"\n        Get the translation component of the transformation.\n        \"\"\"\nclass SO3:\n    @staticmethod\n    def 
exp(arg0: numpy.ndarray[numpy.float64[m, 3]]) -> SO3:\n        \"\"\"\n        Create rotations from rotations vectors of size Nx3 in rad\n        \"\"\"\n    @staticmethod\n    @typing.overload\n    def from_matrix(arg0: numpy.ndarray[numpy.float64[3, 3]]) -> SO3:\n        ...\n    @staticmethod\n    @typing.overload\n    def from_matrix(arg0: numpy.ndarray[numpy.float64]) -> SO3:\n        ...\n    @staticmethod\n    @typing.overload\n    def from_quat(arg0: float, arg1: numpy.ndarray[numpy.float64[3, 1]]) -> SO3:\n        \"\"\"\n        Create a rotation from a quaternion as w, [x, y, z]\n        \"\"\"\n    @staticmethod\n    @typing.overload\n    def from_quat(arg0: list[float], arg1: numpy.ndarray[numpy.float64[m, 3]]) -> SO3:\n        \"\"\"\n        Create rotations from a list of quaternions as w_vec: Nx1, xyz_vec: Nx3\n        \"\"\"\n    def __copy__(self) -> SO3:\n        ...\n    def __getitem__(self, arg0: typing.Any) -> SO3:\n        ...\n    def __imatmul__(self, arg0: SO3) -> SO3:\n        ...\n    @typing.overload\n    def __init__(self) -> None:\n        \"\"\"\n         Default Constructor initializing a group containing 1 identity element\n        \"\"\"\n    @typing.overload\n    def __init__(self, arg0: SO3) -> None:\n        \"\"\"\n        Copy constructor from single element\n        \"\"\"\n    def __len__(self) -> int:\n        ...\n    @typing.overload\n    def __matmul__(self, arg0: SO3) -> SO3:\n        ...\n    @typing.overload\n    def __matmul__(self, arg0: numpy.ndarray[numpy.float64[3, n]]) -> numpy.ndarray[numpy.float64[3, n]]:\n        ...\n    def __repr__(self) -> str:\n        ...\n    def __setitem__(self, arg0: typing.Any, arg1: SO3) -> None:\n        ...\n    def __str__(self) -> str:\n        ...\n    def inverse(self) -> SO3:\n        \"\"\"\n        Compute the inverse of the rotations.\n        \"\"\"\n    def log(self) -> numpy.ndarray[numpy.float64[m, 3]]:\n        \"\"\"\n        Return the rotations vector 
representation by taking the log operator.\n        \"\"\"\n    def to_matrix(self) -> numpy.ndarray[numpy.float64]:\n        \"\"\"\n        Convert an array of SO3 into an array of rotation matrices\n        \"\"\"\n    def to_quat(self) -> numpy.ndarray[numpy.float64[m, 4]]:\n        \"\"\"\n        Return quaternion as Nx4 vectors with the order [w x y z].\n        \"\"\"\ndef interpolate(arg0: SE3, arg1: SE3, arg2: float) -> SE3:\n    \"\"\"\n    Interpolate two SE3s of size 1.\n    \"\"\"\ndef iterativeMean(arg0: SE3) -> SE3:\n    \"\"\"\n    Compute the iterative mean of a sequence.\n    \"\"\"\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/Se2_Dx_exp_x.cpp",
    "content": "Scalar const c0 = sin(theta);\nScalar const c1 = cos(theta);\nScalar const c2 = 1.0/theta;\nScalar const c3 = c0*c2;\nScalar const c4 = 1 - c1;\nScalar const c5 = c2*c4;\nScalar const c6 = c1*c2;\nScalar const c7 = pow(theta, -2);\nScalar const c8 = c0*c7;\nScalar const c9 = c4*c7;\nresult[0] = 0;\nresult[1] = 0;\nresult[2] = -c0;\nresult[3] = 0;\nresult[4] = 0;\nresult[5] = c1;\nresult[6] = c3;\nresult[7] = -c5;\nresult[8] = -c3*upsilon[1] + c6*upsilon[0] - c8*upsilon[0] + c9*upsilon[1];\nresult[9] = c5;\nresult[10] = c3;\nresult[11] = c3*upsilon[0] + c6*upsilon[1] - c8*upsilon[1] - c9*upsilon[0];\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/Se2_Dx_log_this.cpp",
    "content": "Scalar const c0 = c[0] - 1;\nScalar const c1 = 0.5*atan2(c[1], c[0]);\nScalar const c2 = c1*c[1]/pow(c0, 2);\nScalar const c3 = pow(c[1], 2);\nScalar const c4 = 1.0/(c3 + pow(c[0], 2));\nScalar const c5 = c4*c[1];\nScalar const c6 = 0.5*t[1];\nScalar const c7 = c5*c6;\nScalar const c8 = 0.5*t[0];\nScalar const c9 = 1.0/c0;\nScalar const c10 = c3*c4*c9;\nScalar const c11 = c1*c9;\nScalar const c12 = c4*c[0];\nScalar const c13 = c5*c8;\nScalar const c14 = c9*c[0];\nScalar const c15 = -c11*c[1];\nresult[0] = c10*c8 + c2*t[0] - c7;\nresult[1] = -c11*t[0] + c12*c6 - c13*c14;\nresult[2] = c15;\nresult[3] = c1;\nresult[4] = c10*c6 + c13 + c2*t[1];\nresult[5] = -c11*t[1] - c12*c8 - c14*c7;\nresult[6] = -c1;\nresult[7] = c15;\nresult[8] = -c5;\nresult[9] = c12;\nresult[10] = 0;\nresult[11] = 0;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/Se2_Dx_this_mul_exp_x_at_0.cpp",
    "content": "Scalar const c0 = -c[1];\nresult[0] = 0;\nresult[1] = 0;\nresult[2] = c0;\nresult[3] = 0;\nresult[4] = 0;\nresult[5] = c[0];\nresult[6] = c[0];\nresult[7] = c0;\nresult[8] = 0;\nresult[9] = c[1];\nresult[10] = c[0];\nresult[11] = 0;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/Se3_Dx_exp_x.cpp",
    "content": "Scalar const c0 = pow(omega[0], 2);\nScalar const c1 = pow(omega[1], 2);\nScalar const c2 = pow(omega[2], 2);\nScalar const c3 = c0 + c1 + c2;\nScalar const c4 = sqrt(c3);\nScalar const c5 = 1.0/c4;\nScalar const c6 = 0.5*c4;\nScalar const c7 = sin(c6);\nScalar const c8 = c5*c7;\nScalar const c9 = pow(c3, -3.0/2.0);\nScalar const c10 = c7*c9;\nScalar const c11 = 1.0/c3;\nScalar const c12 = 0.5*c11*cos(c6);\nScalar const c13 = c10*omega[0];\nScalar const c14 = omega[0]*omega[1];\nScalar const c15 = c12*c14 - c13*omega[1];\nScalar const c16 = omega[0]*omega[2];\nScalar const c17 = c12*c16 - c13*omega[2];\nScalar const c18 = omega[1]*omega[2];\nScalar const c19 = -c10*c18 + c12*c18;\nScalar const c20 = 0.5*c8;\nScalar const c21 = sin(c4);\nScalar const c22 = -c21 + c4;\nScalar const c23 = -c1;\nScalar const c24 = -c2;\nScalar const c25 = c23 + c24;\nScalar const c26 = c25*c9;\nScalar const c27 = cos(c4);\nScalar const c28 = 1 - c27;\nScalar const c29 = c11*c28;\nScalar const c30 = c29*omega[2];\nScalar const c31 = c22*c9;\nScalar const c32 = c31*omega[1];\nScalar const c33 = c32*omega[0];\nScalar const c34 = c29*omega[1];\nScalar const c35 = c31*omega[2];\nScalar const c36 = c35*omega[0];\nScalar const c37 = 3*c22/pow(c3, 5.0/2.0);\nScalar const c38 = c25*c37;\nScalar const c39 = c5*omega[0];\nScalar const c40 = -c27*c39 + c39;\nScalar const c41 = c40*c9;\nScalar const c42 = c0*c37;\nScalar const c43 = c16*c41 + c35 - c42*omega[2];\nScalar const c44 = c21*c9;\nScalar const c45 = c14*c44;\nScalar const c46 = 2*c28/pow(c3, 2);\nScalar const c47 = c14*c46;\nScalar const c48 = c45 - c47;\nScalar const c49 = c14*c41 + c32 - c42*omega[1];\nScalar const c50 = c16*c44;\nScalar const c51 = c16*c46;\nScalar const c52 = -c50 + c51;\nScalar const c53 = -2*c32;\nScalar const c54 = c5*omega[1];\nScalar const c55 = -c27*c54 + c54;\nScalar const c56 = c1*c44;\nScalar const c57 = c1*c46;\nScalar const c58 = c55*c9;\nScalar const c59 = c16*c58;\nScalar const c60 = 
c37*omega[0];\nScalar const c61 = -c18*c60;\nScalar const c62 = c29 + c61;\nScalar const c63 = c31*omega[0];\nScalar const c64 = -c1*c60 + c14*c58 + c63;\nScalar const c65 = c18*c44;\nScalar const c66 = c18*c46;\nScalar const c67 = -c65 + c66;\nScalar const c68 = -2*c35;\nScalar const c69 = c5*omega[2];\nScalar const c70 = -c27*c69 + c69;\nScalar const c71 = c2*c44;\nScalar const c72 = c2*c46;\nScalar const c73 = c70*c9;\nScalar const c74 = c14*c73;\nScalar const c75 = -c29 + c61;\nScalar const c76 = c65 - c66;\nScalar const c77 = c16*c73 - c2*c60 + c63;\nScalar const c78 = -c0;\nScalar const c79 = c24 + c78;\nScalar const c80 = c29*omega[0];\nScalar const c81 = c32*omega[2];\nScalar const c82 = -2*c63;\nScalar const c83 = c79*c9;\nScalar const c84 = c0*c44;\nScalar const c85 = c0*c46;\nScalar const c86 = c18*c41;\nScalar const c87 = c50 - c51;\nScalar const c88 = c37*c79;\nScalar const c89 = -c45 + c47;\nScalar const c90 = c37*omega[2];\nScalar const c91 = -c1*c90 + c18*c58 + c35;\nScalar const c92 = c37*omega[1];\nScalar const c93 = c18*c73 - c2*c92 + c32;\nScalar const c94 = c23 + c78;\nScalar const c95 = c9*c94;\nresult[0] = 0;\nresult[1] = 0;\nresult[2] = 0;\nresult[3] = -c0*c10 + c0*c12 + c8;\nresult[4] = c15;\nresult[5] = c17;\nresult[6] = 0;\nresult[7] = 0;\nresult[8] = 0;\nresult[9] = c15;\nresult[10] = -c1*c10 + c1*c12 + c8;\nresult[11] = c19;\nresult[12] = 0;\nresult[13] = 0;\nresult[14] = 0;\nresult[15] = c17;\nresult[16] = c19;\nresult[17] = -c10*c2 + c12*c2 + c8;\nresult[18] = 0;\nresult[19] = 0;\nresult[20] = 0;\nresult[21] = -c20*omega[0];\nresult[22] = -c20*omega[1];\nresult[23] = -c20*omega[2];\nresult[24] = c22*c26 + 1;\nresult[25] = -c30 + c33;\nresult[26] = c34 + c36;\nresult[27] = upsilon[0]*(c26*c40 - c38*omega[0]) + upsilon[1]*(c49 + c52) + upsilon[2]*(c43 + c48);\nresult[28] = upsilon[0]*(c26*c55 - c38*omega[1] + c53) + upsilon[1]*(c64 + c67) + upsilon[2]*(c56 - c57 + c59 + c62);\nresult[29] = upsilon[0]*(c26*c70 - c38*omega[2] + c68) + 
upsilon[1]*(-c71 + c72 + c74 + c75) + upsilon[2]*(c76 + c77);\nresult[30] = c30 + c33;\nresult[31] = c31*c79 + 1;\nresult[32] = -c80 + c81;\nresult[33] = upsilon[0]*(c49 + c87) + upsilon[1]*(c40*c83 - c60*c79 + c82) + upsilon[2]*(c75 - c84 + c85 + c86);\nresult[34] = upsilon[0]*(c64 + c76) + upsilon[1]*(c55*c83 - c88*omega[1]) + upsilon[2]*(c89 + c91);\nresult[35] = upsilon[0]*(c62 + c71 - c72 + c74) + upsilon[1]*(c68 + c70*c83 - c88*omega[2]) + upsilon[2]*(c52 + c93);\nresult[36] = -c34 + c36;\nresult[37] = c80 + c81;\nresult[38] = c31*c94 + 1;\nresult[39] = upsilon[0]*(c43 + c89) + upsilon[1]*(c62 + c84 - c85 + c86) + upsilon[2]*(c40*c95 - c60*c94 + c82);\nresult[40] = upsilon[0]*(-c56 + c57 + c59 + c75) + upsilon[1]*(c48 + c91) + upsilon[2]*(c53 + c55*c95 - c92*c94);\nresult[41] = upsilon[0]*(c67 + c77) + upsilon[1]*(c87 + c93) + upsilon[2]*(c70*c95 - c90*c94);\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/Se3_Dx_log_this.cpp",
    "content": "Scalar const c0 = pow(q.z(), 2);\nScalar const c1 = 1.0/q.w();\nScalar const c2 = pow(q.x(), 2);\nScalar const c3 = pow(q.y(), 2);\nScalar const c4 = c0 + c2 + c3;\nScalar const c5 = sqrt(c4);\nScalar const c6 = atan(c1*c5);\nScalar const c7 = pow(c6, 2);\nScalar const c8 = 8*c7;\nScalar const c9 = pow(c4, -2);\nScalar const c10 = c9*q.x();\nScalar const c11 = c10*c8;\nScalar const c12 = c0*c11;\nScalar const c13 = c6/pow(c4, 3.0/2.0);\nScalar const c14 = 8*c13;\nScalar const c15 = pow(q.w(), -2);\nScalar const c16 = 1.0/(c15*c4 + 1);\nScalar const c17 = c1*c16;\nScalar const c18 = c17*q.x();\nScalar const c19 = c0*c18;\nScalar const c20 = c14*c19;\nScalar const c21 = c12 - c20;\nScalar const c22 = c11*c3;\nScalar const c23 = c14*c3;\nScalar const c24 = c18*c23;\nScalar const c25 = c22 - c24;\nScalar const c26 = c21 + c25;\nScalar const c27 = 4*c7;\nScalar const c28 = 1.0/c4;\nScalar const c29 = c2*c28;\nScalar const c30 = c27*c29;\nScalar const c31 = c27*c28;\nScalar const c32 = c3*c31;\nScalar const c33 = c0*c31;\nScalar const c34 = c30 + c32 + c33;\nScalar const c35 = 1.0/c34;\nScalar const c36 = sqrt(c34);\nScalar const c37 = 0.5*c36;\nScalar const c38 = sin(c37);\nScalar const c39 = cos(c37);\nScalar const c40 = (1.0/2.0)*c39/c38;\nScalar const c41 = -c36*c40 + 1;\nScalar const c42 = c35*c41;\nScalar const c43 = c28*q.x();\nScalar const c44 = pow(q.x(), 3);\nScalar const c45 = c44*c9;\nScalar const c46 = c17*c44;\nScalar const c47 = c13*c46;\nScalar const c48 = -c43*c8 + c45*c8 - 8*c47;\nScalar const c49 = c26 + c48;\nScalar const c50 = -c32;\nScalar const c51 = -c33;\nScalar const c52 = c50 + c51;\nScalar const c53 = c41/pow(c34, 2);\nScalar const c54 = c52*c53;\nScalar const c55 = 1.0*c7;\nScalar const c56 = c10*c55;\nScalar const c57 = 1.0*c13;\nScalar const c58 = 1.0*c3;\nScalar const c59 = c13*c58;\nScalar const c60 = c0*c57;\nScalar const c61 = c31*q.x();\nScalar const c62 = c10*c27;\nScalar const c63 = 4*c13;\nScalar const c64 = 
c3*c63;\nScalar const c65 = -c0*c62 + c18*c64 + c19*c63 - c27*c45 - c3*c62 + 4*c47 + c61;\nScalar const c66 = 0.25*pow(c39, 2)/pow(c38, 2);\nScalar const c67 = c40/c36;\nScalar const c68 = -c0*c56 + c18*c59 + c18*c60 - c3*c56 + c43*c55 - c45*c55 + c46*c57 + c65*c66 - c65*c67;\nScalar const c69 = c35*c52;\nScalar const c70 = c57*q.x();\nScalar const c71 = c70*q.z();\nScalar const c72 = c31*q.y();\nScalar const c73 = c42*c72;\nScalar const c74 = c17*c43;\nScalar const c75 = 1.0*c74;\nScalar const c76 = c75*q.z();\nScalar const c77 = -c71 + c73 + c76;\nScalar const c78 = c9*q.y();\nScalar const c79 = c2*c78;\nScalar const c80 = c79*c8;\nScalar const c81 = c35*c61;\nScalar const c82 = c68*c81;\nScalar const c83 = c53*c61;\nScalar const c84 = c49*c83;\nScalar const c85 = c17*q.y();\nScalar const c86 = c2*c85;\nScalar const c87 = c14*c86;\nScalar const c88 = -c42*c80 + c42*c87 + c82*q.y() + c84*q.y();\nScalar const c89 = c70*q.y();\nScalar const c90 = c31*q.z();\nScalar const c91 = c42*c90;\nScalar const c92 = c75*q.y();\nScalar const c93 = c89 + c91 - c92;\nScalar const c94 = c9*q.z();\nScalar const c95 = c2*c94;\nScalar const c96 = c8*c95;\nScalar const c97 = c17*q.z();\nScalar const c98 = c2*c97;\nScalar const c99 = c14*c98;\nScalar const c100 = -c42*c96 + c42*c99 + c82*q.z() + c84*q.z();\nScalar const c101 = c0*c78;\nScalar const c102 = c101*c8;\nScalar const c103 = c0*c85;\nScalar const c104 = c103*c14;\nScalar const c105 = c102 - c104;\nScalar const c106 = c28*q.y();\nScalar const c107 = pow(q.y(), 3);\nScalar const c108 = c107*c9;\nScalar const c109 = c107*c17;\nScalar const c110 = c109*c13;\nScalar const c111 = -c106*c8 + c108*c8 - 8*c110;\nScalar const c112 = c80 - c87;\nScalar const c113 = c105 + c112;\nScalar const c114 = c111 + c113;\nScalar const c115 = c2*c57;\nScalar const c116 = -c101*c27 + c103*c63 - c108*c27 + 4*c110 - c27*c79 + c63*c86 + c72;\nScalar const c117 = -c101*c55 + c106*c55 - c108*c55 + c109*c57 + c115*c85 + c116*c66 - c116*c67 - c55*c79 + 
c60*c85;\nScalar const c118 = c17*c28;\nScalar const c119 = c118*c58;\nScalar const c120 = c6/c5;\nScalar const c121 = 1.0*c120;\nScalar const c122 = q.y()*q.z();\nScalar const c123 = c122*c42;\nScalar const c124 = -c11*c123 + c123*c14*c18;\nScalar const c125 = -c121 + c124;\nScalar const c126 = c117*c81;\nScalar const c127 = c114*c83;\nScalar const c128 = c126*q.z() + c127*q.z();\nScalar const c129 = c122*c57;\nScalar const c130 = c42*c61;\nScalar const c131 = c106*c97;\nScalar const c132 = 1.0*c131;\nScalar const c133 = -c129 + c130 + c132;\nScalar const c134 = c126*q.y() + c127*q.y() - c22*c42 + c24*c42;\nScalar const c135 = c3*c94;\nScalar const c136 = c135*c8;\nScalar const c137 = c23*c97;\nScalar const c138 = c136 - c137;\nScalar const c139 = c28*q.z();\nScalar const c140 = pow(q.z(), 3);\nScalar const c141 = c140*c9;\nScalar const c142 = c140*c17;\nScalar const c143 = -c139*c8 - c14*c142 + c141*c8;\nScalar const c144 = c96 - c99;\nScalar const c145 = c138 + c144;\nScalar const c146 = c143 + c145;\nScalar const c147 = -c135*c27 - c141*c27 + c142*c63 - c27*c95 + c63*c98 + c64*c97 + c90;\nScalar const c148 = c115*c97 - c135*c55 + c139*c55 - c141*c55 + c142*c57 + c147*c66 - c147*c67 - c55*c95 + c59*c97;\nScalar const c149 = c0*c118;\nScalar const c150 = 1.0*c149;\nScalar const c151 = c121 + c124;\nScalar const c152 = c148*c81;\nScalar const c153 = c146*c83;\nScalar const c154 = c152*q.y() + c153*q.y();\nScalar const c155 = c129 + c130 - c132;\nScalar const c156 = -c12*c42 + c152*q.z() + c153*q.z() + c20*c42;\nScalar const c157 = c15*c16;\nScalar const c158 = 1.0*c157;\nScalar const c159 = c158*q.z();\nScalar const c160 = c121*c157;\nScalar const c161 = c120*c157;\nScalar const c162 = 4*c161;\nScalar const c163 = -c0*c162 - c162*c2 - c162*c3;\nScalar const c164 = -c0*c160 - c160*c2 - c160*c3 + c163*c66 - c163*c67;\nScalar const c165 = c164*c81;\nScalar const c166 = 8*c161;\nScalar const c167 = c166*c2;\nScalar const c168 = c166*c3;\nScalar const c169 = 
c0*c166;\nScalar const c170 = c168 + c169;\nScalar const c171 = c167 + c170;\nScalar const c172 = c171*c83;\nScalar const c173 = c166*c42*q.x();\nScalar const c174 = c165*q.y() + c172*q.y() - c173*q.y();\nScalar const c175 = c158*q.y();\nScalar const c176 = c165*q.z() + c172*q.z() - c173*q.z();\nScalar const c177 = c121*q.z();\nScalar const c178 = c130*q.y();\nScalar const c179 = c121*q.y();\nScalar const c180 = c130*q.z();\nScalar const c181 = -c30;\nScalar const c182 = c181 + c51;\nScalar const c183 = c182*c53;\nScalar const c184 = c182*c35;\nScalar const c185 = c17*c29;\nScalar const c186 = 1.0*c185;\nScalar const c187 = c72*q.z();\nScalar const c188 = c187*c35;\nScalar const c189 = c187*c53;\nScalar const c190 = c188*c68 + c189*c49;\nScalar const c191 = c71 + c73 - c76;\nScalar const c192 = -c89 + c91 + c92;\nScalar const c193 = c114*c189 + c117*c188 - c136*c42 + c137*c42;\nScalar const c194 = -c102*c42 + c104*c42 + c146*c189 + c148*c188;\nScalar const c195 = c158*q.x();\nScalar const c196 = -c123*c166 + c164*c188 + c171*c189;\nScalar const c197 = c121*q.x();\nScalar const c198 = c73*q.z();\nScalar const c199 = c181 + c50;\nScalar const c200 = c199*c53;\nScalar const c201 = c199*c35;\nScalar const c202 = 2*c120;\nScalar const c203 = 2*c13;\nScalar const c204 = c203*q.x();\nScalar const c205 = 2*c74;\nScalar const c206 = -c204*q.y() + c205*q.y();\nScalar const c207 = -c204*q.z() + c205*q.z();\nScalar const c208 = 2*c157;\nScalar const c209 = -c122*c203 + 2*c131;\nresult[0] = t[0]*(c26*c42 + c49*c54 + c68*c69) + t[1]*(c77 + c88) + t[2]*(c100 + c93);\nresult[1] = t[0]*(c114*c54 + c117*c69 + c42*(c105 + c111)) + t[1]*(c133 + c134) + t[2]*(-c119 + c125 + c128 + c59);\nresult[2] = t[0]*(c146*c54 + c148*c69 + c42*(c138 + c143)) + t[1]*(c150 + c151 + c154 - c60) + t[2]*(c155 + c156);\nresult[3] = t[0]*(c164*c69 + c170*c42 + c171*c54) + t[1]*(-c159 + c174) + t[2]*(c175 + c176);\nresult[4] = c42*c52 + 1;\nresult[5] = c177 + c178;\nresult[6] = -c179 + c180;\nresult[7] = 
t[0]*(c191 + c88) + t[1]*(c183*c49 + c184*c68 + c42*(c21 + c48)) + t[2]*(-c115 + c151 + c186 + c190);\nresult[8] = t[0]*(c134 + c155) + t[1]*(c113*c42 + c114*c183 + c117*c184) + t[2]*(c192 + c193);\nresult[9] = t[0]*(c125 - c150 + c154 + c60) + t[1]*(c146*c183 + c148*c184 + c42*(c143 + c144)) + t[2]*(c194 + c77);\nresult[10] = t[0]*(c159 + c174) + t[1]*(c164*c184 + c171*c183 + c42*(c167 + c169)) + t[2]*(-c195 + c196);\nresult[11] = -c177 + c178;\nresult[12] = c182*c42 + 1;\nresult[13] = c197 + c198;\nresult[14] = t[0]*(c100 + c192) + t[1]*(c115 + c125 - c186 + c190) + t[2]*(c200*c49 + c201*c68 + c42*(c25 + c48));\nresult[15] = t[0]*(c119 + c128 + c151 - c59) + t[1]*(c193 + c93) + t[2]*(c114*c200 + c117*c201 + c42*(c111 + c112));\nresult[16] = t[0]*(c133 + c156) + t[1]*(c191 + c194) + t[2]*(c145*c42 + c146*c200 + c148*c201);\nresult[17] = t[0]*(-c175 + c176) + t[1]*(c195 + c196) + t[2]*(c164*c201 + c171*c200 + c42*(c167 + c168));\nresult[18] = c179 + c180;\nresult[19] = -c197 + c198;\nresult[20] = c199*c42 + 1;\nresult[21] = 2*c185 - c2*c203 + c202;\nresult[22] = c206;\nresult[23] = c207;\nresult[24] = -c208*q.x();\nresult[25] = 0;\nresult[26] = 0;\nresult[27] = 0;\nresult[28] = c206;\nresult[29] = 2*c118*c3 + c202 - c203*c3;\nresult[30] = c209;\nresult[31] = -c208*q.y();\nresult[32] = 0;\nresult[33] = 0;\nresult[34] = 0;\nresult[35] = c207;\nresult[36] = c209;\nresult[37] = -c0*c203 + 2*c149 + c202;\nresult[38] = -c208*q.z();\nresult[39] = 0;\nresult[40] = 0;\nresult[41] = 0;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/Se3_Dx_this_mul_exp_x_at_0.cpp",
    "content": "Scalar const c0 = 0.5*q.w();\nScalar const c1 = 0.5*q.z();\nScalar const c2 = -c1;\nScalar const c3 = 0.5*q.y();\nScalar const c4 = 0.5*q.x();\nScalar const c5 = -c4;\nScalar const c6 = -c3;\nScalar const c7 = pow(q.x(), 2);\nScalar const c8 = pow(q.y(), 2);\nScalar const c9 = -c8;\nScalar const c10 = pow(q.w(), 2);\nScalar const c11 = pow(q.z(), 2);\nScalar const c12 = c10 - c11;\nScalar const c13 = 2*q.w();\nScalar const c14 = c13*q.z();\nScalar const c15 = 2*q.x();\nScalar const c16 = c15*q.y();\nScalar const c17 = c13*q.y();\nScalar const c18 = c15*q.z();\nScalar const c19 = -c7;\nScalar const c20 = c13*q.x();\nScalar const c21 = 2*q.y()*q.z();\nresult[0] = 0;\nresult[1] = 0;\nresult[2] = 0;\nresult[3] = c0;\nresult[4] = c2;\nresult[5] = c3;\nresult[6] = 0;\nresult[7] = 0;\nresult[8] = 0;\nresult[9] = c1;\nresult[10] = c0;\nresult[11] = c5;\nresult[12] = 0;\nresult[13] = 0;\nresult[14] = 0;\nresult[15] = c6;\nresult[16] = c4;\nresult[17] = c0;\nresult[18] = 0;\nresult[19] = 0;\nresult[20] = 0;\nresult[21] = c5;\nresult[22] = c6;\nresult[23] = c2;\nresult[24] = c12 + c7 + c9;\nresult[25] = -c14 + c16;\nresult[26] = c17 + c18;\nresult[27] = 0;\nresult[28] = 0;\nresult[29] = 0;\nresult[30] = c14 + c16;\nresult[31] = c12 + c19 + c8;\nresult[32] = -c20 + c21;\nresult[33] = 0;\nresult[34] = 0;\nresult[35] = 0;\nresult[36] = -c17 + c18;\nresult[37] = c20 + c21;\nresult[38] = c10 + c11 + c19 + c9;\nresult[39] = 0;\nresult[40] = 0;\nresult[41] = 0;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So2_Dx_exp_x.cpp",
    "content": "result[0] = -sin(theta);\nresult[1] = cos(theta);\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So2_Dx_log_exp_x_times_this_at_0.cpp",
    "content": "result = 1;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So2_Dx_log_this.cpp",
    "content": "result = -c[1]/(pow(c[0], 2) + pow(c[1], 2));\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So2_Dx_this_mul_exp_x_at_0.cpp",
    "content": "result[0] = -c[1];\nresult[1] = c[0];\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So3_Dx_exp_x.cpp",
    "content": "Scalar const c0 = pow(omega[0], 2);\nScalar const c1 = pow(omega[1], 2);\nScalar const c2 = pow(omega[2], 2);\nScalar const c3 = c0 + c1 + c2;\nScalar const c4 = sqrt(c3);\nScalar const c5 = 0.5*c4;\nScalar const c6 = sin(c5);\nScalar const c7 = c6/c4;\nScalar const c8 = c6/pow(c3, 3.0/2.0);\nScalar const c9 = 0.5*cos(c5)/c3;\nScalar const c10 = c8*omega[0];\nScalar const c11 = c9*omega[0];\nScalar const c12 = -c10*omega[1] + c11*omega[1];\nScalar const c13 = -c10*omega[2] + c11*omega[2];\nScalar const c14 = omega[1]*omega[2];\nScalar const c15 = -c14*c8 + c14*c9;\nScalar const c16 = 0.5*c7;\nresult[0] = -c0*c8 + c0*c9 + c7;\nresult[1] = c12;\nresult[2] = c13;\nresult[3] = c12;\nresult[4] = -c1*c8 + c1*c9 + c7;\nresult[5] = c15;\nresult[6] = c13;\nresult[7] = c15;\nresult[8] = -c2*c8 + c2*c9 + c7;\nresult[9] = -c16*omega[0];\nresult[10] = -c16*omega[1];\nresult[11] = -c16*omega[2];\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So3_Dx_log_exp_x_times_this_at_0.cpp",
    "content": "Scalar const c0 = 1.0*pow(q.x(), 4);\nScalar const c1 = 1.0*pow(q.y(), 4);\nScalar const c2 = 1.0*pow(q.z(), 4);\nScalar const c3 = pow(q.y(), 2);\nScalar const c4 = pow(q.x(), 2);\nScalar const c5 = 2.0*c4;\nScalar const c6 = pow(q.z(), 2);\nScalar const c7 = c3*c6;\nScalar const c8 = 1.0/(c0 + c1 + c2 + c3*c5 + c5*c6 + 2.0*c7);\nScalar const c9 = 1.0*c4;\nScalar const c10 = sqrt(c3 + c4 + c6);\nScalar const c11 = c10*atan(c10/q.w());\nScalar const c12 = 1.0*c11;\nScalar const c13 = c12*q.w();\nScalar const c14 = c13*c6 + c3*c9;\nScalar const c15 = c13*c3 + c6*c9;\nScalar const c16 = pow(q.z(), 3);\nScalar const c17 = 1.0*c16;\nScalar const c18 = c11*c17;\nScalar const c19 = c9*q.z();\nScalar const c20 = c11*c19;\nScalar const c21 = c3*q.z();\nScalar const c22 = c12*c21;\nScalar const c23 = pow(q.y(), 3);\nScalar const c24 = 1.0*q.x();\nScalar const c25 = 1.0*pow(q.x(), 3);\nScalar const c26 = c6*q.y();\nScalar const c27 = c11*c24;\nScalar const c28 = c27*q.w();\nScalar const c29 = c23*c24 + c24*c26 + c25*q.y() - c28*q.y();\nScalar const c30 = 1.0*c23;\nScalar const c31 = c11*c30;\nScalar const c32 = c11*c9;\nScalar const c33 = c32*q.y();\nScalar const c34 = c12*c26;\nScalar const c35 = c16*c24 + c21*c24 + c25*q.z() - c28*q.z();\nScalar const c36 = c32*q.w() + 1.0*c7;\nScalar const c37 = c11*c25;\nScalar const c38 = c27*c3;\nScalar const c39 = c27*c6;\nScalar const c40 = -c13*q.y()*q.z() + c17*q.y() + c19*q.y() + c30*q.z();\nresult[0] = c8*(c0 + c14 + c15);\nresult[1] = c8*(c18 + c20 + c22 + c29);\nresult[2] = c8*(-c31 - c33 - c34 + c35);\nresult[3] = c8*(-c18 - c20 - c22 + c29);\nresult[4] = c8*(c1 + c14 + c36);\nresult[5] = c8*(c37 + c38 + c39 + c40);\nresult[6] = c8*(c31 + c33 + c34 + c35);\nresult[7] = c8*(-c37 - c38 - c39 + c40);\nresult[8] = c8*(c15 + c2 + c36);\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So3_Dx_log_this.cpp",
    "content": "Scalar const c0 = pow(q.x(), 2);\nScalar const c1 = pow(q.y(), 2);\nScalar const c2 = pow(q.z(), 2);\nScalar const c3 = c0 + c1 + c2;\nScalar const c4 = sqrt(c3);\nScalar const c5 = 1.0/q.w();\nScalar const c6 = 2*atan(c4*c5);\nScalar const c7 = c6/c4;\nScalar const c8 = c6/pow(c3, 3.0/2.0);\nScalar const c9 = 2*c5/(c3*(c3/pow(q.w(), 2) + 1));\nScalar const c10 = c8*q.x();\nScalar const c11 = c9*q.x();\nScalar const c12 = -c10*q.y() + c11*q.y();\nScalar const c13 = -c10*q.z() + c11*q.z();\nScalar const c14 = q.y()*q.z();\nScalar const c15 = -c14*c8 + c14*c9;\nresult[0] = -c0*c8 + c0*c9 + c7;\nresult[1] = c12;\nresult[2] = c13;\nresult[3] = c12;\nresult[4] = -c1*c8 + c1*c9 + c7;\nresult[5] = c15;\nresult[6] = c13;\nresult[7] = c15;\nresult[8] = -c2*c8 + c2*c9 + c7;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/cpp_gencode/So3_Dx_this_mul_exp_x_at_0.cpp",
    "content": "Scalar const c0 = 0.5*q.w();\nScalar const c1 = 0.5*q.z();\nScalar const c2 = -c1;\nScalar const c3 = 0.5*q.y();\nScalar const c4 = 0.5*q.x();\nScalar const c5 = -c4;\nScalar const c6 = -c3;\nresult[0] = c0;\nresult[1] = c2;\nresult[2] = c3;\nresult[3] = c1;\nresult[4] = c0;\nresult[5] = c5;\nresult[6] = c6;\nresult[7] = c4;\nresult[8] = c0;\nresult[9] = c5;\nresult[10] = c6;\nresult[11] = c2;\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/run_tests.sh",
    "content": "#!/bin/bash\n\nEXIT=0\n\npython3 -m sophus.complex || EXIT=$?\npython3 -m sophus.quaternion || EXIT=$?\npython3 -m sophus.dual_quaternion || EXIT=$?\npython3 -m sophus.so2 || EXIT=$?\npython3 -m sophus.se2 || EXIT=$?\npython3 -m sophus.so3 || EXIT=$?\npython3 -m sophus.se3 || EXIT=$?\n\nexit $EXIT\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/__init__.py",
    "content": ""
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/complex.py",
    "content": "import unittest\n\nimport sympy\n\n\nclass Complex:\n    \"\"\" Complex class \"\"\"\n\n    def __init__(self, real, imag):\n        self.real = real\n        self.imag = imag\n\n    def __mul__(self, right):\n        \"\"\" complex multiplication \"\"\"\n        return Complex(self.real * right.real - self.imag * right.imag,\n                       self.imag * right.real + self.real * right.imag)\n\n    def __add__(self, right):\n        return Complex(self.real + right.real, self.imag + right.imag)\n\n    def __neg__(self):\n        return Complex(-self.real, -self.image)\n\n    def __truediv__(self, scalar):\n        \"\"\" scalar division \"\"\"\n        return Complex(self.real / scalar, self.imag / scalar)\n\n    def __repr__(self):\n        return \"( \" + repr(self.real) + \" + \" + repr(self.imag) + \"i )\"\n\n    def __getitem__(self, key):\n        \"\"\" We use the following convention [real, imag] \"\"\"\n        if key == 0:\n            return self.real\n        else:\n            return self.imag\n\n    def squared_norm(self):\n        \"\"\" squared norm when considering the complex number as tuple \"\"\"\n        return self.real**2 + self.imag**2\n\n    def conj(self):\n        \"\"\" complex conjugate \"\"\"\n        return Complex(self.real, -self.imag)\n\n    def inv(self):\n        \"\"\" complex inverse \"\"\"\n        return self.conj() / self.squared_norm()\n\n    @staticmethod\n    def identity():\n        return Complex(1, 0)\n\n    @staticmethod\n    def zero():\n        return Complex(0, 0)\n\n    def __eq__(self, other):\n        if isinstance(self, other.__class__):\n            return self.real == other.real and self.imag == other.imag\n        return False\n\n    def subs(self, x, y):\n        return Complex(self.real.subs(x, y), self.imag.subs(x, y))\n\n    def simplify(self):\n        return Complex(sympy.simplify(self.real),\n                       sympy.simplify(self.imag))\n\n    @staticmethod\n    def 
Da_a_mul_b(a, b):\n        \"\"\" derivative of complex multiplication wrt left multiplier a \"\"\"\n        return sympy.Matrix([[b.real, -b.imag],\n                             [b.imag, b.real]])\n\n    @staticmethod\n    def Db_a_mul_b(a, b):\n        \"\"\" derivative of complex multiplication wrt right multiplicand b \"\"\"\n        return sympy.Matrix([[a.real, -a.imag],\n                             [a.imag, a.real]])\n\n\nclass TestComplex(unittest.TestCase):\n    def setUp(self):\n        x, y = sympy.symbols('x y', real=True)\n        u, v = sympy.symbols('u v', real=True)\n        self.a = Complex(x, y)\n        self.b = Complex(u, v)\n\n    def test_multiplications(self):\n        product = self.a * self.a.inv()\n        self.assertEqual(product.simplify(),\n                         Complex.identity())\n        product = self.a.inv() * self.a\n        self.assertEqual(product.simplify(),\n                         Complex.identity())\n\n    def test_derivatives(self):\n        d = sympy.Matrix(2, 2, lambda r, c: sympy.diff(\n            (self.a * self.b)[r], self.a[c]))\n        self.assertEqual(d,\n                         Complex.Da_a_mul_b(self.a, self.b))\n        d = sympy.Matrix(2, 2, lambda r, c: sympy.diff(\n            (self.a * self.b)[r], self.b[c]))\n        self.assertEqual(d,\n                         Complex.Db_a_mul_b(self.a, self.b))\n\n\nif __name__ == '__main__':\n    unittest.main()\n    print('hello')\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/cse_codegen.py",
    "content": "import io\n\nimport sympy\n\n\ndef cse_codegen(symbols):\n    cse_results = sympy.cse(symbols, sympy.numbered_symbols(\"c\"))\n    output = io.StringIO()\n    for helper in cse_results[0]:\n        output.write(\"Scalar const \")\n        output.write(sympy.printing.ccode(helper[1], helper[0]))\n        output.write(\"\\n\")\n    assert len(cse_results[1]) == 1\n\n    output.write(sympy.printing.ccode(cse_results[1][0], \"result\"))\n    output.write(\"\\n\")\n    output.seek(0)\n    return output\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/dual_quaternion.py",
    "content": "import unittest\n\nimport sympy\n\nfrom sophus.matrix import Vector3\nfrom sophus.quaternion import Quaternion\n\n\nclass DualQuaternion:\n    \"\"\" Dual quaternion class \"\"\"\n\n    def __init__(self, real_q, inf_q):\n        \"\"\" Dual quaternion consists of a real quaternion, and an infinitesimal\n            quaternion \"\"\"\n        self.real_q = real_q\n        self.inf_q = inf_q\n\n    def __mul__(self, right):\n        \"\"\" dual quaternion multiplication \"\"\"\n        return DualQuaternion(self.real_q * right.real_q,\n                              self.real_q * right.inf_q +\n                              self.inf_q * right.real_q)\n\n    def __truediv__(self, scalar):\n        \"\"\" scalar division \"\"\"\n        return DualQuaternion(self.real_q / scalar, self.inf_q / scalar)\n\n    def __repr__(self):\n        return \"( \" + repr(self.real_q) + \\\n               \" + \" + repr(self.inf_q) + \")\"\n\n    def __getitem__(self, key):\n        assert (key >= 0 and key < 8)\n        if key < 4:\n            return self.real_q[key]\n        else:\n            return self.inf_q[key - 4]\n\n    def squared_norm(self):\n        \"\"\" squared norm when considering the dual quaternion as 8-tuple \"\"\"\n        return self.real_q.squared_norm() + self.inf_q.squared_norm()\n\n    def conj(self):\n        \"\"\" dual quaternion conjugate \"\"\"\n        return DualQuaternion(self.real_q.conj(), self.inf_q.conj())\n\n    def inv(self):\n        \"\"\" dual quaternion inverse \"\"\"\n        return DualQuaternion(self.real_q.inv(),\n                              -self.real_q.inv() * self.inf_q *\n                              self.real_q.inv())\n\n    def simplify(self):\n        return DualQuaternion(self.real_q.simplify(),\n                              self.inf_q.simplify())\n\n    @staticmethod\n    def identity():\n        return DualQuaternion(Quaternion.identity(),\n                              Quaternion.zero())\n\n    def 
__eq__(self, other):\n        if isinstance(self, other.__class__):\n            return self.real_q == other.real_q and self.inf_q == other.inf_q\n        return False\n\n\nclass TestDualQuaternion(unittest.TestCase):\n    def setUp(self):\n        w, s0, s1, s2 = sympy.symbols('w s0 s1 s2', real=True)\n        x, t0, t1, t2 = sympy.symbols('x t0 t1 t2', real=True)\n        y, u0, u1, u2 = sympy.symbols('y u0 u1 u2', real=True)\n        z, v0, v1, v2 = sympy.symbols('z v0 v1 v2', real=True)\n\n        s = Vector3(s0, s1, s2)\n        t = Vector3(t0, t1, t2)\n        u = Vector3(u0, u1, u2)\n        v = Vector3(v0, v1, v2)\n        self.a = DualQuaternion(Quaternion(w, s),\n                                Quaternion(x, t))\n        self.b = DualQuaternion(Quaternion(y, u),\n                                Quaternion(z, v))\n\n    def test_multiplications(self):\n        product = self.a * self.a.inv()\n        self.assertEqual(product.simplify(),\n                         DualQuaternion.identity())\n        product = self.a.inv() * self.a\n        self.assertEqual(product.simplify(),\n                         DualQuaternion.identity())\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/matrix.py",
    "content": "import sys\n\nimport sympy\n\nassert sys.version_info >= (3, 5)\n\n\ndef dot(left, right):\n    assert(isinstance(left, sympy.Matrix))\n    assert(isinstance(right, sympy.Matrix))\n\n    sum = 0\n    for c in range(0, left.cols):\n        for r in range(0, left.rows):\n            sum += left[r, c] * right[r, c]\n    return sum\n\n\ndef squared_norm(m):\n    assert(isinstance(m, sympy.Matrix))\n    return dot(m, m)\n\n\ndef Vector2(x, y):\n    return sympy.Matrix([x, y])\n\n\ndef ZeroVector2():\n    return Vector2(0, 0)\n\n\ndef Vector3(x, y, z):\n    return sympy.Matrix([x, y, z])\n\n\ndef ZeroVector3():\n    return Vector3(0, 0, 0)\n\n\ndef Vector4(a, b, c, d):\n    return sympy.Matrix([a, b, c, d])\n\n\ndef ZeroVector4():\n    return Vector4(0, 0, 0, 0)\n\n\ndef Vector5(a, b, c, d, e):\n    return sympy.Matrix([a, b, c, d, e])\n\n\ndef ZeroVector5():\n    return Vector5(0, 0, 0, 0, 0)\n\n\ndef Vector6(a, b, c, d, e, f):\n    return sympy.Matrix([a, b, c, d, e, f])\n\n\ndef ZeroVector6():\n    return Vector6(0, 0, 0, 0, 0, 0)\n\n\ndef proj(v):\n    m, n = v.shape\n    assert m > 1\n    assert n == 1\n    list = [v[i] / v[m - 1] for i in range(0, m - 1)]\n    r = sympy.Matrix(m - 1, 1,  list)\n    return r\n\n\ndef unproj(v):\n    m, n = v.shape\n    assert m >= 1\n    assert n == 1\n    return v.col_join(sympy.Matrix.ones(1, 1))\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/quaternion.py",
    "content": "\"\"\" run with: python3 -m sophus.quaternion \"\"\"\n\nimport unittest\n\nimport sympy\n\nfrom sophus.matrix import Vector3\nfrom sophus.matrix import squared_norm\n\n\nclass Quaternion:\n    \"\"\" Quaternion class \"\"\"\n\n    def __init__(self, real, vec):\n        \"\"\" Quaternion consists of a real scalar, and an imaginary 3-vector \"\"\"\n        assert isinstance(vec, sympy.Matrix)\n        assert vec.shape == (3, 1), vec.shape\n        self.real = real\n        self.vec = vec\n\n    def __mul__(self, right):\n        \"\"\" quaternion multiplication \"\"\"\n        return Quaternion(self[3] * right[3] - self.vec.dot(right.vec),\n                          self[3] * right.vec + right[3] * self.vec +\n                          self.vec.cross(right.vec))\n\n    def __add__(self, right):\n        \"\"\" quaternion multiplication \"\"\"\n        return Quaternion(self[3] + right[3], self.vec + right.vec)\n\n    def __neg__(self):\n        return Quaternion(-self[3], -self.vec)\n\n    def __truediv__(self, scalar):\n        \"\"\" scalar division \"\"\"\n        return Quaternion(self.real / scalar, self.vec / scalar)\n\n    def __repr__(self):\n        return \"( \" + repr(self[3]) + \" + \" + repr(self.vec) + \"i )\"\n\n    def __getitem__(self, key):\n        \"\"\" We use the following convention [vec0, vec1, vec2, real] \"\"\"\n        assert (key >= 0 and key < 4)\n        if key == 3:\n            return self.real\n        else:\n            return self.vec[key]\n\n    def squared_norm(self):\n        \"\"\" squared norm when considering the quaternion as 4-tuple \"\"\"\n        return squared_norm(self.vec) + self.real**2\n\n    def conj(self):\n        \"\"\" quaternion conjugate \"\"\"\n        return Quaternion(self.real, -self.vec)\n\n    def inv(self):\n        \"\"\" quaternion inverse \"\"\"\n        return self.conj() / self.squared_norm()\n\n    @staticmethod\n    def identity():\n        return Quaternion(1, Vector3(0, 0, 
0))\n\n    @staticmethod\n    def zero():\n        return Quaternion(0, Vector3(0, 0, 0))\n\n    def subs(self, x, y):\n        return Quaternion(self.real.subs(x, y), self.vec.subs(x, y))\n\n    def simplify(self):\n        v = sympy.simplify(self.vec)\n        return Quaternion(sympy.simplify(self.real),\n                          Vector3(v[0], v[1], v[2]))\n\n    def __eq__(self, other):\n        if isinstance(self, other.__class__):\n            return self.real == other.real and self.vec == other.vec\n        return False\n\n    @staticmethod\n    def Da_a_mul_b(a, b):\n        \"\"\" derivative of quaternion multiplication wrt left multiplier a \"\"\"\n        v0 = b.vec[0]\n        v1 = b.vec[1]\n        v2 = b.vec[2]\n        y = b.real\n        return sympy.Matrix([[y, v2, -v1, v0],\n                             [-v2, y, v0, v1],\n                             [v1, -v0, y, v2],\n                             [-v0, -v1, -v2, y]])\n\n    @staticmethod\n    def Db_a_mul_b(a, b):\n        \"\"\" derivative of quaternion multiplication wrt right multiplicand b \"\"\"\n        u0 = a.vec[0]\n        u1 = a.vec[1]\n        u2 = a.vec[2]\n        x = a.real\n        return sympy.Matrix([[x, -u2, u1, u0],\n                             [u2, x, -u0, u1],\n                             [-u1, u0, x, u2],\n                             [-u0, -u1, -u2, x]])\n\n\nclass TestQuaternion(unittest.TestCase):\n    def setUp(self):\n        x, u0, u1, u2 = sympy.symbols('x u0 u1 u2', real=True)\n        y, v0, v1, v2 = sympy.symbols('y v0 v1 v2', real=True)\n        u = Vector3(u0, u1, u2)\n        v = Vector3(v0, v1, v2)\n        self.a = Quaternion(x, u)\n        self.b = Quaternion(y, v)\n\n    def test_multiplications(self):\n        product = self.a * self.a.inv()\n        self.assertEqual(product.simplify(),\n                         Quaternion.identity())\n        product = self.a.inv() * self.a\n        self.assertEqual(product.simplify(),\n                         
Quaternion.identity())\n\n    def test_derivatives(self):\n        d = sympy.Matrix(4, 4, lambda r, c: sympy.diff(\n            (self.a * self.b)[r], self.a[c]))\n        self.assertEqual(d,\n                         Quaternion.Da_a_mul_b(self.a, self.b))\n        d = sympy.Matrix(4, 4, lambda r, c: sympy.diff(\n            (self.a * self.b)[r], self.b[c]))\n        self.assertEqual(d,\n                         Quaternion.Db_a_mul_b(self.a, self.b))\n\n\nif __name__ == '__main__':\n    unittest.main()\n    print('hello')\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/se2.py",
    "content": "import functools\nimport unittest\n\nimport sympy\n\nfrom sophus.complex import Complex\nfrom sophus.cse_codegen import cse_codegen\nfrom sophus.matrix import Vector2\nfrom sophus.matrix import Vector3\nfrom sophus.matrix import ZeroVector2\nfrom sophus.matrix import ZeroVector3\nfrom sophus.matrix import proj\nfrom sophus.matrix import unproj\nfrom sophus.so2 import So2\n\n\nclass Se2:\n    \"\"\" 2 dimensional group of rigid body transformations \"\"\"\n\n    def __init__(self, so2, t):\n        \"\"\" internally represented by a unit complex number z and a translation\n            2-vector \"\"\"\n        self.so2 = so2\n        self.t = t\n\n    @staticmethod\n    def exp(v):\n        \"\"\" exponential map \"\"\"\n        theta = v[2]\n        so2 = So2.exp(theta)\n\n        a = so2.z.imag / theta\n        b = (1 - so2.z.real) / theta\n\n        t = Vector2(a * v[0] - b * v[1],\n                    b * v[0] + a * v[1])\n        return Se2(so2, t)\n\n    def log(self):\n        theta = self.so2.log()\n        halftheta = 0.5 * theta\n        a = -(halftheta * self.so2.z.imag) / (self.so2.z.real - 1)\n\n        V_inv = sympy.Matrix([[a, halftheta],\n                              [-halftheta, a]])\n        upsilon = V_inv * self.t\n        return Vector3(upsilon[0], upsilon[1], theta)\n\n    def calc_Dx_log_this(self):\n        return sympy.Matrix(3, 4, lambda r, c: sympy.diff(self.log()[r],\n                            self[c]))\n\n    def __repr__(self):\n        return \"Se2: [\" + repr(self.so2) + \" \" + repr(self.t)\n\n    @staticmethod\n    def hat(v):\n        upsilon = Vector2(v[0], v[1])\n        theta = v[2]\n        return So2.hat(theta).\\\n            row_join(upsilon).\\\n            col_join(sympy.Matrix.zeros(1, 3))\n\n    def matrix(self):\n        \"\"\" returns matrix representation \"\"\"\n        R = self.so2.matrix()\n        return (R.row_join(self.t)).col_join(sympy.Matrix(1, 3, [0, 0, 1]))\n\n    def __mul__(self, 
right):\n        \"\"\" left-multiplication\n            either rotation concatenation or point-transform \"\"\"\n        if isinstance(right, sympy.Matrix):\n            assert right.shape == (2, 1), right.shape\n            return self.so2 * right + self.t\n        elif isinstance(right, Se2):\n            return Se2(self.so2 * right.so2,\n                       self.t + self.so2 * right.t)\n        assert False, \"unsupported type: {0}\".format(type(right))\n\n    def __getitem__(self, key):\n        \"\"\" We use the following convention [q0, q1, q2, q3, t0, t1, t2] \"\"\"\n        assert (key >= 0 and key < 4)\n        if key < 2:\n            return self.so2[key]\n        else:\n            return self.t[key - 2]\n\n    @staticmethod\n    def calc_Dx_exp_x(x):\n        return sympy.Matrix(4, 3, lambda r, c:\n                            sympy.diff(Se2.exp(x)[r], x[c]))\n\n    @staticmethod\n    def Dx_exp_x_at_0():\n        return sympy.Matrix([[0, 0, 0],\n                             [0, 0, 1],\n                             [1, 0, 0],\n                             [0, 1, 0]])\n\n    def calc_Dx_this_mul_exp_x_at_0(self, x):\n        return sympy.Matrix(4, 3, lambda r, c:\n                            sympy.diff((self * Se2.exp(x))[r], x[c])). 
\\\n            subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n    @staticmethod\n    def calc_Dx_exp_x_at_0(x):\n        return Se2.calc_Dx_exp_x(x).subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n    @staticmethod\n    def Dxi_x_matrix(x, i):\n        if i < 2:\n            return So2.Dxi_x_matrix(x, i).\\\n                row_join(sympy.Matrix.zeros(2, 1)).\\\n                col_join(sympy.Matrix.zeros(1, 3))\n        M = sympy.Matrix.zeros(3, 3)\n        M[i - 2, 2] = 1\n        return M\n\n    @staticmethod\n    def calc_Dxi_x_matrix(x, i):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff(x.matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dxi_exp_x_matrix(x, i):\n        T = Se2.exp(x)\n        Dx_exp_x = Se2.calc_Dx_exp_x(x)\n        list = [Dx_exp_x[j, i] * Se2.Dxi_x_matrix(T, j) for j in range(0, 4)]\n        return functools.reduce((lambda a, b: a + b), list)\n\n    @staticmethod\n    def calc_Dxi_exp_x_matrix(x, i):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff(Se2.exp(x).matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dxi_exp_x_matrix_at_0(i):\n        v = ZeroVector3()\n        v[i] = 1\n        return Se2.hat(v)\n\n    @staticmethod\n    def calc_Dxi_exp_x_matrix_at_0(x, i):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff(Se2.exp(x).matrix()[r, c], x[i])\n                            ).subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n\nclass TestSe2(unittest.TestCase):\n    def setUp(self):\n        upsilon0, upsilon1, theta = sympy.symbols(\n            'upsilon[0], upsilon[1], theta',\n            real=True)\n        x, y = sympy.symbols('c[0] c[1]', real=True)\n        p0, p1 = sympy.symbols('p0 p1', real=True)\n        t0, t1 = sympy.symbols('t[0] t[1]', real=True)\n        self.upsilon_theta = Vector3(\n            upsilon0, upsilon1, theta)\n        self.t = Vector2(t0, t1)\n        self.a = Se2(So2(Complex(x, y)), self.t)\n 
       self.p = Vector2(p0, p1)\n\n    def test_exp_log(self):\n        for v in [Vector3(0., 1, 0.5),\n                  Vector3(0.1, 0.1, 0.1),\n                  Vector3(0.01, 0.2, 0.03)]:\n            w = Se2.exp(v).log()\n            for i in range(0, 3):\n                self.assertAlmostEqual(v[i], w[i])\n\n    def test_matrix(self):\n        T_foo_bar = Se2.exp(self.upsilon_theta)\n        Tmat_foo_bar = T_foo_bar.matrix()\n        point_bar = self.p\n        p1_foo = T_foo_bar * point_bar\n        p2_foo = proj(Tmat_foo_bar * unproj(point_bar))\n        self.assertEqual(sympy.simplify(p1_foo - p2_foo),\n                         ZeroVector2())\n\n    def test_derivatives(self):\n        self.assertEqual(sympy.simplify(\n            Se2.calc_Dx_exp_x_at_0(self.upsilon_theta) -\n            Se2.Dx_exp_x_at_0()),\n            sympy.Matrix.zeros(4, 3))\n        for i in range(0, 4):\n            self.assertEqual(sympy.simplify(Se2.calc_Dxi_x_matrix(self.a, i) -\n                                            Se2.Dxi_x_matrix(self.a, i)),\n                             sympy.Matrix.zeros(3, 3))\n        for i in range(0, 3):\n            self.assertEqual(sympy.simplify(\n                Se2.Dxi_exp_x_matrix(self.upsilon_theta, i) -\n                Se2.calc_Dxi_exp_x_matrix(self.upsilon_theta, i)),\n                sympy.Matrix.zeros(3, 3))\n            self.assertEqual(sympy.simplify(\n                Se2.Dxi_exp_x_matrix_at_0(i) -\n                Se2.calc_Dxi_exp_x_matrix_at_0(self.upsilon_theta, i)),\n                sympy.Matrix.zeros(3, 3))\n\n    def test_codegen(self):\n        stream = cse_codegen(Se2.calc_Dx_exp_x(self.upsilon_theta))\n        filename = \"cpp_gencode/Se2_Dx_exp_x.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            
file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_this_mul_exp_x_at_0(\n            self.upsilon_theta))\n        filename = \"cpp_gencode/Se2_Dx_this_mul_exp_x_at_0.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_log_this())\n        filename = \"cpp_gencode/Se2_Dx_log_this.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/se3.py",
    "content": "import functools\nimport unittest\n\nimport sympy\n\nfrom sophus.cse_codegen import cse_codegen\nfrom sophus.matrix import Vector3\nfrom sophus.matrix import Vector6\nfrom sophus.matrix import ZeroVector3\nfrom sophus.matrix import ZeroVector6\nfrom sophus.matrix import proj\nfrom sophus.matrix import squared_norm\nfrom sophus.matrix import unproj\nfrom sophus.quaternion import Quaternion\nfrom sophus.so3 import So3\n\n\nclass Se3:\n    \"\"\" 3 dimensional group of rigid body transformations \"\"\"\n\n    def __init__(self, so3, t):\n        \"\"\" internally represented by a unit quaternion q and a translation\n            3-vector \"\"\"\n        assert isinstance(so3, So3)\n        assert isinstance(t, sympy.Matrix)\n        assert t.shape == (3, 1), t.shape\n\n        self.so3 = so3\n        self.t = t\n\n    @staticmethod\n    def exp(v):\n        \"\"\" exponential map \"\"\"\n        upsilon = v[0:3, :]\n        omega = Vector3(v[3], v[4], v[5])\n        so3 = So3.exp(omega)\n        Omega = So3.hat(omega)\n        Omega_sq = Omega * Omega\n        theta = sympy.sqrt(squared_norm(omega))\n        V = (sympy.Matrix.eye(3) +\n             (1 - sympy.cos(theta)) / (theta**2) * Omega +\n             (theta - sympy.sin(theta)) / (theta**3) * Omega_sq)\n        return Se3(so3, V * upsilon)\n\n    def log(self):\n\n        omega = self.so3.log()\n        theta = sympy.sqrt(squared_norm(omega))\n        Omega = So3.hat(omega)\n\n        half_theta = 0.5 * theta\n\n        V_inv = sympy.Matrix.eye(3) - 0.5 * Omega + (1 - theta * sympy.cos(\n            half_theta) / (2 * sympy.sin(half_theta))) / (theta * theta) *\\\n            (Omega * Omega)\n        upsilon = V_inv * self.t\n        return upsilon.col_join(omega)\n\n    def calc_Dx_log_this(self):\n        return sympy.Matrix(6, 7, lambda r, c: sympy.diff(self.log()[r],\n                            self[c]))\n\n    def __repr__(self):\n        return \"Se3: [\" + repr(self.so3) + \" \" + 
repr(self.t)\n\n    def inverse(self):\n        invR = self.so3.inverse()\n        return Se3(invR, invR * (-1 * self.t))\n\n    @staticmethod\n    def hat(v):\n        \"\"\" R^6 => R^4x4  \"\"\"\n        \"\"\" returns 4x4-matrix representation ``Omega`` \"\"\"\n        upsilon = Vector3(v[0], v[1], v[2])\n        omega = Vector3(v[3], v[4], v[5])\n        return So3.hat(omega).\\\n            row_join(upsilon).\\\n            col_join(sympy.Matrix.zeros(1, 4))\n\n    @staticmethod\n    def vee(Omega):\n        \"\"\" R^4x4 => R^6 \"\"\"\n        \"\"\" returns 6-vector representation of Lie algebra \"\"\"\n        \"\"\" This is the inverse of the hat-operator \"\"\"\n\n        head = Vector3(Omega[0, 3], Omega[1, 3], Omega[2, 3])\n        tail = So3.vee(Omega[0:3, 0:3])\n        upsilon_omega = \\\n            Vector6(head[0], head[1], head[2], tail[0], tail[1], tail[2])\n        return upsilon_omega\n\n    def matrix(self):\n        \"\"\" returns matrix representation \"\"\"\n        R = self.so3.matrix()\n        return (R.row_join(self.t)).col_join(sympy.Matrix(1, 4, [0, 0, 0, 1]))\n\n    def __mul__(self, right):\n        \"\"\" left-multiplication\n            either rotation concatenation or point-transform \"\"\"\n        if isinstance(right, sympy.Matrix):\n            assert right.shape == (3, 1), right.shape\n            return self.so3 * right + self.t\n        elif isinstance(right, Se3):\n            r = self.so3 * right.so3\n            t = self.t + self.so3 * right.t\n            return Se3(r, t)\n        assert False, \"unsupported type: {0}\".format(type(right))\n\n    def __getitem__(self, key):\n        \"\"\" We use the following convention [q0, q1, q2, q3, t0, t1, t2] \"\"\"\n        assert (key >= 0 and key < 7)\n        if key < 4:\n            return self.so3[key]\n        else:\n            return self.t[key - 4]\n\n    @staticmethod\n    def calc_Dx_exp_x(x):\n        return sympy.Matrix(7, 6, lambda r, c:\n                            
sympy.diff(Se3.exp(x)[r], x[c]))\n\n    @staticmethod\n    def Dx_exp_x_at_0():\n        return sympy.Matrix([[0.0, 0.0, 0.0, 0.5, 0.0, 0.0],\n                             [0.0, 0.0, 0.0, 0.0, 0.5, 0.0],\n                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.5],\n                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n                             [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n                             [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n                             [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])\n\n    def calc_Dx_this_mul_exp_x_at_0(self, x):\n        return sympy.Matrix(7, 6, lambda r, c:\n                            sympy.diff((self * Se3.exp(x))[r], x[c])). \\\n            subs(x[0], 0).subs(x[1], 0).subs(x[2], 0).\\\n            subs(x[3], 0).subs(x[4], 0).limit(x[5], 0)\n\n    @staticmethod\n    def calc_Dx_exp_x_at_0(x):\n        return Se3.calc_Dx_exp_x(x).subs(x[0], 0).subs(x[1], 0).subs(x[2], 0).\\\n            subs(x[3], 0).subs(x[4], 0).limit(x[5], 0)\n\n    @staticmethod\n    def Dxi_x_matrix(x, i):\n        if i < 4:\n            return So3.Dxi_x_matrix(x, i).\\\n                row_join(sympy.Matrix.zeros(3, 1)).\\\n                col_join(sympy.Matrix.zeros(1, 4))\n        M = sympy.Matrix.zeros(4, 4)\n        M[i - 4, 3] = 1\n        return M\n\n    @staticmethod\n    def calc_Dxi_x_matrix(x, i):\n        return sympy.Matrix(4, 4, lambda r, c:\n                            sympy.diff(x.matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dxi_exp_x_matrix(x, i):\n        T = Se3.exp(x)\n        Dx_exp_x = Se3.calc_Dx_exp_x(x)\n        list = [Dx_exp_x[j, i] * Se3.Dxi_x_matrix(T, j) for j in range(0, 7)]\n        return functools.reduce((lambda a, b: a + b), list)\n\n    @staticmethod\n    def calc_Dxi_exp_x_matrix(x, i):\n        return sympy.Matrix(4, 4, lambda r, c:\n                            sympy.diff(Se3.exp(x).matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dxi_exp_x_matrix_at_0(i):\n        v = ZeroVector6()\n        v[i] 
= 1\n        return Se3.hat(v)\n\n    @staticmethod\n    def calc_Dxi_exp_x_matrix_at_0(x, i):\n        return sympy.Matrix(4, 4, lambda r, c:\n                            sympy.diff(Se3.exp(x).matrix()[r, c], x[i])\n                            ).subs(x[0], 0).subs(x[1], 0).subs(x[2], 0).\\\n            subs(x[3], 0).subs(x[4], 0).limit(x[5], 0)\n\n\nclass TestSe3(unittest.TestCase):\n    def setUp(self):\n        upsilon0, upsilon1, upsilon2, omega0, omega1, omega2 = sympy.symbols(\n            'upsilon[0], upsilon[1], upsilon[2], omega[0], omega[1], omega[2]',\n            real=True)\n        x, v0, v1, v2 = sympy.symbols('q.w() q.x() q.y() q.z()', real=True)\n        p0, p1, p2 = sympy.symbols('p0 p1 p2', real=True)\n        t0, t1, t2 = sympy.symbols('t[0] t[1] t[2]', real=True)\n        v = Vector3(v0, v1, v2)\n        self.upsilon_omega = Vector6(\n            upsilon0, upsilon1, upsilon2, omega0, omega1, omega2)\n        self.t = Vector3(t0, t1, t2)\n        self.a = Se3(So3(Quaternion(x, v)), self.t)\n        self.p = Vector3(p0, p1, p2)\n\n    def test_exp_log(self):\n        for v in [Vector6(0., 1, 0.5, 2., 1, 0.5),\n                  Vector6(0.1, 0.1, 0.1, 0., 1, 0.5),\n                  Vector6(0.01, 0.2, 0.03, 0.01, 0.2, 0.03)]:\n            w = Se3.exp(v).log()\n            for i in range(0, 3):\n                self.assertAlmostEqual(v[i], w[i])\n\n    def test_matrix(self):\n        T_foo_bar = Se3.exp(self.upsilon_omega)\n        Tmat_foo_bar = T_foo_bar.matrix()\n        point_bar = self.p\n        p1_foo = T_foo_bar * point_bar\n        p2_foo = proj(Tmat_foo_bar * unproj(point_bar))\n        self.assertEqual(sympy.simplify(p1_foo - p2_foo),\n                         ZeroVector3())\n\n    def test_derivatives(self):\n        self.assertEqual(sympy.simplify(\n            Se3.calc_Dx_exp_x_at_0(self.upsilon_omega) -\n            Se3.Dx_exp_x_at_0()),\n            sympy.Matrix.zeros(7, 6))\n\n        for i in range(0, 7):\n            
self.assertEqual(sympy.simplify(Se3.calc_Dxi_x_matrix(self.a, i) -\n                                            Se3.Dxi_x_matrix(self.a, i)),\n                             sympy.Matrix.zeros(4, 4))\n        for i in range(0, 6):\n            self.assertEqual(sympy.simplify(\n                Se3.Dxi_exp_x_matrix(self.upsilon_omega, i) -\n                Se3.calc_Dxi_exp_x_matrix(self.upsilon_omega, i)),\n                sympy.Matrix.zeros(4, 4))\n            self.assertEqual(sympy.simplify(\n                Se3.Dxi_exp_x_matrix_at_0(i) -\n                Se3.calc_Dxi_exp_x_matrix_at_0(self.upsilon_omega, i)),\n                sympy.Matrix.zeros(4, 4))\n\n    def test_codegen(self):\n        stream = cse_codegen(self.a.calc_Dx_exp_x(self.upsilon_omega))\n        filename = \"cpp_gencode/Se3_Dx_exp_x.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_this_mul_exp_x_at_0(\n            self.upsilon_omega))\n        filename = \"cpp_gencode/Se3_Dx_this_mul_exp_x_at_0.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_log_this())\n        filename = 
\"cpp_gencode/Se3_Dx_log_this.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/so2.py",
    "content": "import functools\nimport unittest\n\nimport sympy\n\nfrom sophus.complex import Complex\nfrom sophus.cse_codegen import cse_codegen\nfrom sophus.matrix import Vector2\nfrom sophus.matrix import ZeroVector2\n\n\nclass So2:\n    \"\"\" 2 dimensional group of orthogonal matrices with determinant 1 \"\"\"\n\n    def __init__(self, z):\n        \"\"\" internally represented by a unit complex number z \"\"\"\n        self.z = z\n\n    @staticmethod\n    def exp(theta):\n        \"\"\" exponential map \"\"\"\n        return So2(\n            Complex(\n                sympy.cos(theta),\n                sympy.sin(theta)))\n\n    def log(self):\n        \"\"\" logarithmic map\"\"\"\n        return sympy.atan2(self.z.imag, self.z.real)\n\n    def calc_Dx_log_this(self):\n        return sympy.diff(self.log(), self[0])\n\n    def calc_Dx_log_exp_x_times_this_at_0(self, x):\n        return sympy.diff((So2.exp(x)*self).log(), x).limit(x, 0)\n\n    def __repr__(self):\n        return \"So2:\" + repr(self.z)\n\n    @staticmethod\n    def hat(theta):\n        return sympy.Matrix([[0, -theta],\n                             [theta, 0]])\n\n    def matrix(self):\n        \"\"\" returns matrix representation \"\"\"\n        return sympy.Matrix([\n            [self.z.real, -self.z.imag],\n            [self.z.imag,  self.z.real]])\n\n    def __mul__(self, right):\n        \"\"\" left-multiplication\n            either rotation concatenation or point-transform \"\"\"\n        if isinstance(right, sympy.Matrix):\n            assert right.shape == (2, 1), right.shape\n            return self.matrix() * right\n        elif isinstance(right, So2):\n            return So2(self.z * right.z)\n        assert False, \"unsupported type: {0}\".format(type(right))\n\n    def __getitem__(self, key):\n        return self.z[key]\n\n    @staticmethod\n    def calc_Dx_exp_x(x):\n        return sympy.Matrix(2, 1, lambda r, c:\n                            sympy.diff(So2.exp(x)[r], x))\n\n    
@staticmethod\n    def Dx_exp_x_at_0():\n        return sympy.Matrix([0, 1])\n\n    @staticmethod\n    def calc_Dx_exp_x_at_0(x):\n        return So2.calc_Dx_exp_x(x).limit(x, 0)\n\n    def calc_Dx_this_mul_exp_x_at_0(self, x):\n        return sympy.Matrix(2, 1, lambda r, c:\n                            sympy.diff((self * So2.exp(x))[r], x))\\\n            .limit(x, 0)\n\n    @staticmethod\n    def Dxi_x_matrix(x, i):\n        if i == 0:\n            return sympy.Matrix([[1, 0],\n                                 [0, 1]])\n        if i == 1:\n            return sympy.Matrix([[0, -1],\n                                 [1, 0]])\n\n    @staticmethod\n    def calc_Dxi_x_matrix(x, i):\n        return sympy.Matrix(2, 2, lambda r, c:\n                            sympy.diff(x.matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dx_exp_x_matrix(x):\n        R = So2.exp(x)\n        Dx_exp_x = So2.calc_Dx_exp_x(x)\n        list = [Dx_exp_x[j] * So2.Dxi_x_matrix(R, j) for j in [0, 1]]\n        return functools.reduce((lambda a, b: a + b), list)\n\n    @staticmethod\n    def calc_Dx_exp_x_matrix(x):\n        return sympy.Matrix(2, 2, lambda r, c:\n                            sympy.diff(So2.exp(x).matrix()[r, c], x))\n\n    @staticmethod\n    def Dx_exp_x_matrix_at_0():\n        return So2.hat(1)\n\n    @staticmethod\n    def calc_Dx_exp_x_matrix_at_0(x):\n        return sympy.Matrix(2, 2, lambda r, c:\n                            sympy.diff(So2.exp(x).matrix()[r, c], x)\n                            ).limit(x, 0)\n\n\nclass TestSo2(unittest.TestCase):\n    def setUp(self):\n        self.theta = sympy.symbols(\n            'theta', real=True)\n        x, y = sympy.symbols('c[0] c[1]', real=True)\n        p0, p1 = sympy.symbols('p0 p1', real=True)\n        self.a = So2(Complex(x, y))\n        self.p = Vector2(p0, p1)\n\n    def test_exp_log(self):\n        for theta in [0.,  0.5, 0.1]:\n            w = So2.exp(theta).log()\n            self.assertAlmostEqual(theta, w)\n\n    def 
test_matrix(self):\n        R_foo_bar = So2.exp(self.theta)\n        Rmat_foo_bar = R_foo_bar.matrix()\n        point_bar = self.p\n        p1_foo = R_foo_bar * point_bar\n        p2_foo = Rmat_foo_bar * point_bar\n        self.assertEqual(sympy.simplify(p1_foo - p2_foo),\n                         ZeroVector2())\n\n    def test_derivatives(self):\n        self.assertEqual(sympy.simplify(So2.calc_Dx_exp_x_at_0(self.theta) -\n                                        So2.Dx_exp_x_at_0()),\n                         sympy.Matrix.zeros(2, 1))\n        for i in [0, 1]:\n            self.assertEqual(sympy.simplify(So2.calc_Dxi_x_matrix(self.a, i) -\n                                            So2.Dxi_x_matrix(self.a, i)),\n                             sympy.Matrix.zeros(2, 2))\n\n        self.assertEqual(sympy.simplify(\n            So2.Dx_exp_x_matrix(self.theta) -\n            So2.calc_Dx_exp_x_matrix(self.theta)),\n            sympy.Matrix.zeros(2, 2))\n        self.assertEqual(sympy.simplify(\n            So2.Dx_exp_x_matrix_at_0() -\n            So2.calc_Dx_exp_x_matrix_at_0(self.theta)),\n            sympy.Matrix.zeros(2, 2))\n\n    def test_codegen(self):\n        stream = cse_codegen(So2.calc_Dx_exp_x(self.theta))\n        filename = \"cpp_gencode/So2_Dx_exp_x.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(\n            self.a.calc_Dx_this_mul_exp_x_at_0(self.theta))\n        filename = \"cpp_gencode/So2_Dx_this_mul_exp_x_at_0.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = 
open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_log_this())\n        filename = \"cpp_gencode/So2_Dx_log_this.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_log_exp_x_times_this_at_0(\n            self.theta))\n        filename = \"cpp_gencode/So2_Dx_log_exp_x_times_this_at_0.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Thirdparty/Sophus/sympy/sophus/so3.py",
    "content": "import functools\nimport unittest\n\nimport sympy\n\nfrom sophus.cse_codegen import cse_codegen\nfrom sophus.matrix import Vector3\nfrom sophus.matrix import ZeroVector3\nfrom sophus.matrix import squared_norm\nfrom sophus.quaternion import Quaternion\n\n\nclass So3:\n    \"\"\" 3 dimensional group of orthogonal matrices with determinant 1 \"\"\"\n\n    def __init__(self, q):\n        \"\"\" internally represented by a unit quaternion q \"\"\"\n        self.q = q\n\n    @staticmethod\n    def exp(v):\n        \"\"\" exponential map \"\"\"\n        theta_sq = squared_norm(v)\n        theta = sympy.sqrt(theta_sq)\n        return So3(\n            Quaternion(\n                sympy.cos(0.5 * theta),\n                sympy.sin(0.5 * theta) / theta * v))\n\n    def log(self):\n        \"\"\" logarithmic map\"\"\"\n        n = sympy.sqrt(squared_norm(self.q.vec))\n        return 2 * sympy.atan(n / self.q.real) / n * self.q.vec\n\n    def calc_Dx_log_this(self):\n        return sympy.Matrix(3, 3, lambda r, c: sympy.diff(self.log()[r],\n                                                          self[c]))\n\n    def calc_Dx_log_exp_x_times_this_at_0(self, x):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff((So3.exp(x)*self).log()[r], x[c])).subs(\n                            x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n    def __repr__(self):\n        return \"So3:\" + repr(self.q)\n\n    def inverse(self):\n        return So3(self.q.conj())\n\n    @staticmethod\n    def hat(o):\n        return sympy.Matrix([[0, -o[2], o[1]],\n                             [o[2], 0, -o[0]],\n                             [-o[1], o[0], 0]])\n\n    \"\"\"vee-operator\n\n    It takes the 3x3-matrix representation ``Omega`` and maps it to the\n    corresponding vector representation of Lie algebra.\n\n    This is the inverse of the hat-operator, see above.\n\n    Precondition: ``Omega`` must have the following structure:\n\n                   |  
0 -c  b |\n                   |  c  0 -a |\n                   | -b  a  0 |\n    \"\"\"\n    @staticmethod\n    def vee(Omega):\n        v = Vector3(Omega.row(2).col(1),\n                    Omega.row(0).col(2),\n                    Omega.row(1).col(0))\n        return v\n\n    def matrix(self):\n        \"\"\" returns matrix representation \"\"\"\n        return sympy.Matrix([[\n            1 - 2 * self.q.vec[1]**2 - 2 * self.q.vec[2]**2,\n            2 * self.q.vec[0] * self.q.vec[1] -\n            2 * self.q.vec[2] * self.q[3],\n            2 * self.q.vec[0] * self.q.vec[2] +\n            2 * self.q.vec[1] * self.q[3]\n        ], [\n            2 * self.q.vec[0] * self.q.vec[1] +\n            2 * self.q.vec[2] * self.q[3],\n            1 - 2 * self.q.vec[0]**2 - 2 * self.q.vec[2]**2,\n            2 * self.q.vec[1] * self.q.vec[2] -\n            2 * self.q.vec[0] * self.q[3]\n        ], [\n            2 * self.q.vec[0] * self.q.vec[2] -\n            2 * self.q.vec[1] * self.q[3],\n            2 * self.q.vec[1] * self.q.vec[2] +\n            2 * self.q.vec[0] * self.q[3],\n            1 - 2 * self.q.vec[0]**2 - 2 * self.q.vec[1]**2\n        ]])\n\n    def __mul__(self, right):\n        \"\"\" left-multiplication\n            either rotation concatenation or point-transform \"\"\"\n        if isinstance(right, sympy.Matrix):\n            assert right.shape == (3, 1), right.shape\n            return (self.q * Quaternion(0, right) * self.q.conj()).vec\n        elif isinstance(right, So3):\n            return So3(self.q * right.q)\n        assert False, \"unsupported type: {0}\".format(type(right))\n\n    def __getitem__(self, key):\n        return self.q[key]\n\n    @staticmethod\n    def calc_Dx_exp_x(x):\n        return sympy.Matrix(4, 3, lambda r, c:\n                            sympy.diff(So3.exp(x)[r], x[c]))\n\n    @staticmethod\n    def Dx_exp_x_at_0():\n        return sympy.Matrix([[0.5, 0.0, 0.0],\n                             [0.0, 0.5, 0.0],\n              
               [0.0, 0.0, 0.5],\n                             [0.0, 0.0, 0.0]])\n\n    @staticmethod\n    def calc_Dx_exp_x_at_0(x):\n        return So3.calc_Dx_exp_x(x).subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n    def calc_Dx_this_mul_exp_x_at_0(self, x):\n        return sympy.Matrix(4, 3, lambda r, c:\n                            sympy.diff((self * So3.exp(x))[r], x[c]))\\\n            .subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n    def calc_Dx_exp_x_mul_this_at_0(self, x):\n        return sympy.Matrix(3, 4, lambda r, c:\n                            sympy.diff((self * So3.exp(x))[c], x[r, 0]))\\\n            .subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n    @staticmethod\n    def Dxi_x_matrix(x, i):\n        if i == 0:\n            return sympy.Matrix([[0, 2 * x[1], 2 * x[2]],\n                                 [2 * x[1], -4 * x[0], -2 * x[3]],\n                                 [2 * x[2], 2 * x[3], -4 * x[0]]])\n        if i == 1:\n            return sympy.Matrix([[-4 * x[1], 2 * x[0], 2 * x[3]],\n                                 [2 * x[0], 0, 2 * x[2]],\n                                 [-2 * x[3], 2 * x[2], -4 * x[1]]])\n        if i == 2:\n            return sympy.Matrix([[-4 * x[2], -2 * x[3], 2 * x[0]],\n                                 [2 * x[3], -4 * x[2], 2 * x[1]],\n                                 [2 * x[0], 2 * x[1], 0]])\n        if i == 3:\n            return sympy.Matrix([[0, -2 * x[2], 2 * x[1]],\n                                 [2 * x[2], 0, -2 * x[0]],\n                                 [-2 * x[1], 2 * x[0], 0]])\n\n    @staticmethod\n    def calc_Dxi_x_matrix(x, i):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff(x.matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dxi_exp_x_matrix(x, i):\n        R = So3.exp(x)\n        Dx_exp_x = So3.calc_Dx_exp_x(x)\n        list = [Dx_exp_x[j, i] * So3.Dxi_x_matrix(R, j) for j in [0, 1, 2, 3]]\n        return functools.reduce((lambda a, b: a + b), list)\n\n   
 @staticmethod\n    def calc_Dxi_exp_x_matrix(x, i):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff(So3.exp(x).matrix()[r, c], x[i]))\n\n    @staticmethod\n    def Dxi_exp_x_matrix_at_0(i):\n        v = ZeroVector3()\n        v[i] = 1\n        return So3.hat(v)\n\n    @staticmethod\n    def calc_Dxi_exp_x_matrix_at_0(x, i):\n        return sympy.Matrix(3, 3, lambda r, c:\n                            sympy.diff(So3.exp(x).matrix()[r, c], x[i])\n                            ).subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)\n\n\nclass TestSo3(unittest.TestCase):\n    def setUp(self):\n        omega0, omega1, omega2 = sympy.symbols(\n            'omega[0], omega[1], omega[2]', real=True)\n        x, v0, v1, v2 = sympy.symbols('q.w() q.x() q.y() q.z()', real=True)\n        p0, p1, p2 = sympy.symbols('p0 p1 p2', real=True)\n        v = Vector3(v0, v1, v2)\n        self.omega = Vector3(omega0, omega1, omega2)\n        self.a = So3(Quaternion(x, v))\n        self.p = Vector3(p0, p1, p2)\n\n    def test_exp_log(self):\n        for o in [Vector3(0., 1, 0.5),\n                  Vector3(0.1, 0.1, 0.1),\n                  Vector3(0.01, 0.2, 0.03)]:\n            w = So3.exp(o).log()\n            for i in range(0, 3):\n                self.assertAlmostEqual(o[i], w[i])\n\n    def test_matrix(self):\n        R_foo_bar = So3.exp(self.omega)\n        Rmat_foo_bar = R_foo_bar.matrix()\n        point_bar = self.p\n        p1_foo = R_foo_bar * point_bar\n        p2_foo = Rmat_foo_bar * point_bar\n        self.assertEqual(sympy.simplify(p1_foo - p2_foo),\n                         ZeroVector3())\n\n    def test_derivatives(self):\n        self.assertEqual(sympy.simplify(So3.calc_Dx_exp_x_at_0(self.omega) -\n                                        So3.Dx_exp_x_at_0()),\n                         sympy.Matrix.zeros(4, 3))\n\n        for i in [0, 1, 2, 3]:\n            self.assertEqual(sympy.simplify(So3.calc_Dxi_x_matrix(self.a, i) -\n                      
                      So3.Dxi_x_matrix(self.a, i)),\n                             sympy.Matrix.zeros(3, 3))\n        for i in [0, 1, 2]:\n            self.assertEqual(sympy.simplify(\n                So3.Dxi_exp_x_matrix(self.omega, i) -\n                So3.calc_Dxi_exp_x_matrix(self.omega, i)),\n                sympy.Matrix.zeros(3, 3))\n            self.assertEqual(sympy.simplify(\n                So3.Dxi_exp_x_matrix_at_0(i) -\n                So3.calc_Dxi_exp_x_matrix_at_0(self.omega, i)),\n                sympy.Matrix.zeros(3, 3))\n\n    def test_codegen(self):\n        stream = cse_codegen(So3.calc_Dx_exp_x(self.omega))\n        filename = \"cpp_gencode/So3_Dx_exp_x.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(\n            self.a.calc_Dx_this_mul_exp_x_at_0(self.omega))\n        filename = \"cpp_gencode/So3_Dx_this_mul_exp_x_at_0.cpp\"\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_log_this())\n        filename = \"cpp_gencode/So3_Dx_log_this.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for 
line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n        stream = cse_codegen(self.a.calc_Dx_log_exp_x_times_this_at_0(\n                               self.omega))\n        filename = \"cpp_gencode/So3_Dx_log_exp_x_times_this_at_0.cpp\"\n\n        # set to true to generate codegen files\n        if False:\n            file = open(filename, \"w\")\n            for line in stream:\n                file.write(line)\n            file.close()\n        else:\n            file = open(filename, \"r\")\n            file_lines = file.readlines()\n            for i, line in enumerate(stream):\n                self.assertEqual(line, file_lines[i])\n            file.close()\n        stream.close\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Thirdparty/Sophus/test/CMakeLists.txt",
    "content": "ADD_SUBDIRECTORY(core)\nADD_SUBDIRECTORY(ceres)\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/CMakeLists.txt",
    "content": "# Make sure Ceres knows where to find Eigen\nlist(APPEND SEARCH_HEADERS ${EIGEN3_INCLUDE_DIR})\n\n# git clone https://ceres-solver.googlesource.com/ceres-solver\nfind_package(Ceres 2.1.0 QUIET)\n\nfunction(add_test_ceres source postfix)\n  add_executable(${source}_${postfix} ${source}.cpp)\n  target_link_libraries(${source}_${postfix} sophus Ceres::ceres)\n  target_compile_definitions(${source}_${postfix} PRIVATE ${ARGN})\n  add_test(${source}_${postfix} ${source}_${postfix})\nendfunction()\n\nif(Ceres_FOUND)\n  message(STATUS \"CERES found\")\n\n  # Tests to run\n  set(TEST_SOURCES\n      test_ceres_so3\n      test_ceres_rxso3\n      test_ceres_se3\n      test_ceres_sim3\n      test_ceres_so2\n      test_ceres_rxso2\n      test_ceres_se2\n      test_ceres_sim2)\n\n  foreach(test_src ${TEST_SOURCES})\n    add_test_ceres(${test_src} \"local_parameterization\")\n  endforeach()\n  foreach(test_src ${TEST_SOURCES})\n    add_test_ceres(${test_src} \"manifold\")\n  endforeach()\nendif(Ceres_FOUND)\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_rxso2.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/rxso2.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::RxSO2d> {\n  static double Norm(const typename Sophus::RxSO2d::Tangent &t) {\n    return std::abs(t[0]);\n  }\n};\n\nint main(int, char **) {\n  using RxSO2d = Sophus::RxSO2d;\n  using Tangent = RxSO2d::Tangent;\n  using Point = RxSO2d::Point;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<RxSO2d> rxso2_vec;\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0.2, 1.)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0.2, 1.1)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0., 1.1)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0.00001, 0.)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0.00001, 0.00001)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(kPi, 0.9)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0.2, 0)) *\n                      RxSO2d::exp(Tangent(kPi, 0.0)) *\n                      RxSO2d::exp(Tangent(-0.2, 0)));\n  rxso2_vec.push_back(RxSO2d::exp(Tangent(0.3, 0)) *\n                      RxSO2d::exp(Tangent(kPi, 0.001)) *\n                      RxSO2d::exp(Tangent(-0.3, 0)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 2.73));\n  point_vec.push_back(Point(9.2, -7.3));\n  point_vec.push_back(Point(2.5, 0.1));\n  point_vec.push_back(Point(12.3, 1.9));\n  point_vec.push_back(Point(-3.21, 3.42));\n  point_vec.push_back(Point(-8.0, 6.1));\n  point_vec.push_back(Point(0.0, 2.5));\n  point_vec.push_back(Point(7.1, 7.8));\n  point_vec.push_back(Point(5.8, 9.2));\n\n  std::cerr << \"Test Ceres RxSO2\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::RxSO2>(rxso2_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_rxso3.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/rxso3.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::RxSO3d> {\n  static double Norm(const typename Sophus::RxSO3d::Tangent &t) {\n    return t.template head<3>().norm();\n  }\n};\n\nint main(int, char **) {\n  using RxSO3d = Sophus::RxSO3d;\n  using Point = RxSO3d::Point;\n  using Tangent = RxSO3d::Tangent;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<RxSO3d> rxso3_vec;\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0.2, 0.5, 0.0, 1.)));\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0.2, 0.5, -1.0, 1.1)));\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0., 0., 0., 1.1)));\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0., 0., 0.00001, 0.)));\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0., 0., 0.00001, 0.00001)));\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0., 0., 0.00001, 0)));\n\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(kPi, 0, 0, 0.9)));\n\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0.2, -0.5, 0, 0)) *\n                      RxSO3d::exp(Tangent(kPi, 0, 0, 0)) *\n                      RxSO3d::exp(Tangent(-0.2, -0.5, 0, 0)));\n\n  rxso3_vec.push_back(RxSO3d::exp(Tangent(0.3, 0.5, 0.1, 0)) *\n                      RxSO3d::exp(Tangent(kPi, 0, 0, 0)) *\n                      RxSO3d::exp(Tangent(-0.3, -0.5, -0.1, 0)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 2.73, -1.4));\n  point_vec.push_back(Point(9.2, -7.3, -4.4));\n  point_vec.push_back(Point(2.5, 0.1, 9.1));\n  point_vec.push_back(Point(12.3, 1.9, 3.8));\n  point_vec.push_back(Point(-3.21, 3.42, 2.3));\n  point_vec.push_back(Point(-8.0, 6.1, -1.1));\n  point_vec.push_back(Point(0.0, 2.5, 5.9));\n  point_vec.push_back(Point(7.1, 7.8, -14));\n  point_vec.push_back(Point(5.8, 9.2, 0.0));\n\n  std::cerr << \"Test Ceres RxSO3\" << std::endl;\n  
Sophus::LieGroupCeresTests<Sophus::RxSO3>(rxso3_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_se2.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/se2.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::SE2d> {\n  static double Norm(const typename Sophus::SE2d::Tangent &t) {\n    return std::abs(t[2]);\n  }\n};\n\nint main(int, char **) {\n  using SE2d = Sophus::SE2d;\n  using SO2d = Sophus::SO2d;\n  using Point = SE2d::Point;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<SE2d> se2_vec;\n  se2_vec.push_back(SE2d(SO2d(0.0), Point(0, 0)));\n  se2_vec.push_back(SE2d(SO2d(0.2), Point(10, 0)));\n  se2_vec.push_back(SE2d(SO2d(0.), Point(0, 100)));\n  se2_vec.push_back(SE2d(SO2d(-1.), Point(20, -1)));\n  se2_vec.push_back(SE2d(SO2d(0.00001), Point(-0.00000001, 0.0000000001)));\n  se2_vec.push_back(SE2d(SO2d(0.2), Point(0, 0)) *\n                    SE2d(SO2d(kPi), Point(0, 0)) *\n                    SE2d(SO2d(-0.2), Point(0, 0)));\n  se2_vec.push_back(SE2d(SO2d(0.3), Point(2, 0)) *\n                    SE2d(SO2d(kPi), Point(0, 0)) *\n                    SE2d(SO2d(-0.3), Point(0, 6)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 2.73));\n  point_vec.push_back(Point(9.2, -7.3));\n  point_vec.push_back(Point(2.5, 0.1));\n  point_vec.push_back(Point(12.3, 1.9));\n  point_vec.push_back(Point(-3.21, 3.42));\n  point_vec.push_back(Point(-8.0, 6.1));\n  point_vec.push_back(Point(0.0, 2.5));\n  point_vec.push_back(Point(7.1, 7.8));\n  point_vec.push_back(Point(5.8, 9.2));\n\n  std::cerr << \"Test Ceres SE2\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::SE2>(se2_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_se3.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/se3.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::SE3d> {\n  static double Norm(const typename Sophus::SE3d::Tangent &t) {\n    return t.template tail<3>().norm();\n  }\n};\n\nint main(int, char **) {\n  using SE3d = Sophus::SE3d;\n  using SO3d = Sophus::SO3d;\n  using Point = SE3d::Point;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<SE3d> se3_vec;\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0.2, 0.5, 0.0)), Point(0, 0, 0)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0.2, 0.5, -1.0)), Point(10, 0, 0)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0., 0., 0.)), Point(0, 100, 5)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0., 0., 0.00001)), Point(0, 0, 0)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0., 0., 0.00001)),\n                         Point(0, -0.00000001, 0.0000000001)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0., 0., 0.00001)), Point(0.01, 0, 0)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(kPi, 0, 0)), Point(4, -5, 0)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0.2, 0.5, 0.0)), Point(0, 0, 0)) *\n                    SE3d(SO3d::exp(Point(kPi, 0, 0)), Point(0, 0, 0)) *\n                    SE3d(SO3d::exp(Point(-0.2, -0.5, -0.0)), Point(0, 0, 0)));\n  se3_vec.push_back(SE3d(SO3d::exp(Point(0.3, 0.5, 0.1)), Point(2, 0, -7)) *\n                    SE3d(SO3d::exp(Point(kPi, 0, 0)), Point(0, 0, 0)) *\n                    SE3d(SO3d::exp(Point(-0.3, -0.5, -0.1)), Point(0, 6, 0)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 2.73, -1.4));\n  point_vec.push_back(Point(9.2, -7.3, -4.4));\n  point_vec.push_back(Point(2.5, 0.1, 9.1));\n  point_vec.push_back(Point(12.3, 1.9, 3.8));\n  point_vec.push_back(Point(-3.21, 3.42, 2.3));\n  point_vec.push_back(Point(-8.0, 6.1, -1.1));\n  point_vec.push_back(Point(0.0, 2.5, 5.9));\n  
point_vec.push_back(Point(7.1, 7.8, -14));\n  point_vec.push_back(Point(5.8, 9.2, 0.0));\n\n  std::cerr << \"Test Ceres SE3\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::SE3>(se3_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_sim2.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/sim2.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::Sim2d> {\n  static double Norm(const typename Sophus::Sim2d::Tangent &t) {\n    return std::abs(t[2]);\n  }\n};\n\nint main(int, char **) {\n  using Sim2d = Sophus::Sim2d;\n  using RxSO2d = Sophus::RxSO2d;\n  using Point = Sim2d::Point;\n  using Vector2d = Sophus::Vector2d;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<Sim2d> sim2_vec;\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0.2, 1.)), Point(0, 0)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0.2, 1.1)), Point(10, 0)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0., 0.)), Point(0, 10)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0.00001, 0.)), Point(0, 0)));\n\n  sim2_vec.push_back(\n      Sim2d(RxSO2d::exp(Vector2d(0.00001, 0.0000001)), Point(1, -1.00000001)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0., 0.)), Point(0.01, 0)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(kPi, 0.9)), Point(4, 0)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0.2, 0)), Point(0, 0)) *\n                     Sim2d(RxSO2d::exp(Vector2d(kPi, 0)), Point(0, 0)) *\n                     Sim2d(RxSO2d::exp(Vector2d(-0.2, 0)), Point(0, 0)));\n\n  sim2_vec.push_back(Sim2d(RxSO2d::exp(Vector2d(0.3, 0)), Point(2, -7)) *\n                     Sim2d(RxSO2d::exp(Vector2d(kPi, 0)), Point(0, 0)) *\n                     Sim2d(RxSO2d::exp(Vector2d(-0.3, 0)), Point(0, 6)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 2.73));\n  point_vec.push_back(Point(9.2, -7.3));\n  point_vec.push_back(Point(2.5, 0.1));\n  point_vec.push_back(Point(12.3, 1.9));\n  point_vec.push_back(Point(-3.21, 3.42));\n  point_vec.push_back(Point(-8.0, 6.1));\n  point_vec.push_back(Point(0.0, 2.5));\n  
point_vec.push_back(Point(7.1, 7.8));\n  point_vec.push_back(Point(5.8, 9.2));\n\n  std::cerr << \"Test Ceres Sim2\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::Sim2>(sim2_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_sim3.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/sim3.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::Sim3d> {\n  static double Norm(const typename Sophus::Sim3d::Tangent &t) {\n    return t.template segment<3>(3).norm();\n  }\n};\n\nint main(int, char **) {\n  using RxSO3d = Sophus::RxSO3d;\n  using Sim3d = Sophus::Sim3d;\n  using Point = Sim3d::Point;\n  using Vector4d = Eigen::Vector4d;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<Sim3d> sim3_vec;\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0.2, 0.5, 0.0, 1.)), Point(0, 0, 0)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0.2, 0.5, -1.0, 1.1)), Point(10, 0, 0)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0., 0., 0., 0.001)), Point(0, 10, 5)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0., 0., 0., 1.1)), Point(0, 10, 5)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0., 0., 0.00001, 0.)), Point(0, 0, 0)));\n  sim3_vec.push_back(Sim3d(RxSO3d::exp(Vector4d(0., 0., 0.00001, 0.0000001)),\n                           Point(1, -1.00000001, 2.0000000001)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0., 0., 0.00001, 0)), Point(0.01, 0, 0)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(kPi, 0, 0, 0.9)), Point(4, -5, 0)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0.2, 0.5, 0.0, 0)), Point(0, 0, 0)) *\n      Sim3d(RxSO3d::exp(Vector4d(kPi, 0, 0, 0)), Point(0, 0, 0)) *\n      Sim3d(RxSO3d::exp(Vector4d(-0.2, -0.5, -0.0, 0)), Point(0, 0, 0)));\n  sim3_vec.push_back(\n      Sim3d(RxSO3d::exp(Vector4d(0.3, 0.5, 0.1, 0)), Point(2, 0, -7)) *\n      Sim3d(RxSO3d::exp(Vector4d(kPi, 0, 0, 0)), Point(0, 0, 0)) *\n      Sim3d(RxSO3d::exp(Vector4d(-0.3, -0.5, -0.1, 0)), Point(0, 6, 0)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 
2.73, -1.4));\n  point_vec.push_back(Point(9.2, -7.3, -4.4));\n  point_vec.push_back(Point(2.5, 0.1, 9.1));\n  point_vec.push_back(Point(12.3, 1.9, 3.8));\n  point_vec.push_back(Point(-3.21, 3.42, 2.3));\n  point_vec.push_back(Point(-8.0, 6.1, -1.1));\n  point_vec.push_back(Point(0.0, 2.5, 5.9));\n  point_vec.push_back(Point(7.1, 7.8, -14));\n  point_vec.push_back(Point(5.8, 9.2, 0.0));\n\n  std::cerr << \"Test Ceres Sim3\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::Sim3>(sim3_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_so2.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/so2.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::SO2d> {\n  static double Norm(const typename Sophus::SO2d::Tangent &t) {\n    return std::abs(t);\n  }\n};\n\nint main(int, char **) {\n  using SO2d = Sophus::SO2d;\n  using Point = SO2d::Point;\n  double const kPi = Sophus::Constants<double>::pi();\n\n  StdVector<SO2d> so2_vec;\n  so2_vec.emplace_back(SO2d::exp(0.0));\n  so2_vec.emplace_back(SO2d::exp(0.2));\n  so2_vec.emplace_back(SO2d::exp(10.));\n  so2_vec.emplace_back(SO2d::exp(0.00001));\n  so2_vec.emplace_back(SO2d::exp(kPi));\n  so2_vec.emplace_back(SO2d::exp(0.2) * SO2d::exp(kPi) * SO2d::exp(-0.2));\n  so2_vec.emplace_back(SO2d::exp(-0.3) * SO2d::exp(kPi) * SO2d::exp(0.3));\n\n  StdVector<Point> point_vec;\n  point_vec.emplace_back(Point(1.012, 2.73));\n  point_vec.emplace_back(Point(9.2, -7.3));\n  point_vec.emplace_back(Point(2.5, 0.1));\n  point_vec.emplace_back(Point(12.3, 1.9));\n  point_vec.emplace_back(Point(-3.21, 3.42));\n  point_vec.emplace_back(Point(-8.0, 6.1));\n  point_vec.emplace_back(Point(0.0, 2.5));\n  point_vec.emplace_back(Point(7.1, 7.8));\n  point_vec.emplace_back(Point(5.8, 9.2));\n\n  std::cerr << \"Test Ceres SO2\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::SO2>(so2_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/test_ceres_so3.cpp",
    "content": "#include <ceres/ceres.h>\n#include <iostream>\n#include <sophus/se3.hpp>\n\n#include \"tests.hpp\"\n\ntemplate <typename T>\nusing StdVector = std::vector<T, Eigen::aligned_allocator<T>>;\n\ntemplate <>\nstruct RotationalPart<Sophus::SO3d> {\n  static double Norm(const typename Sophus::SO3d::Tangent &t) {\n    return t.norm();\n  }\n};\n\nint main(int, char **) {\n  using SO3Type = Sophus::SO3<double>;\n  using Point = SO3Type::Point;\n  using Tangent = SO3Type::Tangent;\n  double const kPi = Sophus::Constants<double>::pi();\n  double const epsilon = Sophus::Constants<double>::epsilon();\n\n  SO3Type C_0 = SO3Type::exp(Tangent(0.1, 0.05, -0.7));\n\n  Tangent axis_0(0.18005924, -0.54563405, 0.81845107);\n\n  StdVector<SO3Type> so3_vec;\n  // Generic tests\n  so3_vec.push_back(SO3Type::exp(Point(0.2, 0.5, 0.0)));\n  so3_vec.push_back(SO3Type::exp(Point(0.2, 0.5, -1.0)));\n  so3_vec.push_back(SO3Type::exp(Point(0., 0., 0.)));\n  so3_vec.push_back(SO3Type::exp(Point(0., 0., 0.00001)));\n  so3_vec.push_back(SO3Type::exp(Point(kPi, 0, 0)));\n  so3_vec.push_back(SO3Type::exp(Point(0.2, 0.5, 0.0)) *\n                    SO3Type::exp(Point(kPi, 0, 0)) *\n                    SO3Type::exp(Point(-0.2, -0.5, -0.0)));\n  so3_vec.push_back(SO3Type::exp(Point(0.3, 0.5, 0.1)) *\n                    SO3Type::exp(Point(kPi, 0, 0)) *\n                    SO3Type::exp(Point(-0.3, -0.5, -0.1)));\n\n  // Checks if ceres optimization will proceed correctly given problematic\n  // or close-to-singular initial conditions, i.e. approx. 
180-deg rotation,\n  // which trips a flaw in old implementation of SO3::log() where the\n  // tangent vector's magnitude is set to a constant close to \\pi whenever\n  // the input rotation's rotation angle is within some tolerance of \\pi,\n  // giving zero gradients wrt scalar part of quaternion.\n  so3_vec.push_back(C_0);\n  // Generic rotation angle < pi\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * 1.0));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * -1.0));\n  // Generic rotation angle > pi\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * 4.0));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * -4.0));\n  // Singular rotation angle = pi\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * kPi));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * -kPi));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * kPi * (1.0 + epsilon)));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * kPi * (1.0 - epsilon)));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * -kPi * (1.0 + epsilon)));\n  so3_vec.push_back(C_0 * SO3Type::exp(axis_0 * -kPi * (1.0 - epsilon)));\n\n  StdVector<Point> point_vec;\n  point_vec.push_back(Point(1.012, 2.73, -1.4));\n  point_vec.push_back(Point(9.2, -7.3, -4.4));\n  point_vec.push_back(Point(2.5, 0.1, 9.1));\n  point_vec.push_back(Point(12.3, 1.9, 3.8));\n  point_vec.push_back(Point(-3.21, 3.42, 2.3));\n  point_vec.push_back(Point(-8.0, 6.1, -1.1));\n  point_vec.push_back(Point(0.0, 2.5, 5.9));\n  point_vec.push_back(Point(7.1, 7.8, -14));\n  point_vec.push_back(Point(5.8, 9.2, 0.0));\n\n  std::cerr << \"Test Ceres SO3\" << std::endl;\n  Sophus::LieGroupCeresTests<Sophus::SO3>(so3_vec, point_vec).testAll();\n  return 0;\n}\n"
  },
  {
    "path": "Thirdparty/Sophus/test/ceres/tests.hpp",
    "content": "#pragma once\n\n#include <ceres/ceres.h>\n\n#include <sophus/ceres_manifold.hpp>\n#include <sophus/test_macros.hpp>\n\ntemplate <typename LieGroup>\nstruct RotationalPart;\n\nnamespace Sophus {\n\ntemplate <int N>\ndouble dot(const Vector<double, N>& v1, const Vector<double, N>& v2) {\n  return v1.dot(v2);\n}\n\ndouble dot(const double& a, const double& b) { return a * b; }\n\ntemplate <int N>\ndouble squaredNorm(const Vector<double, N>& vec) {\n  return vec.squaredNorm();\n}\n\ndouble squaredNorm(const double& scalar) { return scalar * scalar; }\n\ntemplate <typename T>\nT Zero() {\n  return T::Zero();\n}\n\ntemplate <>\ndouble Zero<double>() {\n  return 0.;\n}\n\ntemplate <typename T>\ntypename T::Scalar* data(T& t) {\n  return t.data();\n}\n\ndouble* data(double& d) { return &d; }\n\ntemplate <typename T>\nconst typename T::Scalar* data(const T& t) {\n  return t.data();\n}\n\nconst double* data(const double& d) { return &d; }\n\ntemplate <typename T>\nstruct Random {\n  template <typename R>\n  T static sample(R& rng) {\n    std::normal_distribution<double> rnorm;\n    static_assert(T::RowsAtCompileTime >= 0,\n                  \"Matrix should have known size at compile-time\");\n    static_assert(T::ColsAtCompileTime >= 0,\n                  \"Matrix should have known size at compile-time\");\n    T res;\n    for (Eigen::Index i = 0; i < res.size(); ++i) res.data()[i] = rnorm(rng);\n    return res;\n  }\n};\n\ntemplate <>\nstruct Random<double> {\n  using T = double;\n\n  template <typename R>\n  T static sample(R& rng) {\n    std::normal_distribution<double> rnorm;\n    return rnorm(rng);\n  }\n};\n\ntemplate <template <typename, int = 0> class LieGroup_>\nstruct LieGroupCeresTests {\n  template <typename T>\n  using LieGroup = LieGroup_<T>;\n  using LieGroupd = LieGroup<double>;\n  using Pointd = typename LieGroupd::Point;\n  using Tangentd = typename LieGroupd::Tangent;\n  template <typename T>\n  using StdVector = std::vector<T, 
Eigen::aligned_allocator<T>>;\n  static int constexpr N = LieGroupd::N;\n  static int constexpr num_parameters = LieGroupd::num_parameters;\n  static int constexpr DoF = LieGroupd::DoF;\n\n  struct TestLieGroupCostFunctor {\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n    TestLieGroupCostFunctor(const LieGroupd& T_aw) : T_aw(T_aw) {}\n\n    template <class T>\n    bool operator()(T const* const sT_wa, T* sResiduals) const {\n      Eigen::Map<LieGroup<T> const> const T_wa(sT_wa);\n      // Mapper class is only used to facilitate difference between\n      // SO2 (which uses Scalar as tangent vector type) and other groups\n      // (which use Vector<...> as tangent vector type).\n      //\n      // Feel free to use direct dereferencing or Eigen::Map depending\n      // on your use-case for concrete application\n      //\n      // We only use Mapper class in order to make tests universally\n      // compatible with LieGroup::Tangent being Scalar or Vector\n      using Mapper = Mapper<typename LieGroup<T>::Tangent>;\n      typename Mapper::Map residuals = Mapper::map(sResiduals);\n\n      // We are able to mix Sophus types with doubles and Jet types without\n      // needing to cast to T.\n      residuals = (T_aw * T_wa).log();\n      // Reverse order of multiplication. 
This forces the compiler to verify\n      // that (Jet, double) and (double, Jet) LieGroup multiplication work\n      // correctly.\n      residuals = (T_wa * T_aw).log();\n      // Finally, ensure that Jet-to-Jet multiplication works.\n      residuals = (T_wa * T_aw.template cast<T>()).log();\n      return true;\n    }\n\n    LieGroupd T_aw;\n  };\n  struct TestPointCostFunctor {\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n    TestPointCostFunctor(const LieGroupd& T_aw, const Pointd& point_a)\n        : T_aw(T_aw), point_a(point_a) {}\n\n    template <class T>\n    bool operator()(T const* const sT_wa, T const* const spoint_b,\n                    T* sResiduals) const {\n      using LieGroupT = LieGroup<T>;\n      using PointT = typename LieGroupT::Point;\n      Eigen::Map<LieGroupT const> const T_wa(sT_wa);\n      Eigen::Map<PointT const> point_b(spoint_b);\n      Eigen::Map<PointT> residuals(sResiduals);\n\n      // Multiply LieGroupd by Jet Vector3.\n      PointT point_b_prime = T_aw * point_b;\n      // Ensure Jet LieGroup multiplication with Jet Vector3.\n      point_b_prime = T_aw.template cast<T>() * point_b;\n\n      // Multiply Jet LieGroup with Vector3d.\n      PointT point_a_prime = T_wa * point_a;\n      // Ensure Jet LieGroup multiplication with Jet Vector3.\n      point_a_prime = T_wa * point_a.template cast<T>();\n\n      residuals = point_b_prime - point_a_prime;\n      return true;\n    }\n\n    LieGroupd T_aw;\n    Pointd point_a;\n  };\n\n  struct TestGraphFunctor {\n    template <typename T>\n    bool operator()(const T* a, const T* b, T* residuals) const {\n      using LieGroupT = LieGroup<T>;\n      Eigen::Map<const LieGroupT> A(a);\n      Eigen::Map<const LieGroupT> B(b);\n      using Mapper = Mapper<typename LieGroupT::Tangent>;\n      typename Mapper::Map diff_log = Mapper::map(residuals);\n\n      // Jet LieGroup multiplication with LieGroupd\n      diff_log = (diff * (B.inverse() * A)).log();\n      return true;\n    }\n\n    
TestGraphFunctor(const LieGroupd& diff) : diff(diff) {}\n    const LieGroupd diff;\n  };\n\n  bool testAll() {\n    bool passed = true;\n    for (size_t i = 0; i < group_vec.size(); ++i) {\n      for (size_t j = 0; j < group_vec.size(); ++j) {\n        if (i == j) continue;\n        for (size_t k = 0; k < point_vec.size(); ++k) {\n          for (size_t l = 0; l < point_vec.size(); ++l) {\n            if (k == l) continue;\n            std::cerr << \"Simple test #\" << i << \", \" << j << \", \" << k << \", \"\n                      << l;\n            passed &=\n                test(group_vec[i], group_vec[j], point_vec[k], point_vec[l]);\n            processTestResult(passed);\n          }\n        }\n      }\n    }\n    for (size_t i = 0; i < group_vec.size(); ++i) {\n      for (size_t j = 0; j < group_vec.size(); ++j) {\n        passed &= testManifold(group_vec[i], group_vec[j]);\n        processTestResult(passed);\n      }\n    }\n    int Ns[] = {20, 40, 80, 160};\n    for (auto N : Ns) {\n      std::cerr << \"Averaging test: N = \" << N;\n      passed &= testAveraging(N, .5, .1);\n      processTestResult(passed);\n    }\n    return passed;\n  }\n\n  bool testAveraging(const size_t num_vertices, const double sigma_init,\n                     const double sigma_observation) {\n    if (!num_vertices) return true;\n    const double sigma_init_elementwise = sigma_init / std::sqrt(DoF);\n    const double sigma_observation_elementwise =\n        sigma_observation / std::sqrt(DoF);\n    // Running Lie group averaging on a K_n graph with a random initialization\n    // noise and random noise in observations\n    ceres::Problem problem;\n\n    // \"Random\" initialization in order to keep tests repeatable\n    std::mt19937 rng(2021);\n    StdVector<LieGroupd> V(num_vertices), V_estimate;\n    V_estimate.reserve(num_vertices);\n    double initial_error = 0.;\n    auto parametrization = new Sophus::Manifold<LieGroup_>;\n\n    // All vertices are initialized with an i.i.d 
noise with normal\n    // distribution; Scaling is adjusted in order to maintain the same\n    // expectation of squared norm for all groups\n    for (size_t i = 0; i < num_vertices; ++i) {\n      auto& v = V[i];\n      v = LieGroupd::sampleUniform(rng);\n      const Tangentd delta_log =\n          Random<Tangentd>::sample(rng) * sigma_init_elementwise;\n      const LieGroupd delta = LieGroupd::exp(delta_log);\n      V_estimate.emplace_back(v * delta);\n      initial_error += squaredNorm(delta_log);\n      problem.AddParameterBlock(V_estimate.back().data(),\n                                LieGroupd::num_parameters, parametrization);\n    }\n\n    // For simplicity of graph generation, we use a complete (undirected) graph.\n    // Each edge (observation) has i.i.d noise with multivariate normal\n    // distribution; Scaling is adjusted in order to maintain the same\n    // expectation of squared norm for all groups\n    for (size_t i = 0; i < num_vertices; ++i)\n      for (size_t j = i + 1; j < num_vertices; ++j) {\n        LieGroupd diff = V[i].inverse() * V[j];\n        const auto delta_log =\n            Random<typename LieGroupd::Tangent>::sample(rng) *\n            sigma_observation_elementwise;\n        const auto delta = LieGroupd::exp(delta_log);\n        ceres::CostFunction* cost =\n            new ceres::AutoDiffCostFunction<TestGraphFunctor, LieGroupd::DoF,\n                                            LieGroupd::num_parameters,\n                                            LieGroupd::num_parameters>(\n                new TestGraphFunctor(diff * delta));\n        // For real-world problems you should consider using robust\n        // loss-function\n        problem.AddResidualBlock(cost, nullptr, V_estimate[i].data(),\n                                 V_estimate[j].data());\n      }\n\n    ceres::Solver::Options options;\n    options.gradient_tolerance = 1e-2 * Sophus::Constants<double>::epsilon();\n    options.function_tolerance = 1e-2 * 
Sophus::Constants<double>::epsilon();\n    options.parameter_tolerance = 1e-2 * Sophus::Constants<double>::epsilon();\n    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;\n\n    ceres::Solver::Summary summary;\n    Solve(options, &problem, &summary);\n\n    // Computing final error in the estimates\n    double final_error = 0.;\n    for (size_t i = 0; i < num_vertices; ++i) {\n      final_error += squaredNorm((V[i].inverse() * V_estimate[i]).log());\n    }\n\n    // Expecting reasonable decrease of both estimates' errors and residuals\n    return summary.final_cost < .25 * summary.initial_cost &&\n           final_error < .25 * initial_error;\n  }\n\n  bool test(LieGroupd const& T_w_targ, LieGroupd const& T_w_init,\n            Pointd const& point_a_init, Pointd const& point_b) {\n    static constexpr int kNumPointParameters = Pointd::RowsAtCompileTime;\n\n    // Optimization parameters.\n    LieGroupd T_wr = T_w_init;\n    Pointd point_a = point_a_init;\n\n    // Build the problem.\n    ceres::Problem problem;\n\n    // Specify local update rule for our parameter\n\n    auto parameterization = new Sophus::Manifold<LieGroup_>;\n    problem.AddParameterBlock(T_wr.data(), num_parameters, parameterization);\n\n    // Create and add cost functions. 
Derivatives will be evaluated via\n    // automatic differentiation\n    ceres::CostFunction* cost_function1 =\n        new ceres::AutoDiffCostFunction<TestLieGroupCostFunctor, LieGroupd::DoF,\n                                        LieGroupd::num_parameters>(\n            new TestLieGroupCostFunctor(T_w_targ.inverse()));\n    problem.AddResidualBlock(cost_function1, nullptr, T_wr.data());\n    ceres::CostFunction* cost_function2 =\n        new ceres::AutoDiffCostFunction<TestPointCostFunctor,\n                                        kNumPointParameters, num_parameters,\n                                        kNumPointParameters>(\n            new TestPointCostFunctor(T_w_targ.inverse(), point_b));\n    problem.AddResidualBlock(cost_function2, nullptr, T_wr.data(),\n                             point_a.data());\n\n    // Set solver options (precision / method)\n    ceres::Solver::Options options;\n    options.gradient_tolerance = 0.01 * Sophus::Constants<double>::epsilon();\n    options.function_tolerance = 0.01 * Sophus::Constants<double>::epsilon();\n    options.linear_solver_type = ceres::DENSE_QR;\n    options.max_num_iterations = 100;\n\n    // Solve\n    ceres::Solver::Summary summary;\n    Solve(options, &problem, &summary);\n\n    // Difference between target and parameter\n    double const mse = squaredNorm((T_w_targ.inverse() * T_wr).log());\n    bool const passed = mse < 10. 
* Sophus::Constants<double>::epsilon();\n    return passed;\n  }\n\n  bool testManifold(const LieGroupd& x, const LieGroupd& y) {\n    // ceres/manifold_test_utils.h is google-test based; here we check all the\n    // same invariants\n    const Tangentd delta = (x.inverse() * y).log();\n    const Tangentd zero = Zero<Tangentd>();\n    Sophus::Manifold<LieGroup_> manifold;\n\n    LieGroupd test_group;\n\n    bool passed = true;\n    auto coeffs =\n        Eigen::Map<const Eigen::Matrix<double, num_parameters, 1>>(x.data());\n    auto coeffs_y =\n        Eigen::Map<const Eigen::Matrix<double, num_parameters, 1>>(y.data());\n    std::cerr << \"XPlusZeroIsXAt \" << coeffs.transpose() << std::endl;\n    passed &= xPlusZeroIsXAt(x);\n    std::cerr << \"XMinusXIsZeroAt \" << coeffs.transpose() << std::endl;\n    passed &= xMinusXIsZeroAt(x);\n    std::cerr << \"MinusPlusIsIdentityAt \" << coeffs.transpose() << std::endl;\n    passed &= minusPlusIsIdentityAt(x, delta);\n    std::cerr << \"MinusPlusIsIdentityAt \" << coeffs.transpose() << std::endl;\n    passed &= minusPlusIsIdentityAt(x, zero);\n    std::cerr << \"PlusMinusIsIdentityAt \" << coeffs.transpose() << std::endl;\n    passed &= plusMinusIsIdentityAt(x, x);\n    std::cerr << \"PlusMinusIsIdentityAt \" << coeffs.transpose() << \" \"\n              << coeffs_y.transpose() << std::endl;\n    passed &= plusMinusIsIdentityAt(x, y);\n    std::cerr << \"MinusPlusJacobianIsIdentityAt \" << coeffs.transpose()\n              << std::endl;\n    passed &= minusPlusJacobianIsIdentityAt(x);\n    return passed;\n  }\n\n  bool xPlusZeroIsXAt(const LieGroupd& x) {\n    const Tangentd zero = Zero<Tangentd>();\n    Sophus::Manifold<LieGroup_> manifold;\n    LieGroupd test_group;\n\n    bool passed = true;\n\n    passed &= manifold.Plus(x.data(), data(zero), test_group.data());\n    processTestResult(passed);\n    const double error = squaredNorm((x.inverse() * test_group).log());\n    passed &= error < 
Sophus::Constants<double>::epsilon();\n    processTestResult(passed);\n    return passed;\n  }\n\n  bool xMinusXIsZeroAt(const LieGroupd& x) {\n    Sophus::Manifold<LieGroup_> manifold;\n    LieGroupd test_group;\n    Tangentd test_tangent;\n\n    bool passed = true;\n\n    passed &= manifold.Minus(x.data(), x.data(), data(test_tangent));\n    processTestResult(passed);\n    const double error = squaredNorm(test_tangent);\n    passed &= error < Sophus::Constants<double>::epsilon();\n    processTestResult(passed);\n    return passed;\n  }\n\n  bool minusPlusIsIdentityAt(const LieGroupd& x, const Tangentd& delta) {\n    if (RotationalPart<LieGroupd>::Norm(delta) >\n        Sophus::Constants<double>::pi() *\n            (1. - Sophus::Constants<double>::epsilon()))\n      return true;\n    Sophus::Manifold<LieGroup_> manifold;\n    LieGroupd test_group;\n    Tangentd test_tangent;\n\n    bool passed = true;\n\n    passed &= manifold.Plus(x.data(), data(delta), test_group.data());\n    processTestResult(passed);\n\n    passed &= manifold.Minus(test_group.data(), x.data(), data(test_tangent));\n    processTestResult(passed);\n\n    const Tangentd diff = test_tangent - delta;\n    const double error = squaredNorm(diff);\n    passed &= error < Sophus::Constants<double>::epsilon();\n    processTestResult(passed);\n    return passed;\n  }\n\n  bool plusMinusIsIdentityAt(const LieGroupd& x, const LieGroupd& y) {\n    Sophus::Manifold<LieGroup_> manifold;\n    LieGroupd test_group;\n    Tangentd test_tangent;\n\n    bool passed = true;\n\n    passed &= manifold.Minus(y.data(), x.data(), data(test_tangent));\n    processTestResult(passed);\n\n    passed &= manifold.Plus(x.data(), data(test_tangent), test_group.data());\n    processTestResult(passed);\n\n    const double error = squaredNorm((y.inverse() * test_group).log());\n    passed &= error < Sophus::Constants<double>::epsilon();\n    processTestResult(passed);\n    return passed;\n  }\n\n  bool 
minusPlusJacobianIsIdentityAt(const LieGroupd& x) {\n    Sophus::Manifold<LieGroup_> manifold;\n    LieGroupd test_group;\n\n    bool passed = true;\n\n    Eigen::Matrix<double, num_parameters, DoF,\n                  DoF == 1 ? Eigen::ColMajor : Eigen::RowMajor>\n        Jplus;\n    Eigen::Matrix<double, DoF, num_parameters, Eigen::RowMajor> Jminus;\n\n    passed &= manifold.PlusJacobian(x.data(), Jplus.data());\n    processTestResult(passed);\n\n    passed &= manifold.MinusJacobian(x.data(), Jminus.data());\n    processTestResult(passed);\n\n    const Eigen::Matrix<double, DoF, DoF> diff =\n        Jminus * Jplus - Eigen::Matrix<double, DoF, DoF>::Identity();\n\n    std::cerr << diff << std::endl;\n    const double error = diff.squaredNorm();\n    passed &= error < Sophus::Constants<double>::epsilon();\n    processTestResult(passed);\n    return passed;\n  }\n\n  LieGroupCeresTests(const StdVector<LieGroupd>& group_vec,\n                     const StdVector<Pointd>& point_vec)\n      : group_vec(group_vec), point_vec(point_vec) {}\n\n  StdVector<LieGroupd> group_vec;\n  StdVector<Pointd> point_vec;\n};\n\n}  // namespace Sophus\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/CMakeLists.txt",
    "content": "#Tests to run\nSET( TEST_SOURCES test_common\n                  test_cartesian2\n                  test_cartesian3\n                  test_so2\n                  test_se2\n                  test_rxso2\n                  test_sim2\n                  test_so3\n                  test_se3\n                  test_rxso3\n                  test_sim3\n                  test_velocities\n                  test_geometry)\nfind_package( Ceres 2.1.0 QUIET )\n\nFOREACH(test_src ${TEST_SOURCES})\n  ADD_EXECUTABLE( ${test_src} ${test_src}.cpp tests.hpp ../../sophus/test_macros.hpp)\n  TARGET_LINK_LIBRARIES( ${test_src} sophus)\n  if( Ceres_FOUND )\n      TARGET_LINK_LIBRARIES( ${test_src} Ceres::ceres )\n      ADD_DEFINITIONS(-DSOPHUS_CERES)\n  endif(Ceres_FOUND)\n  ADD_TEST( ${test_src} ${test_src} )\nENDFOREACH(test_src)\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_cartesian2.cpp",
    "content": "#include <iostream>\n\n#include <sophus/cartesian.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::Cartesian<double, 2>>;\ntemplate class Map<Sophus::Cartesian<double, 2> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class Cartesian<double, 2, Eigen::AutoAlign>;\ntemplate class Cartesian<float, 2, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class Cartesian<ceres::Jet<double, 2>, 2>;\n#endif\n\ntemplate <class Scalar_>\nclass Tests {\n public:\n  using Scalar = Scalar_;\n  using Point = typename Cartesian<Scalar, 2>::Point;\n  using Tangent = typename Cartesian<Scalar, 2>::Tangent;\n\n  Tests() {\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0.5);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1);\n    tangent_vec_.push_back(tmp);\n\n    for (const auto& t : tangent_vec_) {\n      cartesian2_vec_.push_back(Cartesian<Scalar, 2>(t));\n    }\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3)));\n    point_vec_.push_back(Point(Scalar(-5), Scalar(-6)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<Cartesian<Scalar, 2>> tests(cartesian2_vec_, tangent_vec_,\n                                              point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  std::vector<Cartesian<Scalar, 2>,\n              Eigen::aligned_allocator<Cartesian<Scalar, 
2>>>\n      cartesian2_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_cartesian() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test Cartesian2\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 2> tests: \" << endl;\n  Tests<ceres::Jet<double, 2>>().runAll();\n#endif\n\n  return 0;\n}\n\n}  // namespace Sophus\n\nint main() { return Sophus::test_cartesian(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_cartesian3.cpp",
    "content": "#include <iostream>\n\n#include <sophus/cartesian.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::Cartesian<double, 3>>;\ntemplate class Map<Sophus::Cartesian<double, 3> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class Cartesian<double, 3, Eigen::AutoAlign>;\ntemplate class Cartesian<float, 3, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class Cartesian<ceres::Jet<double, 3>, 3>;\n#endif\n\ntemplate <class Scalar_>\nclass Tests {\n public:\n  using Scalar = Scalar_;\n  using Point = typename Cartesian<Scalar, 3>::Point;\n  using Tangent = typename Cartesian<Scalar, 3>::Tangent;\n\n  Tests() {\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(0), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1), Scalar(2);\n    tangent_vec_.push_back(tmp);\n\n    for (const auto& t : tangent_vec_) {\n      cartesian3_vec_.push_back(Cartesian<Scalar, 3>(t));\n    }\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3), Scalar(0.5)));\n    point_vec_.push_back(Point(Scalar(-5), Scalar(-6), Scalar(7)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<Cartesian<Scalar, 3>> tests(cartesian3_vec_, tangent_vec_,\n                                              point_vec_);\n    return 
tests.doAllTestsPass();\n  }\n\n  std::vector<Cartesian<Scalar, 3>,\n              Eigen::aligned_allocator<Cartesian<Scalar, 3>>>\n      cartesian3_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_cartesian() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test Cartesian3\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n\n}  // namespace Sophus\n\nint main() { return Sophus::test_cartesian(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_common.cpp",
    "content": "#include <iostream>\n\n#include <sophus/interpolate.hpp>\n#include <sophus/num_diff.hpp>\n#include <sophus/se3.hpp>\n#include <sophus/spline.hpp>\n#include <sophus/test_macros.hpp>\n\nnamespace Sophus {\n\nnamespace {\n\nbool testSmokeDetails() {\n  bool passed = true;\n  std::cout << details::pretty(4.2) << std::endl;\n  std::cout << details::pretty(Vector2f(1, 2)) << std::endl;\n  bool dummy = true;\n  details::testFailed(dummy, \"dummyFunc\", \"dummyFile\", 99,\n                      \"This is just a practice alarm!\");\n  SOPHUS_TEST_EQUAL(passed, dummy, false, \"\");\n\n  double val = transpose(42.0);\n  SOPHUS_TEST_EQUAL(passed, val, 42.0, \"\");\n  Matrix<float, 1, 2> row = transpose(Vector2f(1, 7));\n  Matrix<float, 1, 2> expected_row(1, 7);\n  SOPHUS_TEST_EQUAL(passed, row, expected_row, \"\");\n\n  optional<int> opt(nullopt);\n  SOPHUS_TEST(passed, !opt, \"\");\n\n  return passed;\n}\n\nbool testSpline() {\n  double const kSmallEpsSqrt = Constants<double>::epsilonSqrt();\n\n  bool passed = true;\n  for (double delta_t : {0.1, 0.5, 1.0}) {\n    for (double u : {0.0000, 0.1, 0.5, 0.51, 0.9, 0.999}) {\n      std::cout << details::pretty(SplineBasisFunction<double>::B(u))\n                << std::endl;\n\n      Eigen::Vector3d Dt_B = SplineBasisFunction<double>::Dt_B(u, delta_t);\n      std::cout << details::pretty(Dt_B) << std::endl;\n\n      Eigen::Vector3d Dt_B2 = curveNumDiff(\n          [delta_t](double u_bar) -> Eigen::Vector3d {\n            return SplineBasisFunction<double>::B(u_bar) / delta_t;\n          },\n          u);\n      SOPHUS_TEST_APPROX(passed, Dt_B, Dt_B2, kSmallEpsSqrt,\n                         \"Dt_, u={}, delta_t={}\", u, delta_t);\n\n      Eigen::Vector3d Dt2_B = SplineBasisFunction<double>::Dt2_B(u, delta_t);\n\n      Eigen::Vector3d Dt2_B2 = curveNumDiff(\n          [delta_t](double u_bar) -> Eigen::Vector3d {\n            return SplineBasisFunction<double>::Dt_B(u_bar, delta_t) / delta_t;\n          },\n          
u);\n      SOPHUS_TEST_APPROX(passed, Dt2_B, Dt2_B2, kSmallEpsSqrt,\n                         \"Dt2_B, u={}, delta_t={}\", u, delta_t);\n    }\n  }\n\n  SE3d T_world_foo = SE3d::rotX(0.4) * SE3d::rotY(0.2);\n  SE3d T_world_bar =\n      SE3d::rotZ(-0.4) * SE3d::rotY(0.4) * SE3d::trans(0.0, 1.0, -2.0);\n\n  std::vector<SE3d> control_poses;\n  control_poses.push_back(interpolate(T_world_foo, T_world_bar, 0.0));\n\n  for (double p = 0.2; p < 1.0; p += 0.2) {\n    SE3d T_world_inter = interpolate(T_world_foo, T_world_bar, p);\n    control_poses.push_back(T_world_inter);\n  }\n\n  BasisSplineImpl<SE3d> spline(control_poses, 1.0);\n\n  SE3d T = spline.parent_T_spline(0.0, 1.0);\n  SE3d T2 = spline.parent_T_spline(1.0, 0.0);\n\n  Eigen::Matrix3d R = T.so3().matrix();\n  Eigen::Matrix3d R2 = T2.so3().matrix();\n  Eigen::Vector3d t = T.translation();\n  Eigen::Vector3d t2 = T2.translation();\n\n  SOPHUS_TEST_APPROX(passed, R, R2, kSmallEpsSqrt, \"lambda\");\n\n  SOPHUS_TEST_APPROX(passed, t, t2, kSmallEpsSqrt, \"lambda\");\n\n  Eigen::Matrix4d Dt_parent_T_spline = spline.Dt_parent_T_spline(0.0, 0.5);\n  Eigen::Matrix4d Dt_parent_T_spline2 = curveNumDiff(\n      [&](double u_bar) -> Eigen::Matrix4d {\n        return spline.parent_T_spline(0.0, u_bar).matrix();\n      },\n      0.5);\n  SOPHUS_TEST_APPROX(passed, Dt_parent_T_spline, Dt_parent_T_spline2,\n                     kSmallEpsSqrt, \"Dt_parent_T_spline\");\n\n  Eigen::Matrix4d Dt2_parent_T_spline = spline.Dt2_parent_T_spline(0.0, 0.5);\n  Eigen::Matrix4d Dt2_parent_T_spline2 = curveNumDiff(\n      [&](double u_bar) -> Eigen::Matrix4d {\n        return spline.Dt_parent_T_spline(0.0, u_bar).matrix();\n      },\n      0.5);\n  SOPHUS_TEST_APPROX(passed, Dt2_parent_T_spline, Dt2_parent_T_spline2,\n                     kSmallEpsSqrt, \"Dt2_parent_T_spline\");\n\n  return passed;\n}\n\nvoid runAll() {\n  std::cerr << \"Common tests:\" << std::endl;\n  bool passed = testSmokeDetails();\n  passed &= testSpline();\n  
processTestResult(passed);\n}\n\n}  // namespace\n}  // namespace Sophus\n\nint main() { Sophus::runAll(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_geometry.cpp",
    "content": "#include <iostream>\n\n#include <sophus/geometry.hpp>\n#include <sophus/test_macros.hpp>\n#include \"tests.hpp\"\n\nnamespace Sophus {\n\nnamespace {\n\ntemplate <class T>\nbool test2dGeometry() {\n  bool passed = true;\n  T const eps = Constants<T>::epsilon();\n\n  for (int i = 0; i < 20; ++i) {\n    // Roundtrip test:\n    Vector2<T> normal_foo = Vector2<T>::Random().normalized();\n    Sophus::SO2<T> R_foo_plane = SO2FromNormal(normal_foo);\n    Vector2<T> resultNormal_foo = normalFromSO2(R_foo_plane);\n    SOPHUS_TEST_APPROX(passed, normal_foo, resultNormal_foo, eps, \"\");\n  }\n\n  for (int i = 0; i < 20; ++i) {\n    // Roundtrip test:\n    Line2<T> line_foo = makeHyperplaneUnique(\n        Line2<T>(Vector2<T>::Random().normalized(), Vector2<T>::Random()));\n    Sophus::SE2<T> T_foo_plane = SE2FromLine(line_foo);\n    Line2<T> resultPlane_foo = lineFromSE2(T_foo_plane);\n    SOPHUS_TEST_APPROX(passed, line_foo.normal().eval(),\n                       resultPlane_foo.normal().eval(), eps, \"\");\n    SOPHUS_TEST_APPROX(passed, line_foo.offset(), resultPlane_foo.offset(), eps,\n                       \"\");\n  }\n\n  std::vector<SE2<T>, Eigen::aligned_allocator<SE2<T>>> Ts_foo_line =\n      getTestSE2s<T>();\n\n  for (SE2<T> const& T_foo_line : Ts_foo_line) {\n    Line2<T> line_foo = lineFromSE2(T_foo_line);\n    SE2<T> T2_foo_line = SE2FromLine(line_foo);\n    Line2<T> line2_foo = lineFromSE2(T2_foo_line);\n    SOPHUS_TEST_APPROX(passed, line_foo.normal().eval(),\n                       line2_foo.normal().eval(), eps, \"\");\n    SOPHUS_TEST_APPROX(passed, line_foo.offset(), line2_foo.offset(), eps, \"\");\n  }\n\n  return passed;\n}\n\ntemplate <class T>\nbool test3dGeometry() {\n  bool passed = true;\n  T const eps = Constants<T>::epsilon();\n\n  Vector3<T> normal_foo = Vector3<T>(1, 2, 0).normalized();\n  Matrix3<T> R_foo_plane = rotationFromNormal(normal_foo);\n  SOPHUS_TEST_APPROX(passed, normal_foo, R_foo_plane.col(2).eval(), eps, \"\");\n 
 // Just testing that the function normalizes the input normal and hint\n  // direction correctly:\n  Matrix3<T> R2_foo_plane = rotationFromNormal((T(0.9) * normal_foo).eval());\n  SOPHUS_TEST_APPROX(passed, normal_foo, R2_foo_plane.col(2).eval(), eps, \"\");\n  Matrix3<T> R3_foo_plane =\n      rotationFromNormal(normal_foo, Vector3<T>(T(0.9), T(0), T(0)),\n                         Vector3<T>(T(0), T(1.1), T(0)));\n  SOPHUS_TEST_APPROX(passed, normal_foo, R3_foo_plane.col(2).eval(), eps, \"\");\n\n  normal_foo = Vector3<T>(1, 0, 0);\n  R_foo_plane = rotationFromNormal(normal_foo);\n  SOPHUS_TEST_APPROX(passed, normal_foo, R_foo_plane.col(2).eval(), eps, \"\");\n  SOPHUS_TEST_APPROX(passed, Vector3<T>(0, 1, 0), R_foo_plane.col(1).eval(),\n                     eps, \"\");\n\n  normal_foo = Vector3<T>(0, 1, 0);\n  R_foo_plane = rotationFromNormal(normal_foo);\n  SOPHUS_TEST_APPROX(passed, normal_foo, R_foo_plane.col(2).eval(), eps, \"\");\n  SOPHUS_TEST_APPROX(passed, Vector3<T>(1, 0, 0), R_foo_plane.col(0).eval(),\n                     eps, \"\");\n\n  for (int i = 0; i < 20; ++i) {\n    // Roundtrip test:\n    Vector3<T> normal_foo = Vector3<T>::Random().normalized();\n    Sophus::SO3<T> R_foo_plane = SO3FromNormal(normal_foo);\n    Vector3<T> resultNormal_foo = normalFromSO3(R_foo_plane);\n    SOPHUS_TEST_APPROX(passed, normal_foo, resultNormal_foo, eps, \"\");\n  }\n\n  for (int i = 0; i < 20; ++i) {\n    // Roundtrip test:\n    Plane3<T> plane_foo = makeHyperplaneUnique(\n        Plane3<T>(Vector3<T>::Random().normalized(), Vector3<T>::Random()));\n    Sophus::SE3<T> T_foo_plane = SE3FromPlane(plane_foo);\n    Plane3<T> resultPlane_foo = planeFromSE3(T_foo_plane);\n    SOPHUS_TEST_APPROX(passed, plane_foo.normal().eval(),\n                       resultPlane_foo.normal().eval(), eps, \"\");\n    SOPHUS_TEST_APPROX(passed, plane_foo.offset(), resultPlane_foo.offset(),\n                       eps, \"\");\n  }\n\n  std::vector<SE3<T>, 
Eigen::aligned_allocator<SE3<T>>> Ts_foo_plane =\n      getTestSE3s<T>();\n\n  for (SE3<T> const& T_foo_plane : Ts_foo_plane) {\n    Plane3<T> plane_foo = planeFromSE3(T_foo_plane);\n    SE3<T> T2_foo_plane = SE3FromPlane(plane_foo);\n    Plane3<T> plane2_foo = planeFromSE3(T2_foo_plane);\n    SOPHUS_TEST_APPROX(passed, plane_foo.normal().eval(),\n                       plane2_foo.normal().eval(), eps, \"\");\n    SOPHUS_TEST_APPROX(passed, plane_foo.offset(), plane2_foo.offset(), eps,\n                       \"\");\n  }\n\n  return passed;\n}\n\nvoid runAll() {\n  std::cerr << \"Geometry (Lines/Planes) tests:\" << std::endl;\n  std::cerr << \"Double tests: \" << std::endl;\n  bool passed = test2dGeometry<double>();\n  passed = test3dGeometry<double>();\n  processTestResult(passed);\n  std::cerr << \"Float tests: \" << std::endl;\n  passed = test2dGeometry<float>();\n  passed = test3dGeometry<float>();\n  processTestResult(passed);\n}\n\n}  // namespace\n}  // namespace Sophus\n\nint main() { Sophus::runAll(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_rxso2.cpp",
    "content": "#include <iostream>\n\n#include <sophus/rxso2.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::RxSO2<double>>;\ntemplate class Map<Sophus::RxSO2<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class RxSO2<double, Eigen::AutoAlign>;\ntemplate class RxSO2<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class RxSO2<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using SO2Type = SO2<Scalar>;\n  using RxSO2Type = RxSO2<Scalar>;\n  using RotationMatrixType = typename SO2<Scalar>::Transformation;\n  using Point = typename RxSO2<Scalar>::Point;\n  using Tangent = typename RxSO2<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0.2, 1.)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0.2, 1.1)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0., 1.1)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0.00001, 0.)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0.00001, 0.00001)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(kPi, 0.9)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0.2, 0)) *\n                         RxSO2Type::exp(Tangent(kPi, 0.0)) *\n                         RxSO2Type::exp(Tangent(-0.2, 0)));\n    rxso2_vec_.push_back(RxSO2Type::exp(Tangent(0.3, 0)) *\n                         RxSO2Type::exp(Tangent(kPi, 0.001)) *\n                         RxSO2Type::exp(Tangent(-0.3, 0)));\n\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(-0.1);\n    
tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(2);\n    tangent_vec_.push_back(tmp);\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3)));\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> testFit() {\n    bool passed = true;\n    for (int i = 0; i < 10; ++i) {\n      Matrix2<Scalar> M = Matrix2<Scalar>::Random();\n      for (Scalar scale : {Scalar(0.01), Scalar(0.99), Scalar(1), Scalar(10)}) {\n        Matrix2<Scalar> R = makeRotationMatrix(M);\n        Matrix2<Scalar> sR = scale * R;\n        SOPHUS_TEST(passed, isScaledOrthogonalAndPositive(sR),\n                    \"isScaledOrthogonalAndPositive(sR): {} *\\n{}\",\n                    SOPHUS_FMT_ARG(scale), SOPHUS_FMT_ARG(R));\n        Matrix2<Scalar> sR_cols_swapped;\n        sR_cols_swapped << sR.col(1), sR.col(0);\n        SOPHUS_TEST(passed, !isScaledOrthogonalAndPositive(sR_cols_swapped),\n                    \"isScaledOrthogonalAndPositive(-sR): {} *\\n{}\",\n                    SOPHUS_FMT_ARG(scale), SOPHUS_FMT_ARG(R));\n      }\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> testFit() {\n    return true;\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testSaturation();\n    passed &= testRawDataAcces();\n    passed &= testConstructors();\n    passed &= testFit();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<RxSO2Type> tests(rxso2_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testSaturation() {\n    using std::cos;\n    using std::log;\n    using std::sin;\n\n    bool passed = true;\n    // Test if product of two small group elements has correct scale\n    RxSO2Type small1(Scalar(1.1) * Constants<Scalar>::epsilon(), 
SO2Type());\n    RxSO2Type small2(Scalar(1.1) * Constants<Scalar>::epsilon(),\n                     SO2Type::exp(Constants<Scalar>::pi()));\n    RxSO2Type saturated_product = small1 * small2;\n    SOPHUS_TEST_APPROX(passed, saturated_product.scale(),\n                       Constants<Scalar>::epsilon(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, saturated_product.so2().matrix(),\n                       (small1.so2() * small2.so2()).matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    /*\n     * Test if group exponential produces group elements\n     * that can be multiplied safely even for large scale factors\n     */\n    const Tangent large_log(Scalar(1.), std::numeric_limits<Scalar>::max());\n    const Tangent regular_log(Scalar(2.), Scalar(0.));\n    const RxSO2Type large = RxSO2Type::exp(large_log);\n    const RxSO2Type regular = RxSO2Type::exp(regular_log);\n    const RxSO2Type product = regular * large;\n    SOPHUS_TEST(passed, isfinite(large.scale()), \"\");\n    SOPHUS_TEST(passed, isfinite(product.scale()), \"\");\n\n    // Test if saturation is handled correctly with imprecision of IEEE754-2008\n    std::mt19937 rng;\n    std::uniform_real_distribution<double> uniform(0., Constants<double>::pi());\n    Tangent small_log;\n    while (true) {\n      // Note: sample double and convert to Scalar for compatibility with\n      // ceres::Jet\n      const Scalar phi = Scalar(uniform(rng));\n      const Scalar c = cos(phi);\n      const Scalar s = sin(phi);\n      if (c * c + s * s < Scalar(1.)) {\n        small_log[0] = phi;\n        break;\n      }\n    }\n    small_log[1] = log(Constants<Scalar>::epsilon() / Scalar(2.));\n\n    const RxSO2Type small_exp = RxSO2Type::exp(small_log);\n    SOPHUS_TEST_APPROX(passed, small_exp.scale(), Constants<Scalar>::epsilon(),\n                       Constants<Scalar>::epsilon(), \"\");\n    return passed;\n  }\n\n  bool testRawDataAcces() {\n    bool 
passed = true;\n    Eigen::Matrix<Scalar, 2, 1> raw = {0, 1};\n    Eigen::Map<RxSO2Type const> map_of_const_rxso2(raw.data());\n    SOPHUS_TEST_APPROX(passed, map_of_const_rxso2.complex().eval(), raw,\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_rxso2.complex().data(), raw.data(),\n                      \"\");\n    Eigen::Map<RxSO2Type const> const_shallow_copy = map_of_const_rxso2;\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.complex().eval(),\n                      map_of_const_rxso2.complex().eval(), \"\");\n\n    Eigen::Matrix<Scalar, 2, 1> raw2 = {1, 0};\n    Eigen::Map<RxSO2Type> map_of_rxso2(raw2.data());\n    SOPHUS_TEST_APPROX(passed, map_of_rxso2.complex().eval(), raw2,\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_rxso2.complex().data(), raw2.data(), \"\");\n    Eigen::Map<RxSO2Type> shallow_copy = map_of_rxso2;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.complex().eval(),\n                      map_of_rxso2.complex().eval(), \"\");\n\n    RxSO2Type const const_so2(raw2);\n    for (int i = 0; i < 2; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_so2.data()[i], raw2.data()[i], \"\");\n    }\n\n    RxSO2Type so2(raw2);\n    for (int i = 0; i < 2; ++i) {\n      so2.data()[i] = raw[i];\n    }\n\n    for (int i = 0; i < 2; ++i) {\n      SOPHUS_TEST_EQUAL(passed, so2.data()[i], raw.data()[i], \"\");\n    }\n\n    // regression: test that rotationMatrix API doesn't change underlying value\n    // for non-const-map and compiles at all for const-map\n    Eigen::Matrix<Scalar, 2, 1> raw3 = {Scalar(2), Scalar(0)};\n    Eigen::Map<RxSO2Type> map_of_rxso2_3(raw3.data());\n    Eigen::Map<const RxSO2Type> const_map_of_rxso2_3(raw3.data());\n    RxSO2Type rxso2_copy3 = map_of_rxso2_3;\n    const RotationMatrixType r_ref = map_of_rxso2_3.so2().matrix();\n\n    const RotationMatrixType r = map_of_rxso2_3.rotationMatrix();\n    SOPHUS_TEST_APPROX(passed, 
r_ref, r, Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_rxso2_3.complex().eval(),\n                       rxso2_copy3.complex().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    const RotationMatrixType r_const = const_map_of_rxso2_3.rotationMatrix();\n    SOPHUS_TEST_APPROX(passed, r_ref, r_const, Constants<Scalar>::epsilon(),\n                       \"\");\n    SOPHUS_TEST_APPROX(passed, const_map_of_rxso2_3.complex().eval(),\n                       rxso2_copy3.complex().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    Eigen::Matrix<Scalar, 2, 1> data1, data2;\n    data1 << Scalar(.1), Scalar(.2);\n    data2 << Scalar(.5), Scalar(.4);\n\n    Eigen::Map<RxSO2Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    RxSO2Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = RxSO2Type::exp(Tangent(Scalar(0.2), Scalar(0.5)));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    RxSO2Type rxso2;\n    Scalar scale(1.2);\n    rxso2.setScale(scale);\n    SOPHUS_TEST_APPROX(passed, scale, rxso2.scale(),\n                       Constants<Scalar>::epsilon(), \"setScale\");\n    Scalar angle(0.2);\n    rxso2.setAngle(angle);\n    SOPHUS_TEST_APPROX(passed, angle, rxso2.angle(),\n                       Constants<Scalar>::epsilon(), \"setAngle\");\n    SOPHUS_TEST_APPROX(passed, scale, rxso2.scale(),\n                       Constants<Scalar>::epsilon(),\n                       \"setAngle leaves scale as is\");\n\n    auto so2 = rxso2_vec_[0].so2();\n    rxso2.setSO2(so2);\n    SOPHUS_TEST_APPROX(passed, scale, rxso2.scale(),\n                       
Constants<Scalar>::epsilon(), \"setSO2\");\n    SOPHUS_TEST_APPROX(passed, RxSO2Type(scale, so2).matrix(), rxso2.matrix(),\n                       Constants<Scalar>::epsilon(), \"RxSO2(scale, SO2)\");\n    SOPHUS_TEST_APPROX(passed, RxSO2Type(scale, so2.matrix()).matrix(),\n                       rxso2.matrix(), Constants<Scalar>::epsilon(),\n                       \"RxSO2(scale, SO2)\");\n    Matrix2<Scalar> R = SO2<Scalar>::exp(Scalar(0.2)).matrix();\n    Matrix2<Scalar> sR = R * Scalar(1.3);\n    SOPHUS_TEST_APPROX(passed, RxSO2Type(sR).matrix(), sR,\n                       Constants<Scalar>::epsilon(), \"RxSO2(sR)\");\n    rxso2.setScaledRotationMatrix(sR);\n    SOPHUS_TEST_APPROX(passed, sR, rxso2.matrix(), Constants<Scalar>::epsilon(),\n                       \"setScaleRotationMatrix\");\n    rxso2.setScale(scale);\n    rxso2.setRotationMatrix(R);\n    SOPHUS_TEST_APPROX(passed, R, rxso2.rotationMatrix(),\n                       Constants<Scalar>::epsilon(), \"setRotationMatrix\");\n    SOPHUS_TEST_APPROX(passed, scale, rxso2.scale(),\n                       Constants<Scalar>::epsilon(), \"setScale\");\n\n    return passed;\n  }\n\n  std::vector<RxSO2Type, Eigen::aligned_allocator<RxSO2Type>> rxso2_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_rxso2() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test RxSO2\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n  return 0;\n}\n\n}  // namespace Sophus\n\nint main() { return Sophus::test_rxso2(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_rxso3.cpp",
    "content": "#include <iostream>\n\n#include <sophus/rxso3.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::RxSO3<double>>;\ntemplate class Map<Sophus::RxSO3<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class RxSO3<double, Eigen::AutoAlign>;\ntemplate class RxSO3<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class RxSO3<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar_>\nclass Tests {\n public:\n  using Scalar = Scalar_;\n  using SO3Type = SO3<Scalar>;\n  using RxSO3Type = RxSO3<Scalar>;\n  using RotationMatrixType = typename SO3<Scalar>::Transformation;\n  using Point = typename RxSO3<Scalar>::Point;\n  using Tangent = typename RxSO3<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    rxso3_vec_.push_back(RxSO3Type::exp(\n        Tangent(Scalar(0.2), Scalar(0.5), Scalar(0.0), Scalar(1.))));\n    rxso3_vec_.push_back(RxSO3Type::exp(\n        Tangent(Scalar(0.2), Scalar(0.5), Scalar(-1.0), Scalar(1.1))));\n    rxso3_vec_.push_back(RxSO3Type::exp(\n        Tangent(Scalar(0.), Scalar(0.), Scalar(0.), Scalar(1.1))));\n    rxso3_vec_.push_back(RxSO3Type::exp(\n        Tangent(Scalar(0.), Scalar(0.), Scalar(0.00001), Scalar(0.))));\n    rxso3_vec_.push_back(RxSO3Type::exp(\n        Tangent(Scalar(0.), Scalar(0.), Scalar(0.00001), Scalar(0.00001))));\n    rxso3_vec_.push_back(RxSO3Type::exp(\n        Tangent(Scalar(0.), Scalar(0.), Scalar(0.00001), Scalar(0))));\n    rxso3_vec_.push_back(\n        RxSO3Type::exp(Tangent(kPi, Scalar(0), Scalar(0), Scalar(0.9))));\n    rxso3_vec_.push_back(\n        RxSO3Type::exp(\n            Tangent(Scalar(0.2), Scalar(-0.5), Scalar(0), Scalar(0))) *\n        RxSO3Type::exp(Tangent(kPi, Scalar(0), Scalar(0), Scalar(0))) *\n        RxSO3Type::exp(\n            Tangent(-Scalar(0.2), Scalar(-0.5), Scalar(0), 
Scalar(0))));\n    rxso3_vec_.push_back(\n        RxSO3Type::exp(\n            Tangent(Scalar(0.3), Scalar(0.5), Scalar(0.1), Scalar(0))) *\n        RxSO3Type::exp(Tangent(kPi, Scalar(0), Scalar(0), Scalar(0))) *\n        RxSO3Type::exp(\n            Tangent(Scalar(-0.3), Scalar(-0.5), Scalar(-0.1), Scalar(0))));\n\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1), Scalar(0), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(0), Scalar(1), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1), Scalar(0), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1), Scalar(0), Scalar(2);\n    tangent_vec_.push_back(tmp);\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3), Scalar(0.5)));\n    point_vec_.push_back(Point(Scalar(-5), Scalar(-6), Scalar(7)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testSaturation();\n    passed &= testRawDataAcces();\n    passed &= testConstructors();\n    passed &= testFit();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<RxSO3Type> tests(rxso3_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testSaturation() {\n    using std::log;\n\n    bool passed = true;\n    // Test if product of two small group elements has correct scale\n    RxSO3Type small1(Constants<Scalar>::epsilon(), SO3Type());\n    RxSO3Type small2(Constants<Scalar>::epsilon(),\n                     SO3Type::exp(Vector3<Scalar>(Constants<Scalar>::pi(),\n                                                  Scalar(0), Scalar(0))));\n    
RxSO3Type saturated_product = small1 * small2;\n    SOPHUS_TEST_APPROX(passed, saturated_product.scale(),\n                       Constants<Scalar>::epsilon(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, saturated_product.so3().matrix(),\n                       (small1.so3() * small2.so3()).matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n    /*\n     * Test if group exponential produces group elements\n     * that can be multiplied safely even for large scale factors\n     */\n    const Tangent large_log(Scalar(1.), Scalar(2.), Scalar(3.),\n                            std::numeric_limits<Scalar>::max());\n    const Tangent regular_log(Scalar(4.), Scalar(5.), Scalar(6.), Scalar(0.));\n    const RxSO3Type large = RxSO3Type::exp(large_log);\n    const RxSO3Type regular = RxSO3Type::exp(regular_log);\n    const RxSO3Type product = regular * large;\n    SOPHUS_TEST(passed, isfinite(large.scale()), \"\");\n    SOPHUS_TEST(passed, isfinite(product.scale()), \"\");\n\n    // Test if saturation is handled correctly with imprecision of IEEE754-2008\n    Tangent small_log;\n    while (true) {\n      // Note: use cast() since Random() doesn't work with ceres::Jet Scalar\n      // types\n      const typename SO3Type::Tangent so3_tangent =\n          Eigen::Vector3d::Random().cast<Scalar>();\n      const SO3Type so3_exp = SO3Type::exp(so3_tangent);\n      if (so3_exp.unit_quaternion().squaredNorm() >= Scalar(1.)) continue;\n      small_log << so3_tangent, log(Constants<Scalar>::epsilon() / Scalar(2.));\n      break;\n    }\n\n    const RxSO3Type small_exp = RxSO3Type::exp(small_log);\n    SOPHUS_TEST_APPROX(passed, small_exp.quaternion().squaredNorm(),\n                       Constants<Scalar>::epsilon(),\n                       Constants<Scalar>::epsilon(), \"\");\n    return passed;\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 4, 1> raw = {Scalar(0), Scalar(1), 
Scalar(0),\n                                       Scalar(0)};\n    Eigen::Map<RxSO3Type const> map_of_const_rxso3(raw.data());\n    SOPHUS_TEST_APPROX(passed, map_of_const_rxso3.quaternion().coeffs().eval(),\n                       raw, Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_rxso3.quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    Eigen::Map<RxSO3Type const> const_shallow_copy = map_of_const_rxso3;\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.quaternion().coeffs().eval(),\n                      map_of_const_rxso3.quaternion().coeffs().eval(), \"\");\n\n    Eigen::Matrix<Scalar, 4, 1> raw2 = {Scalar(1), Scalar(0), Scalar(0),\n                                        Scalar(0)};\n    Eigen::Map<RxSO3Type> map_of_rxso3(raw.data());\n    Eigen::Quaternion<Scalar> quat;\n    quat.coeffs() = raw2;\n    map_of_rxso3.setQuaternion(quat);\n    SOPHUS_TEST_APPROX(passed, map_of_rxso3.quaternion().coeffs().eval(), raw2,\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_rxso3.quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_rxso3.quaternion().coeffs().data(),\n                    quat.coeffs().data(), \"\");\n    Eigen::Map<RxSO3Type> shallow_copy = map_of_rxso3;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.quaternion().coeffs().eval(),\n                      map_of_rxso3.quaternion().coeffs().eval(), \"\");\n\n    RxSO3Type const const_so3(quat);\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_so3.data()[i], raw2.data()[i], \"\");\n    }\n\n    RxSO3Type so3(quat);\n    for (int i = 0; i < 4; ++i) {\n      so3.data()[i] = raw[i];\n    }\n\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, so3.data()[i], raw.data()[i], \"\");\n    }\n\n    // regression: test that rotationMatrix API doesn't change underlying value\n    // for non-const-map and compiles at 
all for const-map\n    Eigen::Matrix<Scalar, 4, 1> raw3 = {Scalar(2), Scalar(0), Scalar(0),\n                                        Scalar(0)};\n    Eigen::Map<RxSO3Type> map_of_rxso3_3(raw3.data());\n    Eigen::Map<const RxSO3Type> const_map_of_rxso3_3(raw3.data());\n    RxSO3Type rxso3_copy3 = map_of_rxso3_3;\n    const RotationMatrixType r_ref = map_of_rxso3_3.so3().matrix();\n\n    const RotationMatrixType r = map_of_rxso3_3.rotationMatrix();\n    SOPHUS_TEST_APPROX(passed, r_ref, r, Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_rxso3_3.quaternion().coeffs().eval(),\n                       rxso3_copy3.quaternion().coeffs().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    const RotationMatrixType r_const = const_map_of_rxso3_3.rotationMatrix();\n    SOPHUS_TEST_APPROX(passed, r_ref, r_const, Constants<Scalar>::epsilon(),\n                       \"\");\n    SOPHUS_TEST_APPROX(passed,\n                       const_map_of_rxso3_3.quaternion().coeffs().eval(),\n                       rxso3_copy3.quaternion().coeffs().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    Eigen::Matrix<Scalar, 4, 1> data1, data2;\n    data1 << Scalar(.1), Scalar(.2), Scalar(.3), Scalar(.4);\n    data2 << Scalar(.5), Scalar(.4), Scalar(.3), Scalar(.2);\n\n    Eigen::Map<RxSO3Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    RxSO3Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = RxSO3Type::exp(\n        Tangent(Scalar(0.2), Scalar(0.5), Scalar(-1.0), Scalar(1.1)));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    RxSO3Type rxso3;\n    Scalar 
scale(1.2);\n    rxso3.setScale(scale);\n    SOPHUS_TEST_APPROX(passed, scale, rxso3.scale(),\n                       Constants<Scalar>::epsilon(), \"setScale\");\n    auto so3 = rxso3_vec_[0].so3();\n    rxso3.setSO3(so3);\n    SOPHUS_TEST_APPROX(passed, scale, rxso3.scale(),\n                       Constants<Scalar>::epsilon(), \"setScale\");\n    SOPHUS_TEST_APPROX(passed, RxSO3Type(scale, so3).matrix(), rxso3.matrix(),\n                       Constants<Scalar>::epsilon(), \"RxSO3(scale, SO3)\");\n    SOPHUS_TEST_APPROX(passed, RxSO3Type(scale, so3.matrix()).matrix(),\n                       rxso3.matrix(), Constants<Scalar>::epsilon(),\n                       \"RxSO3(scale, matrix3x3)\");\n    const Eigen::Quaternion<Scalar> q = rxso3.quaternion();\n    SOPHUS_TEST_APPROX(passed, RxSO3Type(scale, q).matrix(),\n                       RxSO3Type(scale, SO3<Scalar>(q)).matrix(),\n                       Constants<Scalar>::epsilon(),\n                       \"RxSO3(scale, unit_quaternion)\");\n    Matrix3<Scalar> R =\n        SO3<Scalar>::exp(Point(Scalar(0.2), Scalar(0.5), Scalar(-1.0)))\n            .matrix();\n    Matrix3<Scalar> sR = R * Scalar(1.3);\n    SOPHUS_TEST_APPROX(passed, RxSO3Type(sR).matrix(), sR,\n                       Constants<Scalar>::epsilon(), \"RxSO3(sR)\");\n    rxso3.setScaledRotationMatrix(sR);\n    SOPHUS_TEST_APPROX(passed, sR, rxso3.matrix(), Constants<Scalar>::epsilon(),\n                       \"setScaleRotationMatrix\");\n    rxso3.setScale(scale);\n    rxso3.setRotationMatrix(R);\n    SOPHUS_TEST_APPROX(passed, R, rxso3.rotationMatrix(),\n                       Constants<Scalar>::epsilon(), \"setRotationMatrix\");\n    SOPHUS_TEST_APPROX(passed, scale, rxso3.scale(),\n                       Constants<Scalar>::epsilon(), \"setScale\");\n\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> testFit() {\n    bool passed = true;\n    for (int i = 0; i < 10; ++i) {\n      
Matrix3<Scalar> M = Matrix3<Scalar>::Random();\n      for (Scalar scale : {Scalar(0.01), Scalar(0.99), Scalar(1), Scalar(10)}) {\n        Matrix3<Scalar> R = makeRotationMatrix(M);\n        Matrix3<Scalar> sR = scale * R;\n        SOPHUS_TEST(passed, isScaledOrthogonalAndPositive(sR),\n                    \"isScaledOrthogonalAndPositive(sR): {} *\\n{}\",\n                    SOPHUS_FMT_ARG(scale), SOPHUS_FMT_ARG(R));\n        Matrix3<Scalar> sR_cols_swapped;\n        sR_cols_swapped << sR.col(1), sR.col(0), sR.col(2);\n        SOPHUS_TEST(passed, !isScaledOrthogonalAndPositive(sR_cols_swapped),\n                    \"isScaledOrthogonalAndPositive(-sR): {} *\\n{}\",\n                    SOPHUS_FMT_ARG(scale), SOPHUS_FMT_ARG(R));\n      }\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> testFit() {\n    return true;\n  }\n\n  std::vector<RxSO3Type, Eigen::aligned_allocator<RxSO3Type>> rxso3_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_rxso3() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test RxSO3\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n\n}  // namespace Sophus\n\nint main() { return Sophus::test_rxso3(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_se2.cpp",
    "content": "#include <iostream>\n\n#include <sophus/se2.hpp>\n#include <unsupported/Eigen/MatrixFunctions>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::SE2<double>>;\ntemplate class Map<Sophus::SE2<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class SE2<double, Eigen::AutoAlign>;\ntemplate class SE2<double, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class SE2<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using SE2Type = SE2<Scalar>;\n  using SO2Type = SO2<Scalar>;\n  using Point = typename SE2<Scalar>::Point;\n  using Tangent = typename SE2<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(0.0)), Point(Scalar(0), Scalar(0))));\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(0.2)), Point(Scalar(10), Scalar(0))));\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(0.)), Point(Scalar(0), Scalar(100))));\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(-1.)), Point(Scalar(20), -Scalar(1))));\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(0.00001)),\n                Point(Scalar(-0.00000001), Scalar(0.0000000001))));\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(0.2)), Point(Scalar(0), Scalar(0))) *\n        SE2Type(SO2Type(kPi), Point(Scalar(0), Scalar(0))) *\n        SE2Type(SO2Type(Scalar(-0.2)), Point(Scalar(0), Scalar(0))));\n    se2_vec_.push_back(\n        SE2Type(SO2Type(Scalar(0.3)), Point(Scalar(2), Scalar(0))) *\n        SE2Type(SO2Type(kPi), Point(Scalar(0), Scalar(0))) *\n        SE2Type(SO2Type(Scalar(-0.3)), Point(Scalar(0), Scalar(6))));\n\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0);\n    
tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1), Scalar(1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1), Scalar(-1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(30), Scalar(5), Scalar(20);\n    tangent_vec_.push_back(tmp);\n\n    point_vec_.push_back(Point(1, 2));\n    point_vec_.push_back(Point(1, -3));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testRawDataAcces();\n    passed &= testMutatingAccessors();\n    passed &= testConstructors();\n    passed &= testFit();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<SE2Type> tests(se2_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 4, 1> raw;\n    raw << Scalar(0), Scalar(1), Scalar(0), Scalar(3);\n    Eigen::Map<SE2Type const> const_se2_map(raw.data());\n    SOPHUS_TEST_APPROX(passed, const_se2_map.unit_complex().eval(),\n                       raw.template head<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, const_se2_map.translation().eval(),\n                       raw.template tail<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_se2_map.unit_complex().data(), raw.data(),\n                      \"\");\n    SOPHUS_TEST_EQUAL(passed, const_se2_map.translation().data(),\n                      raw.data() + 2, \"\");\n    Eigen::Map<SE2Type const> const_shallow_copy = const_se2_map;\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.unit_complex().eval(),\n                      const_se2_map.unit_complex().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.translation().eval(),\n                      const_se2_map.translation().eval(), \"\");\n\n    
Eigen::Matrix<Scalar, 4, 1> raw2;\n    raw2 << Scalar(1), Scalar(0), Scalar(3), Scalar(1);\n    Eigen::Map<SE2Type> map_of_se3(raw.data());\n    map_of_se3.setComplex(raw2.template head<2>());\n    map_of_se3.translation() = raw2.template tail<2>();\n    SOPHUS_TEST_APPROX(passed, map_of_se3.unit_complex().eval(),\n                       raw2.template head<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_se3.translation().eval(),\n                       raw2.template tail<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_se3.unit_complex().data(), raw.data(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_se3.translation().data(), raw.data() + 2,\n                      \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_se3.unit_complex().data(), raw2.data(), \"\");\n    Eigen::Map<SE2Type> shallow_copy = map_of_se3;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.unit_complex().eval(),\n                      map_of_se3.unit_complex().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.translation().eval(),\n                      map_of_se3.translation().eval(), \"\");\n    Eigen::Map<SE2Type> const const_map_of_se2 = map_of_se3;\n    SOPHUS_TEST_EQUAL(passed, const_map_of_se2.unit_complex().eval(),\n                      map_of_se3.unit_complex().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_map_of_se2.translation().eval(),\n                      map_of_se3.translation().eval(), \"\");\n\n    SE2Type const const_se2(raw2.template head<2>().eval(),\n                            raw2.template tail<2>().eval());\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_se2.data()[i], raw2.data()[i], \"\");\n    }\n\n    SE2Type se2(raw2.template head<2>().eval(), raw2.template tail<2>().eval());\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se2.data()[i], raw2.data()[i], \"\");\n    }\n\n    for (int i = 0; i 
< 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se2.data()[i], raw.data()[i], \"\");\n    }\n\n    SE2Type trans = SE2Type::transX(Scalar(0.2));\n    SOPHUS_TEST_APPROX(passed, trans.translation().x(), Scalar(0.2),\n                       Constants<Scalar>::epsilon(), \"\");\n    trans = SE2Type::transY(Scalar(0.7));\n    SOPHUS_TEST_APPROX(passed, trans.translation().y(), Scalar(0.7),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    Eigen::Matrix<Scalar, 4, 1> data1, data2;\n    data1 << Scalar(0), Scalar(1), Scalar(1), Scalar(2);\n    data2 << Scalar(1), Scalar(0), Scalar(2), Scalar(1);\n\n    Eigen::Map<SE2Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    SE2Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = SE2Type::trans(Scalar(4), Scalar(5)) * SE2Type::rot(Scalar(0.5));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testMutatingAccessors() {\n    bool passed = true;\n    SE2Type se2;\n    SO2Type R(Scalar(0.2));\n    se2.setRotationMatrix(R.matrix());\n    SOPHUS_TEST_APPROX(passed, se2.rotationMatrix(), R.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    Eigen::Matrix<Scalar, 4, 1> raw;\n    raw << Scalar(1), Scalar(0), Scalar(3), Scalar(1);\n    Eigen::Map<SE2Type> map_of_se2(raw.data());\n    map_of_se2.setRotationMatrix(R.matrix());\n    SOPHUS_TEST_APPROX(passed, map_of_se2.rotationMatrix(), R.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    Matrix3<Scalar> I = Matrix3<Scalar>::Identity();\n    SOPHUS_TEST_EQUAL(passed, SE2Type().matrix(), I, \"\");\n\n    SE2Type se2 = se2_vec_.front();\n  
  Point translation = se2.translation();\n    SO2Type so2 = se2.so2();\n\n    SOPHUS_TEST_APPROX(passed, SE2Type(so2.log(), translation).matrix(),\n                       se2.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, SE2Type(so2, translation).matrix(), se2.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, SE2Type(so2.matrix(), translation).matrix(),\n                       se2.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed,\n                       SE2Type(so2.unit_complex(), translation).matrix(),\n                       se2.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, SE2Type(se2.matrix()).matrix(), se2.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> testFit() {\n    bool passed = true;\n    for (int i = 0; i < 100; ++i) {\n      Matrix3<Scalar> T = Matrix3<Scalar>::Random();\n      SE2Type se2 = SE2Type::fitToSE2(T);\n      SE2Type se2_2 = SE2Type::fitToSE2(se2.matrix());\n\n      SOPHUS_TEST_APPROX(passed, se2.matrix(), se2_2.matrix(),\n                         Constants<Scalar>::epsilon(), \"\");\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> testFit() {\n    return true;\n  }\n\n  std::vector<SE2Type, Eigen::aligned_allocator<SE2Type>> se2_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_se2() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test SE2\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  
Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n}  // namespace Sophus\n\nint main() { return Sophus::test_se2(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_se3.cpp",
    "content": "#include <iostream>\n\n#include <sophus/se3.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::SE3<double>>;\ntemplate class Map<Sophus::SE3<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class SE3<double, Eigen::AutoAlign>;\ntemplate class SE3<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class SE3<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using SE3Type = SE3<Scalar>;\n  using SO3Type = SO3<Scalar>;\n  using Point = typename SE3<Scalar>::Point;\n  using Tangent = typename SE3<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    se3_vec_ = getTestSE3s<Scalar>();\n\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1), Scalar(0), Scalar(1), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(-5), Scalar(10), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1), Scalar(0), Scalar(0), Scalar(0), Scalar(1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1), Scalar(0), Scalar(-1), Scalar(1), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(30), Scalar(5), Scalar(-1), Scalar(20), Scalar(-1), Scalar(0);\n    tangent_vec_.push_back(tmp);\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3), Scalar(0.5)));\n    point_vec_.push_back(Point(Scalar(-5), Scalar(-6), Scalar(7)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testRawDataAcces();\n    passed &= 
testMutatingAccessors();\n    passed &= testConstructors();\n    passed &= testFit();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<SE3Type> tests(se3_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 7, 1> raw;\n    raw << Scalar(0), Scalar(1), Scalar(0), Scalar(0), Scalar(1), Scalar(3),\n        Scalar(2);\n    Eigen::Map<SE3Type const> map_of_const_se3(raw.data());\n    SOPHUS_TEST_APPROX(\n        passed, map_of_const_se3.unit_quaternion().coeffs().eval(),\n        raw.template head<4>().eval(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_const_se3.translation().eval(),\n                       raw.template tail<3>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed,\n                      map_of_const_se3.unit_quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_se3.translation().data(),\n                      raw.data() + 4, \"\");\n    Eigen::Map<SE3Type const> const_shallow_copy = map_of_const_se3;\n    SOPHUS_TEST_EQUAL(passed,\n                      const_shallow_copy.unit_quaternion().coeffs().eval(),\n                      map_of_const_se3.unit_quaternion().coeffs().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.translation().eval(),\n                      map_of_const_se3.translation().eval(), \"\");\n\n    Eigen::Matrix<Scalar, 7, 1> raw2;\n    raw2 << Scalar(1), Scalar(0), Scalar(0), Scalar(0), Scalar(3), Scalar(2),\n        Scalar(1);\n    Eigen::Map<SE3Type> map_of_se3(raw.data());\n    Eigen::Quaternion<Scalar> quat;\n    quat.coeffs() = raw2.template head<4>();\n    map_of_se3.setQuaternion(quat);\n    map_of_se3.translation() = raw2.template tail<3>();\n    SOPHUS_TEST_APPROX(passed, map_of_se3.unit_quaternion().coeffs().eval(),\n 
                      raw2.template head<4>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_se3.translation().eval(),\n                       raw2.template tail<3>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_se3.unit_quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_se3.translation().data(), raw.data() + 4,\n                      \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_se3.unit_quaternion().coeffs().data(),\n                    quat.coeffs().data(), \"\");\n    Eigen::Map<SE3Type> shallow_copy = map_of_se3;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.unit_quaternion().coeffs().eval(),\n                      map_of_se3.unit_quaternion().coeffs().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.translation().eval(),\n                      map_of_se3.translation().eval(), \"\");\n    Eigen::Map<SE3Type> const const_map_of_se3 = map_of_se3;\n    SOPHUS_TEST_EQUAL(passed,\n                      const_map_of_se3.unit_quaternion().coeffs().eval(),\n                      map_of_se3.unit_quaternion().coeffs().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_map_of_se3.translation().eval(),\n                      map_of_se3.translation().eval(), \"\");\n\n    SE3Type const const_se3(quat, raw2.template tail<3>().eval());\n    for (int i = 0; i < 7; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_se3.data()[i], raw2.data()[i], \"\");\n    }\n\n    SE3Type se3(quat, raw2.template tail<3>().eval());\n    for (int i = 0; i < 7; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se3.data()[i], raw2.data()[i], \"\");\n    }\n\n    for (int i = 0; i < 7; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se3.data()[i], raw.data()[i], \"\");\n    }\n    SE3Type trans = SE3Type::transX(Scalar(0.2));\n    SOPHUS_TEST_APPROX(passed, trans.translation().x(), Scalar(0.2),\n                       
Constants<Scalar>::epsilon(), \"\");\n    trans = SE3Type::transY(Scalar(0.7));\n    SOPHUS_TEST_APPROX(passed, trans.translation().y(), Scalar(0.7),\n                       Constants<Scalar>::epsilon(), \"\");\n    trans = SE3Type::transZ(Scalar(-0.2));\n    SOPHUS_TEST_APPROX(passed, trans.translation().z(), Scalar(-0.2),\n                       Constants<Scalar>::epsilon(), \"\");\n    Tangent t;\n    t << Scalar(0), Scalar(0), Scalar(0), Scalar(0.2), Scalar(0), Scalar(0);\n    SOPHUS_TEST_EQUAL(passed, SE3Type::rotX(Scalar(0.2)).matrix(),\n                      SE3Type::exp(t).matrix(), \"\");\n    t << Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(-0.2), Scalar(0);\n    SOPHUS_TEST_EQUAL(passed, SE3Type::rotY(Scalar(-0.2)).matrix(),\n                      SE3Type::exp(t).matrix(), \"\");\n    t << Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(1.1);\n    SOPHUS_TEST_EQUAL(passed, SE3Type::rotZ(Scalar(1.1)).matrix(),\n                      SE3Type::exp(t).matrix(), \"\");\n\n    Eigen::Matrix<Scalar, 7, 1> data1, data2;\n    data1 << Scalar(0), Scalar(1), Scalar(0), Scalar(0), Scalar(1), Scalar(2),\n        Scalar(3);\n    data2 << Scalar(0), Scalar(0), Scalar(1), Scalar(0), Scalar(3), Scalar(2),\n        Scalar(1);\n\n    Eigen::Map<SE3Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    SE3Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = SE3Type::trans(Scalar(4), Scalar(5), Scalar(6)) *\n           SE3Type::rotZ(Scalar(0.5));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testMutatingAccessors() {\n    bool passed = true;\n    SE3Type se3;\n    SO3Type R(SO3Type::exp(Point(Scalar(0.2), Scalar(0.5), Scalar(0.0))));\n    
se3.setRotationMatrix(R.matrix());\n    SOPHUS_TEST_APPROX(passed, se3.rotationMatrix(), R.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 4, 4> I = Eigen::Matrix<Scalar, 4, 4>::Identity();\n    SOPHUS_TEST_EQUAL(passed, SE3Type().matrix(), I, \"\");\n\n    SE3Type se3 = se3_vec_.front();\n    Point translation = se3.translation();\n    SO3Type so3 = se3.so3();\n\n    SOPHUS_TEST_APPROX(passed, SE3Type(so3, translation).matrix(), se3.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, SE3Type(so3.matrix(), translation).matrix(),\n                       se3.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed,\n                       SE3Type(so3.unit_quaternion(), translation).matrix(),\n                       se3.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, SE3Type(se3.matrix()).matrix(), se3.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> testFit() {\n    bool passed = true;\n\n    for (int i = 0; i < 100; ++i) {\n      Matrix4<Scalar> T = Matrix4<Scalar>::Random();\n      SE3Type se3 = SE3Type::fitToSE3(T);\n      SE3Type se3_2 = SE3Type::fitToSE3(se3.matrix());\n\n      SOPHUS_TEST_APPROX(passed, se3.matrix(), se3_2.matrix(),\n                         Constants<Scalar>::epsilon(), \"\");\n    }\n    for (Scalar const angle :\n         {Scalar(0.0), Scalar(0.1), Scalar(0.3), Scalar(-0.7)}) {\n      SOPHUS_TEST_APPROX(passed, SE3Type::rotX(angle).angleX(), angle,\n                         Constants<Scalar>::epsilon(), \"\");\n      SOPHUS_TEST_APPROX(passed, SE3Type::rotY(angle).angleY(), angle,\n                         Constants<Scalar>::epsilon(), \"\");\n      SOPHUS_TEST_APPROX(passed, 
SE3Type::rotZ(angle).angleZ(), angle,\n                         Constants<Scalar>::epsilon(), \"\");\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> testFit() {\n    return true;\n  }\n\n  std::vector<SE3Type, Eigen::aligned_allocator<SE3Type>> se3_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_se3() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test SE3\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n}  // namespace Sophus\n\nint main() { return Sophus::test_se3(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_sim2.cpp",
    "content": "#include <iostream>\n\n#include <unsupported/Eigen/MatrixFunctions>\n\n#include <sophus/sim2.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::Sim2<double>>;\ntemplate class Map<Sophus::Sim2<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class Sim2<double, Eigen::AutoAlign>;\ntemplate class Sim2<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class Sim2<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using Sim2Type = Sim2<Scalar>;\n  using RxSO2Type = RxSO2<Scalar>;\n  using Point = typename Sim2<Scalar>::Point;\n  using Vector2Type = Vector2<Scalar>;\n  using Tangent = typename Sim2<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0.2, 1.)), Point(0, 0)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0.2, 1.1)), Point(10, 0)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0., 0.)), Point(0, 10)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0.00001, 0.)), Point(0, 0)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0.00001, 0.0000001)),\n                 Point(1, -1.00000001)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0., 0.)), Point(0.01, 0)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(kPi, 0.9)), Point(4, 0)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0.2, 0)), Point(0, 0)) *\n        Sim2Type(RxSO2Type::exp(Vector2Type(kPi, 0)), Point(0, 0)) *\n        Sim2Type(RxSO2Type::exp(Vector2Type(-0.2, 0)), Point(0, 0)));\n    sim2_vec_.push_back(\n        Sim2Type(RxSO2Type::exp(Vector2Type(0.3, 0)), Point(2, -7)) *\n        
Sim2Type(RxSO2Type::exp(Vector2Type(kPi, 0)), Point(0, 0)) *\n        Sim2Type(RxSO2Type::exp(Vector2Type(-0.3, 0)), Point(0, 6)));\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0), Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1), Scalar(0), Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1), Scalar(1), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1), Scalar(0), Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(30), Scalar(5), Scalar(-1), Scalar(1.5);\n    tangent_vec_.push_back(tmp);\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testRawDataAcces();\n    passed &= testConstructors();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<Sim2Type> tests(sim2_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 4, 1> raw;\n    raw << Scalar(0), Scalar(1), Scalar(3), Scalar(2);\n    Eigen::Map<Sim2Type const> map_of_const_sim2(raw.data());\n    SOPHUS_TEST_APPROX(passed, map_of_const_sim2.complex().eval(),\n                       raw.template head<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_const_sim2.translation().eval(),\n                       raw.template tail<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_sim2.complex().data(), raw.data(),\n                      \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_sim2.translation().data(),\n                      raw.data() + 2, \"\");\n    Eigen::Map<Sim2Type const> 
const_shallow_copy = map_of_const_sim2;\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.complex().eval(),\n                      map_of_const_sim2.complex().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.translation().eval(),\n                      map_of_const_sim2.translation().eval(), \"\");\n\n    Eigen::Matrix<Scalar, 4, 1> raw2;\n    raw2 << Scalar(1), Scalar(0), Scalar(2), Scalar(1);\n    Eigen::Map<Sim2Type> map_of_sim2(raw.data());\n    Vector2<Scalar> z;\n    z = raw2.template head<2>();\n    map_of_sim2.setComplex(z);\n    map_of_sim2.translation() = raw2.template tail<2>();\n    SOPHUS_TEST_APPROX(passed, map_of_sim2.complex().eval(),\n                       raw2.template head<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_sim2.translation().eval(),\n                       raw2.template tail<2>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_sim2.complex().data(), raw.data(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_sim2.translation().data(), raw.data() + 2,\n                      \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_sim2.complex().data(), z.data(), \"\");\n    Eigen::Map<Sim2Type> shallow_copy = map_of_sim2;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.complex().eval(),\n                      map_of_sim2.complex().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.translation().eval(),\n                      map_of_sim2.translation().eval(), \"\");\n    Eigen::Map<Sim2Type> const const_map_of_sim3 = map_of_sim2;\n    SOPHUS_TEST_EQUAL(passed, const_map_of_sim3.complex().eval(),\n                      map_of_sim2.complex().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_map_of_sim3.translation().eval(),\n                      map_of_sim2.translation().eval(), \"\");\n\n    Sim2Type const const_sim2(z, raw2.template tail<2>().eval());\n    for (int i = 0; i < 4; ++i) {\n      
SOPHUS_TEST_EQUAL(passed, const_sim2.data()[i], raw2.data()[i], \"\");\n    }\n\n    Sim2Type se3(z, raw2.template tail<2>().eval());\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se3.data()[i], raw2.data()[i], \"\");\n    }\n\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se3.data()[i], raw.data()[i], \"\");\n    }\n\n    Eigen::Matrix<Scalar, 4, 1> data1, data2;\n    data1 << Scalar(0), Scalar(2), Scalar(1), Scalar(2);\n    data2 << Scalar(2), Scalar(0), Scalar(2), Scalar(1);\n\n    Eigen::Map<Sim2Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    Sim2Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = Sim2Type(RxSO2Type::exp(Vector2Type(-1, 1)),\n                    Point(Scalar(10), Scalar(0)));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 3, 3> I = Eigen::Matrix<Scalar, 3, 3>::Identity();\n    SOPHUS_TEST_EQUAL(passed, Sim2Type().matrix(), I, \"\");\n\n    Sim2Type sim2 = sim2_vec_.front();\n    Point translation = sim2.translation();\n    RxSO2Type rxso2 = sim2.rxso2();\n\n    SOPHUS_TEST_APPROX(passed, Sim2Type(rxso2, translation).matrix(),\n                       sim2.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, Sim2Type(rxso2.complex(), translation).matrix(),\n                       sim2.matrix(), Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, Sim2Type(sim2.matrix()).matrix(), sim2.matrix(),\n                       Constants<Scalar>::epsilon(), \"\");\n\n    Scalar scale(1.2);\n    sim2.setScale(scale);\n    SOPHUS_TEST_APPROX(passed, scale, sim2.scale(),\n                   
    Constants<Scalar>::epsilon(), \"setScale\", \"\");\n\n    sim2.setComplex(sim2_vec_[0].rxso2().complex());\n    SOPHUS_TEST_APPROX(passed, sim2_vec_[0].rxso2().complex(),\n                       sim2.rxso2().complex(),\n                       Constants<Scalar>::epsilon(), \"setComplex\", \"\");\n    return passed;\n  }\n\n  std::vector<Sim2Type, Eigen::aligned_allocator<Sim2Type>> sim2_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_sim2() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test Sim2\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n}  // namespace Sophus\n\nint main() { return Sophus::test_sim2(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_sim3.cpp",
    "content": "#include <iostream>\n\n#include <unsupported/Eigen/MatrixFunctions>\n\n#include <sophus/sim3.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::Sim3<double>>;\ntemplate class Map<Sophus::Sim3<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class Sim3<double, Eigen::AutoAlign>;\ntemplate class Sim3<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class Sim3<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using Sim3Type = Sim3<Scalar>;\n  using RxSO3Type = RxSO3<Scalar>;\n  using Point = typename Sim3<Scalar>::Point;\n  using Vector4Type = Vector4<Scalar>;\n  using Tangent = typename Sim3<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.2), Scalar(0.5),\n                                            Scalar(0.0), Scalar(1.))),\n                 Point(Scalar(0), Scalar(0), Scalar(0))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.2), Scalar(0.5),\n                                            Scalar(-1.0), Scalar(1.1))),\n                 Point(Scalar(10), Scalar(0), Scalar(0))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.), Scalar(0.), Scalar(0.),\n                                            Scalar(0.))),\n                 Point(Scalar(0), Scalar(10), Scalar(5))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.), Scalar(0.), Scalar(0.),\n                                            Scalar(1.1))),\n                 Point(Scalar(0), Scalar(10), Scalar(5))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.), Scalar(0.),\n                                            Scalar(0.00001), Scalar(0.))),\n     
            Point(Scalar(0), Scalar(0), Scalar(0))));\n    sim3_vec_.push_back(Sim3Type(\n        RxSO3Type::exp(Vector4Type(Scalar(0.), Scalar(0.), Scalar(0.00001),\n                                   Scalar(0.0000001))),\n        Point(Scalar(1), Scalar(-1.00000001), Scalar(2.0000000001))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.), Scalar(0.),\n                                            Scalar(0.00001), Scalar(0))),\n                 Point(Scalar(0.01), Scalar(0), Scalar(0))));\n    sim3_vec_.push_back(Sim3Type(\n        RxSO3Type::exp(Vector4Type(kPi, Scalar(0), Scalar(0), Scalar(0.9))),\n        Point(Scalar(4), Scalar(-5), Scalar(0))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.2), Scalar(0.5),\n                                            Scalar(0.0), Scalar(0))),\n                 Point(Scalar(0), Scalar(0), Scalar(0))) *\n        Sim3Type(\n            RxSO3Type::exp(Vector4Type(kPi, Scalar(0), Scalar(0), Scalar(0))),\n            Point(Scalar(0), Scalar(0), Scalar(0))) *\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(-0.2), Scalar(-0.5),\n                                            Scalar(-0.0), Scalar(0))),\n                 Point(Scalar(0), Scalar(0), Scalar(0))));\n    sim3_vec_.push_back(\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.3), Scalar(0.5),\n                                            Scalar(0.1), Scalar(0))),\n                 Point(Scalar(2), Scalar(0), Scalar(-7))) *\n        Sim3Type(\n            RxSO3Type::exp(Vector4Type(kPi, Scalar(0), Scalar(0), Scalar(0))),\n            Point(Scalar(0), Scalar(0), Scalar(0))) *\n        Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(-0.3), Scalar(-0.5),\n                                            Scalar(-0.1), Scalar(0))),\n                 Point(Scalar(0), Scalar(6), Scalar(0))));\n    Tangent tmp;\n    tmp << Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0),\n        Scalar(0);\n    
tangent_vec_.push_back(tmp);\n    tmp << Scalar(1), Scalar(0), Scalar(0), Scalar(0), Scalar(0), Scalar(0),\n        Scalar(0);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(1), Scalar(0), Scalar(1), Scalar(0), Scalar(0),\n        Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(0), Scalar(0), Scalar(1), Scalar(0), Scalar(1), Scalar(0),\n        Scalar(0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(-1), Scalar(1), Scalar(0), Scalar(0), Scalar(0), Scalar(1),\n        Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(20), Scalar(-1), Scalar(0), Scalar(-1), Scalar(1), Scalar(0),\n        Scalar(-0.1);\n    tangent_vec_.push_back(tmp);\n    tmp << Scalar(30), Scalar(5), Scalar(-1), Scalar(20), Scalar(-1), Scalar(0),\n        Scalar(1.5);\n    tangent_vec_.push_back(tmp);\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3), Scalar(0.5)));\n    point_vec_.push_back(Point(Scalar(-5), Scalar(-6), Scalar(7)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testRawDataAcces();\n    passed &= testConstructors();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<Sim3Type> tests(sim3_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 7, 1> raw;\n    raw << Scalar(0), Scalar(1), Scalar(0), Scalar(0), Scalar(1), Scalar(3),\n        Scalar(2);\n    Eigen::Map<Sim3Type const> map_of_const_sim3(raw.data());\n    SOPHUS_TEST_APPROX(passed, map_of_const_sim3.quaternion().coeffs().eval(),\n                       raw.template head<4>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_const_sim3.translation().eval(),\n                       raw.template tail<3>().eval(),\n                       
Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_sim3.quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_sim3.translation().data(),\n                      raw.data() + 4, \"\");\n    Eigen::Map<Sim3Type const> const_shallow_copy = map_of_const_sim3;\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.quaternion().coeffs().eval(),\n                      map_of_const_sim3.quaternion().coeffs().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.translation().eval(),\n                      map_of_const_sim3.translation().eval(), \"\");\n\n    Eigen::Matrix<Scalar, 7, 1> raw2;\n    raw2 << Scalar(1), Scalar(0), Scalar(0), Scalar(0), Scalar(3), Scalar(2),\n        Scalar(1);\n    Eigen::Map<Sim3Type> map_of_sim3(raw.data());\n    Eigen::Quaternion<Scalar> quat;\n    quat.coeffs() = raw2.template head<4>();\n    map_of_sim3.setQuaternion(quat);\n    map_of_sim3.translation() = raw2.template tail<3>();\n    SOPHUS_TEST_APPROX(passed, map_of_sim3.quaternion().coeffs().eval(),\n                       raw2.template head<4>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_APPROX(passed, map_of_sim3.translation().eval(),\n                       raw2.template tail<3>().eval(),\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_sim3.quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_sim3.translation().data(), raw.data() + 4,\n                      \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_sim3.quaternion().coeffs().data(),\n                    quat.coeffs().data(), \"\");\n    Eigen::Map<Sim3Type> shallow_copy = map_of_sim3;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.quaternion().coeffs().eval(),\n                      map_of_sim3.quaternion().coeffs().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, 
shallow_copy.translation().eval(),\n                      map_of_sim3.translation().eval(), \"\");\n    Eigen::Map<Sim3Type> const const_map_of_sim3 = map_of_sim3;\n    SOPHUS_TEST_EQUAL(passed, const_map_of_sim3.quaternion().coeffs().eval(),\n                      map_of_sim3.quaternion().coeffs().eval(), \"\");\n    SOPHUS_TEST_EQUAL(passed, const_map_of_sim3.translation().eval(),\n                      map_of_sim3.translation().eval(), \"\");\n\n    Sim3Type const const_sim3(quat, raw2.template tail<3>().eval());\n    for (int i = 0; i < 7; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_sim3.data()[i], raw2.data()[i], \"\");\n    }\n\n    Sim3Type se3(quat, raw2.template tail<3>().eval());\n    for (int i = 0; i < 7; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se3.data()[i], raw2.data()[i], \"\");\n    }\n\n    for (int i = 0; i < 7; ++i) {\n      SOPHUS_TEST_EQUAL(passed, se3.data()[i], raw.data()[i], \"\");\n    }\n\n    Eigen::Matrix<Scalar, 7, 1> data1, data2;\n    data1 << Scalar(0), Scalar(2), Scalar(0), Scalar(0), Scalar(1), Scalar(2),\n        Scalar(3);\n    data2 << Scalar(0), Scalar(0), Scalar(2), Scalar(0), Scalar(3), Scalar(2),\n        Scalar(1);\n\n    Eigen::Map<Sim3Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    Sim3Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = Sim3Type(RxSO3Type::exp(Vector4Type(Scalar(0.2), Scalar(0.5),\n                                               Scalar(-1.0), Scalar(1.1))),\n                    Point(Scalar(10), Scalar(0), Scalar(0)));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 4, 4> I = Eigen::Matrix<Scalar, 4, 4>::Identity();\n   
 SOPHUS_TEST_EQUAL(passed, Sim3Type().matrix(), I, \"\");\n\n    Sim3Type sim3 = sim3_vec_.front();\n    Point translation = sim3.translation();\n    RxSO3Type rxso3 = sim3.rxso3();\n\n    SOPHUS_TEST_APPROX(passed, Sim3Type(rxso3, translation).matrix(),\n                       sim3.matrix(), Constants<Scalar>::epsilon(),\n                       \"Sim3(RxSO3, translation)\");\n    SOPHUS_TEST_APPROX(passed,\n                       Sim3Type(rxso3.quaternion(), translation).matrix(),\n                       sim3.matrix(), Constants<Scalar>::epsilon(),\n                       \"Sim3(quaternion, translation)\");\n    SOPHUS_TEST_APPROX(\n        passed,\n        Sim3Type(rxso3.scale(), rxso3.quaternion().normalized(), translation)\n            .matrix(),\n        sim3.matrix(), Constants<Scalar>::epsilon(),\n        \"Sim3(scale, unit_quaternion, translation)\");\n    SOPHUS_TEST_APPROX(passed, Sim3Type(sim3.matrix()).matrix(), sim3.matrix(),\n                       Constants<Scalar>::epsilon(), \"Sim3(matrix4x4)\");\n\n    Scalar scale(1.2);\n    sim3.setScale(scale);\n    SOPHUS_TEST_APPROX(passed, scale, sim3.scale(),\n                       Constants<Scalar>::epsilon(), \"setScale\");\n\n    sim3.setQuaternion(sim3_vec_[0].rxso3().quaternion());\n    SOPHUS_TEST_APPROX(passed, sim3_vec_[0].rxso3().quaternion().coeffs(),\n                       sim3_vec_[0].rxso3().quaternion().coeffs(),\n                       Constants<Scalar>::epsilon(), \"setQuaternion\");\n    return passed;\n  }\n\n  std::vector<Sim3Type, Eigen::aligned_allocator<Sim3Type>> sim3_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_sim3() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test Sim3\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << 
\"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n}  // namespace Sophus\n\nint main() { return Sophus::test_sim3(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_so2.cpp",
    "content": "#include <iostream>\n\n#include <sophus/so2.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::SO2<double>>;\ntemplate class Map<Sophus::SO2<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class SO2<double, Eigen::AutoAlign>;\ntemplate class SO2<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class SO2<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using SO2Type = SO2<Scalar>;\n  using Point = typename SO2<Scalar>::Point;\n  using Tangent = typename SO2<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    so2_vec_.push_back(SO2Type::exp(Scalar(0.0)));\n    so2_vec_.push_back(SO2Type::exp(Scalar(0.2)));\n    so2_vec_.push_back(SO2Type::exp(Scalar(10.)));\n    so2_vec_.push_back(SO2Type::exp(Scalar(0.00001)));\n    so2_vec_.push_back(SO2Type::exp(kPi));\n    so2_vec_.push_back(SO2Type::exp(Scalar(0.2)) * SO2Type::exp(kPi) *\n                       SO2Type::exp(Scalar(-0.2)));\n    so2_vec_.push_back(SO2Type::exp(Scalar(-0.3)) * SO2Type::exp(kPi) *\n                       SO2Type::exp(Scalar(0.3)));\n\n    tangent_vec_.push_back(Tangent(Scalar(0)));\n    tangent_vec_.push_back(Tangent(Scalar(1)));\n    tangent_vec_.push_back(Tangent(Scalar(kPi / 2.)));\n    tangent_vec_.push_back(Tangent(Scalar(-1)));\n    tangent_vec_.push_back(Tangent(Scalar(20)));\n    tangent_vec_.push_back(Tangent(Scalar(kPi / 2. 
+ 0.0001)));\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testUnity();\n    passed &= testRawDataAcces();\n    passed &= testConstructors();\n    passed &= testFit();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<SO2Type> tests(so2_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testUnity() {\n    bool passed = true;\n    // Test that the complex number magnitude stays close to one.\n    SO2Type current_q;\n    for (std::size_t i = 0; i < 1000; ++i) {\n      for (SO2Type const& q : so2_vec_) {\n        current_q *= q;\n      }\n    }\n    SOPHUS_TEST_APPROX(passed, current_q.unit_complex().norm(), Scalar(1),\n                       Constants<Scalar>::epsilon(), \"Magnitude drift\");\n    return passed;\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Vector2<Scalar> raw = {0, 1};\n    Eigen::Map<SO2Type const> map_of_const_so2(raw.data());\n    SOPHUS_TEST_APPROX(passed, map_of_const_so2.unit_complex().eval(), raw,\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_const_so2.unit_complex().data(),\n                      raw.data(), \"\");\n    Eigen::Map<SO2Type const> const_shallow_copy = map_of_const_so2;\n    SOPHUS_TEST_EQUAL(passed, const_shallow_copy.unit_complex().eval(),\n                      map_of_const_so2.unit_complex().eval(), \"\");\n\n    Vector2<Scalar> raw2 = {1, 0};\n    Eigen::Map<SO2Type> map_of_so2(raw.data());\n    map_of_so2.setComplex(raw2);\n    SOPHUS_TEST_APPROX(passed, map_of_so2.unit_complex().eval(), raw2,\n                       Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_so2.unit_complex().data(), raw.data(), \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_so2.unit_complex().data(), raw2.data(), \"\");\n    
Eigen::Map<SO2Type> shallow_copy = map_of_so2;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.unit_complex().eval(),\n                      map_of_so2.unit_complex().eval(), \"\");\n\n    SO2Type const const_so2(raw2);\n    for (int i = 0; i < 2; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_so2.data()[i], raw2.data()[i], \"\");\n    }\n\n    SO2Type so2(raw2);\n    for (int i = 0; i < 2; ++i) {\n      so2.data()[i] = raw[i];\n    }\n\n    for (int i = 0; i < 2; ++i) {\n      SOPHUS_TEST_EQUAL(passed, so2.data()[i], raw.data()[i], \"\");\n    }\n\n    Vector2<Scalar> data1 = {1, 0}, data2 = {0, 1};\n    Eigen::Map<SO2Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    SO2Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = SO2Type(Scalar(0.5));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    Matrix2<Scalar> R = so2_vec_.front().matrix();\n    SO2Type so2(R);\n    SOPHUS_TEST_APPROX(passed, R, so2.matrix(), Constants<Scalar>::epsilon(),\n                       \"\");\n\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> testFit() {\n    bool passed = true;\n\n    for (int i = 0; i < 100; ++i) {\n      Matrix2<Scalar> R = Matrix2<Scalar>::Random();\n      SO2Type so2 = SO2Type::fitToSO2(R);\n      SO2Type so2_2 = SO2Type::fitToSO2(so2.matrix());\n\n      SOPHUS_TEST_APPROX(passed, so2.matrix(), so2_2.matrix(),\n                         Constants<Scalar>::epsilon(), \"\");\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> testFit() {\n    return true;\n  }\n\n  std::vector<SO2Type, 
Eigen::aligned_allocator<SO2Type>> so2_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_so2() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test SO2\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n}  // namespace Sophus\n\nint main() { return Sophus::test_so2(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_so3.cpp",
    "content": "#include <iostream>\n\n#include <sophus/interpolate.hpp>\n#include <sophus/so3.hpp>\n#include \"tests.hpp\"\n\n// Explicit instantiate all class templates so that all member methods\n// get compiled and for code coverage analysis.\nnamespace Eigen {\ntemplate class Map<Sophus::SO3<double>>;\ntemplate class Map<Sophus::SO3<double> const>;\n}  // namespace Eigen\n\nnamespace Sophus {\n\ntemplate class SO3<double, Eigen::AutoAlign>;\ntemplate class SO3<float, Eigen::DontAlign>;\n#if SOPHUS_CERES\ntemplate class SO3<ceres::Jet<double, 3>>;\n#endif\n\ntemplate <class Scalar>\nclass Tests {\n public:\n  using SO3Type = SO3<Scalar>;\n  using Point = typename SO3<Scalar>::Point;\n  using Tangent = typename SO3<Scalar>::Tangent;\n  Scalar const kPi = Constants<Scalar>::pi();\n\n  Tests() {\n    so3_vec_.push_back(SO3Type(Eigen::Quaternion<Scalar>(\n        Scalar(0.1e-11), Scalar(0.), Scalar(1.), Scalar(0.))));\n    so3_vec_.push_back(SO3Type(Eigen::Quaternion<Scalar>(\n        Scalar(-1), Scalar(0.00001), Scalar(0.0), Scalar(0.0))));\n    so3_vec_.push_back(\n        SO3Type::exp(Point(Scalar(0.2), Scalar(0.5), Scalar(0.0))));\n    so3_vec_.push_back(\n        SO3Type::exp(Point(Scalar(0.2), Scalar(0.5), Scalar(-1.0))));\n    so3_vec_.push_back(SO3Type::exp(Point(Scalar(0.), Scalar(0.), Scalar(0.))));\n    so3_vec_.push_back(\n        SO3Type::exp(Point(Scalar(0.), Scalar(0.), Scalar(0.00001))));\n    so3_vec_.push_back(SO3Type::exp(Point(kPi, Scalar(0), Scalar(0))));\n    so3_vec_.push_back(\n        SO3Type::exp(Point(Scalar(0.2), Scalar(0.5), Scalar(0.0))) *\n        SO3Type::exp(Point(kPi, Scalar(0), Scalar(0))) *\n        SO3Type::exp(Point(Scalar(-0.2), Scalar(-0.5), Scalar(-0.0))));\n    so3_vec_.push_back(\n        SO3Type::exp(Point(Scalar(0.3), Scalar(0.5), Scalar(0.1))) *\n        SO3Type::exp(Point(kPi, Scalar(0), Scalar(0))) *\n        SO3Type::exp(Point(Scalar(-0.3), Scalar(-0.5), Scalar(-0.1))));\n    tangent_vec_.push_back(Tangent(Scalar(0), 
Scalar(0), Scalar(0)));\n    tangent_vec_.push_back(Tangent(Scalar(1), Scalar(0), Scalar(0)));\n    tangent_vec_.push_back(Tangent(Scalar(0), Scalar(1), Scalar(0)));\n    tangent_vec_.push_back(\n        Tangent(Scalar(kPi / 2.), Scalar(kPi / 2.), Scalar(0)));\n    tangent_vec_.push_back(Tangent(Scalar(-1), Scalar(1), Scalar(0)));\n    tangent_vec_.push_back(Tangent(Scalar(20), Scalar(-1), Scalar(0)));\n    tangent_vec_.push_back(Tangent(Scalar(30), Scalar(5), Scalar(-1)));\n\n    point_vec_.push_back(Point(Scalar(1), Scalar(2), Scalar(4)));\n    point_vec_.push_back(Point(Scalar(1), Scalar(-3), Scalar(0.5)));\n    point_vec_.push_back(Point(Scalar(-5), Scalar(-6), Scalar(7)));\n  }\n\n  void runAll() {\n    bool passed = testLieProperties();\n    passed &= testUnity();\n    passed &= testRawDataAcces();\n    passed &= testConstructors();\n    passed &= testSampleUniformSymmetry();\n    passed &= testFit();\n    processTestResult(passed);\n  }\n\n private:\n  bool testLieProperties() {\n    LieGroupTests<SO3Type> tests(so3_vec_, tangent_vec_, point_vec_);\n    return tests.doAllTestsPass();\n  }\n\n  bool testUnity() {\n    bool passed = true;\n    // Test that the complex number magnitude stays close to one.\n    SO3Type current_q;\n    for (size_t i = 0; i < 1000; ++i) {\n      for (SO3Type const& q : so3_vec_) {\n        current_q *= q;\n      }\n    }\n    SOPHUS_TEST_APPROX(passed, current_q.unit_quaternion().norm(), Scalar(1),\n                       Constants<Scalar>::epsilon(), \"Magnitude drift\");\n    return passed;\n  }\n\n  bool testRawDataAcces() {\n    bool passed = true;\n    Eigen::Matrix<Scalar, 4, 1> raw = {Scalar(0), Scalar(1), Scalar(0),\n                                       Scalar(0)};\n    Eigen::Map<SO3Type const> map_of_const_so3(raw.data());\n    SOPHUS_TEST_APPROX(passed,\n                       map_of_const_so3.unit_quaternion().coeffs().eval(), raw,\n                       Constants<Scalar>::epsilon(), \"\");\n    
SOPHUS_TEST_EQUAL(passed,\n                      map_of_const_so3.unit_quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    Eigen::Map<SO3Type const> const_shallow_copy = map_of_const_so3;\n    SOPHUS_TEST_EQUAL(passed,\n                      const_shallow_copy.unit_quaternion().coeffs().eval(),\n                      map_of_const_so3.unit_quaternion().coeffs().eval(), \"\");\n\n    Eigen::Matrix<Scalar, 4, 1> raw2 = {Scalar(1), Scalar(0), Scalar(0),\n                                        Scalar(0)};\n    Eigen::Map<SO3Type> map_of_so3(raw.data());\n    Eigen::Quaternion<Scalar> quat;\n    quat.coeffs() = raw2;\n    map_of_so3.setQuaternion(quat);\n    SOPHUS_TEST_APPROX(passed, map_of_so3.unit_quaternion().coeffs().eval(),\n                       raw2, Constants<Scalar>::epsilon(), \"\");\n    SOPHUS_TEST_EQUAL(passed, map_of_so3.unit_quaternion().coeffs().data(),\n                      raw.data(), \"\");\n    SOPHUS_TEST_NEQ(passed, map_of_so3.unit_quaternion().coeffs().data(),\n                    quat.coeffs().data(), \"\");\n    Eigen::Map<SO3Type> shallow_copy = map_of_so3;\n    SOPHUS_TEST_EQUAL(passed, shallow_copy.unit_quaternion().coeffs().eval(),\n                      map_of_so3.unit_quaternion().coeffs().eval(), \"\");\n\n    SO3Type const const_so3(quat);\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, const_so3.data()[i], raw2.data()[i], \"\");\n    }\n\n    SO3Type so3(quat);\n    for (int i = 0; i < 4; ++i) {\n      so3.data()[i] = raw[i];\n    }\n\n    for (int i = 0; i < 4; ++i) {\n      SOPHUS_TEST_EQUAL(passed, so3.data()[i], raw.data()[i], \"\");\n    }\n\n    SOPHUS_TEST_EQUAL(\n        passed, SO3Type::rotX(Scalar(0.2)).matrix(),\n        SO3Type::exp(Point(Scalar(0.2), Scalar(0), Scalar(0))).matrix(), \"\");\n    SOPHUS_TEST_EQUAL(\n        passed, SO3Type::rotY(Scalar(-0.2)).matrix(),\n        SO3Type::exp(Point(Scalar(0), Scalar(-0.2), Scalar(0))).matrix(), \"\");\n    SOPHUS_TEST_EQUAL(\n     
   passed, SO3Type::rotZ(Scalar(1.1)).matrix(),\n        SO3Type::exp(Point(Scalar(0), Scalar(0), Scalar(1.1))).matrix(), \"\");\n\n    Vector4<Scalar> data1(Scalar{1}, Scalar{0}, Scalar{0}, Scalar{0});\n    Vector4<Scalar> data2(Scalar{0}, Scalar{1}, Scalar{0}, Scalar{0});\n    Eigen::Map<SO3Type> map1(data1.data()), map2(data2.data());\n\n    // map -> map assignment\n    map2 = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), map2.matrix(), \"\");\n\n    // map -> type assignment\n    SO3Type copy;\n    copy = map1;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    // type -> map assignment\n    copy = SO3Type::rotZ(Scalar(0.5));\n    map1 = copy;\n    SOPHUS_TEST_EQUAL(passed, map1.matrix(), copy.matrix(), \"\");\n\n    return passed;\n  }\n\n  bool testConstructors() {\n    bool passed = true;\n    Matrix3<Scalar> R = so3_vec_.front().matrix();\n    SO3Type so3(R);\n    SOPHUS_TEST_APPROX(passed, R, so3.matrix(), Constants<Scalar>::epsilon(),\n                       \"\");\n\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool>\n  testSampleUniformSymmetry() {\n    bool passed = true;\n    std::default_random_engine generator(0);\n\n    // A non-rigorous test for checking that our sampleUniform() function is\n    // giving us symmetric results\n    //\n    // We (a) split the output space in half, (b) apply a series of random\n    // rotations to a point, (c) check which half of the output space each\n    // transformed point ends up, and then (d) apply a standard \"coin toss\"\n    // chi-square test\n\n    for (size_t trial = 0; trial < 5; trial++) {\n      std::normal_distribution<Scalar> normal(0, 10);\n\n      // Pick a random plane to split the output space by\n      Point plane_normal(normal(generator), normal(generator),\n                         normal(generator));\n      plane_normal /= plane_normal.norm();\n\n      // Pick a random point to be rotated\n      Point 
input_point(normal(generator), normal(generator),\n                        normal(generator));\n      input_point /= input_point.norm();\n\n      // Randomly rotate points and track # that land on each side of plane\n      size_t positive_count = 0;\n      size_t negative_count = 0;\n      size_t samples = 5000;\n      for (size_t i = 0; i < samples; ++i) {\n        SO3Type R = SO3Type::sampleUniform(generator);\n        if (plane_normal.dot(R * input_point) > 0)\n          positive_count++;\n        else\n          negative_count++;\n      }\n\n      // Chi-square computation, compare against critical value (p=0.01)\n      double expected_count = static_cast<double>(samples) / 2.0;\n      double chi_square =\n          pow(positive_count - expected_count, 2.0) / expected_count +\n          pow(negative_count - expected_count, 2.0) / expected_count;\n      SOPHUS_TEST(passed, chi_square < 6.635, \"\");\n    }\n\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool>\n  testSampleUniformSymmetry() {\n    return true;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> testFit() {\n    bool passed = true;\n\n    for (int i = 0; i < 100; ++i) {\n      Matrix3<Scalar> R = Matrix3<Scalar>::Random();\n      SO3Type so3 = SO3Type::fitToSO3(R);\n      SO3Type so3_2 = SO3Type::fitToSO3(so3.matrix());\n\n      SOPHUS_TEST_APPROX(passed, so3.matrix(), so3_2.matrix(),\n                         Constants<Scalar>::epsilon(), \"\");\n    }\n\n    for (Scalar const angle :\n         {Scalar(0.0), Scalar(0.1), Scalar(0.3), Scalar(-0.7)}) {\n      SOPHUS_TEST_APPROX(passed, SO3Type::rotX(angle).angleX(), angle,\n                         Constants<Scalar>::epsilon(), \"\");\n      SOPHUS_TEST_APPROX(passed, SO3Type::rotY(angle).angleY(), angle,\n                         Constants<Scalar>::epsilon(), \"\");\n      SOPHUS_TEST_APPROX(passed, SO3Type::rotZ(angle).angleZ(), angle,\n         
                Constants<Scalar>::epsilon(), \"\");\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> testFit() {\n    return true;\n  }\n\n  std::vector<SO3Type, Eigen::aligned_allocator<SO3Type>> so3_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\nint test_so3() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test SO3\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  Tests<double>().runAll();\n  cerr << \"Float tests: \" << endl;\n  Tests<float>().runAll();\n\n#if SOPHUS_CERES\n  cerr << \"ceres::Jet<double, 3> tests: \" << endl;\n  Tests<ceres::Jet<double, 3>>().runAll();\n#endif\n\n  return 0;\n}\n}  // namespace Sophus\n\nint main() { return Sophus::test_so3(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/test_velocities.cpp",
    "content": "#include <iostream>\n\n#include \"tests.hpp\"\n\n#include <sophus/velocities.hpp>\n\nnamespace Sophus {\nnamespace experimental {\n\ntemplate <class Scalar>\nbool tests_linear_velocities() {\n  bool passed = true;\n  std::vector<SE3<Scalar>, Eigen::aligned_allocator<SE3<Scalar>>> bar_Ts_baz;\n\n  for (size_t i = 0; i < 10; ++i) {\n    bar_Ts_baz.push_back(SE3<Scalar>::rotX(i * 0.001) *\n                         SE3<Scalar>::rotY(i * 0.001) *\n                         SE3<Scalar>::transX(0.01 * i));\n  }\n\n  SE3<Scalar> foo_T_bar =\n      SE3<Scalar>::rotX(0.5) * SE3<Scalar>::rotZ(0.2) * SE3<Scalar>::transY(2);\n\n  std::vector<SE3<Scalar>, Eigen::aligned_allocator<SE3<Scalar>>> foo_Ts_baz;\n  for (auto const& bar_T_baz : bar_Ts_baz) {\n    foo_Ts_baz.push_back(foo_T_bar * bar_T_baz);\n  }\n\n  auto gen_linear_vels =\n      [](std::vector<SE3<Scalar>, Eigen::aligned_allocator<SE3<Scalar>>> const&\n             a_Ts_b) {\n        std::vector<Vector3<Scalar>, Eigen::aligned_allocator<Vector3<Scalar>>>\n            linearVels_a;\n        for (size_t i = 0; i < a_Ts_b.size() - 1; ++i) {\n          linearVels_a.push_back(a_Ts_b[i + 1].translation() -\n                                 a_Ts_b[i].translation());\n        }\n        return linearVels_a;\n      };\n\n  // linear velocities in frame bar\n  std::vector<Vector3<Scalar>, Eigen::aligned_allocator<Vector3<Scalar>>>\n      linearVels_bar = gen_linear_vels(bar_Ts_baz);\n  // linear velocities in frame foo\n  std::vector<Vector3<Scalar>, Eigen::aligned_allocator<Vector3<Scalar>>>\n      linearVels_foo = gen_linear_vels(foo_Ts_baz);\n\n  for (size_t i = 0; i < linearVels_bar.size(); ++i) {\n    SOPHUS_TEST_APPROX(passed, linearVels_foo[i],\n                       transformVelocity(foo_T_bar, linearVels_bar[i]),\n                       sqrt(Constants<Scalar>::epsilon()), \"\");\n  }\n  return passed;\n}\n\ntemplate <class Scalar>\nbool tests_rotational_velocities() {\n  bool passed = true;\n\n  
SE3<Scalar> foo_T_bar =\n      SE3<Scalar>::rotX(0.5) * SE3<Scalar>::rotZ(0.2) * SE3<Scalar>::transY(2);\n\n  // One parameter subgroup of SE3, motion through space given time t.\n  auto bar_T_baz = [](Scalar t) -> SE3<Scalar> {\n    return SE3<Scalar>::rotX(t * Scalar(0.01)) *\n           SE3<Scalar>::rotY(t * Scalar(0.0001)) *\n           SE3<Scalar>::transX(t * Scalar(0.0001));\n  };\n\n  std::vector<Scalar> ts = {Scalar(0), Scalar(0.3), Scalar(1)};\n\n  Scalar h = Constants<Scalar>::epsilon();\n  for (Scalar t : ts) {\n    // finite difference approximiation of instantanious velocity in frame bar\n    Vector3<Scalar> rotVel_in_frame_bar =\n        finiteDifferenceRotationalVelocity<Scalar>(bar_T_baz, t, h);\n\n    // finite difference approximiation of instantanious velocity in frame foo\n    Vector3<Scalar> rotVel_in_frame_foo =\n        finiteDifferenceRotationalVelocity<Scalar>(\n            [&foo_T_bar, bar_T_baz](Scalar t) -> SE3<Scalar> {\n              return foo_T_bar * bar_T_baz(t);\n            },\n            t, h);\n\n    Vector3<Scalar> rotVel_in_frame_bar2 =\n        transformVelocity(foo_T_bar.inverse(), rotVel_in_frame_foo);\n    SOPHUS_TEST_APPROX(\n        passed, rotVel_in_frame_bar, rotVel_in_frame_bar2,\n        // not too tight threshold, because of finit difference approximation\n        std::sqrt(Constants<Scalar>::epsilon()), \"\");\n\n    // The rotational velocities rotVel_in_frame_foo and rotVel_in_frame_bar\n    // should not be equal since they are in different frames (foo != bar).\n    SOPHUS_TEST_NOT_APPROX(passed, rotVel_in_frame_foo, rotVel_in_frame_bar,\n                           Scalar(1e-3), \"\");\n\n    // Expect same result when using adjoint instead since:\n    //  vee(bar_R_foo * hat(vel_foo) * bar_R_foo^T = bar_R_foo 8 vel_foo.\n    SOPHUS_TEST_APPROX(\n        passed, transformVelocity(foo_T_bar.inverse(), rotVel_in_frame_foo),\n        SO3<Scalar>::vee(foo_T_bar.so3().inverse().matrix() *\n                         
SO3<Scalar>::hat(rotVel_in_frame_foo) *\n                         foo_T_bar.so3().matrix()),\n        Constants<Scalar>::epsilon(), \"\");\n  }\n  return passed;\n}\n\nint test_velocities() {\n  using std::cerr;\n  using std::endl;\n\n  cerr << \"Test Velocities\" << endl << endl;\n  cerr << \"Double tests: \" << endl;\n  bool passed = tests_linear_velocities<double>();\n  passed &= tests_rotational_velocities<double>();\n  processTestResult(passed);\n\n  cerr << \"Float tests: \" << endl;\n  passed = tests_linear_velocities<float>();\n  passed &= tests_rotational_velocities<float>();\n  processTestResult(passed);\n\n  return 0;\n}\n}  // namespace experimental\n}  // namespace Sophus\n\nint main() { return Sophus::experimental::test_velocities(); }\n"
  },
  {
    "path": "Thirdparty/Sophus/test/core/tests.hpp",
    "content": "#ifndef SOPUHS_TESTS_HPP\n#define SOPUHS_TESTS_HPP\n\n#include <array>\n\n#include <Eigen/StdVector>\n#include <unsupported/Eigen/MatrixFunctions>\n\n#include <sophus/average.hpp>\n#include <sophus/interpolate.hpp>\n#include <sophus/num_diff.hpp>\n#include <sophus/spline.hpp>\n#include <sophus/test_macros.hpp>\n\n#ifdef SOPHUS_CERES\n#include <ceres/jet.h>\n#endif\n\nnamespace Sophus {\n\n// compatibility with ceres::Jet types\n#if SOPHUS_CERES\nusing ceres::isfinite;\n#else\nusing std::isfinite;\n#endif\n\ntemplate <typename Scalar>\nHyperplane<Scalar, 2> through(const Vector<Scalar, 2>* points) {\n  return Hyperplane<Scalar, 2>::Through(points[0], points[1]);\n}\n\ntemplate <typename Scalar>\nHyperplane<Scalar, 3> through(const Vector<Scalar, 3>* points) {\n  return Hyperplane<Scalar, 3>::Through(points[0], points[1], points[2]);\n}\n\ntemplate <class LieGroup_>\nclass LieGroupTests {\n public:\n  using LieGroup = LieGroup_;\n  using Scalar = typename LieGroup::Scalar;\n  using Transformation = typename LieGroup::Transformation;\n  using Tangent = typename LieGroup::Tangent;\n  using Point = typename LieGroup::Point;\n  using HomogeneousPoint = typename LieGroup::HomogeneousPoint;\n  using ConstPointMap = Eigen::Map<const Point>;\n  using Line = typename LieGroup::Line;\n  using Hyperplane = typename LieGroup::Hyperplane;\n  using Adjoint = typename LieGroup::Adjoint;\n  static int constexpr Dim = LieGroup::Dim;\n  static int constexpr N = LieGroup::N;\n  static int constexpr DoF = LieGroup::DoF;\n  static int constexpr num_parameters = LieGroup::num_parameters;\n\n  LieGroupTests(\n      std::vector<LieGroup, Eigen::aligned_allocator<LieGroup>> const&\n          group_vec,\n      std::vector<Tangent, Eigen::aligned_allocator<Tangent>> const&\n          tangent_vec,\n      std::vector<Point, Eigen::aligned_allocator<Point>> const& point_vec)\n      : group_vec_(group_vec),\n        tangent_vec_(tangent_vec),\n        point_vec_(point_vec) {}\n\n  
bool adjointTest() {\n    bool passed = true;\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      Transformation T = group_vec_[i].matrix();\n      Adjoint Ad = group_vec_[i].Adj();\n      for (size_t j = 0; j < tangent_vec_.size(); ++j) {\n        Tangent x = tangent_vec_[j];\n\n        Transformation I;\n        I.setIdentity();\n        Tangent ad1 = Ad * x;\n        Tangent ad2 = LieGroup::vee(T * LieGroup::hat(x) *\n                                    group_vec_[i].inverse().matrix());\n        SOPHUS_TEST_APPROX(passed, ad1, ad2, Scalar(10) * kSmallEps,\n                           \"Adjoint case {}, {}\", i, j);\n      }\n    }\n    return passed;\n  }\n\n  // For the time being, leftJacobian and leftJacobianInverse are only\n  // implemented for SO3 and SE3\n  template <class G = LieGroup>\n  enable_if_t<std::is_same<G, SO3<Scalar>>::value ||\n                  std::is_same<G, SE3<Scalar>>::value,\n              bool>\n  leftJacobianTest() {\n    bool passed = true;\n    for (const auto& x : tangent_vec_) {\n      LieGroup const inv_exp_x = LieGroup::exp(x).inverse();\n\n      // Explicit implement the derivative in the Lie Group in first principles\n      // as a vector field: D_x f(x) = D_h log(f(x + h) . 
f(x)^{-1})\n      Matrix<Scalar, DoF, DoF> const J_num =\n          vectorFieldNumDiff<Scalar, DoF, DoF>(\n              [&inv_exp_x](Tangent const& x_plus_delta) {\n                return (LieGroup::exp(x_plus_delta) * inv_exp_x).log();\n              },\n              x);\n\n      // Analytical left Jacobian\n      Matrix<Scalar, DoF, DoF> const J = LieGroup::leftJacobian(x);\n      SOPHUS_TEST_APPROX(passed, J, J_num, Scalar(100) * kSmallEpsSqrt,\n                         \"Left Jacobian\");\n\n      Matrix<Scalar, DoF, DoF> J_inv = LieGroup::leftJacobianInverse(x);\n\n      SOPHUS_TEST_APPROX(passed, J, J_inv.inverse().eval(),\n                         Scalar(100) * kSmallEpsSqrt,\n                         \"Left Jacobian and its analytical Inverse\");\n    }\n\n    return passed;\n  }\n\n  template <class G = LieGroup>\n  enable_if_t<!(std::is_same<G, SO3<Scalar>>::value ||\n                std::is_same<G, SE3<Scalar>>::value),\n              bool>\n  leftJacobianTest() {\n    return true;\n  }\n\n  bool moreJacobiansTest() {\n    bool passed = true;\n    for (auto const& point : point_vec_) {\n      Matrix<Scalar, Dim, DoF> J = LieGroup::Dx_exp_x_times_point_at_0(point);\n      Tangent t;\n      setToZero(t);\n      Matrix<Scalar, Dim, DoF> const J_num =\n          vectorFieldNumDiff<Scalar, Dim, DoF>(\n              [point](Tangent const& x) { return LieGroup::exp(x) * point; },\n              t);\n\n      SOPHUS_TEST_APPROX(passed, J, J_num, kSmallEpsSqrt,\n                         \"Dx_exp_x_times_point_at_0\");\n    }\n    return passed;\n  }\n\n  bool contructorAndAssignmentTest() {\n    bool passed = true;\n    for (LieGroup foo_T_bar : group_vec_) {\n      LieGroup foo_T2_bar = foo_T_bar;\n      SOPHUS_TEST_APPROX(passed, foo_T_bar.matrix(), foo_T2_bar.matrix(),\n                         kSmallEps, \"Copy constructor: {}\\nvs\\n {}\",\n                         SOPHUS_FMT_ARG(transpose(foo_T_bar.matrix())),\n                         
SOPHUS_FMT_ARG(transpose(foo_T2_bar.matrix())));\n      LieGroup foo_T3_bar;\n      foo_T3_bar = foo_T_bar;\n      SOPHUS_TEST_APPROX(passed, foo_T_bar.matrix(), foo_T3_bar.matrix(),\n                         kSmallEps, \"Copy assignment: {}\\nvs\\n {}\",\n                         SOPHUS_FMT_ARG(transpose(foo_T_bar.matrix())),\n                         SOPHUS_FMT_ARG(transpose(foo_T3_bar.matrix())));\n\n      LieGroup foo_T4_bar(foo_T_bar.matrix());\n      SOPHUS_TEST_APPROX(passed, foo_T_bar.matrix(), foo_T4_bar.matrix(),\n                         kSmallEps,\n                         \"Constructor from homogeneous matrix: {}\\nvs\\n {}\",\n                         SOPHUS_FMT_ARG(transpose(foo_T_bar.matrix())),\n                         SOPHUS_FMT_ARG(transpose(foo_T4_bar.matrix())));\n\n      Eigen::Map<LieGroup> foo_Tmap_bar(foo_T_bar.data());\n      LieGroup foo_T5_bar = foo_Tmap_bar;\n      SOPHUS_TEST_APPROX(passed, foo_T_bar.matrix(), foo_T5_bar.matrix(),\n                         kSmallEps,\n                         \"Assignment from Eigen::Map type: {}\\nvs\\n {}\",\n                         SOPHUS_FMT_ARG(transpose(foo_T_bar.matrix())),\n                         SOPHUS_FMT_ARG(transpose(foo_T5_bar.matrix())));\n\n      Eigen::Map<LieGroup const> foo_Tcmap_bar(foo_T_bar.data());\n      LieGroup foo_T6_bar;\n      foo_T6_bar = foo_Tcmap_bar;\n      SOPHUS_TEST_APPROX(passed, foo_T_bar.matrix(), foo_T5_bar.matrix(),\n                         kSmallEps,\n                         \"Assignment from Eigen::Map type: {}\\nvs\\n {}\",\n                         SOPHUS_FMT_ARG(transpose(foo_T_bar.matrix())),\n                         SOPHUS_FMT_ARG(transpose(foo_T5_bar.matrix())));\n\n      LieGroup I;\n      Eigen::Map<LieGroup> foo_Tmap2_bar(I.data());\n      foo_Tmap2_bar = foo_T_bar;\n      SOPHUS_TEST_APPROX(passed, foo_Tmap2_bar.matrix(), foo_T_bar.matrix(),\n                         kSmallEps,\n                         \"Assignment to Eigen::Map type: 
{}\\nvs\\n {}\",\n                         SOPHUS_FMT_ARG(transpose(foo_Tmap2_bar.matrix())),\n                         SOPHUS_FMT_ARG(transpose(foo_T_bar.matrix())));\n    }\n    return passed;\n  }\n\n  bool derivativeTest() {\n    bool passed = true;\n\n    LieGroup g;\n    for (int i = 0; i < DoF; ++i) {\n      Transformation Gi = g.Dxi_exp_x_matrix_at_0(i);\n      Transformation Gi2 = curveNumDiff(\n          [i](Scalar xi) -> Transformation {\n            Tangent x;\n            setToZero(x);\n            setElementAt(x, xi, i);\n            return LieGroup::exp(x).matrix();\n          },\n          Scalar(0));\n      SOPHUS_TEST_APPROX(passed, Gi, Gi2, kSmallEpsSqrt,\n                         \"Dxi_exp_x_matrix_at_ case {}\", i);\n    }\n\n    return passed;\n  }\n\n  template <class G = LieGroup>\n  bool additionalDerivativeTest() {\n    bool passed = true;\n    for (size_t j = 0; j < tangent_vec_.size(); ++j) {\n      Tangent a = tangent_vec_[j];\n      Eigen::Matrix<Scalar, num_parameters, DoF> J = LieGroup::Dx_exp_x(a);\n      Eigen::Matrix<Scalar, num_parameters, DoF> J_num =\n          vectorFieldNumDiff<Scalar, num_parameters, DoF>(\n              [](Tangent const& x) -> Sophus::Vector<Scalar, num_parameters> {\n                return LieGroup::exp(x).params();\n              },\n              a);\n\n      SOPHUS_TEST_APPROX(passed, J, J_num, 3 * kSmallEpsSqrt,\n                         \"Dx_exp_x case: {}\", j);\n    }\n\n    Tangent o;\n    setToZero(o);\n    Eigen::Matrix<Scalar, num_parameters, DoF> J = LieGroup::Dx_exp_x_at_0();\n    Eigen::Matrix<Scalar, num_parameters, DoF> J_num =\n        vectorFieldNumDiff<Scalar, num_parameters, DoF>(\n            [](Tangent const& x) -> Sophus::Vector<Scalar, num_parameters> {\n              return LieGroup::exp(x).params();\n            },\n            o);\n    SOPHUS_TEST_APPROX(passed, J, J_num, kSmallEpsSqrt, \"Dx_exp_x_at_0\");\n\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      LieGroup T 
= group_vec_[i];\n\n      Eigen::Matrix<Scalar, num_parameters, DoF> J = T.Dx_this_mul_exp_x_at_0();\n      Eigen::Matrix<Scalar, num_parameters, DoF> J_num =\n          vectorFieldNumDiff<Scalar, num_parameters, DoF>(\n              [T](Tangent const& x) -> Sophus::Vector<Scalar, num_parameters> {\n                return (T * LieGroup::exp(x)).params();\n              },\n              o);\n\n      SOPHUS_TEST_APPROX(passed, J, J_num, kSmallEpsSqrt,\n                         \"Dx_this_mul_exp_x_at_0 case: {}\", i);\n    }\n\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      LieGroup T = group_vec_[i];\n\n      Eigen::Matrix<Scalar, DoF, DoF> J =\n          T.Dx_log_this_inv_by_x_at_this() * T.Dx_this_mul_exp_x_at_0();\n      Eigen::Matrix<Scalar, DoF, DoF> J_exp =\n          Eigen::Matrix<Scalar, DoF, DoF>::Identity();\n\n      SOPHUS_TEST_APPROX(passed, J, J_exp, kSmallEpsSqrt,\n                         \"Dy_log_this_inv_by_at_x case: {}\", i);\n    }\n    return passed;\n  }\n\n  bool productTest() {\n    bool passed = true;\n\n    for (size_t i = 0; i < group_vec_.size() - 1; ++i) {\n      LieGroup T1 = group_vec_[i];\n      LieGroup T2 = group_vec_[i + 1];\n      LieGroup mult = T1 * T2;\n      T1 *= T2;\n      SOPHUS_TEST_APPROX(passed, T1.matrix(), mult.matrix(), kSmallEps,\n                         \"Product case: {}\", i);\n    }\n    return passed;\n  }\n\n  bool expLogTest() {\n    bool passed = true;\n\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      Transformation T1 = group_vec_[i].matrix();\n      Transformation T2 = LieGroup::exp(group_vec_[i].log()).matrix();\n      SOPHUS_TEST_APPROX(passed, T1, T2, kSmallEps, \"G - exp(log(G)) case: {}\",\n                         i);\n    }\n    return passed;\n  }\n\n  bool expMapTest() {\n    bool passed = true;\n    for (size_t i = 0; i < tangent_vec_.size(); ++i) {\n      Tangent omega = tangent_vec_[i];\n      Transformation exp_x = LieGroup::exp(omega).matrix();\n      Transformation 
expmap_hat_x = (LieGroup::hat(omega)).exp();\n      SOPHUS_TEST_APPROX(passed, exp_x, expmap_hat_x, Scalar(10) * kSmallEps,\n                         \"expmap(hat(x)) - exp(x) case: {}\", i);\n    }\n    return passed;\n  }\n\n  bool groupActionTest() {\n    bool passed = true;\n\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      for (size_t j = 0; j < point_vec_.size(); ++j) {\n        Point const& p = point_vec_[j];\n        Point point1 = group_vec_[i] * p;\n\n        HomogeneousPoint hp = p.homogeneous();\n        HomogeneousPoint hpoint1 = group_vec_[i] * hp;\n\n        ConstPointMap p_map(p.data());\n        Point pointmap1 = group_vec_[i] * p_map;\n\n        Transformation T = group_vec_[i].matrix();\n        Point gt_point1 = map(T, p);\n\n        SOPHUS_TEST_APPROX(passed, point1, gt_point1, kSmallEps,\n                           \"Transform point case: {}\", i);\n        SOPHUS_TEST_APPROX(passed, hpoint1.hnormalized().eval(), gt_point1,\n                           kSmallEps, \"Transform homogeneous point case: {}\",\n                           i);\n        SOPHUS_TEST_APPROX(passed, pointmap1, gt_point1, kSmallEps,\n                           \"Transform map point case: {}\", i);\n      }\n    }\n    return passed;\n  }\n\n  bool lineActionTest() {\n    bool passed = point_vec_.size() > 1;\n\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      for (size_t j = 0; j + 1 < point_vec_.size(); ++j) {\n        Point const& p1 = point_vec_[j];\n        Point const& p2 = point_vec_[j + 1];\n        Line l = Line::Through(p1, p2);\n        Point p1_t = group_vec_[i] * p1;\n        Point p2_t = group_vec_[i] * p2;\n        Line l_t = group_vec_[i] * l;\n\n        SOPHUS_TEST_APPROX(passed, l_t.squaredDistance(p1_t),\n                           static_cast<Scalar>(0), kSmallEps,\n                           \"Transform line case (1st point) : {}\", i);\n        SOPHUS_TEST_APPROX(passed, l_t.squaredDistance(p2_t),\n                           
static_cast<Scalar>(0), kSmallEps,\n                           \"Transform line case (2nd point) : {}\", i);\n        SOPHUS_TEST_APPROX(passed, l_t.direction().squaredNorm(),\n                           l.direction().squaredNorm(), kSmallEps,\n                           \"Transform line case (direction) : {}\", i);\n      }\n    }\n    return passed;\n  }\n\n  bool planeActionTest() {\n    const int PointDim = Point::RowsAtCompileTime;\n    bool passed = point_vec_.size() >= PointDim;\n    for (size_t i = 0; i < group_vec_.size(); ++i) {\n      for (size_t j = 0; j + PointDim - 1 < point_vec_.size(); ++j) {\n        Point points[PointDim], points_t[PointDim];\n        for (int k = 0; k < PointDim; ++k) {\n          points[k] = point_vec_[j + k];\n          points_t[k] = group_vec_[i] * points[k];\n        }\n\n        Hyperplane const plane = through(points);\n\n        Hyperplane const plane_t = group_vec_[i] * plane;\n\n        for (int k = 0; k < PointDim; ++k) {\n          SOPHUS_TEST_APPROX(passed, plane_t.signedDistance(points_t[k]),\n                             static_cast<Scalar>(0.), kSmallEps,\n                             \"Transform plane case (point #{}): {}\", k, i);\n        }\n        SOPHUS_TEST_APPROX(passed, plane_t.normal().squaredNorm(),\n                           plane.normal().squaredNorm(), kSmallEps,\n                           \"Transform plane case (normal): {}\", i);\n      }\n    }\n    return passed;\n  }\n\n  bool lieBracketTest() {\n    bool passed = true;\n    for (size_t i = 0; i < tangent_vec_.size(); ++i) {\n      for (size_t j = 0; j < tangent_vec_.size(); ++j) {\n        Tangent tangent1 =\n            LieGroup::lieBracket(tangent_vec_[i], tangent_vec_[j]);\n        Transformation hati = LieGroup::hat(tangent_vec_[i]);\n        Transformation hatj = LieGroup::hat(tangent_vec_[j]);\n\n        Tangent tangent2 = LieGroup::vee(hati * hatj - hatj * hati);\n        SOPHUS_TEST_APPROX(passed, tangent1, tangent2, kSmallEps,\n       
                    \"Lie Bracket case: {}\", i);\n      }\n    }\n    return passed;\n  }\n\n  bool veeHatTest() {\n    bool passed = true;\n    for (size_t i = 0; i < tangent_vec_.size(); ++i) {\n      SOPHUS_TEST_APPROX(passed, Tangent(tangent_vec_[i]),\n                         LieGroup::vee(LieGroup::hat(tangent_vec_[i])),\n                         kSmallEps, \"Hat-vee case: {}\", i);\n    }\n    return passed;\n  }\n\n  bool newDeleteSmokeTest() {\n    bool passed = true;\n    LieGroup* raw_ptr = nullptr;\n    raw_ptr = new LieGroup();\n    SOPHUS_TEST_NEQ(passed, reinterpret_cast<std::uintptr_t>(raw_ptr), 0, \"\");\n    delete raw_ptr;\n    return passed;\n  }\n\n  bool interpolateAndMeanTest() {\n    bool passed = true;\n    using std::sqrt;\n    Scalar const eps = Constants<Scalar>::epsilon();\n    Scalar const sqrt_eps = sqrt(eps);\n    // TODO: Improve accuracy of ``interpolate`` (and hence ``exp`` and ``log``)\n    //       so that we can use more accurate bounds in these tests, i.e.\n    //       ``eps`` instead of ``sqrt_eps``.\n\n    for (LieGroup const& foo_T_bar : group_vec_) {\n      for (LieGroup const& foo_T_baz : group_vec_) {\n        // Test boundary conditions ``alpha=0`` and ``alpha=1``.\n        LieGroup foo_T_quiz = interpolate(foo_T_bar, foo_T_baz, Scalar(0));\n        SOPHUS_TEST_APPROX(passed, foo_T_quiz.matrix(), foo_T_bar.matrix(),\n                           sqrt_eps, \"\");\n        foo_T_quiz = interpolate(foo_T_bar, foo_T_baz, Scalar(1));\n        SOPHUS_TEST_APPROX(passed, foo_T_quiz.matrix(), foo_T_baz.matrix(),\n                           sqrt_eps, \"\");\n      }\n    }\n    for (Scalar alpha :\n         {Scalar(0.1), Scalar(0.5), Scalar(0.75), Scalar(0.99)}) {\n      for (LieGroup const& foo_T_bar : group_vec_) {\n        for (LieGroup const& foo_T_baz : group_vec_) {\n          LieGroup foo_T_quiz = interpolate(foo_T_bar, foo_T_baz, alpha);\n          // test left-invariance:\n          //\n          // dash_T_foo * 
interp(foo_T_bar, foo_T_baz)\n          // == interp(dash_T_foo * foo_T_bar, dash_T_foo * foo_T_baz)\n\n          if (interp_details::Traits<LieGroup>::hasShortestPathAmbiguity(\n                  foo_T_bar.inverse() * foo_T_baz)) {\n            // skip check since there is a shortest path ambiguity\n            continue;\n          }\n          for (LieGroup const& dash_T_foo : group_vec_) {\n            LieGroup dash_T_quiz = interpolate(dash_T_foo * foo_T_bar,\n                                               dash_T_foo * foo_T_baz, alpha);\n            SOPHUS_TEST_APPROX(passed, dash_T_quiz.matrix(),\n                               (dash_T_foo * foo_T_quiz).matrix(), sqrt_eps,\n                               \"\");\n          }\n          // test inverse-invariance:\n          //\n          // interp(foo_T_bar, foo_T_baz).inverse()\n          // == interp(foo_T_bar.inverse(), dash_T_foo.inverse())\n          LieGroup quiz_T_foo =\n              interpolate(foo_T_bar.inverse(), foo_T_baz.inverse(), alpha);\n          SOPHUS_TEST_APPROX(passed, quiz_T_foo.inverse().matrix(),\n                             foo_T_quiz.matrix(), sqrt_eps, \"\");\n        }\n      }\n\n      for (LieGroup const& bar_T_foo : group_vec_) {\n        for (LieGroup const& baz_T_foo : group_vec_) {\n          LieGroup quiz_T_foo = interpolate(bar_T_foo, baz_T_foo, alpha);\n          // test right-invariance:\n          //\n          // interp(bar_T_foo, bar_T_foo) * foo_T_dash\n          // == interp(bar_T_foo * foo_T_dash, bar_T_foo * foo_T_dash)\n\n          if (interp_details::Traits<LieGroup>::hasShortestPathAmbiguity(\n                  bar_T_foo * baz_T_foo.inverse())) {\n            // skip check since there is a shortest path ambiguity\n            continue;\n          }\n          for (LieGroup const& foo_T_dash : group_vec_) {\n            LieGroup quiz_T_dash = interpolate(bar_T_foo * foo_T_dash,\n                                               baz_T_foo * foo_T_dash, alpha);\n      
      SOPHUS_TEST_APPROX(passed, quiz_T_dash.matrix(),\n                               (quiz_T_foo * foo_T_dash).matrix(), sqrt_eps,\n                               \"\");\n          }\n        }\n      }\n    }\n\n    for (LieGroup const& foo_T_bar : group_vec_) {\n      for (LieGroup const& foo_T_baz : group_vec_) {\n        if (interp_details::Traits<LieGroup>::hasShortestPathAmbiguity(\n                foo_T_bar.inverse() * foo_T_baz)) {\n          // skip check since there is a shortest path ambiguity\n          continue;\n        }\n\n        // test average({A, B}) == interp(A, B):\n        LieGroup foo_T_quiz = interpolate(foo_T_bar, foo_T_baz, 0.5);\n        optional<LieGroup> foo_T_iaverage = iterativeMean(\n            std::array<LieGroup, 2>({{foo_T_bar, foo_T_baz}}), 20);\n        optional<LieGroup> foo_T_average =\n            average(std::array<LieGroup, 2>({{foo_T_bar, foo_T_baz}}));\n        SOPHUS_TEST(passed, bool(foo_T_average),\n                    \"log(foo_T_bar): {}\\nlog(foo_T_baz): {}\",\n                    SOPHUS_FMT_ARG(transpose(foo_T_bar.log())),\n                    SOPHUS_FMT_ARG(transpose(foo_T_baz.log())), \"\");\n        if (foo_T_average) {\n          SOPHUS_TEST_APPROX(\n              passed, foo_T_quiz.matrix(), foo_T_average->matrix(), sqrt_eps,\n              \"log(foo_T_bar): {}\\nlog(foo_T_baz): {}\\n\"\n              \"log(interp): {}\\nlog(average): {}\",\n              SOPHUS_FMT_ARG(transpose(foo_T_bar.log())),\n              SOPHUS_FMT_ARG(transpose(foo_T_baz.log())),\n              SOPHUS_FMT_ARG(transpose(foo_T_quiz.log())),\n              SOPHUS_FMT_ARG(transpose(foo_T_average->log())), \"\");\n        }\n        SOPHUS_TEST(passed, bool(foo_T_iaverage),\n                    \"log(foo_T_bar): {}\\nlog(foo_T_baz): {}\\n\"\n                    \"log(interp): {}\\nlog(iaverage): {}\",\n                    SOPHUS_FMT_ARG(transpose(foo_T_bar.log())),\n                    SOPHUS_FMT_ARG(transpose(foo_T_baz.log())),\n     
               SOPHUS_FMT_ARG(transpose(foo_T_quiz.log())),\n                    SOPHUS_FMT_ARG(transpose(foo_T_iaverage->log())), \"\");\n        if (foo_T_iaverage) {\n          SOPHUS_TEST_APPROX(passed, foo_T_quiz.matrix(),\n                             foo_T_iaverage->matrix(), sqrt_eps,\n                             \"log(foo_T_bar): {}\\nlog(foo_T_baz): {}\",\n                             SOPHUS_FMT_ARG(transpose(foo_T_bar.log())),\n                             SOPHUS_FMT_ARG(transpose(foo_T_baz.log())), \"\");\n        }\n      }\n    }\n\n    return passed;\n  }\n\n  bool testRandomSmoke() {\n    bool passed = true;\n    std::default_random_engine engine;\n    for (int i = 0; i < 100; ++i) {\n      LieGroup g = LieGroup::sampleUniform(engine);\n      SOPHUS_TEST_EQUAL(passed, g.params(), g.params(), \"\");\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_same<S, float>::value, bool> testSpline() {\n    // skip tests for Scalar == float\n    return true;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_same<S, float>::value, bool> testSpline() {\n    // run tests for Scalar != float\n    bool passed = true;\n\n    for (LieGroup const& T_world_foo : group_vec_) {\n      for (LieGroup const& T_world_bar : group_vec_) {\n        std::vector<LieGroup> control_poses;\n        control_poses.push_back(interpolate(T_world_foo, T_world_bar, 0.0));\n\n        for (double p = 0.2; p < 1.0; p += 0.2) {\n          LieGroup T_world_inter = interpolate(T_world_foo, T_world_bar, p);\n          control_poses.push_back(T_world_inter);\n        }\n\n        BasisSplineImpl<LieGroup> spline(control_poses, 1.0);\n\n        LieGroup T = spline.parent_T_spline(0.0, 1.0);\n        LieGroup T2 = spline.parent_T_spline(1.0, 0.0);\n\n        SOPHUS_TEST_APPROX(passed, T.matrix(), T2.matrix(), 10 * kSmallEpsSqrt,\n                           \"parent_T_spline\");\n\n        Transformation Dt_parent_T_spline = 
spline.Dt_parent_T_spline(0.0, 0.5);\n        Transformation Dt_parent_T_spline2 = curveNumDiff(\n            [&](double u_bar) -> Transformation {\n              return spline.parent_T_spline(0.0, u_bar).matrix();\n            },\n            0.5);\n        SOPHUS_TEST_APPROX(passed, Dt_parent_T_spline, Dt_parent_T_spline2,\n                           100 * kSmallEpsSqrt, \"Dt_parent_T_spline\");\n\n        Transformation Dt2_parent_T_spline =\n            spline.Dt2_parent_T_spline(0.0, 0.5);\n        Transformation Dt2_parent_T_spline2 = curveNumDiff(\n            [&](double u_bar) -> Transformation {\n              return spline.Dt_parent_T_spline(0.0, u_bar).matrix();\n            },\n            0.5);\n        SOPHUS_TEST_APPROX(passed, Dt2_parent_T_spline, Dt2_parent_T_spline2,\n                           20 * kSmallEpsSqrt, \"Dt2_parent_T_spline\");\n\n        for (double frac : {0.01, 0.25, 0.5, 0.9, 0.99}) {\n          double t0 = 1.0;\n          double delta_t = 0.1;\n          BasisSpline<LieGroup> spline(control_poses, t0, delta_t);\n          double t = t0 + frac * delta_t;\n\n          Transformation Dt_parent_T_spline = spline.Dt_parent_T_spline(t);\n          Transformation Dt_parent_T_spline2 = curveNumDiff(\n              [&](double t_bar) -> Transformation {\n                return spline.parent_T_spline(t_bar).matrix();\n              },\n              t);\n          SOPHUS_TEST_APPROX(passed, Dt_parent_T_spline, Dt_parent_T_spline2,\n                             80 * kSmallEpsSqrt, \"Dt_parent_T_spline\");\n\n          Transformation Dt2_parent_T_spline = spline.Dt2_parent_T_spline(t);\n          Transformation Dt2_parent_T_spline2 = curveNumDiff(\n              [&](double t_bar) -> Transformation {\n                return spline.Dt_parent_T_spline(t_bar).matrix();\n              },\n              t);\n          SOPHUS_TEST_APPROX(passed, Dt2_parent_T_spline, Dt2_parent_T_spline2,\n                             20 * kSmallEpsSqrt, 
\"Dt2_parent_T_spline\");\n        }\n      }\n    }\n    return passed;\n  }\n\n  template <class S = Scalar>\n  enable_if_t<std::is_floating_point<S>::value, bool> doAllTestsPass() {\n    return doesLargeTestSetPass();\n  }\n\n  template <class S = Scalar>\n  enable_if_t<!std::is_floating_point<S>::value, bool> doAllTestsPass() {\n    return doesSmallTestSetPass();\n  }\n\n private:\n  bool doesSmallTestSetPass() {\n    bool passed = true;\n    passed &= adjointTest();\n    passed &= contructorAndAssignmentTest();\n    passed &= productTest();\n    passed &= expLogTest();\n    passed &= groupActionTest();\n    passed &= lineActionTest();\n    passed &= planeActionTest();\n    passed &= lieBracketTest();\n    passed &= veeHatTest();\n    passed &= newDeleteSmokeTest();\n    return passed;\n  }\n\n  bool doesLargeTestSetPass() {\n    bool passed = true;\n    passed &= doesSmallTestSetPass();\n    passed &= additionalDerivativeTest();\n    passed &= derivativeTest();\n    passed &= expMapTest();\n    passed &= interpolateAndMeanTest();\n    passed &= testRandomSmoke();\n    passed &= testSpline();\n    passed &= leftJacobianTest();\n    passed &= moreJacobiansTest();\n    return passed;\n  }\n\n  Scalar const kSmallEps = Constants<Scalar>::epsilon();\n  Scalar const kSmallEpsSqrt = Constants<Scalar>::epsilonSqrt();\n\n  Eigen::Matrix<Scalar, N - 1, 1> map(\n      Eigen::Matrix<Scalar, N, N> const& T,\n      Eigen::Matrix<Scalar, N - 1, 1> const& p) {\n    return T.template topLeftCorner<N - 1, N - 1>() * p +\n           T.template topRightCorner<N - 1, 1>();\n  }\n\n  Eigen::Matrix<Scalar, N, 1> map(Eigen::Matrix<Scalar, N, N> const& T,\n                                  Eigen::Matrix<Scalar, N, 1> const& p) {\n    return T * p;\n  }\n\n  std::vector<LieGroup, Eigen::aligned_allocator<LieGroup>> group_vec_;\n  std::vector<Tangent, Eigen::aligned_allocator<Tangent>> tangent_vec_;\n  std::vector<Point, Eigen::aligned_allocator<Point>> point_vec_;\n};\n\ntemplate 
<class Scalar>\nstd::vector<SE3<Scalar>, Eigen::aligned_allocator<SE3<Scalar>>> getTestSE3s() {\n  Scalar const kPi = Constants<Scalar>::pi();\n  std::vector<SE3<Scalar>, Eigen::aligned_allocator<SE3<Scalar>>> se3_vec;\n  se3_vec.push_back(SE3<Scalar>(\n      SO3<Scalar>::exp(Vector3<Scalar>(Scalar(0.2), Scalar(0.5), Scalar(0.0))),\n      Vector3<Scalar>(Scalar(0), Scalar(0), Scalar(0))));\n  se3_vec.push_back(SE3<Scalar>(\n      SO3<Scalar>::exp(Vector3<Scalar>(Scalar(0.2), Scalar(0.5), Scalar(-1.0))),\n      Vector3<Scalar>(Scalar(10), Scalar(0), Scalar(0))));\n  se3_vec.push_back(\n      SE3<Scalar>::trans(Vector3<Scalar>(Scalar(0), Scalar(100), Scalar(5))));\n  se3_vec.push_back(SE3<Scalar>::rotZ(Scalar(0.00001)));\n  se3_vec.push_back(\n      SE3<Scalar>::trans(Scalar(0), Scalar(-0.00000001), Scalar(0.0000000001)) *\n      SE3<Scalar>::rotZ(Scalar(0.00001)));\n  se3_vec.push_back(SE3<Scalar>::transX(Scalar(0.01)) *\n                    SE3<Scalar>::rotZ(Scalar(0.00001)));\n  se3_vec.push_back(SE3<Scalar>::trans(Scalar(4), Scalar(-5), Scalar(0)) *\n                    SE3<Scalar>::rotX(kPi));\n  se3_vec.push_back(\n      SE3<Scalar>(SO3<Scalar>::exp(\n                      Vector3<Scalar>(Scalar(0.2), Scalar(0.5), Scalar(0.0))),\n                  Vector3<Scalar>(Scalar(0), Scalar(0), Scalar(0))) *\n      SE3<Scalar>::rotX(kPi) *\n      SE3<Scalar>(SO3<Scalar>::exp(Vector3<Scalar>(Scalar(-0.2), Scalar(-0.5),\n                                                   Scalar(-0.0))),\n                  Vector3<Scalar>(Scalar(0), Scalar(0), Scalar(0))));\n  se3_vec.push_back(\n      SE3<Scalar>(SO3<Scalar>::exp(\n                      Vector3<Scalar>(Scalar(0.3), Scalar(0.5), Scalar(0.1))),\n                  Vector3<Scalar>(Scalar(2), Scalar(0), Scalar(-7))) *\n      SE3<Scalar>::rotX(kPi) *\n      SE3<Scalar>(SO3<Scalar>::exp(Vector3<Scalar>(Scalar(-0.3), Scalar(-0.5),\n                                                   Scalar(-0.1))),\n                  
Vector3<Scalar>(Scalar(0), Scalar(6), Scalar(0))));\n  return se3_vec;\n}\n\ntemplate <class T>\nstd::vector<SE2<T>, Eigen::aligned_allocator<SE2<T>>> getTestSE2s() {\n  T const kPi = Constants<T>::pi();\n  std::vector<SE2<T>, Eigen::aligned_allocator<SE2<T>>> se2_vec;\n  se2_vec.push_back(SE2<T>());\n  se2_vec.push_back(SE2<T>(SO2<T>(0.2), Vector2<T>(10, 0)));\n  se2_vec.push_back(SE2<T>::transY(100));\n  se2_vec.push_back(SE2<T>::trans(Vector2<T>(1, 2)));\n  se2_vec.push_back(SE2<T>(SO2<T>(-1.), Vector2<T>(20, -1)));\n  se2_vec.push_back(\n      SE2<T>(SO2<T>(0.00001), Vector2<T>(-0.00000001, 0.0000000001)));\n  se2_vec.push_back(SE2<T>(SO2<T>(0.3), Vector2<T>(2, 0)) * SE2<T>::rot(kPi) *\n                    SE2<T>(SO2<T>(-0.3), Vector2<T>(0, 6)));\n  return se2_vec;\n}\n}  // namespace Sophus\n#endif  // TESTS_HPP\n"
  },
  {
    "path": "Thirdparty/tessil-src/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.1)\ninclude(GNUInstallDirs)\n\n\nproject(tsl-robin-map VERSION 0.6.3)\n\nadd_library(robin_map INTERFACE)\n# Use tsl::robin_map as target, more consistent with other libraries conventions (Boost, Qt, ...)\nadd_library(tsl::robin_map ALIAS robin_map)\n\ntarget_include_directories(robin_map INTERFACE\n                           \"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>\"\n                           \"$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>\")\n\nlist(APPEND headers \"${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_growth_policy.h\"\n                    \"${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_hash.h\"\n                    \"${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_map.h\"\n                    \"${CMAKE_CURRENT_SOURCE_DIR}/include/tsl/robin_set.h\")\ntarget_sources(robin_map INTERFACE \"$<BUILD_INTERFACE:${headers}>\")\n\nif(MSVC)\n    target_sources(robin_map INTERFACE\n                   \"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/tsl-robin-map.natvis>\"\n                   \"$<INSTALL_INTERFACE:${CMAKE_INSTALL_DATAROOTDIR}/tsl-robin-map.natvis>\")\nendif()\n\n\n\n\n# Installation (only compatible with CMake version >= 3.3)\nif(${CMAKE_VERSION} VERSION_GREATER \"3.2\")\n    include(CMakePackageConfigHelpers)\n\n    ## Install include directory and potential natvis file\n    install(DIRECTORY \"${CMAKE_CURRENT_SOURCE_DIR}/include/tsl\"\n            DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}\")\n\n    if(MSVC)\n        install(FILES \"${CMAKE_CURRENT_SOURCE_DIR}/tsl-robin-map.natvis\"\n                DESTINATION \"${CMAKE_INSTALL_DATAROOTDIR}\")\n    endif()\n\n\n\n    ## Create and install tsl-robin-mapConfig.cmake\n    configure_package_config_file(\"${CMAKE_CURRENT_SOURCE_DIR}/cmake/tsl-robin-mapConfig.cmake.in\"\n                                  \"${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfig.cmake\"\n                                  INSTALL_DESTINATION 
\"${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map\")\n\n    install(FILES \"${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfig.cmake\"\n            DESTINATION \"${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map\")\n\n\n\n    ## Create and install tsl-robin-mapTargets.cmake\n    install(TARGETS robin_map\n            EXPORT tsl-robin-mapTargets)\n\n    install(EXPORT tsl-robin-mapTargets\n            NAMESPACE tsl::\n            DESTINATION \"${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map\")\n\n\n\n    ## Create and install tsl-robin-mapConfigVersion.cmake\n    # tsl-robin-map is header-only and does not depend on the architecture.\n    # Remove CMAKE_SIZEOF_VOID_P from tsl-robin-mapConfigVersion.cmake so that a\n    # tsl-robin-mapConfig.cmake generated for a 64 bit target can be used for 32 bit\n    # targets and vice versa.\n    set(CMAKE_SIZEOF_VOID_P_BACKUP ${CMAKE_SIZEOF_VOID_P})\n    unset(CMAKE_SIZEOF_VOID_P)\n    write_basic_package_version_file(\"${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfigVersion.cmake\"\n                                     COMPATIBILITY SameMajorVersion)\n    set(CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P_BACKUP})\n\n    install(FILES \"${CMAKE_CURRENT_BINARY_DIR}/tsl-robin-mapConfigVersion.cmake\"\n            DESTINATION \"${CMAKE_INSTALL_DATAROOTDIR}/cmake/tsl-robin-map\")\nendif()\n"
  },
  {
    "path": "Thirdparty/tessil-src/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Thirdparty/tessil-src/README.md",
    "content": "[![Build Status](https://travis-ci.org/Tessil/robin-map.svg?branch=master)](https://travis-ci.org/Tessil/robin-map) [![Build status](https://ci.appveyor.com/api/projects/status/lo79n4ya4nta79q4/branch/master?svg=true)](https://ci.appveyor.com/project/Tessil/robin-map/branch/master)\n\n## A C++ implementation of a fast hash map and hash set using robin hood hashing\n\nThe robin-map library is a C++ implementation of a fast hash map and hash set using open-addressing and linear robin hood hashing with backward shift deletion to resolve collisions.\n\nFour classes are provided: `tsl::robin_map`, `tsl::robin_set`, `tsl::robin_pg_map` and `tsl::robin_pg_set`. The first two are faster and use a power of two growth policy, the last two use a prime growth policy instead and are able to cope better with a poor hash function. Use the prime version if there is a chance of repeating patterns in the lower bits of your hash (e.g. you are storing pointers with an identity hash function). See [GrowthPolicy](#growth-policy) for details.\n\nA **benchmark** of `tsl::robin_map` against other hash maps may be found [here](https://tessil.github.io/2016/08/29/benchmark-hopscotch-map.html). This page also gives some advices on which hash table structure you should try for your use case (useful if you are a bit lost with the multiple hash tables implementations in the `tsl` namespace).\n\n### Key features\n\n- Header-only library, just add the [include](include/) directory to your include path and you are ready to go. If you use CMake, you can also use the `tsl::robin_map` exported target from the [CMakeLists.txt](CMakeLists.txt).\n- Fast hash table, check the [benchmark](https://tessil.github.io/2016/08/29/benchmark-hopscotch-map.html) for some numbers.\n- Support for move-only and non-default constructible key/value.\n- Support for heterogeneous lookups allowing the usage of `find` with a type different than `Key` (e.g. 
if you have a map that uses `std::unique_ptr<foo>` as key, you can use a `foo*` or a `std::uintptr_t` as key parameter to `find` without constructing a `std::unique_ptr<foo>`, see [example](#heterogeneous-lookups)).\n- No need to reserve any sentinel value from the keys.\n- Possibility to store the hash value alongside the stored key-value for faster rehash and lookup if the hash or the key equal functions are expensive to compute. Note that hash may be stored even if not asked explicitly when the library can detect that it will have no impact on the size of the structure in memory due to alignment. See the [StoreHash](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#details) template parameter for details.\n- If the hash is known before a lookup, it is possible to pass it as parameter to speed-up the lookup (see `precalculated_hash` parameter in [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#a35021b11aabb61820236692a54b3a0f8)).\n- The library can be used with exceptions disabled (through `-fno-exceptions` option on Clang and GCC, without an `/EH` option on MSVC or simply by defining `TSL_NO_EXCEPTIONS`). `std::terminate` is used in replacement of the `throw` instruction when exceptions are disabled.\n- API closely similar to `std::unordered_map` and `std::unordered_set`.\n\n### Differences compared to `std::unordered_map`\n\n`tsl::robin_map` tries to have an interface similar to `std::unordered_map`, but some differences exist.\n- The **strong exception guarantee only holds** if the following statement is true `std::is_nothrow_swappable<value_type>::value && std::is_nothrow_move_constructible<value_type>::value` (where `value_type` is `Key` for `tsl::robin_set` and `std::pair<Key, T>` for `tsl::robin_map`). Otherwise if an exception is thrown during the swap or the move, the structure may end up in a undefined state. 
Note that per the standard, a `value_type` with a noexcept copy constructor and no move constructor also satisfies this condition and will thus guarantee the strong exception guarantee for the structure (see [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#details) for details).\n- The type `Key`, and also `T` in case of map, must be swappable. They must also be copy and/or move constructible.\n- Iterator invalidation doesn't behave in the same way, any operation modifying the hash table invalidate them (see [API](https://tessil.github.io/robin-map/classtsl_1_1robin__map.html#details) for details).\n- References and pointers to keys or values in the map are invalidated in the same way as iterators to these keys-values.\n- For iterators of `tsl::robin_map`, `operator*()` and `operator->()` return a reference and a pointer to `const std::pair<Key, T>` instead of `std::pair<const Key, T>` making the value `T` not modifiable. To modify the value you have to call the `value()` method of the iterator to get a mutable reference. Example:\n```c++\ntsl::robin_map<int, int> map = {{1, 1}, {2, 1}, {3, 1}};\nfor(auto it = map.begin(); it != map.end(); ++it) {\n    //it->second = 2; // Illegal\n    it.value() = 2; // Ok\n}\n```\n- No support for some buckets related methods (like `bucket_size`, `bucket`, ...).\n\nThese differences also apply between `std::unordered_set` and `tsl::robin_set`.\n\nThread-safety guarantees are the same as `std::unordered_map/set` (i.e. possible to have multiple readers with no writer).\n\n### Growth policy\n\nThe library supports multiple growth policies through the `GrowthPolicy` template parameter. Three policies are provided by the library but you can easily implement your own if needed.\n\n* **[tsl::rh::power_of_two_growth_policy.](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1power__of__two__growth__policy.html)** Default policy used by `tsl::robin_map/set`. 
This policy keeps the size of the bucket array of the hash table to a power of two. This constraint allows the policy to avoid the usage of the slow modulo operation to map a hash to a bucket, instead of <code>hash % 2<sup>n</sup></code>, it uses <code>hash & (2<sup>n</sup> - 1)</code> (see [fast modulo](https://en.wikipedia.org/wiki/Modulo_operation#Performance_issues)). Fast but this may cause a lot of collisions with a poor hash function as the modulo with a power of two only masks the most significant bits in the end.\n* **[tsl::rh::prime_growth_policy.](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1prime__growth__policy.html)** Default policy used by `tsl::robin_pg_map/set`. The policy keeps the size of the bucket array of the hash table to a prime number. When mapping a hash to a bucket, using a prime number as modulo will result in a better distribution of the hash across the buckets even with a poor hash function. To allow the compiler to optimize the modulo operation, the policy use a lookup table with constant primes modulos (see [API](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1prime__growth__policy.html#details) for details). Slower than `tsl::rh::power_of_two_growth_policy` but more secure.\n* **[tsl::rh::mod_growth_policy.](https://tessil.github.io/robin-map/classtsl_1_1rh_1_1mod__growth__policy.html)** The policy grows the map by a customizable growth factor passed in parameter. It then just use the modulo operator to map a hash to a bucket. Slower but more flexible.\n\n\nTo implement your own policy, you have to implement the following interface.\n\n```c++\nstruct custom_policy {\n    // Called on hash table construction and rehash, min_bucket_count_in_out is the minimum buckets\n    // that the hash table needs. The policy can change it to a higher number of buckets if needed \n    // and the hash table will use this value as bucket count. 
If 0 bucket is asked, then the value\n    // must stay at 0.\n    explicit custom_policy(std::size_t& min_bucket_count_in_out);\n    \n    // Return the bucket [0, bucket_count()) to which the hash belongs. \n    // If bucket_count() is 0, it must always return 0.\n    std::size_t bucket_for_hash(std::size_t hash) const noexcept;\n    \n    // Return the number of buckets that should be used on next growth\n    std::size_t next_bucket_count() const;\n    \n    // Maximum number of buckets supported by the policy\n    std::size_t max_bucket_count() const;\n    \n    // Reset the growth policy as if the policy was created with a bucket count of 0.\n    // After a clear, the policy must always return 0 when bucket_for_hash() is called.\n    void clear() noexcept;\n}\n```\n\n### Installation\n\nTo use robin-map, just add the [include](include/) directory to your include path. It is a **header-only** library.\n\nIf you use CMake, you can also use the `tsl::robin_map` exported target from the [CMakeLists.txt](CMakeLists.txt) with `target_link_libraries`. \n```cmake\n# Example where the robin-map project is stored in a third-party directory\nadd_subdirectory(third-party/robin-map)\ntarget_link_libraries(your_target PRIVATE tsl::robin_map)  \n```\n\nIf the project has been installed through `make install`, you can also use `find_package(tsl-robin-map REQUIRED)` instead of `add_subdirectory`.\n\nThe library is available in [vcpkg](https://github.com/Microsoft/vcpkg/tree/master/ports/robin-map) and [conan](https://bintray.com/tessil/tsl/tsl-robin-map%3Atessil). 
It's also present in [Debian](https://packages.debian.org/buster/robin-map-dev), [Ubuntu](https://packages.ubuntu.com/disco/robin-map-dev) and [Fedora](https://apps.fedoraproject.org/packages/robin-map-devel) package repositories.\n\nThe code should work with any C++11 standard-compliant compiler and has been tested with GCC 4.8.4, Clang 3.5.0 and Visual Studio 2015.\n\nTo run the tests you will need the Boost Test library and CMake.\n\n```bash\ngit clone https://github.com/Tessil/robin-map.git\ncd robin-map/tests\nmkdir build\ncd build\ncmake ..\ncmake --build .\n./tsl_robin_map_tests\n```\n\n### Usage\n\nThe API can be found [here](https://tessil.github.io/robin-map/). \n\nAll methods are not documented yet, but they replicate the behavior of the ones in `std::unordered_map` and `std::unordered_set`, except if specified otherwise.\n\n\n### Example\n\n```c++\n#include <cstdint>\n#include <iostream>\n#include <string>\n#include <tsl/robin_map.h>\n#include <tsl/robin_set.h>\n\nint main() {\n    tsl::robin_map<std::string, int> map = {{\"a\", 1}, {\"b\", 2}};\n    map[\"c\"] = 3;\n    map[\"d\"] = 4;\n    \n    map.insert({\"e\", 5});\n    map.erase(\"b\");\n    \n    for(auto it = map.begin(); it != map.end(); ++it) {\n        //it->second += 2; // Not valid.\n        it.value() += 2;\n    }\n    \n    // {d, 6} {a, 3} {e, 7} {c, 5}\n    for(const auto& key_value : map) {\n        std::cout << \"{\" << key_value.first << \", \" << key_value.second << \"}\" << std::endl;\n    }\n    \n        \n    if(map.find(\"a\") != map.end()) {\n        std::cout << \"Found \\\"a\\\".\" << std::endl;\n    }\n    \n    const std::size_t precalculated_hash = std::hash<std::string>()(\"a\");\n    // If we already know the hash beforehand, we can pass it in parameter to speed-up lookups.\n    if(map.find(\"a\", precalculated_hash) != map.end()) {\n        std::cout << \"Found \\\"a\\\" with hash \" << precalculated_hash << \".\" << std::endl;\n    }\n    \n    \n    /*\n     * 
Calculating the hash and comparing two std::string may be slow. \n     * We can store the hash of each std::string in the hash map to make \n     * the inserts and lookups faster by setting StoreHash to true.\n     */ \n    tsl::robin_map<std::string, int, std::hash<std::string>, \n                   std::equal_to<std::string>,\n                   std::allocator<std::pair<std::string, int>>,\n                   true> map2;\n                       \n    map2[\"a\"] = 1;\n    map2[\"b\"] = 2;\n    \n    // {a, 1} {b, 2}\n    for(const auto& key_value : map2) {\n        std::cout << \"{\" << key_value.first << \", \" << key_value.second << \"}\" << std::endl;\n    }\n    \n    \n    \n    \n    tsl::robin_set<int> set;\n    set.insert({1, 9, 0});\n    set.insert({2, -1, 9});\n    \n    // {0} {1} {2} {9} {-1}\n    for(const auto& key : set) {\n        std::cout << \"{\" << key << \"}\" << std::endl;\n    }\n}  \n```\n\n#### Heterogeneous lookups\n\nHeterogeneous overloads allow the usage of other types than `Key` for lookup and erase operations as long as the used types are hashable and comparable to `Key`.\n\nTo activate the heterogeneous overloads in `tsl::robin_map/set`, the qualified-id `KeyEqual::is_transparent` must be valid. It works the same way as for [`std::map::find`](http://en.cppreference.com/w/cpp/container/map/find). 
You can either use [`std::equal_to<>`](http://en.cppreference.com/w/cpp/utility/functional/equal_to_void) or define your own function object.\n\nBoth `KeyEqual` and `Hash` will need to be able to deal with the different types.\n\n```c++\n#include <functional>\n#include <iostream>\n#include <string>\n#include <tsl/robin_map.h>\n\n\nstruct employee {\n    employee(int id, std::string name) : m_id(id), m_name(std::move(name)) {\n    }\n    \n    // Either we include the comparators in the class and we use `std::equal_to<>`...\n    friend bool operator==(const employee& empl, int empl_id) {\n        return empl.m_id == empl_id;\n    }\n    \n    friend bool operator==(int empl_id, const employee& empl) {\n        return empl_id == empl.m_id;\n    }\n    \n    friend bool operator==(const employee& empl1, const employee& empl2) {\n        return empl1.m_id == empl2.m_id;\n    }\n    \n    \n    int m_id;\n    std::string m_name;\n};\n\n// ... or we implement a separate class to compare employees.\nstruct equal_employee {\n    using is_transparent = void;\n    \n    bool operator()(const employee& empl, int empl_id) const {\n        return empl.m_id == empl_id;\n    }\n    \n    bool operator()(int empl_id, const employee& empl) const {\n        return empl_id == empl.m_id;\n    }\n    \n    bool operator()(const employee& empl1, const employee& empl2) const {\n        return empl1.m_id == empl2.m_id;\n    }\n};\n\nstruct hash_employee {\n    std::size_t operator()(const employee& empl) const {\n        return std::hash<int>()(empl.m_id);\n    }\n    \n    std::size_t operator()(int id) const {\n        return std::hash<int>()(id);\n    }\n};\n\n\nint main() {\n    // Use std::equal_to<> which will automatically deduce and forward the parameters\n    tsl::robin_map<employee, int, hash_employee, std::equal_to<>> map; \n    map.insert({employee(1, \"John Doe\"), 2001});\n    map.insert({employee(2, \"Jane Doe\"), 2002});\n    map.insert({employee(3, \"John Smith\"), 
2003});\n\n    // John Smith 2003\n    auto it = map.find(3);\n    if(it != map.end()) {\n        std::cout << it->first.m_name << \" \" << it->second << std::endl;\n    }\n\n    map.erase(1);\n\n\n\n    // Use a custom KeyEqual which has an is_transparent member type\n    tsl::robin_map<employee, int, hash_employee, equal_employee> map2;\n    map2.insert({employee(4, \"Johnny Doe\"), 2004});\n\n    // 2004\n    std::cout << map2.at(4) << std::endl;\n}  \n```\n\n\n\n### License\n\nThe code is licensed under the MIT license, see the [LICENSE file](LICENSE) for details.\n"
  },
  {
    "path": "Thirdparty/tessil-src/appveyor.yml",
    "content": "environment:\n    BOOST_ROOT: C:\\Libraries\\boost_1_67_0\n    matrix:\n        - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015\n          ARCH: Win32\n          BOOST_LIBRARYDIR: C:\\Libraries\\boost_1_67_0\\lib32-msvc-14.0\n          CMAKE_GENERATOR: Visual Studio 14 2015\n\n        - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015\n          ARCH: x64\n          BOOST_LIBRARYDIR: C:\\Libraries\\boost_1_67_0\\lib64-msvc-14.0\n          CMAKE_GENERATOR: Visual Studio 14 2015 Win64\n\n        - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017\n          ARCH: Win32\n          BOOST_LIBRARYDIR: C:\\Libraries\\boost_1_67_0\\lib32-msvc-14.1\n          CMAKE_GENERATOR: Visual Studio 15 2017\n          CXXFLAGS: /permissive-\n\n        - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017\n          ARCH: x64\n          BOOST_LIBRARYDIR: C:\\Libraries\\boost_1_67_0\\lib64-msvc-14.1\n          CMAKE_GENERATOR: Visual Studio 15 2017 Win64\n          CXXFLAGS: /permissive-\n\nconfiguration:\n    - Debug\n    - Release\n\nbuild_script:\n    - cd tests\n    - mkdir build\n    - cd build\n    - cmake -DCMAKE_BUILD_TYPE=%CONFIGURATION% -G\"%CMAKE_GENERATOR%\" ..\n    - cmake --build . --config %CONFIGURATION%\n\ntest_script: \n    - set PATH=%PATH%;%BOOST_LIBRARYDIR%\n    - .\\%CONFIGURATION%\\tsl_robin_map_tests.exe\n"
  },
  {
    "path": "Thirdparty/tessil-src/cmake/tsl-robin-mapConfig.cmake.in",
    "content": "# This module sets the following variables:\n# * tsl-robin-map_FOUND - true if tsl-robin-map found on the system\n# * tsl-robin-map_INCLUDE_DIRS - the directory containing tsl-robin-map headers\n@PACKAGE_INIT@\n\nif(NOT TARGET tsl::robin_map)\n  include(\"${CMAKE_CURRENT_LIST_DIR}/tsl-robin-mapTargets.cmake\")\n  get_target_property(tsl-robin-map_INCLUDE_DIRS tsl::robin_map INTERFACE_INCLUDE_DIRECTORIES)\nendif()\n"
  },
  {
    "path": "Thirdparty/tessil-src/doxygen.conf",
    "content": "# Doxyfile 1.8.11\n\n# This file describes the settings to be used by the documentation system\n# doxygen (www.doxygen.org) for a project.\n#\n# All text after a double hash (##) is considered a comment and is placed in\n# front of the TAG it is preceding.\n#\n# All text after a single hash (#) is considered a comment and will be ignored.\n# The format is:\n# TAG = value [value, ...]\n# For lists, items can also be appended using:\n# TAG += value [value, ...]\n# Values that contain spaces should be placed between quotes (\\\" \\\").\n\n#---------------------------------------------------------------------------\n# Project related configuration options\n#---------------------------------------------------------------------------\n\n# This tag specifies the encoding used for all characters in the config file\n# that follow. The default is UTF-8 which is also the encoding used for all text\n# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv\n# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv\n# for the list of possible encodings.\n# The default value is: UTF-8.\n\nDOXYFILE_ENCODING      = UTF-8\n\n# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by\n# double-quotes, unless you are using Doxywizard) that should identify the\n# project for which the documentation is generated. This name is used in the\n# title of most generated pages and in a few other places.\n# The default value is: My Project.\n\nPROJECT_NAME           = robin-map\n\n# The PROJECT_NUMBER tag can be used to enter a project or revision number. This\n# could be handy for archiving the generated documentation or if some version\n# control system is used.\n\nPROJECT_NUMBER         = \n\n# Using the PROJECT_BRIEF tag one can provide an optional one line description\n# for a project that appears at the top of each page and should give viewer a\n# quick idea about the purpose of the project. 
Keep the description short.\n\nPROJECT_BRIEF          = \n\n# With the PROJECT_LOGO tag one can specify a logo or an icon that is included\n# in the documentation. The maximum height of the logo should not exceed 55\n# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy\n# the logo to the output directory.\n\nPROJECT_LOGO           = \n\n# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path\n# into which the generated documentation will be written. If a relative path is\n# entered, it will be relative to the location where doxygen was started. If\n# left blank the current directory will be used.\n\nOUTPUT_DIRECTORY       = doc/\n\n# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-\n# directories (in 2 levels) under the output directory of each output format and\n# will distribute the generated files over these directories. Enabling this\n# option can be useful when feeding doxygen a huge amount of source files, where\n# putting all generated files in the same directory would otherwise causes\n# performance problems for the file system.\n# The default value is: NO.\n\nCREATE_SUBDIRS         = NO\n\n# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII\n# characters to appear in the names of generated files. If set to NO, non-ASCII\n# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode\n# U+3044.\n# The default value is: NO.\n\nALLOW_UNICODE_NAMES    = NO\n\n# The OUTPUT_LANGUAGE tag is used to specify the language in which all\n# documentation generated by doxygen is written. 
Doxygen will use this\n# information to generate all constant output in the proper language.\n# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,\n# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),\n# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,\n# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),\n# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,\n# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,\n# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,\n# Ukrainian and Vietnamese.\n# The default value is: English.\n\nOUTPUT_LANGUAGE        = English\n\n# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member\n# descriptions after the members that are listed in the file and class\n# documentation (similar to Javadoc). Set to NO to disable this.\n# The default value is: YES.\n\nBRIEF_MEMBER_DESC      = YES\n\n# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief\n# description of a member or function before the detailed description\n#\n# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the\n# brief descriptions will be completely suppressed.\n# The default value is: YES.\n\nREPEAT_BRIEF           = YES\n\n# This tag implements a quasi-intelligent brief description abbreviator that is\n# used to form the text in various listings. Each string in this list, if found\n# as the leading text of the brief description, will be stripped from the text\n# and the result, after processing the whole list, is used as the annotated\n# text. Otherwise, the brief description is used as-is. 
If left blank, the\n# following values are used ($name is automatically replaced with the name of\n# the entity):The $name class, The $name widget, The $name file, is, provides,\n# specifies, contains, represents, a, an and the.\n\nABBREVIATE_BRIEF       = \"The $name class\" \\\n                         \"The $name widget\" \\\n                         \"The $name file\" \\\n                         is \\\n                         provides \\\n                         specifies \\\n                         contains \\\n                         represents \\\n                         a \\\n                         an \\\n                         the\n\n# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then\n# doxygen will generate a detailed section even if there is only a brief\n# description.\n# The default value is: NO.\n\nALWAYS_DETAILED_SEC    = NO\n\n# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all\n# inherited members of a class in the documentation of that class as if those\n# members were ordinary class members. Constructors, destructors and assignment\n# operators of the base classes will not be shown.\n# The default value is: NO.\n\nINLINE_INHERITED_MEMB  = NO\n\n# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path\n# before files name in the file list and in the header files. If set to NO the\n# shortest path that makes the file name unique will be used\n# The default value is: YES.\n\nFULL_PATH_NAMES        = YES\n\n# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.\n# Stripping is only done if one of the specified strings matches the left-hand\n# part of the path. 
The tag can be used to show relative paths in the file list.\n# If left blank the directory from which doxygen is run is used as the path to\n# strip.\n#\n# Note that you can specify absolute paths here, but also relative paths, which\n# will be relative from the directory where doxygen is started.\n# This tag requires that the tag FULL_PATH_NAMES is set to YES.\n\nSTRIP_FROM_PATH        = \n\n# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the\n# path mentioned in the documentation of a class, which tells the reader which\n# header file to include in order to use a class. If left blank only the name of\n# the header file containing the class definition is used. Otherwise one should\n# specify the list of include paths that are normally passed to the compiler\n# using the -I flag.\n\nSTRIP_FROM_INC_PATH    = \n\n# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but\n# less readable) file names. This can be useful is your file systems doesn't\n# support long names like on DOS, Mac, or CD-ROM.\n# The default value is: NO.\n\nSHORT_NAMES            = NO\n\n# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the\n# first line (until the first dot) of a Javadoc-style comment as the brief\n# description. If set to NO, the Javadoc-style will behave just like regular Qt-\n# style comments (thus requiring an explicit @brief command for a brief\n# description.)\n# The default value is: NO.\n\nJAVADOC_AUTOBRIEF      = NO\n\n# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first\n# line (until the first dot) of a Qt-style comment as the brief description. 
If\n# set to NO, the Qt-style will behave just like regular Qt-style comments (thus\n# requiring an explicit \\brief command for a brief description.)\n# The default value is: NO.\n\nQT_AUTOBRIEF           = NO\n\n# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a\n# multi-line C++ special comment block (i.e. a block of //! or /// comments) as\n# a brief description. This used to be the default behavior. The new default is\n# to treat a multi-line C++ comment block as a detailed description. Set this\n# tag to YES if you prefer the old behavior instead.\n#\n# Note that setting this tag to YES also means that rational rose comments are\n# not recognized any more.\n# The default value is: NO.\n\nMULTILINE_CPP_IS_BRIEF = NO\n\n# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the\n# documentation from any documented member that it re-implements.\n# The default value is: YES.\n\nINHERIT_DOCS           = YES\n\n# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new\n# page for each member. If set to NO, the documentation of a member will be part\n# of the file/class/namespace that contains it.\n# The default value is: NO.\n\nSEPARATE_MEMBER_PAGES  = NO\n\n# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen\n# uses this value to replace tabs by spaces in code fragments.\n# Minimum value: 1, maximum value: 16, default value: 4.\n\nTAB_SIZE               = 4\n\n# This tag can be used to specify a number of aliases that act as commands in\n# the documentation. An alias has the form:\n# name=value\n# For example adding\n# \"sideeffect=@par Side Effects:\\n\"\n# will allow you to put the command \\sideeffect (or @sideeffect) in the\n# documentation, which will result in a user-defined paragraph with heading\n# \"Side Effects:\". 
You can put \\n's in the value part of an alias to insert\n# newlines.\n\nALIASES                = \n\n# This tag can be used to specify a number of word-keyword mappings (TCL only).\n# A mapping has the form \"name=value\". For example adding \"class=itcl::class\"\n# will allow you to use the command class in the itcl::class meaning.\n\nTCL_SUBST              = \n\n# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources\n# only. Doxygen will then generate output that is more tailored for C. For\n# instance, some of the names that are used will be different. The list of all\n# members will be omitted, etc.\n# The default value is: NO.\n\nOPTIMIZE_OUTPUT_FOR_C  = NO\n\n# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or\n# Python sources only. Doxygen will then generate output that is more tailored\n# for that language. For instance, namespaces will be presented as packages,\n# qualified scopes will look different, etc.\n# The default value is: NO.\n\nOPTIMIZE_OUTPUT_JAVA   = NO\n\n# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran\n# sources. Doxygen will then generate output that is tailored for Fortran.\n# The default value is: NO.\n\nOPTIMIZE_FOR_FORTRAN   = NO\n\n# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL\n# sources. Doxygen will then generate output that is tailored for VHDL.\n# The default value is: NO.\n\nOPTIMIZE_OUTPUT_VHDL   = NO\n\n# Doxygen selects the parser to use depending on the extension of the files it\n# parses. With this tag you can assign which parser to use for a given\n# extension. Doxygen has a built-in mapping, but you can override or extend it\n# using this tag. 
The format is ext=language, where ext is a file extension, and\n# language is one of the parsers supported by doxygen: IDL, Java, Javascript,\n# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:\n# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:\n# Fortran. In the later case the parser tries to guess whether the code is fixed\n# or free formatted code, this is the default for Fortran type files), VHDL. For\n# instance to make doxygen treat .inc files as Fortran files (default is PHP),\n# and .f files as C (default is Fortran), use: inc=Fortran f=C.\n#\n# Note: For files without extension you can use no_extension as a placeholder.\n#\n# Note that for custom extensions you also need to set FILE_PATTERNS otherwise\n# the files are not read by doxygen.\n\nEXTENSION_MAPPING      = \n\n# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments\n# according to the Markdown format, which allows for more readable\n# documentation. See http://daringfireball.net/projects/markdown/ for details.\n# The output of markdown processing is further processed by doxygen, so you can\n# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in\n# case of backward compatibilities issues.\n# The default value is: YES.\n\nMARKDOWN_SUPPORT       = YES\n\n# When enabled doxygen tries to link words that correspond to documented\n# classes, or namespaces to their corresponding documentation. Such a link can\n# be prevented in individual cases by putting a % sign in front of the word or\n# globally by setting AUTOLINK_SUPPORT to NO.\n# The default value is: YES.\n\nAUTOLINK_SUPPORT       = YES\n\n# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want\n# to include (a tag file for) the STL sources as input, then you should set this\n# tag to YES in order to let doxygen match functions declarations and\n# definitions whose arguments contain STL classes (e.g. 
func(std::string);\n# versus func(std::string) {}). This also make the inheritance and collaboration\n# diagrams that involve STL classes more complete and accurate.\n# The default value is: NO.\n\nBUILTIN_STL_SUPPORT    = NO\n\n# If you use Microsoft's C++/CLI language, you should set this option to YES to\n# enable parsing support.\n# The default value is: NO.\n\nCPP_CLI_SUPPORT        = NO\n\n# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:\n# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen\n# will parse them like normal C++ but will assume all classes use public instead\n# of private inheritance when no explicit protection keyword is present.\n# The default value is: NO.\n\nSIP_SUPPORT            = NO\n\n# For Microsoft's IDL there are propget and propput attributes to indicate\n# getter and setter methods for a property. Setting this option to YES will make\n# doxygen to replace the get and set methods by a property in the documentation.\n# This will only work if the methods are indeed getting or setting a simple\n# type. If this is not the case, or you want to show the methods anyway, you\n# should set this option to NO.\n# The default value is: YES.\n\nIDL_PROPERTY_SUPPORT   = YES\n\n# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC\n# tag is set to YES then doxygen will reuse the documentation of the first\n# member in the group (if any) for the other members of the group. By default\n# all members of a group must be documented explicitly.\n# The default value is: NO.\n\nDISTRIBUTE_GROUP_DOC   = NO\n\n# If one adds a struct or class to a group and this option is enabled, then also\n# any nested class or struct is added to the same group. 
By default this option\n# is disabled and one has to add nested compounds explicitly via \\ingroup.\n# The default value is: NO.\n\nGROUP_NESTED_COMPOUNDS = NO\n\n# Set the SUBGROUPING tag to YES to allow class member groups of the same type\n# (for instance a group of public functions) to be put as a subgroup of that\n# type (e.g. under the Public Functions section). Set it to NO to prevent\n# subgrouping. Alternatively, this can be done per class using the\n# \\nosubgrouping command.\n# The default value is: YES.\n\nSUBGROUPING            = YES\n\n# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions\n# are shown inside the group in which they are included (e.g. using \\ingroup)\n# instead of on a separate page (for HTML and Man pages) or section (for LaTeX\n# and RTF).\n#\n# Note that this feature does not work in combination with\n# SEPARATE_MEMBER_PAGES.\n# The default value is: NO.\n\nINLINE_GROUPED_CLASSES = NO\n\n# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions\n# with only public data fields or simple typedef fields will be shown inline in\n# the documentation of the scope in which they are defined (i.e. file,\n# namespace, or group documentation), provided this scope is documented. If set\n# to NO, structs, classes, and unions are shown on a separate page (for HTML and\n# Man pages) or section (for LaTeX and RTF).\n# The default value is: NO.\n\nINLINE_SIMPLE_STRUCTS  = NO\n\n# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or\n# enum is documented as struct, union, or enum with the name of the typedef. So\n# typedef struct TypeS {} TypeT, will appear in the documentation as a struct\n# with name TypeT. When disabled the typedef will appear as a member of a file,\n# namespace, or class. And the struct will be named TypeS. 
This can typically be\n# useful for C code in case the coding convention dictates that all compound\n# types are typedef'ed and only the typedef is referenced, never the tag name.\n# The default value is: NO.\n\nTYPEDEF_HIDES_STRUCT   = NO\n\n# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This\n# cache is used to resolve symbols given their name and scope. Since this can be\n# an expensive process and often the same symbol appears multiple times in the\n# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small\n# doxygen will become slower. If the cache is too large, memory is wasted. The\n# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range\n# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536\n# symbols. At the end of a run doxygen will report the cache usage and suggest\n# the optimal cache size from a speed point of view.\n# Minimum value: 0, maximum value: 9, default value: 0.\n\nLOOKUP_CACHE_SIZE      = 0\n\n#---------------------------------------------------------------------------\n# Build related configuration options\n#---------------------------------------------------------------------------\n\n# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in\n# documentation are documented, even if no documentation was available. 
Private\n# class members and static file members will be hidden unless the\n# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.\n# Note: This will also disable the warnings about undocumented members that are\n# normally produced when WARNINGS is set to YES.\n# The default value is: NO.\n\nEXTRACT_ALL            = YES\n\n# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will\n# be included in the documentation.\n# The default value is: NO.\n\nEXTRACT_PRIVATE        = NO\n\n# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal\n# scope will be included in the documentation.\n# The default value is: NO.\n\nEXTRACT_PACKAGE        = NO\n\n# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be\n# included in the documentation.\n# The default value is: NO.\n\nEXTRACT_STATIC         = NO\n\n# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined\n# locally in source files will be included in the documentation. If set to NO,\n# only classes defined in header files are included. Does not have any effect\n# for Java sources.\n# The default value is: YES.\n\nEXTRACT_LOCAL_CLASSES  = YES\n\n# This flag is only useful for Objective-C code. If set to YES, local methods,\n# which are defined in the implementation section but not in the interface are\n# included in the documentation. If set to NO, only methods in the interface are\n# included.\n# The default value is: NO.\n\nEXTRACT_LOCAL_METHODS  = NO\n\n# If this flag is set to YES, the members of anonymous namespaces will be\n# extracted and appear in the documentation as a namespace called\n# 'anonymous_namespace{file}', where file will be replaced with the base name of\n# the file that contains the anonymous namespace. 
By default anonymous namespace\n# are hidden.\n# The default value is: NO.\n\nEXTRACT_ANON_NSPACES   = NO\n\n# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all\n# undocumented members inside documented classes or files. If set to NO these\n# members will be included in the various overviews, but no documentation\n# section is generated. This option has no effect if EXTRACT_ALL is enabled.\n# The default value is: NO.\n\nHIDE_UNDOC_MEMBERS     = NO\n\n# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all\n# undocumented classes that are normally visible in the class hierarchy. If set\n# to NO, these classes will be included in the various overviews. This option\n# has no effect if EXTRACT_ALL is enabled.\n# The default value is: NO.\n\nHIDE_UNDOC_CLASSES     = NO\n\n# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend\n# (class|struct|union) declarations. If set to NO, these declarations will be\n# included in the documentation.\n# The default value is: NO.\n\nHIDE_FRIEND_COMPOUNDS  = NO\n\n# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any\n# documentation blocks found inside the body of a function. If set to NO, these\n# blocks will be appended to the function's detailed documentation block.\n# The default value is: NO.\n\nHIDE_IN_BODY_DOCS      = NO\n\n# The INTERNAL_DOCS tag determines if documentation that is typed after a\n# \\internal command is included. If the tag is set to NO then the documentation\n# will be excluded. Set it to YES to include the internal documentation.\n# The default value is: NO.\n\nINTERNAL_DOCS          = NO\n\n# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file\n# names in lower-case letters. If set to YES, upper-case letters are also\n# allowed. This is useful if you have classes or files whose names only differ\n# in case and if your file system supports case sensitive file names. 
Windows\n# and Mac users are advised to set this option to NO.\n# The default value is: system dependent.\n\nCASE_SENSE_NAMES       = NO\n\n# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with\n# their full class and namespace scopes in the documentation. If set to YES, the\n# scope will be hidden.\n# The default value is: NO.\n\nHIDE_SCOPE_NAMES       = NO\n\n# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will\n# append additional text to a page's title, such as Class Reference. If set to\n# YES the compound reference will be hidden.\n# The default value is: NO.\n\nHIDE_COMPOUND_REFERENCE= NO\n\n# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of\n# the files that are included by a file in the documentation of that file.\n# The default value is: YES.\n\nSHOW_INCLUDE_FILES     = YES\n\n# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each\n# grouped member an include statement to the documentation, telling the reader\n# which file to include in order to use the member.\n# The default value is: NO.\n\nSHOW_GROUPED_MEMB_INC  = NO\n\n# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include\n# files with double quotes in the documentation rather than with sharp brackets.\n# The default value is: NO.\n\nFORCE_LOCAL_INCLUDES   = NO\n\n# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the\n# documentation for inline members.\n# The default value is: YES.\n\nINLINE_INFO            = YES\n\n# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the\n# (detailed) documentation of file and class members alphabetically by member\n# name. 
If set to NO, the members will appear in declaration order.\n# The default value is: YES.\n\nSORT_MEMBER_DOCS       = YES\n\n# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief\n# descriptions of file, namespace and class members alphabetically by member\n# name. If set to NO, the members will appear in declaration order. Note that\n# this will also influence the order of the classes in the class list.\n# The default value is: NO.\n\nSORT_BRIEF_DOCS        = NO\n\n# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the\n# (brief and detailed) documentation of class members so that constructors and\n# destructors are listed first. If set to NO the constructors will appear in the\n# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.\n# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief\n# member documentation.\n# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting\n# detailed member documentation.\n# The default value is: NO.\n\nSORT_MEMBERS_CTORS_1ST = NO\n\n# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy\n# of group names into alphabetical order. If set to NO the group names will\n# appear in their defined order.\n# The default value is: NO.\n\nSORT_GROUP_NAMES       = NO\n\n# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by\n# fully-qualified names, including namespaces. 
If set to NO, the class list will\n# be sorted only by class name, not including the namespace part.\n# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.\n# Note: This option applies only to the class list, not to the alphabetical\n# list.\n# The default value is: NO.\n\nSORT_BY_SCOPE_NAME     = NO\n\n# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper\n# type resolution of all parameters of a function it will reject a match between\n# the prototype and the implementation of a member function even if there is\n# only one candidate or it is obvious which candidate to choose by doing a\n# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still\n# accept a match between prototype and implementation in such cases.\n# The default value is: NO.\n\nSTRICT_PROTO_MATCHING  = NO\n\n# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo\n# list. This list is created by putting \\todo commands in the documentation.\n# The default value is: YES.\n\nGENERATE_TODOLIST      = YES\n\n# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test\n# list. This list is created by putting \\test commands in the documentation.\n# The default value is: YES.\n\nGENERATE_TESTLIST      = YES\n\n# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug\n# list. This list is created by putting \\bug commands in the documentation.\n# The default value is: YES.\n\nGENERATE_BUGLIST       = YES\n\n# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)\n# the deprecated list. This list is created by putting \\deprecated commands in\n# the documentation.\n# The default value is: YES.\n\nGENERATE_DEPRECATEDLIST= YES\n\n# The ENABLED_SECTIONS tag can be used to enable conditional documentation\n# sections, marked by \\if <section_label> ... \\endif and \\cond <section_label>\n# ... 
\\endcond blocks.\n\nENABLED_SECTIONS       = \n\n# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the\n# initial value of a variable or macro / define can have for it to appear in the\n# documentation. If the initializer consists of more lines than specified here\n# it will be hidden. Use a value of 0 to hide initializers completely. The\n# appearance of the value of individual variables and macros / defines can be\n# controlled using \\showinitializer or \\hideinitializer command in the\n# documentation regardless of this setting.\n# Minimum value: 0, maximum value: 10000, default value: 30.\n\nMAX_INITIALIZER_LINES  = 30\n\n# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at\n# the bottom of the documentation of classes and structs. If set to YES, the\n# list will mention the files that were used to generate the documentation.\n# The default value is: YES.\n\nSHOW_USED_FILES        = YES\n\n# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This\n# will remove the Files entry from the Quick Index and from the Folder Tree View\n# (if specified).\n# The default value is: YES.\n\nSHOW_FILES             = YES\n\n# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces\n# page. This will remove the Namespaces entry from the Quick Index and from the\n# Folder Tree View (if specified).\n# The default value is: YES.\n\nSHOW_NAMESPACES        = YES\n\n# The FILE_VERSION_FILTER tag can be used to specify a program or script that\n# doxygen should invoke to get the current version for each file (typically from\n# the version control system). Doxygen will invoke the program by executing (via\n# popen()) the command command input-file, where command is the value of the\n# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided\n# by doxygen. Whatever the program writes to standard output is used as the file\n# version. 
For an example see the documentation.\n\nFILE_VERSION_FILTER    = \n\n# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed\n# by doxygen. The layout file controls the global structure of the generated\n# output files in an output format independent way. To create the layout file\n# that represents doxygen's defaults, run doxygen with the -l option. You can\n# optionally specify a file name after the option, if omitted DoxygenLayout.xml\n# will be used as the name of the layout file.\n#\n# Note that if you run doxygen from a directory containing a file called\n# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE\n# tag is left empty.\n\nLAYOUT_FILE            = \n\n# The CITE_BIB_FILES tag can be used to specify one or more bib files containing\n# the reference definitions. This must be a list of .bib files. The .bib\n# extension is automatically appended if omitted. This requires the bibtex tool\n# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.\n# For LaTeX the style of the bibliography can be controlled using\n# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the\n# search path. See also \\cite for info how to create references.\n\nCITE_BIB_FILES         = \n\n#---------------------------------------------------------------------------\n# Configuration options related to warning and progress messages\n#---------------------------------------------------------------------------\n\n# The QUIET tag can be used to turn on/off the messages that are generated to\n# standard output by doxygen. If QUIET is set to YES this implies that the\n# messages are off.\n# The default value is: NO.\n\nQUIET                  = NO\n\n# The WARNINGS tag can be used to turn on/off the warning messages that are\n# generated to standard error (stderr) by doxygen. 
If WARNINGS is set to YES\n# this implies that the warnings are on.\n#\n# Tip: Turn warnings on while writing the documentation.\n# The default value is: YES.\n\nWARNINGS               = YES\n\n# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate\n# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag\n# will automatically be disabled.\n# The default value is: YES.\n\nWARN_IF_UNDOCUMENTED   = YES\n\n# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for\n# potential errors in the documentation, such as not documenting some parameters\n# in a documented function, or documenting parameters that don't exist or using\n# markup commands wrongly.\n# The default value is: YES.\n\nWARN_IF_DOC_ERROR      = YES\n\n# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that\n# are documented, but have no documentation for their parameters or return\n# value. If set to NO, doxygen will only warn about wrong or incomplete\n# parameter documentation, but not about the absence of documentation.\n# The default value is: NO.\n\nWARN_NO_PARAMDOC       = NO\n\n# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when\n# a warning is encountered.\n# The default value is: NO.\n\nWARN_AS_ERROR          = NO\n\n# The WARN_FORMAT tag determines the format of the warning messages that doxygen\n# can produce. The string should contain the $file, $line, and $text tags, which\n# will be replaced by the file and line number from which the warning originated\n# and the warning text. Optionally the format may contain $version, which will\n# be replaced by the version of the file (if it could be obtained via\n# FILE_VERSION_FILTER)\n# The default value is: $file:$line: $text.\n\nWARN_FORMAT            = \"$file:$line: $text\"\n\n# The WARN_LOGFILE tag can be used to specify a file to which warning and error\n# messages should be written. 
If left blank the output is written to standard\n# error (stderr).\n\nWARN_LOGFILE           = \n\n#---------------------------------------------------------------------------\n# Configuration options related to the input files\n#---------------------------------------------------------------------------\n\n# The INPUT tag is used to specify the files and/or directories that contain\n# documented source files. You may enter file names like myfile.cpp or\n# directories like /usr/src/myproject. Separate the files or directories with\n# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING\n# Note: If this tag is empty the current directory is searched.\n\nINPUT                  = include/tsl/ README.md\n\n# This tag can be used to specify the character encoding of the source files\n# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses\n# libiconv (or the iconv built into libc) for the transcoding. See the libiconv\n# documentation (see: http://www.gnu.org/software/libiconv) for the list of\n# possible encodings.\n# The default value is: UTF-8.\n\nINPUT_ENCODING         = UTF-8\n\n# If the value of the INPUT tag contains directories, you can use the\n# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and\n# *.h) to filter out the source-files in the directories.\n#\n# Note that for custom extensions or not directly supported extensions you also\n# need to set EXTENSION_MAPPING for the extension otherwise the files are not\n# read by doxygen.\n#\n# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,\n# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,\n# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,\n# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f, *.for, *.tcl,\n# *.vhd, *.vhdl, *.ucf, *.qsf, *.as and *.js.\n\nFILE_PATTERNS          = *.c \\\n                         *.cc \\\n                         *.cxx \\\n                       
  *.cpp \\\n                         *.c++ \\\n                         *.java \\\n                         *.ii \\\n                         *.ixx \\\n                         *.ipp \\\n                         *.i++ \\\n                         *.inl \\\n                         *.idl \\\n                         *.ddl \\\n                         *.odl \\\n                         *.h \\\n                         *.hh \\\n                         *.hxx \\\n                         *.hpp \\\n                         *.h++ \\\n                         *.cs \\\n                         *.d \\\n                         *.php \\\n                         *.php4 \\\n                         *.php5 \\\n                         *.phtml \\\n                         *.inc \\\n                         *.m \\\n                         *.markdown \\\n                         *.md \\\n                         *.mm \\\n                         *.dox \\\n                         *.py \\\n                         *.pyw \\\n                         *.f90 \\\n                         *.f \\\n                         *.for \\\n                         *.tcl \\\n                         *.vhd \\\n                         *.vhdl \\\n                         *.ucf \\\n                         *.qsf \\\n                         *.as \\\n                         *.js\n\n# The RECURSIVE tag can be used to specify whether or not subdirectories should\n# be searched for input files as well.\n# The default value is: NO.\n\nRECURSIVE              = YES\n\n# The EXCLUDE tag can be used to specify files and/or directories that should be\n# excluded from the INPUT source files. 
This way you can easily exclude a\n# subdirectory from a directory tree whose root is specified with the INPUT tag.\n#\n# Note that relative paths are relative to the directory from which doxygen is\n# run.\n\nEXCLUDE                = \n\n# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or\n# directories that are symbolic links (a Unix file system feature) are excluded\n# from the input.\n# The default value is: NO.\n\nEXCLUDE_SYMLINKS       = NO\n\n# If the value of the INPUT tag contains directories, you can use the\n# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude\n# certain files from those directories.\n#\n# Note that the wildcards are matched against the file with absolute path, so to\n# exclude all test directories for example use the pattern */test/*\n\nEXCLUDE_PATTERNS       = \n\n# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names\n# (namespaces, classes, functions, etc.) that should be excluded from the\n# output. The symbol name can be a fully qualified name, a word, or if the\n# wildcard * is used, a substring. Examples: ANamespace, AClass,\n# AClass::ANamespace, ANamespace::*Test\n#\n# Note that the wildcards are matched against the file with absolute path, so to\n# exclude all test directories use the pattern */test/*\n\nEXCLUDE_SYMBOLS        = \\\ntsl::detail_robin_hash::has_is_transparent* \\\ntsl::detail_robin_hash::make_void* \\\ntsl::detail_robin_hash::is_power_of_two_policy* \\\ntsl::detail_robin_hash::bucket_entry*\n\n# The EXAMPLE_PATH tag can be used to specify one or more files or directories\n# that contain example code fragments that are included (see the \\include\n# command).\n\nEXAMPLE_PATH           = \n\n# If the value of the EXAMPLE_PATH tag contains directories, you can use the\n# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and\n# *.h) to filter out the source-files in the directories. 
If left blank all\n# files are included.\n\nEXAMPLE_PATTERNS       = *\n\n# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be\n# searched for input files to be used with the \\include or \\dontinclude commands\n# irrespective of the value of the RECURSIVE tag.\n# The default value is: NO.\n\nEXAMPLE_RECURSIVE      = NO\n\n# The IMAGE_PATH tag can be used to specify one or more files or directories\n# that contain images that are to be included in the documentation (see the\n# \\image command).\n\nIMAGE_PATH             = \n\n# The INPUT_FILTER tag can be used to specify a program that doxygen should\n# invoke to filter for each input file. Doxygen will invoke the filter program\n# by executing (via popen()) the command:\n#\n# <filter> <input-file>\n#\n# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the\n# name of an input file. Doxygen will then use the output that the filter\n# program writes to standard output. If FILTER_PATTERNS is specified, this tag\n# will be ignored.\n#\n# Note that the filter must not add or remove lines; it is applied before the\n# code is scanned, but not when the output code is generated. If lines are added\n# or removed, the anchors will not be placed correctly.\n#\n# Note that for custom extensions or not directly supported extensions you also\n# need to set EXTENSION_MAPPING for the extension otherwise the files are not\n# properly processed by doxygen.\n\nINPUT_FILTER           = \n\n# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern\n# basis. Doxygen will compare the file name with each pattern and apply the\n# filter if there is a match. The filters are a list of the form: pattern=filter\n# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how\n# filters are used. 
If the FILTER_PATTERNS tag is empty or if none of the\n# patterns match the file name, INPUT_FILTER is applied.\n#\n# Note that for custom extensions or not directly supported extensions you also\n# need to set EXTENSION_MAPPING for the extension otherwise the files are not\n# properly processed by doxygen.\n\nFILTER_PATTERNS        = \n\n# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using\n# INPUT_FILTER) will also be used to filter the input files that are used for\n# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).\n# The default value is: NO.\n\nFILTER_SOURCE_FILES    = NO\n\n# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file\n# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and\n# it is also possible to disable source filtering for a specific pattern using\n# *.ext= (so without naming a filter).\n# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.\n\nFILTER_SOURCE_PATTERNS = \n\n# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that\n# is part of the input, its contents will be placed on the main page\n# (index.html). This can be useful if you have a project on for instance GitHub\n# and want to reuse the introduction page also for the doxygen output.\n\nUSE_MDFILE_AS_MAINPAGE = README.md\n\n#---------------------------------------------------------------------------\n# Configuration options related to source browsing\n#---------------------------------------------------------------------------\n\n# If the SOURCE_BROWSER tag is set to YES then a list of source files will be\n# generated. 
Documented entities will be cross-referenced with these sources.\n#\n# Note: To get rid of all source code in the generated output, make sure that\n# also VERBATIM_HEADERS is set to NO.\n# The default value is: NO.\n\nSOURCE_BROWSER         = NO\n\n# Setting the INLINE_SOURCES tag to YES will include the body of functions,\n# classes and enums directly into the documentation.\n# The default value is: NO.\n\nINLINE_SOURCES         = NO\n\n# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any\n# special comment blocks from generated source code fragments. Normal C, C++ and\n# Fortran comments will always remain visible.\n# The default value is: YES.\n\nSTRIP_CODE_COMMENTS    = YES\n\n# If the REFERENCED_BY_RELATION tag is set to YES then for each documented\n# function all documented functions referencing it will be listed.\n# The default value is: NO.\n\nREFERENCED_BY_RELATION = NO\n\n# If the REFERENCES_RELATION tag is set to YES then for each documented function\n# all documented entities called/used by that function will be listed.\n# The default value is: NO.\n\nREFERENCES_RELATION    = NO\n\n# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set\n# to YES then the hyperlinks from functions in REFERENCES_RELATION and\n# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will\n# link to the documentation.\n# The default value is: YES.\n\nREFERENCES_LINK_SOURCE = YES\n\n# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the\n# source code will show a tooltip with additional information such as prototype,\n# brief description and links to the definition and documentation. 
Since this\n# will make the HTML file larger and loading of large files a bit slower, you\n# can opt to disable this feature.\n# The default value is: YES.\n# This tag requires that the tag SOURCE_BROWSER is set to YES.\n\nSOURCE_TOOLTIPS        = YES\n\n# If the USE_HTAGS tag is set to YES then the references to source code will\n# point to the HTML generated by the htags(1) tool instead of doxygen built-in\n# source browser. The htags tool is part of GNU's global source tagging system\n# (see http://www.gnu.org/software/global/global.html). You will need version\n# 4.8.6 or higher.\n#\n# To use it do the following:\n# - Install the latest version of global\n# - Enable SOURCE_BROWSER and USE_HTAGS in the config file\n# - Make sure the INPUT points to the root of the source tree\n# - Run doxygen as normal\n#\n# Doxygen will invoke htags (and that will in turn invoke gtags), so these\n# tools must be available from the command line (i.e. in the search path).\n#\n# The result: instead of the source browser generated by doxygen, the links to\n# source code will now point to the output of htags.\n# The default value is: NO.\n# This tag requires that the tag SOURCE_BROWSER is set to YES.\n\nUSE_HTAGS              = NO\n\n# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a\n# verbatim copy of the header file for each class for which an include is\n# specified. Set to NO to disable this.\n# See also: Section \\class.\n# The default value is: YES.\n\nVERBATIM_HEADERS       = YES\n\n# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the\n# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the\n# cost of reduced performance. 
This can be particularly helpful with template\n# rich C++ code for which doxygen's built-in parser lacks the necessary type\n# information.\n# Note: The availability of this option depends on whether or not doxygen was\n# generated with the -Duse-libclang=ON option for CMake.\n# The default value is: NO.\n\nCLANG_ASSISTED_PARSING = YES\n\n# If clang assisted parsing is enabled you can provide the compiler with command\n# line options that you would normally use when invoking the compiler. Note that\n# the include paths will already be set by doxygen for the files and directories\n# specified with INPUT and INCLUDE_PATH.\n# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.\n\nCLANG_OPTIONS          = -std=c++11\n\n#---------------------------------------------------------------------------\n# Configuration options related to the alphabetical class index\n#---------------------------------------------------------------------------\n\n# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all\n# compounds will be generated. Enable this if the project contains a lot of\n# classes, structs, unions or interfaces.\n# The default value is: YES.\n\nALPHABETICAL_INDEX     = YES\n\n# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in\n# which the alphabetical index list will be split.\n# Minimum value: 1, maximum value: 20, default value: 5.\n# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.\n\nCOLS_IN_ALPHA_INDEX    = 5\n\n# In case all classes in a project start with a common prefix, all classes will\n# be put under the same header in the alphabetical index. 
The IGNORE_PREFIX tag\n# can be used to specify a prefix (or a list of prefixes) that should be ignored\n# while generating the index headers.\n# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.\n\nIGNORE_PREFIX          = \n\n#---------------------------------------------------------------------------\n# Configuration options related to the HTML output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output\n# The default value is: YES.\n\nGENERATE_HTML          = YES\n\n# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a\n# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of\n# it.\n# The default directory is: html.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_OUTPUT            = html\n\n# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each\n# generated HTML page (for example: .htm, .php, .asp).\n# The default value is: .html.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_FILE_EXTENSION    = .html\n\n# The HTML_HEADER tag can be used to specify a user-defined HTML header file for\n# each generated HTML page. If the tag is left blank doxygen will generate a\n# standard header.\n#\n# To get valid HTML the header file that includes any scripts and style sheets\n# that doxygen needs, which is dependent on the configuration options used (e.g.\n# the setting GENERATE_TREEVIEW). It is highly recommended to start with a\n# default header using\n# doxygen -w html new_header.html new_footer.html new_stylesheet.css\n# YourConfigFile\n# and then modify the file new_header.html. 
See also section \"Doxygen usage\"\n# for information on how to generate the default header that doxygen normally\n# uses.\n# Note: The header is subject to change so you typically have to regenerate the\n# default header when upgrading to a newer version of doxygen. For a description\n# of the possible markers and block names see the documentation.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_HEADER            = \n\n# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each\n# generated HTML page. If the tag is left blank doxygen will generate a standard\n# footer. See HTML_HEADER for more information on how to generate a default\n# footer and what special commands can be used inside the footer. See also\n# section \"Doxygen usage\" for information on how to generate the default footer\n# that doxygen normally uses.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_FOOTER            = \n\n# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style\n# sheet that is used by each HTML page. It can be used to fine-tune the look of\n# the HTML output. If left blank doxygen will generate a default style sheet.\n# See also section \"Doxygen usage\" for information on how to generate the style\n# sheet that doxygen normally uses.\n# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as\n# it is more robust and this tag (HTML_STYLESHEET) will in the future become\n# obsolete.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_STYLESHEET        = \n\n# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined\n# cascading style sheets that are included after the standard style sheets\n# created by doxygen. 
Using this option one can overrule certain style aspects.\n# This is preferred over using HTML_STYLESHEET since it does not replace the\n# standard style sheet and is therefore more robust against future updates.\n# Doxygen will copy the style sheet files to the output directory.\n# Note: The order of the extra style sheet files is of importance (e.g. the last\n# style sheet in the list overrules the setting of the previous ones in the\n# list). For an example see the documentation.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_EXTRA_STYLESHEET  = \n\n# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or\n# other source files which should be copied to the HTML output directory. Note\n# that these files will be copied to the base HTML output directory. Use the\n# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these\n# files. In the HTML_STYLESHEET file, use the file name only. Also note that the\n# files will be copied as-is; there are no commands or markers available.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_EXTRA_FILES       = \n\n# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen\n# will adjust the colors in the style sheet and background images according to\n# this color. Hue is specified as an angle on a colorwheel, see\n# http://en.wikipedia.org/wiki/Hue for more information. For instance the value\n# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300\n# purple, and 360 is red again.\n# Minimum value: 0, maximum value: 359, default value: 220.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_COLORSTYLE_HUE    = 220\n\n# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors\n# in the HTML output. For a value of 0 the output will use grayscales only. 
A\n# value of 255 will produce the most vivid colors.\n# Minimum value: 0, maximum value: 255, default value: 100.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_COLORSTYLE_SAT    = 100\n\n# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the\n# luminance component of the colors in the HTML output. Values below 100\n# gradually make the output lighter, whereas values above 100 make the output\n# darker. The value divided by 100 is the actual gamma applied, so 80 represents\n# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not\n# change the gamma.\n# Minimum value: 40, maximum value: 240, default value: 80.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_COLORSTYLE_GAMMA  = 80\n\n# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML\n# page will contain the date and time when the page was generated. Setting this\n# to YES can help to show when doxygen was last run and thus if the\n# documentation is up to date.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_TIMESTAMP         = NO\n\n# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML\n# documentation will contain sections that can be hidden and shown after the\n# page has loaded.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_DYNAMIC_SECTIONS  = NO\n\n# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries\n# shown in the various tree structured indices initially; the user can expand\n# and collapse entries dynamically later on. Doxygen will expand the tree to\n# such a level that at most the specified number of entries are visible (unless\n# a fully collapsed tree already exceeds this amount). So setting the number of\n# entries 1 will produce a full collapsed tree by default. 
0 is a special value\n# representing an infinite number of entries and will result in a full expanded\n# tree by default.\n# Minimum value: 0, maximum value: 9999, default value: 100.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nHTML_INDEX_NUM_ENTRIES = 100\n\n# If the GENERATE_DOCSET tag is set to YES, additional index files will be\n# generated that can be used as input for Apple's Xcode 3 integrated development\n# environment (see: http://developer.apple.com/tools/xcode/), introduced with\n# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a\n# Makefile in the HTML output directory. Running make will produce the docset in\n# that directory and running make install will install the docset in\n# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at\n# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html\n# for more information.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nGENERATE_DOCSET        = NO\n\n# This tag determines the name of the docset feed. A documentation feed provides\n# an umbrella under which multiple documentation sets from a single provider\n# (such as a company or product suite) can be grouped.\n# The default value is: Doxygen generated docs.\n# This tag requires that the tag GENERATE_DOCSET is set to YES.\n\nDOCSET_FEEDNAME        = \"Doxygen generated docs\"\n\n# This tag specifies a string that should uniquely identify the documentation\n# set bundle. This should be a reverse domain-name style string, e.g.\n# com.mycompany.MyDocSet. Doxygen will append .docset to the name.\n# The default value is: org.doxygen.Project.\n# This tag requires that the tag GENERATE_DOCSET is set to YES.\n\nDOCSET_BUNDLE_ID       = org.doxygen.Project\n\n# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify\n# the documentation publisher. This should be a reverse domain-name style\n# string, e.g. 
com.mycompany.MyDocSet.documentation.\n# The default value is: org.doxygen.Publisher.\n# This tag requires that the tag GENERATE_DOCSET is set to YES.\n\nDOCSET_PUBLISHER_ID    = org.doxygen.Publisher\n\n# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.\n# The default value is: Publisher.\n# This tag requires that the tag GENERATE_DOCSET is set to YES.\n\nDOCSET_PUBLISHER_NAME  = Publisher\n\n# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three\n# additional HTML index files: index.hhp, index.hhc, and index.hhk. The\n# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop\n# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on\n# Windows.\n#\n# The HTML Help Workshop contains a compiler that can convert all HTML output\n# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML\n# files are now used as the Windows 98 help format, and will replace the old\n# Windows help format (.hlp) on all Windows platforms in the future. Compressed\n# HTML files also contain an index, a table of contents, and you can search for\n# words in the documentation. The HTML workshop also contains a viewer for\n# compressed HTML files.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nGENERATE_HTMLHELP      = NO\n\n# The CHM_FILE tag can be used to specify the file name of the resulting .chm\n# file. You can add a path in front of the file if the result should not be\n# written to the html output directory.\n# This tag requires that the tag GENERATE_HTMLHELP is set to YES.\n\nCHM_FILE               = \n\n# The HHC_LOCATION tag can be used to specify the location (absolute path\n# including file name) of the HTML help compiler (hhc.exe). 
If non-empty,\n# doxygen will try to run the HTML help compiler on the generated index.hhp.\n# The file has to be specified with full path.\n# This tag requires that the tag GENERATE_HTMLHELP is set to YES.\n\nHHC_LOCATION           = \n\n# The GENERATE_CHI flag controls if a separate .chi index file is generated\n# (YES) or that it should be included in the master .chm file (NO).\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTMLHELP is set to YES.\n\nGENERATE_CHI           = NO\n\n# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)\n# and project file content.\n# This tag requires that the tag GENERATE_HTMLHELP is set to YES.\n\nCHM_INDEX_ENCODING     = \n\n# The BINARY_TOC flag controls whether a binary table of contents is generated\n# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it\n# enables the Previous and Next buttons.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTMLHELP is set to YES.\n\nBINARY_TOC             = NO\n\n# The TOC_EXPAND flag can be set to YES to add extra items for group members to\n# the table of contents of the HTML help documentation and to the tree view.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTMLHELP is set to YES.\n\nTOC_EXPAND             = NO\n\n# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and\n# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that\n# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help\n# (.qch) of the generated HTML documentation.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nGENERATE_QHP           = NO\n\n# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify\n# the file name of the resulting .qch file. 
The path specified is relative to\n# the HTML output folder.\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQCH_FILE               = \n\n# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help\n# Project output. For more information please see Qt Help Project / Namespace\n# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).\n# The default value is: org.doxygen.Project.\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQHP_NAMESPACE          = org.doxygen.Project\n\n# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt\n# Help Project output. For more information please see Qt Help Project / Virtual\n# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-\n# folders).\n# The default value is: doc.\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQHP_VIRTUAL_FOLDER     = doc\n\n# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom\n# filter to add. For more information please see Qt Help Project / Custom\n# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-\n# filters).\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQHP_CUST_FILTER_NAME   = \n\n# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the\n# custom filter to add. For more information please see Qt Help Project / Custom\n# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-\n# filters).\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQHP_CUST_FILTER_ATTRS  = \n\n# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this\n# project's filter section matches. 
Qt Help Project / Filter Attributes (see:\n# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQHP_SECT_FILTER_ATTRS  = \n\n# The QHG_LOCATION tag can be used to specify the location of Qt's\n# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the\n# generated .qhp file.\n# This tag requires that the tag GENERATE_QHP is set to YES.\n\nQHG_LOCATION           = \n\n# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be\n# generated, together with the HTML files, they form an Eclipse help plugin. To\n# install this plugin and make it available under the help contents menu in\n# Eclipse, the contents of the directory containing the HTML and XML files needs\n# to be copied into the plugins directory of eclipse. The name of the directory\n# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.\n# After copying Eclipse needs to be restarted before the help appears.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nGENERATE_ECLIPSEHELP   = NO\n\n# A unique identifier for the Eclipse help plugin. When installing the plugin\n# the directory name containing the HTML and XML files should also have this\n# name. Each documentation set should have its own identifier.\n# The default value is: org.doxygen.Project.\n# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.\n\nECLIPSE_DOC_ID         = org.doxygen.Project\n\n# If you want full control over the layout of the generated HTML pages it might\n# be necessary to disable the index and replace it with your own. The\n# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top\n# of each HTML page. A value of NO enables the index and the value YES disables\n# it. 
Since the tabs in the index contain the same information as the navigation\n# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nDISABLE_INDEX          = NO\n\n# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index\n# structure should be generated to display hierarchical information. If the tag\n# value is set to YES, a side panel will be generated containing a tree-like\n# index structure (just like the one that is generated for HTML Help). For this\n# to work a browser that supports JavaScript, DHTML, CSS and frames is required\n# (i.e. any modern browser). Windows users are probably better off using the\n# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can\n# further fine-tune the look of the index. As an example, the default style\n# sheet generated by doxygen has an example that shows how to put an image at\n# the root of the tree instead of the PROJECT_NAME. 
Since the tree basically has\n# the same information as the tab index, you could consider setting\n# DISABLE_INDEX to YES when enabling this option.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nGENERATE_TREEVIEW      = NO\n\n# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that\n# doxygen will group on one line in the generated HTML documentation.\n#\n# Note that a value of 0 will completely suppress the enum values from appearing\n# in the overview section.\n# Minimum value: 0, maximum value: 20, default value: 4.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nENUM_VALUES_PER_LINE   = 4\n\n# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used\n# to set the initial width (in pixels) of the frame in which the tree is shown.\n# Minimum value: 0, maximum value: 1500, default value: 250.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nTREEVIEW_WIDTH         = 250\n\n# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to\n# external symbols imported via tag files in a separate window.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nEXT_LINKS_IN_WINDOW    = NO\n\n# Use this tag to change the font size of LaTeX formulas included as images in\n# the HTML documentation. When you change the font size after a successful\n# doxygen run you need to manually remove any form_*.png images from the HTML\n# output directory to force them to be regenerated.\n# Minimum value: 8, maximum value: 50, default value: 10.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nFORMULA_FONTSIZE       = 10\n\n# Use the FORMULA_TRANSPARENT tag to determine whether or not the images\n# generated for formulas are transparent PNGs. 
Transparent PNGs are not\n# supported properly for IE 6.0, but are supported on all modern browsers.\n#\n# Note that when changing this option you need to delete any form_*.png files in\n# the HTML output directory before the changes have effect.\n# The default value is: YES.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nFORMULA_TRANSPARENT    = YES\n\n# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see\n# http://www.mathjax.org) which uses client side Javascript for the rendering\n# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX\n# installed or if you want to formulas look prettier in the HTML output. When\n# enabled you may also need to install MathJax separately and configure the path\n# to it using the MATHJAX_RELPATH option.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nUSE_MATHJAX            = NO\n\n# When MathJax is enabled you can set the default output format to be used for\n# the MathJax output. See the MathJax site (see:\n# http://docs.mathjax.org/en/latest/output.html) for more details.\n# Possible values are: HTML-CSS (which is slower, but has the best\n# compatibility), NativeMML (i.e. MathML) and SVG.\n# The default value is: HTML-CSS.\n# This tag requires that the tag USE_MATHJAX is set to YES.\n\nMATHJAX_FORMAT         = HTML-CSS\n\n# When MathJax is enabled you need to specify the location relative to the HTML\n# output directory using the MATHJAX_RELPATH option. The destination directory\n# should contain the MathJax.js script. For instance, if the mathjax directory\n# is located at the same level as the HTML output directory, then\n# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax\n# Content Delivery Network so you can quickly see the result without installing\n# MathJax. 
However, it is strongly recommended to install a local copy of\n# MathJax from http://www.mathjax.org before deployment.\n# The default value is: http://cdn.mathjax.org/mathjax/latest.\n# This tag requires that the tag USE_MATHJAX is set to YES.\n\nMATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest\n\n# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax\n# extension names that should be enabled during MathJax rendering. For example\n# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols\n# This tag requires that the tag USE_MATHJAX is set to YES.\n\nMATHJAX_EXTENSIONS     = \n\n# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces\n# of code that will be used on startup of the MathJax code. See the MathJax site\n# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an\n# example see the documentation.\n# This tag requires that the tag USE_MATHJAX is set to YES.\n\nMATHJAX_CODEFILE       = \n\n# When the SEARCHENGINE tag is enabled doxygen will generate a search box for\n# the HTML output. The underlying search engine uses javascript and DHTML and\n# should work on any modern browser. Note that when using HTML help\n# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)\n# there is already a search function so this one should typically be disabled.\n# For large projects the javascript based search engine can be slow, then\n# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to\n# search using the keyboard; to jump to the search box use <access key> + S\n# (what the <access key> is depends on the OS and browser, but it is typically\n# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down\n# key> to jump into the search results window, the results can be navigated\n# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel\n# the search. 
The filter options can be selected when the cursor is inside the\n# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>\n# to select a filter and <Enter> or <escape> to activate or cancel the filter\n# option.\n# The default value is: YES.\n# This tag requires that the tag GENERATE_HTML is set to YES.\n\nSEARCHENGINE           = NO\n\n# When the SERVER_BASED_SEARCH tag is enabled the search engine will be\n# implemented using a web server instead of a web client using Javascript. There\n# are two flavors of web server based searching depending on the EXTERNAL_SEARCH\n# setting. When disabled, doxygen will generate a PHP script for searching and\n# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing\n# and searching needs to be provided by external tools. See the section\n# \"External Indexing and Searching\" for details.\n# The default value is: NO.\n# This tag requires that the tag SEARCHENGINE is set to YES.\n\nSERVER_BASED_SEARCH    = NO\n\n# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP\n# script for searching. Instead the search results are written to an XML file\n# which needs to be processed by an external indexer. 
Doxygen will invoke an\n# external search engine pointed to by the SEARCHENGINE_URL option to obtain the\n# search results.\n#\n# Doxygen ships with an example indexer (doxyindexer) and search engine\n# (doxysearch.cgi) which are based on the open source search engine library\n# Xapian (see: http://xapian.org/).\n#\n# See the section \"External Indexing and Searching\" for details.\n# The default value is: NO.\n# This tag requires that the tag SEARCHENGINE is set to YES.\n\nEXTERNAL_SEARCH        = NO\n\n# The SEARCHENGINE_URL should point to a search engine hosted by a web server\n# which will return the search results when EXTERNAL_SEARCH is enabled.\n#\n# Doxygen ships with an example indexer (doxyindexer) and search engine\n# (doxysearch.cgi) which are based on the open source search engine library\n# Xapian (see: http://xapian.org/). See the section \"External Indexing and\n# Searching\" for details.\n# This tag requires that the tag SEARCHENGINE is set to YES.\n\nSEARCHENGINE_URL       = \n\n# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed\n# search data is written to a file for indexing by an external tool. With the\n# SEARCHDATA_FILE tag the name of this file can be specified.\n# The default file is: searchdata.xml.\n# This tag requires that the tag SEARCHENGINE is set to YES.\n\nSEARCHDATA_FILE        = searchdata.xml\n\n# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the\n# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is\n# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple\n# projects and redirect the results back to the right project.\n# This tag requires that the tag SEARCHENGINE is set to YES.\n\nEXTERNAL_SEARCH_ID     = \n\n# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen\n# projects other than the one defined by this configuration file, but that are\n# all added to the same external search index. 
Each project needs to have a\n# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of\n# to a relative location where the documentation can be found. The format is:\n# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...\n# This tag requires that the tag SEARCHENGINE is set to YES.\n\nEXTRA_SEARCH_MAPPINGS  = \n\n#---------------------------------------------------------------------------\n# Configuration options related to the LaTeX output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.\n# The default value is: YES.\n\nGENERATE_LATEX         = NO\n\n# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a\n# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of\n# it.\n# The default directory is: latex.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_OUTPUT           = latex\n\n# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be\n# invoked.\n#\n# Note that when enabling USE_PDFLATEX this option is only used for generating\n# bitmaps for formulas in the HTML output, but not in the Makefile that is\n# written to the output directory.\n# The default file is: latex.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_CMD_NAME         = latex\n\n# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate\n# index for LaTeX.\n# The default file is: makeindex.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nMAKEINDEX_CMD_NAME     = makeindex\n\n# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX\n# documents. 
This may be useful for small projects and may help to save some\n# trees in general.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nCOMPACT_LATEX          = NO\n\n# The PAPER_TYPE tag can be used to set the paper type that is used by the\n# printer.\n# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x\n# 14 inches) and executive (7.25 x 10.5 inches).\n# The default value is: a4.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nPAPER_TYPE             = a4\n\n# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names\n# that should be included in the LaTeX output. The package can be specified just\n# by its name or with the correct syntax as to be used with the LaTeX\n# \\usepackage command. To get the times font for instance you can specify :\n# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}\n# To use the option intlimits with the amsmath package you can specify:\n# EXTRA_PACKAGES=[intlimits]{amsmath}\n# If left blank no extra packages will be included.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nEXTRA_PACKAGES         = \n\n# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the\n# generated LaTeX document. The header should contain everything until the first\n# chapter. If it is left blank doxygen will generate a standard header. See\n# section \"Doxygen usage\" for information on how to let doxygen write the\n# default header to a separate file.\n#\n# Note: Only use a user-defined header if you know what you are doing! The\n# following commands have a special meaning inside the header: $title,\n# $datetime, $date, $doxygenversion, $projectname, $projectnumber,\n# $projectbrief, $projectlogo. 
Doxygen will replace $title with the empty\n# string, for the replacement values of the other commands the user is referred\n# to HTML_HEADER.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_HEADER           = \n\n# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the\n# generated LaTeX document. The footer should contain everything after the last\n# chapter. If it is left blank doxygen will generate a standard footer. See\n# LATEX_HEADER for more information on how to generate a default footer and what\n# special commands can be used inside the footer.\n#\n# Note: Only use a user-defined footer if you know what you are doing!\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_FOOTER           = \n\n# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined\n# LaTeX style sheets that are included after the standard style sheets created\n# by doxygen. Using this option one can overrule certain style aspects. Doxygen\n# will copy the style sheet files to the output directory.\n# Note: The order of the extra style sheet files is of importance (e.g. the last\n# style sheet in the list overrules the setting of the previous ones in the\n# list).\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_EXTRA_STYLESHEET = \n\n# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or\n# other source files which should be copied to the LATEX_OUTPUT output\n# directory. Note that the files will be copied as-is; there are no commands or\n# markers available.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_EXTRA_FILES      = \n\n# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is\n# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will\n# contain links (just like the HTML output) instead of page references. 
This\n# makes the output suitable for online browsing using a PDF viewer.\n# The default value is: YES.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nPDF_HYPERLINKS         = YES\n\n# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate\n# the PDF file directly from the LaTeX files. Set this option to YES, to get a\n# higher quality PDF documentation.\n# The default value is: YES.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nUSE_PDFLATEX           = YES\n\n# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode\n# command to the generated LaTeX files. This will instruct LaTeX to keep running\n# if errors occur, instead of asking the user for help. This option is also used\n# when generating formulas in HTML.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_BATCHMODE        = NO\n\n# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the\n# index chapters (such as File Index, Compound Index, etc.) in the output.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_HIDE_INDICES     = NO\n\n# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source\n# code with syntax highlighting in the LaTeX output.\n#\n# Note that which sources are shown also depends on other settings such as\n# SOURCE_BROWSER.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_SOURCE_CODE      = NO\n\n# The LATEX_BIB_STYLE tag can be used to specify the style to use for the\n# bibliography, e.g. plainnat, or ieeetr. 
See\n# http://en.wikipedia.org/wiki/BibTeX and \\cite for more info.\n# The default value is: plain.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_BIB_STYLE        = plain\n\n# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated\n# page will contain the date and time when the page was generated. Setting this\n# to NO can help when comparing the output of multiple runs.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_LATEX is set to YES.\n\nLATEX_TIMESTAMP        = NO\n\n#---------------------------------------------------------------------------\n# Configuration options related to the RTF output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The\n# RTF output is optimized for Word 97 and may not look too pretty with other RTF\n# readers/editors.\n# The default value is: NO.\n\nGENERATE_RTF           = NO\n\n# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a\n# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of\n# it.\n# The default directory is: rtf.\n# This tag requires that the tag GENERATE_RTF is set to YES.\n\nRTF_OUTPUT             = rtf\n\n# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF\n# documents. This may be useful for small projects and may help to save some\n# trees in general.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_RTF is set to YES.\n\nCOMPACT_RTF            = NO\n\n# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will\n# contain hyperlink fields. The RTF file will contain links (just like the HTML\n# output) instead of page references. 
This makes the output suitable for online\n# browsing using Word or some other Word compatible readers that support those\n# fields.\n#\n# Note: WordPad (write) and others do not support links.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_RTF is set to YES.\n\nRTF_HYPERLINKS         = NO\n\n# Load stylesheet definitions from file. Syntax is similar to doxygen's config\n# file, i.e. a series of assignments. You only have to provide replacements,\n# missing definitions are set to their default value.\n#\n# See also section \"Doxygen usage\" for information on how to generate the\n# default style sheet that doxygen normally uses.\n# This tag requires that the tag GENERATE_RTF is set to YES.\n\nRTF_STYLESHEET_FILE    = \n\n# Set optional variables used in the generation of an RTF document. Syntax is\n# similar to doxygen's config file. A template extensions file can be generated\n# using doxygen -e rtf extensionFile.\n# This tag requires that the tag GENERATE_RTF is set to YES.\n\nRTF_EXTENSIONS_FILE    = \n\n# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code\n# with syntax highlighting in the RTF output.\n#\n# Note that which sources are shown also depends on other settings such as\n# SOURCE_BROWSER.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_RTF is set to YES.\n\nRTF_SOURCE_CODE        = NO\n\n#---------------------------------------------------------------------------\n# Configuration options related to the man page output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for\n# classes and files.\n# The default value is: NO.\n\nGENERATE_MAN           = NO\n\n# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a\n# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of\n# it. 
A directory man3 will be created inside the directory specified by\n# MAN_OUTPUT.\n# The default directory is: man.\n# This tag requires that the tag GENERATE_MAN is set to YES.\n\nMAN_OUTPUT             = man\n\n# The MAN_EXTENSION tag determines the extension that is added to the generated\n# man pages. In case the manual section does not start with a number, the number\n# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is\n# optional.\n# The default value is: .3.\n# This tag requires that the tag GENERATE_MAN is set to YES.\n\nMAN_EXTENSION          = .3\n\n# The MAN_SUBDIR tag determines the name of the directory created within\n# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by\n# MAN_EXTENSION with the initial . removed.\n# This tag requires that the tag GENERATE_MAN is set to YES.\n\nMAN_SUBDIR             = \n\n# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it\n# will generate one additional man file for each entity documented in the real\n# man page(s). These additional files only source the real man page, but without\n# them the man command would be unable to find the correct page.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_MAN is set to YES.\n\nMAN_LINKS              = NO\n\n#---------------------------------------------------------------------------\n# Configuration options related to the XML output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that\n# captures the structure of the code including all documentation.\n# The default value is: NO.\n\nGENERATE_XML           = NO\n\n# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
If a\n# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of\n# it.\n# The default directory is: xml.\n# This tag requires that the tag GENERATE_XML is set to YES.\n\nXML_OUTPUT             = xml\n\n# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program\n# listings (including syntax highlighting and cross-referencing information) to\n# the XML output. Note that enabling this will significantly increase the size\n# of the XML output.\n# The default value is: YES.\n# This tag requires that the tag GENERATE_XML is set to YES.\n\nXML_PROGRAMLISTING     = YES\n\n#---------------------------------------------------------------------------\n# Configuration options related to the DOCBOOK output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files\n# that can be used to generate PDF.\n# The default value is: NO.\n\nGENERATE_DOCBOOK       = NO\n\n# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.\n# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in\n# front of it.\n# The default directory is: docbook.\n# This tag requires that the tag GENERATE_DOCBOOK is set to YES.\n\nDOCBOOK_OUTPUT         = docbook\n\n# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the\n# program listings (including syntax highlighting and cross-referencing\n# information) to the DOCBOOK output. 
Note that enabling this will significantly\n# increase the size of the DOCBOOK output.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_DOCBOOK is set to YES.\n\nDOCBOOK_PROGRAMLISTING = NO\n\n#---------------------------------------------------------------------------\n# Configuration options for the AutoGen Definitions output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an\n# AutoGen Definitions (see http://autogen.sf.net) file that captures the\n# structure of the code including all documentation. Note that this feature is\n# still experimental and incomplete at the moment.\n# The default value is: NO.\n\nGENERATE_AUTOGEN_DEF   = NO\n\n#---------------------------------------------------------------------------\n# Configuration options related to the Perl module output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module\n# file that captures the structure of the code including all documentation.\n#\n# Note that this feature is still experimental and incomplete at the moment.\n# The default value is: NO.\n\nGENERATE_PERLMOD       = NO\n\n# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary\n# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI\n# output from the Perl module output.\n# The default value is: NO.\n# This tag requires that the tag GENERATE_PERLMOD is set to YES.\n\nPERLMOD_LATEX          = NO\n\n# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely\n# formatted so it can be parsed by a human reader. This is useful if you want to\n# understand what is going on. 
On the other hand, if this tag is set to NO, the\n# size of the Perl module output will be much smaller and Perl will parse it\n# just the same.\n# The default value is: YES.\n# This tag requires that the tag GENERATE_PERLMOD is set to YES.\n\nPERLMOD_PRETTY         = YES\n\n# The names of the make variables in the generated doxyrules.make file are\n# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful\n# so different doxyrules.make files included by the same Makefile don't\n# overwrite each other's variables.\n# This tag requires that the tag GENERATE_PERLMOD is set to YES.\n\nPERLMOD_MAKEVAR_PREFIX = \n\n#---------------------------------------------------------------------------\n# Configuration options related to the preprocessor\n#---------------------------------------------------------------------------\n\n# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all\n# C-preprocessor directives found in the sources and include files.\n# The default value is: YES.\n\nENABLE_PREPROCESSING   = YES\n\n# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names\n# in the source code. If set to NO, only conditional compilation will be\n# performed. 
Macro expansion can be done in a controlled way by setting\n# EXPAND_ONLY_PREDEF to YES.\n# The default value is: NO.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nMACRO_EXPANSION        = NO\n\n# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then\n# the macro expansion is limited to the macros specified with the PREDEFINED and\n# EXPAND_AS_DEFINED tags.\n# The default value is: NO.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nEXPAND_ONLY_PREDEF     = NO\n\n# If the SEARCH_INCLUDES tag is set to YES, the include files in the\n# INCLUDE_PATH will be searched if a #include is found.\n# The default value is: YES.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nSEARCH_INCLUDES        = YES\n\n# The INCLUDE_PATH tag can be used to specify one or more directories that\n# contain include files that are not input files but should be processed by the\n# preprocessor.\n# This tag requires that the tag SEARCH_INCLUDES is set to YES.\n\nINCLUDE_PATH           = \n\n# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard\n# patterns (like *.h and *.hpp) to filter out the header-files in the\n# directories. If left blank, the patterns specified with FILE_PATTERNS will be\n# used.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nINCLUDE_FILE_PATTERNS  = \n\n# The PREDEFINED tag can be used to specify one or more macro names that are\n# defined before the preprocessor is started (similar to the -D option of e.g.\n# gcc). The argument of the tag is a list of macros of the form: name or\n# name=definition (no spaces). If the definition and the \"=\" are omitted, \"=1\"\n# is assumed. 
To prevent a macro definition from being undefined via #undef or\n# recursively expanded use the := operator instead of the = operator.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nPREDEFINED             = \n\n# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this\n# tag can be used to specify a list of macro names that should be expanded. The\n# macro definition that is found in the sources will be used. Use the PREDEFINED\n# tag if you want to use a different macro definition that overrules the\n# definition found in the source code.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nEXPAND_AS_DEFINED      = \n\n# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will\n# remove all references to function-like macros that are alone on a line, have\n# an all uppercase name, and do not end with a semicolon. Such function macros\n# are typically used for boiler-plate code, and will confuse the parser if not\n# removed.\n# The default value is: YES.\n# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n\nSKIP_FUNCTION_MACROS   = YES\n\n#---------------------------------------------------------------------------\n# Configuration options related to external references\n#---------------------------------------------------------------------------\n\n# The TAGFILES tag can be used to specify one or more tag files. For each tag\n# file the location of the external documentation should be added. The format of\n# a tag file without this location is as follows:\n# TAGFILES = file1 file2 ...\n# Adding location for the tag files is done as follows:\n# TAGFILES = file1=loc1 \"file2 = loc2\" ...\n# where loc1 and loc2 can be relative or absolute paths or URLs. See the\n# section \"Linking to external documentation\" for more information about the use\n# of tag files.\n# Note: Each tag file must have a unique name (where the name does NOT include\n# the path). 
If a tag file is not located in the directory in which doxygen is\n# run, you must also specify the path to the tagfile here.\n\nTAGFILES               = \n\n# When a file name is specified after GENERATE_TAGFILE, doxygen will create a\n# tag file that is based on the input files it reads. See section \"Linking to\n# external documentation\" for more information about the usage of tag files.\n\nGENERATE_TAGFILE       = \n\n# If the ALLEXTERNALS tag is set to YES, all external class will be listed in\n# the class index. If set to NO, only the inherited external classes will be\n# listed.\n# The default value is: NO.\n\nALLEXTERNALS           = NO\n\n# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed\n# in the modules index. If set to NO, only the current project's groups will be\n# listed.\n# The default value is: YES.\n\nEXTERNAL_GROUPS        = YES\n\n# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in\n# the related pages index. If set to NO, only the current project's pages will\n# be listed.\n# The default value is: YES.\n\nEXTERNAL_PAGES         = YES\n\n# The PERL_PATH should be the absolute path and name of the perl script\n# interpreter (i.e. the result of 'which perl').\n# The default file (with absolute path) is: /usr/bin/perl.\n\nPERL_PATH              = /usr/bin/perl\n\n#---------------------------------------------------------------------------\n# Configuration options related to the dot tool\n#---------------------------------------------------------------------------\n\n# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram\n# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to\n# NO turns the diagrams off. 
Note that this option also works with HAVE_DOT\n# disabled, but it is recommended to install and use dot, since it yields more\n# powerful graphs.\n# The default value is: YES.\n\nCLASS_DIAGRAMS         = NO\n\n# You can define message sequence charts within doxygen comments using the \\msc\n# command. Doxygen will then run the mscgen tool (see:\n# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the\n# documentation. The MSCGEN_PATH tag allows you to specify the directory where\n# the mscgen tool resides. If left empty the tool is assumed to be found in the\n# default search path.\n\nMSCGEN_PATH            = \n\n# You can include diagrams made with dia in doxygen documentation. Doxygen will\n# then run dia to produce the diagram and insert it in the documentation. The\n# DIA_PATH tag allows you to specify the directory where the dia binary resides.\n# If left empty dia is assumed to be found in the default search path.\n\nDIA_PATH               = \n\n# If set to YES the inheritance and collaboration graphs will hide inheritance\n# and usage relations if the target is undocumented or is not a class.\n# The default value is: YES.\n\nHIDE_UNDOC_RELATIONS   = YES\n\n# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is\n# available from the path. This tool is part of Graphviz (see:\n# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent\n# Bell Labs. The other options in this section have no effect if this option is\n# set to NO\n# The default value is: YES.\n\nHAVE_DOT               = NO\n\n# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed\n# to run in parallel. When set to 0 doxygen will base this on the number of\n# processors available in the system. 
You can set it explicitly to a value\n# larger than 0 to get control over the balance between CPU load and processing\n# speed.\n# Minimum value: 0, maximum value: 32, default value: 0.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_NUM_THREADS        = 0\n\n# When you want a differently looking font in the dot files that doxygen\n# generates you can specify the font name using DOT_FONTNAME. You need to make\n# sure dot is able to find the font, which can be done by putting it in a\n# standard location or by setting the DOTFONTPATH environment variable or by\n# setting DOT_FONTPATH to the directory containing the font.\n# The default value is: Helvetica.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_FONTNAME           = Helvetica\n\n# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of\n# dot graphs.\n# Minimum value: 4, maximum value: 24, default value: 10.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_FONTSIZE           = 10\n\n# By default doxygen will tell dot to use the default font as specified with\n# DOT_FONTNAME. 
If you specify a different font using DOT_FONTNAME you can set\n# the path where dot can find it using this tag.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_FONTPATH           = \n\n# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for\n# each documented class showing the direct and indirect inheritance relations.\n# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nCLASS_GRAPH            = YES\n\n# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a\n# graph for each documented class showing the direct and indirect implementation\n# dependencies (inheritance, containment, and class references variables) of the\n# class with other documented classes.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nCOLLABORATION_GRAPH    = YES\n\n# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for\n# groups, showing the direct groups dependencies.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nGROUP_GRAPHS           = YES\n\n# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and\n# collaboration diagrams in a style similar to the OMG's Unified Modeling\n# Language.\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nUML_LOOK               = NO\n\n# If the UML_LOOK tag is enabled, the fields and methods are shown inside the\n# class node. If there are many fields or methods and many nodes the graph may\n# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the\n# number of items for each type to make the size more manageable. Set this to 0\n# for no limit. Note that the threshold may be exceeded by 50% before the limit\n# is enforced. 
So when you set the threshold to 10, up to 15 fields may appear,\n# but if the number exceeds 15, the total amount of fields shown is limited to\n# 10.\n# Minimum value: 0, maximum value: 100, default value: 10.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nUML_LIMIT_NUM_FIELDS   = 10\n\n# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and\n# collaboration graphs will show the relations between templates and their\n# instances.\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nTEMPLATE_RELATIONS     = NO\n\n# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to\n# YES then doxygen will generate a graph for each documented file showing the\n# direct and indirect include dependencies of the file with other documented\n# files.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nINCLUDE_GRAPH          = YES\n\n# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are\n# set to YES then doxygen will generate a graph for each documented file showing\n# the direct and indirect include dependencies of the file with other documented\n# files.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nINCLUDED_BY_GRAPH      = YES\n\n# If the CALL_GRAPH tag is set to YES then doxygen will generate a call\n# dependency graph for every global function or class method.\n#\n# Note that enabling this option will significantly increase the time of a run.\n# So in most cases it will be better to enable call graphs for selected\n# functions only using the \\callgraph command. 
Disabling a call graph can be\n# accomplished by means of the command \\hidecallgraph.\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nCALL_GRAPH             = NO\n\n# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller\n# dependency graph for every global function or class method.\n#\n# Note that enabling this option will significantly increase the time of a run.\n# So in most cases it will be better to enable caller graphs for selected\n# functions only using the \\callergraph command. Disabling a caller graph can be\n# accomplished by means of the command \\hidecallergraph.\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nCALLER_GRAPH           = NO\n\n# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical\n# hierarchy of all classes instead of a textual one.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nGRAPHICAL_HIERARCHY    = YES\n\n# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the\n# dependencies a directory has on other directories in a graphical way. The\n# dependency relations are determined by the #include relations between the\n# files in the directories.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDIRECTORY_GRAPH        = YES\n\n# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images\n# generated by dot. 
For an explanation of the image formats see the section\n# output formats in the documentation of the dot tool (Graphviz (see:\n# http://www.graphviz.org/)).\n# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order\n# to make the SVG files visible in IE 9+ (other browsers do not have this\n# requirement).\n# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,\n# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,\n# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,\n# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and\n# png:gdiplus:gdiplus.\n# The default value is: png.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_IMAGE_FORMAT       = png\n\n# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to\n# enable generation of interactive SVG images that allow zooming and panning.\n#\n# Note that this requires a modern browser other than Internet Explorer. Tested\n# and working are Firefox, Chrome, Safari, and Opera.\n# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make\n# the SVG files visible. Older versions of IE do not have SVG support.\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nINTERACTIVE_SVG        = NO\n\n# The DOT_PATH tag can be used to specify the path where the dot tool can be\n# found. 
If left blank, it is assumed the dot tool can be found in the path.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_PATH               = \n\n# The DOTFILE_DIRS tag can be used to specify one or more directories that\n# contain dot files that are included in the documentation (see the \\dotfile\n# command).\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOTFILE_DIRS           = \n\n# The MSCFILE_DIRS tag can be used to specify one or more directories that\n# contain msc files that are included in the documentation (see the \\mscfile\n# command).\n\nMSCFILE_DIRS           = \n\n# The DIAFILE_DIRS tag can be used to specify one or more directories that\n# contain dia files that are included in the documentation (see the \\diafile\n# command).\n\nDIAFILE_DIRS           = \n\n# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the\n# path where java can find the plantuml.jar file. If left blank, it is assumed\n# PlantUML is not used or called during a preprocessing step. Doxygen will\n# generate a warning when it encounters a \\startuml command in this case and\n# will not generate output for the diagram.\n\nPLANTUML_JAR_PATH      = \n\n# When using plantuml, the specified paths are searched for files specified by\n# the !include statement in a plantuml block.\n\nPLANTUML_INCLUDE_PATH  = \n\n# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes\n# that will be shown in the graph. If the number of nodes in a graph becomes\n# larger than this value, doxygen will truncate the graph, which is visualized\n# by representing a node as a red box. Note that doxygen if the number of direct\n# children of the root node in a graph is already larger than\n# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. 
Also note that\n# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.\n# Minimum value: 0, maximum value: 10000, default value: 50.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_GRAPH_MAX_NODES    = 50\n\n# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs\n# generated by dot. A depth value of 3 means that only nodes reachable from the\n# root by following a path via at most 3 edges will be shown. Nodes that lay\n# further from the root node will be omitted. Note that setting this option to 1\n# or 2 may greatly reduce the computation time needed for large code bases. Also\n# note that the size of a graph can be further restricted by\n# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.\n# Minimum value: 0, maximum value: 1000, default value: 0.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nMAX_DOT_GRAPH_DEPTH    = 0\n\n# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent\n# background. This is disabled by default, because dot on Windows does not seem\n# to support this out of the box.\n#\n# Warning: Depending on the platform used, enabling this option may lead to\n# badly anti-aliased labels on the edges of a graph (i.e. they become hard to\n# read).\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_TRANSPARENT        = NO\n\n# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output\n# files in one run (i.e. multiple -o and -T options on the command line). 
This\n# makes dot run faster, but since only newer versions of dot (>1.8.10) support\n# this, this feature is disabled by default.\n# The default value is: NO.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_MULTI_TARGETS      = NO\n\n# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page\n# explaining the meaning of the various boxes and arrows in the dot generated\n# graphs.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nGENERATE_LEGEND        = YES\n\n# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot\n# files that are used to generate the various graphs.\n# The default value is: YES.\n# This tag requires that the tag HAVE_DOT is set to YES.\n\nDOT_CLEANUP            = YES\n"
  },
  {
    "path": "Thirdparty/tessil-src/include/tsl/robin_growth_policy.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ROBIN_GROWTH_POLICY_H\n#define TSL_ROBIN_GROWTH_POLICY_H \n\n\n#include <algorithm>\n#include <array>\n#include <climits>\n#include <cmath>\n#include <cstddef>\n#include <cstdint>\n#include <iterator>\n#include <limits>\n#include <ratio>\n#include <stdexcept>\n\n\n#ifdef TSL_DEBUG\n#    define tsl_rh_assert(expr) assert(expr)\n#else\n#    define tsl_rh_assert(expr) (static_cast<void>(0))\n#endif\n\n\n/**\n * If exceptions are enabled, throw the exception passed in parameter, otherwise call std::terminate.\n */\n#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || (defined (_MSC_VER) && defined (_CPPUNWIND))) && !defined(TSL_NO_EXCEPTIONS)\n#    define TSL_RH_THROW_OR_TERMINATE(ex, msg) throw ex(msg)\n#else\n#    define TSL_RH_NO_EXCEPTIONS\n#    ifdef 
NDEBUG\n#        define TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate()\n#    else\n#        include <iostream>\n#        define TSL_RH_THROW_OR_TERMINATE(ex, msg) do { std::cerr << msg << std::endl; std::terminate(); } while(0)\n#    endif\n#endif\n\n\n#if defined(__GNUC__) || defined(__clang__)\n#    define TSL_RH_LIKELY(exp) (__builtin_expect(!!(exp), true))\n#else\n#    define TSL_RH_LIKELY(exp) (exp)\n#endif\n\n\nnamespace tsl {\nnamespace rh {\n    \n/**\n * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a power of two. It allows\n * the table to use a mask operation instead of a modulo operation to map a hash to a bucket.\n * \n * GrowthFactor must be a power of two >= 2.\n */\ntemplate<std::size_t GrowthFactor>\nclass power_of_two_growth_policy {\npublic:\n    /**\n     * Called on the hash table creation and on rehash. The number of buckets for the table is passed in parameter.\n     * This number is a minimum, the policy may update this value with a higher value if needed (but not lower).\n     *\n     * If 0 is given, min_bucket_count_in_out must still be 0 after the policy creation and\n     * bucket_for_hash must always return 0 in this case.\n     */\n    explicit power_of_two_growth_policy(std::size_t& min_bucket_count_in_out) {\n        if(min_bucket_count_in_out > max_bucket_count()) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        if(min_bucket_count_in_out > 0) {\n            min_bucket_count_in_out = round_up_to_power_of_two(min_bucket_count_in_out);\n            m_mask = min_bucket_count_in_out - 1;\n        }\n        else {\n            m_mask = 0;\n        }\n    }\n    \n    /**\n     * Return the bucket [0, bucket_count()) to which the hash belongs. 
\n     * If bucket_count() is 0, it must always return 0.\n     */\n    std::size_t bucket_for_hash(std::size_t hash) const noexcept {\n        return hash & m_mask;\n    }\n    \n    /**\n     * Return the number of buckets that should be used on next growth.\n     */\n    std::size_t next_bucket_count() const {\n        if((m_mask + 1) > max_bucket_count() / GrowthFactor) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        return (m_mask + 1) * GrowthFactor;\n    }\n    \n    /**\n     * Return the maximum number of buckets supported by the policy.\n     */\n    std::size_t max_bucket_count() const {\n        // Largest power of two.\n        return (std::numeric_limits<std::size_t>::max() / 2) + 1;\n    }\n    \n    /**\n     * Reset the growth policy as if it was created with a bucket count of 0.\n     * After a clear, the policy must always return 0 when bucket_for_hash is called.\n     */\n    void clear() noexcept {\n        m_mask = 0;\n    }\n    \nprivate:\n    static std::size_t round_up_to_power_of_two(std::size_t value) {\n        if(is_power_of_two(value)) {\n            return value;\n        }\n        \n        if(value == 0) {\n            return 1;\n        }\n            \n        --value;\n        for(std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) {\n            value |= value >> i;\n        }\n        \n        return value + 1;\n    }\n    \n    static constexpr bool is_power_of_two(std::size_t value) {\n        return value != 0 && (value & (value - 1)) == 0;\n    }\n    \nprotected:\n    static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2, \"GrowthFactor must be a power of two >= 2.\");\n    \n    std::size_t m_mask;\n};\n\n\n/**\n * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo to map a hash\n * to a bucket. 
Slower but it can be useful if you want a slower growth.\n */\ntemplate<class GrowthFactor = std::ratio<3, 2>>\nclass mod_growth_policy {\npublic:\n    explicit mod_growth_policy(std::size_t& min_bucket_count_in_out) {\n        if(min_bucket_count_in_out > max_bucket_count()) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        if(min_bucket_count_in_out > 0) {\n            m_mod = min_bucket_count_in_out;\n        }\n        else {\n            m_mod = 1;\n        }\n    }\n    \n    std::size_t bucket_for_hash(std::size_t hash) const noexcept {\n        return hash % m_mod;\n    }\n    \n    std::size_t next_bucket_count() const {\n        if(m_mod == max_bucket_count()) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        const double next_bucket_count = std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR);\n        if(!std::isnormal(next_bucket_count)) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        if(next_bucket_count > double(max_bucket_count())) {\n            return max_bucket_count();\n        }\n        else {\n            return std::size_t(next_bucket_count);\n        }\n    }\n    \n    std::size_t max_bucket_count() const {\n        return MAX_BUCKET_COUNT;\n    }\n    \n    void clear() noexcept {\n        m_mod = 1;\n    }\n    \nprivate:\n    static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR = 1.0 * GrowthFactor::num / GrowthFactor::den;\n    static const std::size_t MAX_BUCKET_COUNT = \n            std::size_t(double(\n                    std::numeric_limits<std::size_t>::max() / REHASH_SIZE_MULTIPLICATION_FACTOR\n            ));\n            \n    static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1, \"Growth factor should be >= 1.1.\");\n    \n    std::size_t 
m_mod;\n};\n\n\n\nnamespace detail {\n\n#if SIZE_MAX >= ULLONG_MAX\n#define TSL_RH_NB_PRIMES 51\n#elif SIZE_MAX >= ULONG_MAX\n#define TSL_RH_NB_PRIMES 40\n#else\n#define TSL_RH_NB_PRIMES 23\n#endif\n\nstatic constexpr const std::array<std::size_t, TSL_RH_NB_PRIMES> PRIMES = {{\n    1u, 5u, 17u, 29u, 37u, 53u, 67u, 79u, 97u, 131u, 193u, 257u, 389u, 521u, 769u, 1031u, \n    1543u, 2053u, 3079u, 6151u, 12289u, 24593u, 49157u,\n#if SIZE_MAX >= ULONG_MAX\n    98317ul, 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, 6291469ul, 12582917ul, \n    25165843ul, 50331653ul, 100663319ul, 201326611ul, 402653189ul, 805306457ul, 1610612741ul, \n    3221225473ul, 4294967291ul,\n#endif\n#if SIZE_MAX >= ULLONG_MAX\n    6442450939ull, 12884901893ull, 25769803751ull, 51539607551ull, 103079215111ull, 206158430209ull, \n    412316860441ull, 824633720831ull, 1649267441651ull, 3298534883309ull, 6597069766657ull,\n#endif\n}};\n\ntemplate<unsigned int IPrime>\nstatic constexpr std::size_t mod(std::size_t hash) { return hash % PRIMES[IPrime]; }\n\n// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. 
This table allows for faster modulo as the\n// compiler can optimize the modulo code better with a constant known at the compilation.\nstatic constexpr const std::array<std::size_t(*)(std::size_t), TSL_RH_NB_PRIMES> MOD_PRIME = {{ \n    &mod<0>, &mod<1>, &mod<2>, &mod<3>, &mod<4>, &mod<5>, &mod<6>, &mod<7>, &mod<8>, &mod<9>, &mod<10>, \n    &mod<11>, &mod<12>, &mod<13>, &mod<14>, &mod<15>, &mod<16>, &mod<17>, &mod<18>, &mod<19>, &mod<20>, \n    &mod<21>, &mod<22>,  \n#if SIZE_MAX >= ULONG_MAX\n    &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>, &mod<28>, &mod<29>, &mod<30>, &mod<31>, &mod<32>, \n    &mod<33>, &mod<34>, &mod<35>, &mod<36>, &mod<37> , &mod<38>, &mod<39>,\n#endif\n#if SIZE_MAX >= ULLONG_MAX\n    &mod<40>, &mod<41>, &mod<42>, &mod<43>, &mod<44>, &mod<45>, &mod<46>, &mod<47>, &mod<48>, &mod<49>, \n    &mod<50>,\n#endif\n}};\n\n}\n\n/**\n * Grow the hash table by using prime numbers as bucket count. Slower than tsl::rh::power_of_two_growth_policy in  \n * general but will probably distribute the values around better in the buckets with a poor hash function.\n * \n * To allow the compiler to optimize the modulo operation, a lookup table is used with constant prime numbers.\n * \n * With a switch the code would look like:\n * \\code\n * switch(iprime) { // iprime is the current prime of the hash table\n *     case 0: hash % 5ul;\n *             break;\n *     case 1: hash % 17ul;\n *             break;\n *     case 2: hash % 29ul;\n *             break;\n *     ...\n * }    \n * \\endcode\n * \n * Due to the constant variable in the modulo the compiler is able to optimize the operation\n * by a series of multiplications, subtractions and shifts. 
\n * \n * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34) * 5' in a 64 bits environment.\n */\nclass prime_growth_policy {\npublic:\n    explicit prime_growth_policy(std::size_t& min_bucket_count_in_out) {\n        auto it_prime = std::lower_bound(detail::PRIMES.begin(), \n                                         detail::PRIMES.end(), min_bucket_count_in_out);\n        if(it_prime == detail::PRIMES.end()) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        m_iprime = static_cast<unsigned int>(std::distance(detail::PRIMES.begin(), it_prime));\n        if(min_bucket_count_in_out > 0) {\n            min_bucket_count_in_out = *it_prime;\n        }\n        else {\n            min_bucket_count_in_out = 0;\n        }\n    }\n    \n    std::size_t bucket_for_hash(std::size_t hash) const noexcept {\n        return detail::MOD_PRIME[m_iprime](hash);\n    }\n    \n    std::size_t next_bucket_count() const {\n        if(m_iprime + 1 >= detail::PRIMES.size()) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The hash table exceeds its maximum size.\");\n        }\n        \n        return detail::PRIMES[m_iprime + 1];\n    }   \n    \n    std::size_t max_bucket_count() const {\n        return detail::PRIMES.back();\n    }\n    \n    void clear() noexcept {\n        m_iprime = 0;\n    }\n    \nprivate:\n    unsigned int m_iprime;\n    \n    static_assert(std::numeric_limits<decltype(m_iprime)>::max() >= detail::PRIMES.size(), \n                  \"The type of m_iprime is not big enough.\");\n}; \n\n}\n}\n\n#endif\n"
  },
  {
    "path": "Thirdparty/tessil-src/include/tsl/robin_hash.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ROBIN_HASH_H\n#define TSL_ROBIN_HASH_H \n\n\n#include <algorithm>\n#include <cassert>\n#include <cmath>\n#include <cstddef>\n#include <cstdint>\n#include <exception>\n#include <iterator>\n#include <limits>\n#include <memory>\n#include <stdexcept>\n#include <tuple>\n#include <type_traits>\n#include <utility>\n#include <vector>\n#include \"robin_growth_policy.h\"\n\n\nnamespace tsl {\n    \nnamespace detail_robin_hash {\n\ntemplate<typename T>\nstruct make_void {\n    using type = void;\n};\n\ntemplate<typename T, typename = void>\nstruct has_is_transparent: std::false_type {\n};\n\ntemplate<typename T>\nstruct has_is_transparent<T, typename make_void<typename T::is_transparent>::type>: std::true_type {\n};\n\ntemplate<typename U>\nstruct is_power_of_two_policy: 
std::false_type {\n};\n\ntemplate<std::size_t GrowthFactor>\nstruct is_power_of_two_policy<tsl::rh::power_of_two_growth_policy<GrowthFactor>>: std::true_type {\n};\n\n// Only available in C++17, we need to be compatible with C++11\ntemplate<class T>\nconst T& clamp( const T& v, const T& lo, const T& hi) {\n    return std::min(hi, std::max(lo, v));\n}\n\ntemplate<typename T, typename U>\nstatic T numeric_cast(U value, const char* error_message = \"numeric_cast() failed.\") {\n    T ret = static_cast<T>(value);\n    if(static_cast<U>(ret) != value) {\n        TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message);\n    }\n    \n    const bool is_same_signedness = (std::is_unsigned<T>::value && std::is_unsigned<U>::value) ||\n                                    (std::is_signed<T>::value && std::is_signed<U>::value);\n    if(!is_same_signedness && (ret < T{}) != (value < U{})) {\n        TSL_RH_THROW_OR_TERMINATE(std::runtime_error, error_message);\n    }\n    \n    return ret;\n}\n\n\nusing truncated_hash_type = std::uint_least32_t;\n\n/**\n * Helper class that stores a truncated hash if StoreHash is true and nothing otherwise.\n */\ntemplate<bool StoreHash>\nclass bucket_entry_hash {\npublic:\n    bool bucket_hash_equal(std::size_t /*hash*/) const noexcept {\n        return true;\n    }\n    \n    truncated_hash_type truncated_hash() const noexcept {\n        return 0;\n    }\n    \nprotected:\n    void set_hash(truncated_hash_type /*hash*/) noexcept {\n    }\n};\n\ntemplate<>\nclass bucket_entry_hash<true> {\npublic:\n    bool bucket_hash_equal(std::size_t hash) const noexcept {\n        return m_hash == truncated_hash_type(hash);\n    }\n    \n    truncated_hash_type truncated_hash() const noexcept {\n        return m_hash;\n    }\n    \nprotected:\n    void set_hash(truncated_hash_type hash) noexcept {\n        m_hash = truncated_hash_type(hash);\n    }\n    \nprivate:    \n    truncated_hash_type m_hash;\n};\n\n\n/**\n * Each bucket entry has:\n * - A value 
of type `ValueType`.\n * - An integer to store how far the value of the bucket, if any, is from its ideal bucket \n *   (ex: if the current bucket 5 has the value 'foo' and `hash('foo') % nb_buckets` == 3,\n *        `dist_from_ideal_bucket()` will return 2 as the current value of the bucket is two\n *        buckets away from its ideal bucket)\n *   If there is no value in the bucket (i.e. `empty()` is true) `dist_from_ideal_bucket()` will be < 0.\n * - A marker which tells us if the bucket is the last bucket of the bucket array (useful for the \n *   iterator of the hash table).\n * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also stored in the bucket. \n *   If the size of the hash is more than 32 bits, it is truncated. We don't store the full hash\n *   as storing the hash is a potential opportunity to use the unused space due to the alignment\n *   of the bucket_entry structure. We can thus potentially store the hash without any extra space \n *   (which would not be possible with 64 bits of the hash).\n */\ntemplate<typename ValueType, bool StoreHash>\nclass bucket_entry: public bucket_entry_hash<StoreHash> {\n    using bucket_hash = bucket_entry_hash<StoreHash>;\n    \npublic:\n    using value_type = ValueType;\n    using distance_type = std::int_least16_t;\n    \n    \n    bucket_entry() noexcept: bucket_hash(), m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),\n                             m_last_bucket(false)\n    {\n        tsl_rh_assert(empty());\n    }\n    \n    bucket_entry(bool last_bucket) noexcept: bucket_hash(), m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),\n                                             m_last_bucket(last_bucket)\n    {\n        tsl_rh_assert(empty());\n    }\n    \n    bucket_entry(const bucket_entry& other) noexcept(std::is_nothrow_copy_constructible<value_type>::value): \n            bucket_hash(other),\n            
m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), \n            m_last_bucket(other.m_last_bucket)\n    {\n        if(!other.empty()) {\n            ::new (static_cast<void*>(std::addressof(m_value))) value_type(other.value());\n            m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;\n        }\n    }\n    \n    /**\n     * Never really used, but still necessary as we must call resize on an empty `std::vector<bucket_entry>`.\n     * and we need to support move-only types. See robin_hash constructor for details.\n     */\n    bucket_entry(bucket_entry&& other) noexcept(std::is_nothrow_move_constructible<value_type>::value): \n            bucket_hash(std::move(other)),\n            m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET), \n            m_last_bucket(other.m_last_bucket) \n    {\n        if(!other.empty()) {\n            ::new (static_cast<void*>(std::addressof(m_value))) value_type(std::move(other.value()));\n            m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;\n        }\n    }\n    \n    bucket_entry& operator=(const bucket_entry& other) \n            noexcept(std::is_nothrow_copy_constructible<value_type>::value) \n    {\n        if(this != &other) {\n            clear();\n            \n            bucket_hash::operator=(other);\n            if(!other.empty()) {\n                ::new (static_cast<void*>(std::addressof(m_value))) value_type(other.value());\n            }\n            \n            m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;\n            m_last_bucket = other.m_last_bucket;\n        }\n        \n        return *this;\n    }\n    \n    bucket_entry& operator=(bucket_entry&& ) = delete;\n    \n    ~bucket_entry() noexcept {\n        clear();\n    }\n    \n    void clear() noexcept {\n        if(!empty()) {\n            destroy_value();\n            m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;\n        }\n    }\n    \n    bool empty() const noexcept 
{\n        return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;\n    }\n    \n    value_type& value() noexcept {\n        tsl_rh_assert(!empty());\n        return *reinterpret_cast<value_type*>(std::addressof(m_value));\n    }\n    \n    const value_type& value() const noexcept {\n        tsl_rh_assert(!empty());\n        return *reinterpret_cast<const value_type*>(std::addressof(m_value));\n    }\n    \n    distance_type dist_from_ideal_bucket() const noexcept {\n        return m_dist_from_ideal_bucket;\n    }\n    \n    bool last_bucket() const noexcept {\n        return m_last_bucket;\n    }\n    \n    void set_as_last_bucket() noexcept {\n        m_last_bucket = true;\n    }\n        \n    template<typename... Args>\n    void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket, \n                                   truncated_hash_type hash, Args&&... value_type_args) \n    {\n        tsl_rh_assert(dist_from_ideal_bucket >= 0);\n        tsl_rh_assert(empty());\n        \n        ::new (static_cast<void*>(std::addressof(m_value))) value_type(std::forward<Args>(value_type_args)...);\n        this->set_hash(hash);\n        m_dist_from_ideal_bucket = dist_from_ideal_bucket;\n        \n        tsl_rh_assert(!empty());\n    }\n    \n    void swap_with_value_in_bucket(distance_type& dist_from_ideal_bucket, \n                                   truncated_hash_type& hash, value_type& value) \n    {\n        tsl_rh_assert(!empty());\n        \n        using std::swap;\n        swap(value, this->value());\n        swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket);\n        \n        // Avoid warning of unused variable if StoreHash is false\n        (void) hash;\n        if(StoreHash) {\n            const truncated_hash_type tmp_hash = this->truncated_hash();\n            this->set_hash(hash);\n            hash = tmp_hash;\n        }\n    }\n    \n    static truncated_hash_type truncate_hash(std::size_t hash) noexcept {\n        return 
truncated_hash_type(hash);\n    }\n    \nprivate:\n    void destroy_value() noexcept {\n        tsl_rh_assert(!empty());\n        value().~value_type();\n    }\n\npublic:\n    static const distance_type DIST_FROM_IDEAL_BUCKET_LIMIT = 4096;\n    static_assert(DIST_FROM_IDEAL_BUCKET_LIMIT <= std::numeric_limits<distance_type>::max() - 1,\n                 \"DIST_FROM_IDEAL_BUCKET_LIMIT must be <= std::numeric_limits<distance_type>::max() - 1.\");\n    \nprivate:\n    using storage = typename std::aligned_storage<sizeof(value_type), alignof(value_type)>::type;\n    \n    static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1;\n    \n    distance_type m_dist_from_ideal_bucket;\n    bool m_last_bucket;\n    storage m_value;\n};\n\n\n\n/**\n * Internal common class used by `robin_map` and `robin_set`. \n * \n * ValueType is what will be stored by `robin_hash` (usually `std::pair<Key, T>` for map and `Key` for set).\n * \n * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in parameter and returns a \n *  reference to the key.\n * \n * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in parameter and returns a \n *  reference to the value. 
`ValueSelect` should be void if there is no value (in a set for example).\n * \n * The strong exception guarantee only holds if the expression \n * `std::is_nothrow_swappable<ValueType>::value && std::is_nothrow_move_constructible<ValueType>::value` is true.\n * \n * Behaviour is undefined if the destructor of `ValueType` throws.\n */\ntemplate<class ValueType,\n         class KeySelect,\n         class ValueSelect,\n         class Hash,\n         class KeyEqual,\n         class Allocator,\n         bool StoreHash,\n         class GrowthPolicy>\nclass robin_hash: private Hash, private KeyEqual, private GrowthPolicy {\nprivate:    \n    template<typename U>\n    using has_mapped_type = typename std::integral_constant<bool, !std::is_same<U, void>::value>;\n    \n    static_assert(noexcept(std::declval<GrowthPolicy>().bucket_for_hash(std::size_t(0))), \"GrowthPolicy::bucket_for_hash must be noexcept.\");\n    static_assert(noexcept(std::declval<GrowthPolicy>().clear()), \"GrowthPolicy::clear must be noexcept.\");\n    \npublic:\n    template<bool IsConst>\n    class robin_iterator;\n    \n    using key_type = typename KeySelect::key_type;\n    using value_type = ValueType;\n    using size_type = std::size_t;\n    using difference_type = std::ptrdiff_t;\n    using hasher = Hash;\n    using key_equal = KeyEqual;\n    using allocator_type = Allocator;\n    using reference = value_type&;\n    using const_reference = const value_type&;\n    using pointer = value_type*;\n    using const_pointer = const value_type*;\n    using iterator = robin_iterator<false>;\n    using const_iterator = robin_iterator<true>;\n    \n    \nprivate:\n    /**\n     * Either store the hash because we are asked by the `StoreHash` template parameter\n     * or store the hash because it doesn't cost us anything in size and can be used to speed up rehash.\n     */\n    static constexpr bool STORE_HASH = StoreHash || \n                                       (\n                                         
(sizeof(tsl::detail_robin_hash::bucket_entry<value_type, true>) ==\n                                          sizeof(tsl::detail_robin_hash::bucket_entry<value_type, false>))\n                                         &&\n                                         (sizeof(std::size_t) == sizeof(truncated_hash_type) ||\n                                          is_power_of_two_policy<GrowthPolicy>::value)\n                                         &&\n                                          // Don't store the hash for primitive types with default hash.\n                                          (!std::is_arithmetic<key_type>::value ||\n                                           !std::is_same<Hash, std::hash<key_type>>::value)\n                                       );\n                                        \n    /**\n     * Only use the stored hash on lookup if we are explicitly asked. We are not sure how slow\n     * the KeyEqual operation is. An extra comparison may slow things down with a fast KeyEqual.\n     */\n    static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash;\n\n    /**\n     * We can only use the hash on rehash if the size of the hash type is the same as the stored one or\n     * if we use a power of two modulo. 
In the case of the power of two modulo, we just mask\n     * the least significant bytes, we just have to check that the truncated_hash_type didn't truncated\n     * more bytes.\n     */\n    static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) {\n        (void) bucket_count;\n        if(STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) {\n            return true;\n        }\n        else if(STORE_HASH && is_power_of_two_policy<GrowthPolicy>::value) {\n            tsl_rh_assert(bucket_count > 0);\n            return (bucket_count - 1) <= std::numeric_limits<truncated_hash_type>::max();\n        }\n        else {\n            return false;   \n        }\n    }\n    \n    using bucket_entry = tsl::detail_robin_hash::bucket_entry<value_type, STORE_HASH>;\n    using distance_type = typename bucket_entry::distance_type;\n    \n    using buckets_allocator = typename std::allocator_traits<allocator_type>::template rebind_alloc<bucket_entry>;\n    using buckets_container_type = std::vector<bucket_entry, buckets_allocator>;\n    \n    \npublic: \n    /**\n     * The 'operator*()' and 'operator->()' methods return a const reference and const pointer respectively to the \n     * stored value type.\n     * \n     * In case of a map, to get a mutable reference to the value associated to a key (the '.second' in the \n     * stored pair), you have to call 'value()'. 
\n     * \n     * The main reason for this is that if we returned a `std::pair<Key, T>&` instead \n     * of a `const std::pair<Key, T>&`, the user may modify the key which will put the map in a undefined state.\n     */\n    template<bool IsConst>\n    class robin_iterator {\n        friend class robin_hash;\n        \n    private:\n        using bucket_entry_ptr = typename std::conditional<IsConst, \n                                                           const bucket_entry*, \n                                                           bucket_entry*>::type;\n    \n        \n        robin_iterator(bucket_entry_ptr bucket) noexcept: m_bucket(bucket) {\n        }\n        \n    public:\n        using iterator_category = std::forward_iterator_tag;\n        using value_type = const typename robin_hash::value_type;\n        using difference_type = std::ptrdiff_t;\n        using reference = value_type&;\n        using pointer = value_type*;\n        \n        \n        robin_iterator() noexcept {\n        }\n        \n        // Copy constructor from iterator to const_iterator.\n        template<bool TIsConst = IsConst, typename std::enable_if<TIsConst>::type* = nullptr>\n        robin_iterator(const robin_iterator<!TIsConst>& other) noexcept: m_bucket(other.m_bucket) {\n        }\n        \n        robin_iterator(const robin_iterator& other) = default;\n        robin_iterator(robin_iterator&& other) = default;\n        robin_iterator& operator=(const robin_iterator& other) = default;\n        robin_iterator& operator=(robin_iterator&& other) = default;\n        \n        const typename robin_hash::key_type& key() const {\n            return KeySelect()(m_bucket->value());\n        }\n\n        template<class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value && IsConst>::type* = nullptr>\n        const typename U::value_type& value() const {\n            return U()(m_bucket->value());\n        }\n\n        template<class U = ValueSelect, typename 
std::enable_if<has_mapped_type<U>::value && !IsConst>::type* = nullptr>\n        typename U::value_type& value() const {\n            return U()(m_bucket->value());\n        }\n        \n        reference operator*() const {\n            return m_bucket->value();\n        }\n        \n        pointer operator->() const {\n            return std::addressof(m_bucket->value());\n        }\n        \n        robin_iterator& operator++() {\n            while(true) {\n                if(m_bucket->last_bucket()) {\n                    ++m_bucket;\n                    return *this;\n                }\n                \n                ++m_bucket;\n                if(!m_bucket->empty()) {\n                    return *this;\n                }\n            }\n        }\n        \n        robin_iterator operator++(int) {\n            robin_iterator tmp(*this);\n            ++*this;\n            \n            return tmp;\n        }\n        \n        friend bool operator==(const robin_iterator& lhs, const robin_iterator& rhs) { \n            return lhs.m_bucket == rhs.m_bucket; \n        }\n        \n        friend bool operator!=(const robin_iterator& lhs, const robin_iterator& rhs) { \n            return !(lhs == rhs); \n        }\n        \n    private:\n        bucket_entry_ptr m_bucket;\n    };\n\n    \npublic:\n#if defined(__cplusplus) && __cplusplus >= 201402L\n    robin_hash(size_type bucket_count, \n               const Hash& hash,\n               const KeyEqual& equal,\n               const Allocator& alloc,\n               float min_load_factor = DEFAULT_MIN_LOAD_FACTOR,\n               float max_load_factor = DEFAULT_MAX_LOAD_FACTOR): \n                                       Hash(hash), \n                                       KeyEqual(equal),\n                                       GrowthPolicy(bucket_count),\n                                       m_buckets_data(\n                                           [&]() {\n                                               
if(bucket_count > max_bucket_count()) {\n                                                   TSL_RH_THROW_OR_TERMINATE(std::length_error, \n                                                                             \"The map exceeds its maximum bucket count.\");\n                                               }\n                                               \n                                               return bucket_count;\n                                           }(), alloc\n                                       ),\n                                       m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():m_buckets_data.data()),\n                                       m_bucket_count(bucket_count),\n                                       m_nb_elements(0), \n                                       m_grow_on_next_insert(false),\n                                       m_try_shrink_on_next_insert(false)\n    {\n        if(m_bucket_count > 0) {\n            tsl_rh_assert(!m_buckets_data.empty());\n            m_buckets_data.back().set_as_last_bucket();\n        }\n        \n        this->min_load_factor(min_load_factor);\n        this->max_load_factor(max_load_factor);\n    }\n#else\n    /**\n     * C++11 doesn't support the creation of a std::vector with a custom allocator and 'count' default-inserted elements. \n     * The needed constructor `explicit vector(size_type count, const Allocator& alloc = Allocator());` is only\n     * available in C++14 and later. 
We thus must resize after using the `vector(const Allocator& alloc)` constructor.\n     * \n     * We can't use `vector(size_type count, const T& value, const Allocator& alloc)` as it requires the\n     * value T to be copyable.\n     */\n    robin_hash(size_type bucket_count, \n               const Hash& hash,\n               const KeyEqual& equal,\n               const Allocator& alloc,\n               float min_load_factor = DEFAULT_MIN_LOAD_FACTOR,\n               float max_load_factor = DEFAULT_MAX_LOAD_FACTOR): \n                                       Hash(hash), \n                                       KeyEqual(equal),\n                                       GrowthPolicy(bucket_count),\n                                       m_buckets_data(alloc), \n                                       m_buckets(static_empty_bucket_ptr()), \n                                       m_bucket_count(bucket_count),\n                                       m_nb_elements(0), \n                                       m_grow_on_next_insert(false),\n                                       m_try_shrink_on_next_insert(false)\n    {\n        if(bucket_count > max_bucket_count()) {\n            TSL_RH_THROW_OR_TERMINATE(std::length_error, \"The map exceeds its maximum bucket count.\");\n        }\n        \n        if(m_bucket_count > 0) {\n            m_buckets_data.resize(m_bucket_count);\n            m_buckets = m_buckets_data.data();\n            \n            tsl_rh_assert(!m_buckets_data.empty());\n            m_buckets_data.back().set_as_last_bucket();\n        }\n        \n        this->min_load_factor(min_load_factor);\n        this->max_load_factor(max_load_factor);\n    }\n#endif\n    \n    robin_hash(const robin_hash& other): Hash(other),\n                                         KeyEqual(other),\n                                         GrowthPolicy(other),\n                                         m_buckets_data(other.m_buckets_data),\n                                         
m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():m_buckets_data.data()),\n                                         m_bucket_count(other.m_bucket_count),\n                                         m_nb_elements(other.m_nb_elements),\n                                         m_load_threshold(other.m_load_threshold),\n                                         m_min_load_factor(other.m_min_load_factor),\n                                         m_max_load_factor(other.m_max_load_factor),\n                                         m_grow_on_next_insert(other.m_grow_on_next_insert),\n                                         m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert)\n    {\n    }\n    \n    robin_hash(robin_hash&& other) noexcept(std::is_nothrow_move_constructible<Hash>::value &&\n                                            std::is_nothrow_move_constructible<KeyEqual>::value &&\n                                            std::is_nothrow_move_constructible<GrowthPolicy>::value &&\n                                            std::is_nothrow_move_constructible<buckets_container_type>::value)\n                                          : Hash(std::move(static_cast<Hash&>(other))),\n                                            KeyEqual(std::move(static_cast<KeyEqual&>(other))),\n                                            GrowthPolicy(std::move(static_cast<GrowthPolicy&>(other))),\n                                            m_buckets_data(std::move(other.m_buckets_data)),\n                                            m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():m_buckets_data.data()),\n                                            m_bucket_count(other.m_bucket_count),\n                                            m_nb_elements(other.m_nb_elements),\n                                            m_load_threshold(other.m_load_threshold),\n                                            m_min_load_factor(other.m_min_load_factor),\n                  
                          m_max_load_factor(other.m_max_load_factor),\n                                            m_grow_on_next_insert(other.m_grow_on_next_insert),\n                                            m_try_shrink_on_next_insert(other.m_try_shrink_on_next_insert)\n    {\n        other.clear_and_shrink();\n    }\n    \n    robin_hash& operator=(const robin_hash& other) {\n        if(&other != this) {\n            Hash::operator=(other);\n            KeyEqual::operator=(other);\n            GrowthPolicy::operator=(other);\n            \n            m_buckets_data = other.m_buckets_data;\n            m_buckets = m_buckets_data.empty()?static_empty_bucket_ptr():\n                                               m_buckets_data.data();\n            m_bucket_count = other.m_bucket_count;\n            m_nb_elements = other.m_nb_elements;\n            \n            m_load_threshold = other.m_load_threshold;\n            m_min_load_factor = other.m_min_load_factor;\n            m_max_load_factor = other.m_max_load_factor;\n            \n            m_grow_on_next_insert = other.m_grow_on_next_insert;\n            m_try_shrink_on_next_insert = other.m_try_shrink_on_next_insert;\n        }\n        \n        return *this;\n    }\n    \n    robin_hash& operator=(robin_hash&& other) {\n        other.swap(*this);\n        other.clear();\n        \n        return *this;\n    }\n    \n    allocator_type get_allocator() const {\n        return m_buckets_data.get_allocator();\n    }\n    \n    \n    /*\n     * Iterators\n     */\n    iterator begin() noexcept {\n        std::size_t i = 0;\n        while(i < m_bucket_count && m_buckets[i].empty()) {\n            i++;\n        }\n        \n        return iterator(m_buckets + i);\n    }\n    \n    const_iterator begin() const noexcept {\n        return cbegin();\n    }\n    \n    const_iterator cbegin() const noexcept {\n        std::size_t i = 0;\n        while(i < m_bucket_count && m_buckets[i].empty()) {\n            i++;\n  
      }\n        \n        return const_iterator(m_buckets + i);\n    }\n    \n    iterator end() noexcept {\n        return iterator(m_buckets + m_bucket_count);\n    }\n    \n    const_iterator end() const noexcept {\n        return cend();\n    }\n    \n    const_iterator cend() const noexcept {\n        return const_iterator(m_buckets + m_bucket_count);\n    }\n    \n    \n    /*\n     * Capacity\n     */\n    bool empty() const noexcept {\n        return m_nb_elements == 0;\n    }\n    \n    size_type size() const noexcept {\n        return m_nb_elements;\n    }\n    \n    size_type max_size() const noexcept {\n        return m_buckets_data.max_size();\n    }\n    \n    /*\n     * Modifiers\n     */\n    void clear() noexcept {\n        if(m_min_load_factor > 0.0f) {\n            clear_and_shrink();\n        }\n        else {\n            for(auto& bucket: m_buckets_data) {\n                bucket.clear();\n            }\n            \n            m_nb_elements = 0;\n            m_grow_on_next_insert = false;\n        }\n    }\n    \n    \n    \n    template<typename P>\n    std::pair<iterator, bool> insert(P&& value) {\n        return insert_impl(KeySelect()(value), std::forward<P>(value));\n    }\n    \n    template<typename P>\n    iterator insert_hint(const_iterator hint, P&& value) { \n        if(hint != cend() && compare_keys(KeySelect()(*hint), KeySelect()(value))) { \n            return mutable_iterator(hint); \n        }\n        \n        return insert(std::forward<P>(value)).first; \n    }\n    \n    template<class InputIt>\n    void insert(InputIt first, InputIt last) {\n        if(std::is_base_of<std::forward_iterator_tag, \n                           typename std::iterator_traits<InputIt>::iterator_category>::value) \n        {\n            const auto nb_elements_insert = std::distance(first, last);\n            const size_type nb_free_buckets = m_load_threshold - size();\n            tsl_rh_assert(m_load_threshold >= size());\n            \n     
       if(nb_elements_insert > 0 && nb_free_buckets < size_type(nb_elements_insert)) {\n                reserve(size() + size_type(nb_elements_insert));\n            }\n        }\n        \n        for(; first != last; ++first) {\n            insert(*first);\n        }\n    }\n    \n    \n    \n    template<class K, class M>\n    std::pair<iterator, bool> insert_or_assign(K&& key, M&& obj) { \n        auto it = try_emplace(std::forward<K>(key), std::forward<M>(obj));\n        if(!it.second) {\n            it.first.value() = std::forward<M>(obj);\n        }\n        \n        return it;\n    }\n    \n    template<class K, class M>\n    iterator insert_or_assign(const_iterator hint, K&& key, M&& obj) {\n        if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { \n            auto it = mutable_iterator(hint); \n            it.value() = std::forward<M>(obj);\n            \n            return it;\n        }\n        \n        return insert_or_assign(std::forward<K>(key), std::forward<M>(obj)).first;\n    }\n\n    \n    template<class... Args>\n    std::pair<iterator, bool> emplace(Args&&... args) {\n        return insert(value_type(std::forward<Args>(args)...));\n    }\n    \n    template<class... Args>\n    iterator emplace_hint(const_iterator hint, Args&&... args) {\n        return insert_hint(hint, value_type(std::forward<Args>(args)...));        \n    }\n    \n    \n    \n    template<class K, class... Args>\n    std::pair<iterator, bool> try_emplace(K&& key, Args&&... args) {\n        return insert_impl(key, std::piecewise_construct, \n                                std::forward_as_tuple(std::forward<K>(key)), \n                                std::forward_as_tuple(std::forward<Args>(args)...));\n    }\n    \n    template<class K, class... Args>\n    iterator try_emplace_hint(const_iterator hint, K&& key, Args&&... 
args) { \n        if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { \n            return mutable_iterator(hint); \n        }\n        \n        return try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;\n    }\n    \n    /**\n     * Here to avoid `template<class K> size_type erase(const K& key)` being used when\n     * we use an `iterator` instead of a `const_iterator`.\n     */\n    iterator erase(iterator pos) {\n        erase_from_bucket(pos);\n        \n        /**\n         * Erase bucket used a backward shift after clearing the bucket.\n         * Check if there is a new value in the bucket, if not get the next non-empty.\n         */\n        if(pos.m_bucket->empty()) {\n            ++pos;\n        }\n        \n        m_try_shrink_on_next_insert = true;\n        \n        return pos;\n    }\n    \n    iterator erase(const_iterator pos) {\n        return erase(mutable_iterator(pos));\n    }\n    \n    iterator erase(const_iterator first, const_iterator last) {\n        if(first == last) {\n            return mutable_iterator(first);\n        }\n        \n        auto first_mutable = mutable_iterator(first);\n        auto last_mutable = mutable_iterator(last);\n        for(auto it = first_mutable.m_bucket; it != last_mutable.m_bucket; ++it) {\n            if(!it->empty()) {\n                it->clear();\n                m_nb_elements--;\n            }\n        }\n        \n        if(last_mutable == end()) {\n            m_try_shrink_on_next_insert = true;\n            return end();\n        }\n        \n        \n        /*\n         * Backward shift on the values which come after the deleted values.\n         * We try to move the values closer to their ideal bucket.\n         */\n        std::size_t icloser_bucket = static_cast<std::size_t>(first_mutable.m_bucket - m_buckets);\n        std::size_t ito_move_closer_value = static_cast<std::size_t>(last_mutable.m_bucket - m_buckets);\n        tsl_rh_assert(ito_move_closer_value > 
icloser_bucket);\n        \n        const std::size_t ireturn_bucket = ito_move_closer_value - \n                                           std::min(ito_move_closer_value - icloser_bucket, \n                                                    std::size_t(m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));\n        \n        while(ito_move_closer_value < m_bucket_count && m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) {\n            icloser_bucket = ito_move_closer_value - \n                             std::min(ito_move_closer_value - icloser_bucket, \n                                      std::size_t(m_buckets[ito_move_closer_value].dist_from_ideal_bucket()));\n            \n            \n            tsl_rh_assert(m_buckets[icloser_bucket].empty());\n            const distance_type new_distance = distance_type(m_buckets[ito_move_closer_value].dist_from_ideal_bucket() -\n                                                             (ito_move_closer_value - icloser_bucket));\n            m_buckets[icloser_bucket].set_value_of_empty_bucket(new_distance, \n                                                                m_buckets[ito_move_closer_value].truncated_hash(), \n                                                                std::move(m_buckets[ito_move_closer_value].value()));\n            m_buckets[ito_move_closer_value].clear();\n            \n            \n            ++icloser_bucket;\n            ++ito_move_closer_value;\n        }\n        \n        m_try_shrink_on_next_insert = true;\n        \n        return iterator(m_buckets + ireturn_bucket);\n    }\n    \n    \n    template<class K>\n    size_type erase(const K& key) {\n        return erase(key, hash_key(key));\n    }\n    \n    template<class K>\n    size_type erase(const K& key, std::size_t hash) {\n        auto it = find(key, hash);\n        if(it != end()) {\n            erase_from_bucket(it);\n            m_try_shrink_on_next_insert = true;\n            \n            
return 1;\n        }\n        else {\n            return 0;\n        }\n    }\n    \n    \n    \n    \n    \n    void swap(robin_hash& other) {\n        using std::swap;\n        \n        swap(static_cast<Hash&>(*this), static_cast<Hash&>(other));\n        swap(static_cast<KeyEqual&>(*this), static_cast<KeyEqual&>(other));\n        swap(static_cast<GrowthPolicy&>(*this), static_cast<GrowthPolicy&>(other));\n        swap(m_buckets_data, other.m_buckets_data);\n        swap(m_buckets, other.m_buckets);\n        swap(m_bucket_count, other.m_bucket_count);\n        swap(m_nb_elements, other.m_nb_elements);\n        swap(m_load_threshold, other.m_load_threshold);\n        swap(m_min_load_factor, other.m_min_load_factor);\n        swap(m_max_load_factor, other.m_max_load_factor);\n        swap(m_grow_on_next_insert, other.m_grow_on_next_insert);\n        swap(m_try_shrink_on_next_insert, other.m_try_shrink_on_next_insert);\n    }\n    \n    \n    /*\n     * Lookup\n     */\n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    typename U::value_type& at(const K& key) {\n        return at(key, hash_key(key));\n    }\n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    typename U::value_type& at(const K& key, std::size_t hash) {\n        return const_cast<typename U::value_type&>(static_cast<const robin_hash*>(this)->at(key, hash));\n    }\n    \n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    const typename U::value_type& at(const K& key) const {\n        return at(key, hash_key(key));\n    }\n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    const typename U::value_type& at(const K& key, std::size_t hash) const {\n        auto it = find(key, hash);\n        if(it != cend()) {\n         
   return it.value();\n        }\n        else {\n            TSL_RH_THROW_OR_TERMINATE(std::out_of_range, \"Couldn't find key.\");\n        }\n    }\n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    typename U::value_type& operator[](K&& key) {\n        return try_emplace(std::forward<K>(key)).first.value();\n    }\n    \n    \n    template<class K>\n    size_type count(const K& key) const {\n        return count(key, hash_key(key));\n    }\n    \n    template<class K>\n    size_type count(const K& key, std::size_t hash) const {\n        if(find(key, hash) != cend()) {\n            return 1;\n        }\n        else {\n            return 0;\n        }\n    }\n    \n    \n    template<class K>\n    iterator find(const K& key) {\n        return find_impl(key, hash_key(key));\n    }\n    \n    template<class K>\n    iterator find(const K& key, std::size_t hash) {\n        return find_impl(key, hash);\n    }\n    \n    \n    template<class K>\n    const_iterator find(const K& key) const {\n        return find_impl(key, hash_key(key));\n    }\n    \n    template<class K>\n    const_iterator find(const K& key, std::size_t hash) const {\n        return find_impl(key, hash);\n    }\n    \n    \n    template<class K>\n    bool contains(const K& key) const {\n        return contains(key, hash_key(key));\n    }\n    \n    template<class K>\n    bool contains(const K& key, std::size_t hash) const {\n        return count(key, hash) != 0;\n    }\n    \n    \n    template<class K>\n    std::pair<iterator, iterator> equal_range(const K& key) {\n        return equal_range(key, hash_key(key));\n    }\n    \n    template<class K>\n    std::pair<iterator, iterator> equal_range(const K& key, std::size_t hash) {\n        iterator it = find(key, hash);\n        return std::make_pair(it, (it == end())?it:std::next(it));\n    }\n    \n    \n    template<class K>\n    std::pair<const_iterator, const_iterator> 
equal_range(const K& key) const {\n        return equal_range(key, hash_key(key));\n    }\n    \n    template<class K>\n    std::pair<const_iterator, const_iterator> equal_range(const K& key, std::size_t hash) const {\n        const_iterator it = find(key, hash);\n        return std::make_pair(it, (it == cend())?it:std::next(it));\n    }\n    \n    /*\n     * Bucket interface \n     */\n    size_type bucket_count() const {\n        return m_bucket_count; \n    }\n    \n    size_type max_bucket_count() const {\n        return std::min(GrowthPolicy::max_bucket_count(), m_buckets_data.max_size());\n    }\n    \n    /*\n     * Hash policy \n     */\n    float load_factor() const {\n        if(bucket_count() == 0) {\n            return 0;\n        }\n        \n        return float(m_nb_elements)/float(bucket_count());\n    }\n    \n    float min_load_factor() const {\n        return m_min_load_factor;\n    }\n    \n    float max_load_factor() const {\n        return m_max_load_factor;\n    }\n    \n    void min_load_factor(float ml) {\n        m_min_load_factor = clamp(ml, float(MINIMUM_MIN_LOAD_FACTOR), \n                                      float(MAXIMUM_MIN_LOAD_FACTOR));\n    }\n    \n    void max_load_factor(float ml) {\n        m_max_load_factor = clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR), \n                                      float(MAXIMUM_MAX_LOAD_FACTOR));\n        m_load_threshold = size_type(float(bucket_count())*m_max_load_factor);\n    }\n    \n    void rehash(size_type count) {\n        count = std::max(count, size_type(std::ceil(float(size())/max_load_factor())));\n        rehash_impl(count);\n    }\n    \n    void reserve(size_type count) {\n        rehash(size_type(std::ceil(float(count)/max_load_factor())));\n    }    \n    \n    /*\n     * Observers\n     */\n    hasher hash_function() const {\n        return static_cast<const Hash&>(*this);\n    }\n    \n    key_equal key_eq() const {\n        return static_cast<const KeyEqual&>(*this);\n    }\n    
\n    \n    /*\n     * Other\n     */    \n    iterator mutable_iterator(const_iterator pos) {\n        return iterator(const_cast<bucket_entry*>(pos.m_bucket));\n    }\n    \nprivate:\n    template<class K>\n    std::size_t hash_key(const K& key) const {\n        return Hash::operator()(key);\n    }\n    \n    template<class K1, class K2>\n    bool compare_keys(const K1& key1, const K2& key2) const {\n        return KeyEqual::operator()(key1, key2);\n    }\n    \n    std::size_t bucket_for_hash(std::size_t hash) const {\n        const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash);\n        tsl_rh_assert(bucket < m_bucket_count || (bucket == 0 && m_bucket_count == 0));\n        \n        return bucket;\n    }\n    \n    template<class U = GrowthPolicy, typename std::enable_if<is_power_of_two_policy<U>::value>::type* = nullptr>\n    std::size_t next_bucket(std::size_t index) const noexcept {\n        tsl_rh_assert(index < bucket_count());\n        \n        return (index + 1) & this->m_mask;\n    }\n    \n    template<class U = GrowthPolicy, typename std::enable_if<!is_power_of_two_policy<U>::value>::type* = nullptr>\n    std::size_t next_bucket(std::size_t index) const noexcept {\n        tsl_rh_assert(index < bucket_count());\n        \n        index++;\n        return (index != bucket_count())?index:0;\n    }\n    \n    \n    \n    template<class K>\n    iterator find_impl(const K& key, std::size_t hash) {\n        return mutable_iterator(static_cast<const robin_hash*>(this)->find(key, hash));\n    }\n    \n    template<class K>\n    const_iterator find_impl(const K& key, std::size_t hash) const {\n        std::size_t ibucket = bucket_for_hash(hash); \n        distance_type dist_from_ideal_bucket = 0;\n        \n        while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) {\n            if(TSL_RH_LIKELY((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && \n               
compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) \n            {\n                return const_iterator(m_buckets + ibucket);\n            }\n            \n            ibucket = next_bucket(ibucket);\n            dist_from_ideal_bucket++;\n        }\n        \n        return cend();\n    }\n    \n    void erase_from_bucket(iterator pos) {\n        pos.m_bucket->clear();\n        m_nb_elements--;\n        \n        /**\n         * Backward shift, swap the empty bucket, previous_ibucket, with the values on its right, ibucket,\n         * until we cross another empty bucket or if the other bucket has a distance_from_ideal_bucket == 0.\n         * \n         * We try to move the values closer to their ideal bucket.\n         */\n        std::size_t previous_ibucket = static_cast<std::size_t>(pos.m_bucket - m_buckets);\n        std::size_t ibucket = next_bucket(previous_ibucket);\n        \n        while(m_buckets[ibucket].dist_from_ideal_bucket() > 0) {\n            tsl_rh_assert(m_buckets[previous_ibucket].empty());\n            \n            const distance_type new_distance = distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1);\n            m_buckets[previous_ibucket].set_value_of_empty_bucket(new_distance, m_buckets[ibucket].truncated_hash(), \n                                                                  std::move(m_buckets[ibucket].value()));\n            m_buckets[ibucket].clear();\n\n            previous_ibucket = ibucket;\n            ibucket = next_bucket(ibucket);\n        }\n    }\n    \n    template<class K, class... Args>\n    std::pair<iterator, bool> insert_impl(const K& key, Args&&... 
value_type_args) {\n        const std::size_t hash = hash_key(key);\n        \n        std::size_t ibucket = bucket_for_hash(hash); \n        distance_type dist_from_ideal_bucket = 0;\n        \n        while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) {\n            if((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) &&\n               compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) \n            {\n                return std::make_pair(iterator(m_buckets + ibucket), false);\n            }\n            \n            ibucket = next_bucket(ibucket);\n            dist_from_ideal_bucket++;\n        }\n        \n        if(rehash_on_extreme_load()) {\n            ibucket = bucket_for_hash(hash);\n            dist_from_ideal_bucket = 0;\n            \n            while(dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) {\n                ibucket = next_bucket(ibucket);\n                dist_from_ideal_bucket++;\n            }\n        }\n \n        \n        if(m_buckets[ibucket].empty()) {\n            m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, bucket_entry::truncate_hash(hash),\n                                                         std::forward<Args>(value_type_args)...);\n        }\n        else {\n            insert_value(ibucket, dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), \n                         std::forward<Args>(value_type_args)...);\n        }\n        \n        \n        m_nb_elements++;\n        /*\n         * The value will be inserted in ibucket in any case, either because it was\n         * empty or by stealing the bucket (robin hood). \n         */\n        return std::make_pair(iterator(m_buckets + ibucket), true);\n    }\n    \n    \n    template<class... Args>\n    void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, \n                      truncated_hash_type hash, Args&&... 
value_type_args) \n    {\n        value_type value(std::forward<Args>(value_type_args)...);\n        insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);\n    }\n\n    void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket,\n                      truncated_hash_type hash, value_type&& value)\n    {\n        insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value);\n    }\n\n    /*\n     * We don't use `value_type&& value` as last argument due to a bug in MSVC when `value_type` is a pointer,\n     * The compiler is not able to see the difference between `std::string*` and `std::string*&&` resulting in \n     * a compilation error.\n     * \n     * The `value` will be in a moved state at the end of the function.\n     */\n    void insert_value_impl(std::size_t ibucket, distance_type dist_from_ideal_bucket,\n                           truncated_hash_type hash, value_type& value)\n    {\n        m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value);\n        ibucket = next_bucket(ibucket);\n        dist_from_ideal_bucket++;\n        \n        while(!m_buckets[ibucket].empty()) {\n            if(dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) {\n                if(dist_from_ideal_bucket >= bucket_entry::DIST_FROM_IDEAL_BUCKET_LIMIT) {\n                    /**\n                     * The number of probes is really high, rehash the map on the next insert.\n                     * Difficult to do now as rehash may throw an exception.\n                     */\n                    m_grow_on_next_insert = true;\n                }\n            \n                m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value);\n            }\n            \n            ibucket = next_bucket(ibucket);\n            dist_from_ideal_bucket++;\n        }\n        \n        m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value));\n    }\n    \n 
   \n    void rehash_impl(size_type count) {\n        robin_hash new_table(count, static_cast<Hash&>(*this), static_cast<KeyEqual&>(*this), \n                             get_allocator(), m_min_load_factor, m_max_load_factor);\n        \n        const bool use_stored_hash = USE_STORED_HASH_ON_REHASH(new_table.bucket_count());\n        for(auto& bucket: m_buckets_data) {\n            if(bucket.empty()) { \n                continue; \n            }\n            \n            const std::size_t hash = use_stored_hash?bucket.truncated_hash():\n                                                     new_table.hash_key(KeySelect()(bucket.value()));\n                                                     \n            new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0, \n                                             bucket_entry::truncate_hash(hash), std::move(bucket.value()));\n        }\n        \n        new_table.m_nb_elements = m_nb_elements;\n        new_table.swap(*this);\n    }\n    \n    void clear_and_shrink() noexcept {\n        GrowthPolicy::clear();\n        m_buckets_data.clear();\n        m_buckets = static_empty_bucket_ptr();\n        m_bucket_count = 0;\n        m_nb_elements = 0;\n        m_load_threshold = 0;\n        m_grow_on_next_insert = false;\n        m_try_shrink_on_next_insert = false;\n    }\n    \n    void insert_value_on_rehash(std::size_t ibucket, distance_type dist_from_ideal_bucket, \n                                truncated_hash_type hash, value_type&& value) \n    {\n        while(true) {\n            if(dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) {\n                if(m_buckets[ibucket].empty()) {\n                    m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value));\n                    return;\n                }\n                else {\n                    m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value);\n                
}\n            }\n            \n            dist_from_ideal_bucket++;\n            ibucket = next_bucket(ibucket);\n        }\n    }\n    \n    \n    \n    /**\n     * Grow the table if m_grow_on_next_insert is true or we reached the max_load_factor.\n     * Shrink the table if m_try_shrink_on_next_insert is true (an erase occurred) and\n     * we're below the min_load_factor.\n     * \n     * Return true if the table has been rehashed.\n     */\n    bool rehash_on_extreme_load() {\n        if(m_grow_on_next_insert || size() >= m_load_threshold) {\n            rehash_impl(GrowthPolicy::next_bucket_count());\n            m_grow_on_next_insert = false;\n            \n            return true;\n        }\n        \n        if(m_try_shrink_on_next_insert) {\n            m_try_shrink_on_next_insert = false;\n            if(m_min_load_factor != 0.0f && load_factor() < m_min_load_factor) {\n                reserve(size() + 1);\n                \n                return true;\n            }\n        }\n        \n        return false;\n    }\n\n    \npublic:\n    static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0;\n    \n    static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f;\n    static constexpr float MINIMUM_MAX_LOAD_FACTOR = 0.2f;\n    static constexpr float MAXIMUM_MAX_LOAD_FACTOR = 0.95f;\n    \n    static constexpr float DEFAULT_MIN_LOAD_FACTOR = 0.0f;\n    static constexpr float MINIMUM_MIN_LOAD_FACTOR = 0.0f;\n    static constexpr float MAXIMUM_MIN_LOAD_FACTOR = 0.15f;\n    \n    static_assert(MINIMUM_MAX_LOAD_FACTOR < MAXIMUM_MAX_LOAD_FACTOR, \n                  \"MINIMUM_MAX_LOAD_FACTOR should be < MAXIMUM_MAX_LOAD_FACTOR\");\n    static_assert(MINIMUM_MIN_LOAD_FACTOR < MAXIMUM_MIN_LOAD_FACTOR, \n                  \"MINIMUM_MIN_LOAD_FACTOR should be < MAXIMUM_MIN_LOAD_FACTOR\");\n    static_assert(MAXIMUM_MIN_LOAD_FACTOR < MINIMUM_MAX_LOAD_FACTOR, \n                  \"MAXIMUM_MIN_LOAD_FACTOR should be < MINIMUM_MAX_LOAD_FACTOR\");\n    \nprivate:\n    /**\n   
  * Return an always valid pointer to an static empty bucket_entry with last_bucket() == true.\n     */            \n    bucket_entry* static_empty_bucket_ptr() noexcept {\n        static bucket_entry empty_bucket(true);\n        return &empty_bucket;\n    }\n    \nprivate:\n    buckets_container_type m_buckets_data;\n    \n    /**\n     * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points to static_empty_bucket_ptr.\n     * This variable is useful to avoid the cost of checking if m_buckets_data is empty when trying \n     * to find an element.\n     * \n     * TODO Remove m_buckets_data and only use a pointer instead of a pointer+vector to save some space in the robin_hash object.\n     * Manage the Allocator manually.\n     */\n    bucket_entry* m_buckets;\n    \n    /**\n     * Used a lot in find, avoid the call to m_buckets_data.size() which is a bit slower.\n     */\n    size_type m_bucket_count;\n    \n    size_type m_nb_elements;\n    \n    size_type m_load_threshold;\n    \n    float m_min_load_factor;\n    float m_max_load_factor;\n    \n    bool m_grow_on_next_insert;\n    \n    /**\n     * We can't shrink down the map on erase operations as the erase methods need to return the next iterator.\n     * Shrinking the map would invalidate all the iterators and we could not return the next iterator in a meaningful way,\n     * On erase, we thus just indicate on erase that we should try to shrink the hash table on the next insert\n     * if we go below the min_load_factor. \n     */\n    bool m_try_shrink_on_next_insert;\n};\n\n}\n\n}\n\n#endif\n"
  },
  {
    "path": "Thirdparty/tessil-src/include/tsl/robin_map.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ROBIN_MAP_H\n#define TSL_ROBIN_MAP_H \n\n\n#include <cstddef>\n#include <functional>\n#include <initializer_list>\n#include <memory>\n#include <type_traits>\n#include <utility>\n#include \"robin_hash.h\"\n\n\nnamespace tsl {\n\n    \n/**\n * Implementation of a hash map using open-addressing and the robin hood hashing algorithm with backward shift deletion.\n * \n * For operations modifying the hash map (insert, erase, rehash, ...), the strong exception guarantee \n * is only guaranteed when the expression `std::is_nothrow_swappable<std::pair<Key, T>>::value &&\n * std::is_nothrow_move_constructible<std::pair<Key, T>>::value` is true, otherwise if an exception\n * is thrown during the swap or the move, the hash map may end up in a undefined state. 
Per the standard\n * a `Key` or `T` with a noexcept copy constructor and no move constructor also satisfies the \n * `std::is_nothrow_move_constructible<std::pair<Key, T>>::value` criterion (and will thus guarantee the \n * strong exception for the map).\n * \n * When `StoreHash` is true, 32 bits of the hash are stored alongside the values. It can improve \n * the performance during lookups if the `KeyEqual` function takes time (if it engenders a cache-miss for example) \n * as we then compare the stored hashes before comparing the keys. When `tsl::rh::power_of_two_growth_policy` is used\n * as `GrowthPolicy`, it may also speed-up the rehash process as we can avoid to recalculate the hash. \n * When it is detected that storing the hash will not incur any memory penalty due to alignment (i.e. \n * `sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, true>) == \n * sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, false>)`) and `tsl::rh::power_of_two_growth_policy` is\n * used, the hash will be stored even if `StoreHash` is false so that we can speed-up the rehash (but it will\n * not be used on lookups unless `StoreHash` is true).\n * \n * `GrowthPolicy` defines how the map grows and consequently how a hash value is mapped to a bucket. \n * By default the map uses `tsl::rh::power_of_two_growth_policy`. 
This policy keeps the number of buckets \n * to a power of two and uses a mask to map the hash to a bucket instead of the slow modulo.\n * Other growth policies are available and you may define your own growth policy, \n * check `tsl::rh::power_of_two_growth_policy` for the interface.\n * \n * `std::pair<Key, T>` must be swappable.\n * \n * `Key` and `T` must be copy and/or move constructible.\n * \n * If the destructor of `Key` or `T` throws an exception, the behaviour of the class is undefined.\n * \n * Iterators invalidation:\n *  - clear, operator=, reserve, rehash: always invalidate the iterators.\n *  - insert, emplace, emplace_hint, operator[]: if there is an effective insert, invalidate the iterators.\n *  - erase: always invalidate the iterators.\n */\ntemplate<class Key, \n         class T, \n         class Hash = std::hash<Key>,\n         class KeyEqual = std::equal_to<Key>,\n         class Allocator = std::allocator<std::pair<Key, T>>,\n         bool StoreHash = false,\n         class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>>\nclass robin_map {\nprivate:\n    template<typename U>\n    using has_is_transparent = tsl::detail_robin_hash::has_is_transparent<U>;\n    \n    class KeySelect {\n    public:\n        using key_type = Key;\n        \n        const key_type& operator()(const std::pair<Key, T>& key_value) const noexcept {\n            return key_value.first;\n        }\n        \n        key_type& operator()(std::pair<Key, T>& key_value) noexcept {\n            return key_value.first;\n        }\n    };  \n    \n    class ValueSelect {\n    public:\n        using value_type = T;\n        \n        const value_type& operator()(const std::pair<Key, T>& key_value) const noexcept {\n            return key_value.second;\n        }\n        \n        value_type& operator()(std::pair<Key, T>& key_value) noexcept {\n            return key_value.second;\n        }\n    };\n    \n    using ht = detail_robin_hash::robin_hash<std::pair<Key, T>, 
KeySelect, ValueSelect,\n                                             Hash, KeyEqual, Allocator, StoreHash, GrowthPolicy>;  \n                                             \npublic:\n    using key_type = typename ht::key_type;\n    using mapped_type = T;\n    using value_type = typename ht::value_type;\n    using size_type = typename ht::size_type;\n    using difference_type = typename ht::difference_type;\n    using hasher = typename ht::hasher;\n    using key_equal = typename ht::key_equal;\n    using allocator_type = typename ht::allocator_type;\n    using reference = typename ht::reference;\n    using const_reference = typename ht::const_reference;\n    using pointer = typename ht::pointer;\n    using const_pointer = typename ht::const_pointer;\n    using iterator = typename ht::iterator;\n    using const_iterator = typename ht::const_iterator;\n    \n    \npublic:\n    /*\n     * Constructors\n     */\n    robin_map(): robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {\n    }\n    \n    explicit robin_map(size_type bucket_count, \n                       const Hash& hash = Hash(),\n                       const KeyEqual& equal = KeyEqual(),\n                       const Allocator& alloc = Allocator()): \n                m_ht(bucket_count, hash, equal, alloc)\n    {\n    }\n    \n    robin_map(size_type bucket_count,\n              const Allocator& alloc): robin_map(bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    robin_map(size_type bucket_count,\n              const Hash& hash,\n              const Allocator& alloc): robin_map(bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n    \n    explicit robin_map(const Allocator& alloc): robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {\n    }\n    \n    template<class InputIt>\n    robin_map(InputIt first, InputIt last,\n              size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n              const Hash& hash = Hash(),\n              const KeyEqual& equal = KeyEqual(),\n              const 
Allocator& alloc = Allocator()): robin_map(bucket_count, hash, equal, alloc)\n    {\n        insert(first, last);\n    }\n    \n    template<class InputIt>\n    robin_map(InputIt first, InputIt last,\n              size_type bucket_count,\n              const Allocator& alloc): robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    template<class InputIt>\n    robin_map(InputIt first, InputIt last,\n              size_type bucket_count,\n              const Hash& hash,\n              const Allocator& alloc): robin_map(first, last, bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n\n    robin_map(std::initializer_list<value_type> init,\n              size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n              const Hash& hash = Hash(),\n              const KeyEqual& equal = KeyEqual(),\n              const Allocator& alloc = Allocator()): \n          robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc)\n    {\n    }\n\n    robin_map(std::initializer_list<value_type> init,\n              size_type bucket_count,\n              const Allocator& alloc): \n          robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n\n    robin_map(std::initializer_list<value_type> init,\n              size_type bucket_count,\n              const Hash& hash,\n              const Allocator& alloc): \n          robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n    \n    robin_map& operator=(std::initializer_list<value_type> ilist) {\n        m_ht.clear();\n        \n        m_ht.reserve(ilist.size());\n        m_ht.insert(ilist.begin(), ilist.end());\n        \n        return *this;\n    }\n    \n    allocator_type get_allocator() const { return m_ht.get_allocator(); }\n    \n    \n    /*\n     * Iterators\n     */\n    iterator begin() noexcept { return m_ht.begin(); }\n    const_iterator begin() const noexcept { return m_ht.begin(); }\n    
const_iterator cbegin() const noexcept { return m_ht.cbegin(); }\n    \n    iterator end() noexcept { return m_ht.end(); }\n    const_iterator end() const noexcept { return m_ht.end(); }\n    const_iterator cend() const noexcept { return m_ht.cend(); }\n    \n    \n    /*\n     * Capacity\n     */\n    bool empty() const noexcept { return m_ht.empty(); }\n    size_type size() const noexcept { return m_ht.size(); }\n    size_type max_size() const noexcept { return m_ht.max_size(); }\n    \n    /*\n     * Modifiers\n     */\n    void clear() noexcept { m_ht.clear(); }\n    \n    \n    \n    std::pair<iterator, bool> insert(const value_type& value) { \n        return m_ht.insert(value); \n    }\n        \n    template<class P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>\n    std::pair<iterator, bool> insert(P&& value) { \n        return m_ht.emplace(std::forward<P>(value)); \n    }\n    \n    std::pair<iterator, bool> insert(value_type&& value) { \n        return m_ht.insert(std::move(value)); \n    }\n    \n    \n    iterator insert(const_iterator hint, const value_type& value) { \n        return m_ht.insert_hint(hint, value); \n    }\n        \n    template<class P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>\n    iterator insert(const_iterator hint, P&& value) { \n        return m_ht.emplace_hint(hint, std::forward<P>(value));\n    }\n    \n    iterator insert(const_iterator hint, value_type&& value) { \n        return m_ht.insert_hint(hint, std::move(value)); \n    }\n    \n    \n    template<class InputIt>\n    void insert(InputIt first, InputIt last) { \n        m_ht.insert(first, last); \n    }\n    \n    void insert(std::initializer_list<value_type> ilist) { \n        m_ht.insert(ilist.begin(), ilist.end()); \n    }\n\n    \n    \n    \n    template<class M>\n    std::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj) { \n        return 
m_ht.insert_or_assign(k, std::forward<M>(obj)); \n    }\n\n    template<class M>\n    std::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj) { \n        return m_ht.insert_or_assign(std::move(k), std::forward<M>(obj)); \n    }\n    \n    template<class M>\n    iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) {\n        return m_ht.insert_or_assign(hint, k, std::forward<M>(obj));\n    }\n    \n    template<class M>\n    iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) {\n        return m_ht.insert_or_assign(hint, std::move(k), std::forward<M>(obj));\n    }\n    \n    \n    \n    /**\n     * Due to the way elements are stored, emplace will need to move or copy the key-value once.\n     * The method is equivalent to insert(value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    std::pair<iterator, bool> emplace(Args&&... args) { \n        return m_ht.emplace(std::forward<Args>(args)...); \n    }\n    \n    \n    \n    /**\n     * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once.\n     * The method is equivalent to insert(hint, value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    iterator emplace_hint(const_iterator hint, Args&&... args) {\n        return m_ht.emplace_hint(hint, std::forward<Args>(args)...);\n    }\n    \n    \n    \n    \n    template<class... Args>\n    std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args) { \n        return m_ht.try_emplace(k, std::forward<Args>(args)...);\n    }\n    \n    template<class... Args>\n    std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args) {\n        return m_ht.try_emplace(std::move(k), std::forward<Args>(args)...);\n    }\n    \n    template<class... 
Args>\n    iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) {\n        return m_ht.try_emplace_hint(hint, k, std::forward<Args>(args)...);\n    }\n    \n    template<class... Args>\n    iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) {\n        return m_ht.try_emplace_hint(hint, std::move(k), std::forward<Args>(args)...);\n    }\n    \n    \n\n    \n    iterator erase(iterator pos) { return m_ht.erase(pos); }\n    iterator erase(const_iterator pos) { return m_ht.erase(pos); }\n    iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); }\n    size_type erase(const key_type& key) { return m_ht.erase(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash.\n     */    \n    size_type erase(const key_type& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key) { return m_ht.erase(key); }\n    \n    /**\n     * @copydoc erase(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup to the value if you already have the hash.\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    \n    \n    void swap(robin_map& other) { other.m_ht.swap(m_ht); }\n    \n    \n    \n    /*\n     * Lookup\n     */\n    T& at(const Key& key) { return m_ht.at(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    T& at(const Key& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); }\n    \n    \n    const T& at(const Key& key) const { return m_ht.at(key); }\n    \n    /**\n     * @copydoc at(const Key& key, std::size_t precalculated_hash)\n     */\n    const T& at(const Key& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); }\n    \n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    T& at(const K& key) { return m_ht.at(key); }\n\n    /**\n     * @copydoc at(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup if you already have the hash.\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    T& at(const K& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); }\n    \n    \n    /**\n     * @copydoc at(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const T& at(const K& key) const { return m_ht.at(key); }\n    \n    /**\n     * @copydoc at(const K& key, std::size_t precalculated_hash)\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const T& at(const K& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); }\n    \n    \n    \n    \n    T& operator[](const Key& key) { return m_ht[key]; }    \n    T& operator[](Key&& key) { return m_ht[std::move(key)]; }\n    \n    \n    \n    \n    size_type count(const Key& key) const { return m_ht.count(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    size_type count(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.count(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type count(const K& key) const { return m_ht.count(key); }\n    \n    /**\n     * @copydoc count(const K& key) const\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. 
The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */     \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type count(const K& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); }\n    \n    \n    \n    \n    iterator find(const Key& key) { return m_ht.find(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    const_iterator find(const Key& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const Key& key, std::size_t precalculated_hash)\n     */\n    const_iterator find(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.find(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key) { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    /**\n     * @copydoc find(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.find(key, precalculated_hash); \n    }\n    \n    \n    \n    \n    bool contains(const Key& key) const { return m_ht.contains(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    bool contains(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.contains(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. 
\n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    bool contains(const K& key) const { return m_ht.contains(key); }\n    \n    /**\n     * @copydoc contains(const K& key) const\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    bool contains(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.contains(key, precalculated_hash); \n    }\n    \n    \n    \n    \n    std::pair<iterator, iterator> equal_range(const Key& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    std::pair<iterator, iterator> equal_range(const Key& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    std::pair<const_iterator, const_iterator> equal_range(const Key& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const Key& key, std::size_t precalculated_hash)\n     */\n    std::pair<const_iterator, const_iterator> equal_range(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n\n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. 
\n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<iterator, iterator> equal_range(const K& key) { return m_ht.equal_range(key); }\n    \n    \n    /**\n     * @copydoc equal_range(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<iterator, iterator> equal_range(const K& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<const_iterator, const_iterator> equal_range(const K& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const K& key, std::size_t precalculated_hash)\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<const_iterator, const_iterator> equal_range(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    \n    \n    \n    /*\n     * Bucket interface \n     */\n    size_type bucket_count() const { return m_ht.bucket_count(); }\n    size_type max_bucket_count() const { return m_ht.max_bucket_count(); }\n    \n    \n    /*\n     *  Hash policy \n     */\n    float load_factor() const { return m_ht.load_factor(); }\n    \n    float min_load_factor() const { return m_ht.min_load_factor(); }\n    float max_load_factor() const { return 
m_ht.max_load_factor(); }\n    \n    /**\n     * Set the `min_load_factor` to `ml`. When the `load_factor` of the map goes\n     * below `min_load_factor` after some erase operations, the map will be\n     * shrunk when an insertion occurs. The erase method itself never shrinks\n     * the map.\n     * \n     * The default value of `min_load_factor` is 0.0f, the map never shrinks by default.\n     */\n    void min_load_factor(float ml) { m_ht.min_load_factor(ml); }\n    void max_load_factor(float ml) { m_ht.max_load_factor(ml); }\n    \n    void rehash(size_type count) { m_ht.rehash(count); }\n    void reserve(size_type count) { m_ht.reserve(count); }\n    \n    \n    /*\n     * Observers\n     */\n    hasher hash_function() const { return m_ht.hash_function(); }\n    key_equal key_eq() const { return m_ht.key_eq(); }\n    \n    /*\n     * Other\n     */\n    \n    /**\n     * Convert a const_iterator to an iterator.\n     */\n    iterator mutable_iterator(const_iterator pos) {\n        return m_ht.mutable_iterator(pos);\n    }\n    \n    friend bool operator==(const robin_map& lhs, const robin_map& rhs) {\n        if(lhs.size() != rhs.size()) {\n            return false;\n        }\n        \n        for(const auto& element_lhs: lhs) {\n            const auto it_element_rhs = rhs.find(element_lhs.first);\n            if(it_element_rhs == rhs.cend() || element_lhs.second != it_element_rhs->second) {\n                return false;\n            }\n        }\n        \n        return true;\n    }\n\n    friend bool operator!=(const robin_map& lhs, const robin_map& rhs) {\n        return !operator==(lhs, rhs);\n    }\n\n    friend void swap(robin_map& lhs, robin_map& rhs) {\n        lhs.swap(rhs);\n    }\n    \nprivate:\n    ht m_ht;\n};\n\n\n/**\n * Same as `tsl::robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash, tsl::rh::prime_growth_policy>`.\n */\ntemplate<class Key, \n         class T, \n         class Hash = std::hash<Key>,\n         class KeyEqual = 
std::equal_to<Key>,\n         class Allocator = std::allocator<std::pair<Key, T>>,\n         bool StoreHash = false>\nusing robin_pg_map = robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash, tsl::rh::prime_growth_policy>;\n\n} // end namespace tsl\n\n#endif\n"
  },
  {
    "path": "Thirdparty/tessil-src/include/tsl/robin_set.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ROBIN_SET_H\n#define TSL_ROBIN_SET_H\n\n\n#include <cstddef>\n#include <functional>\n#include <initializer_list>\n#include <memory>\n#include <type_traits>\n#include <utility>\n#include \"robin_hash.h\"\n\n\nnamespace tsl {\n\n    \n/**\n * Implementation of a hash set using open-addressing and the robin hood hashing algorithm with backward shift deletion.\n * \n * For operations modifying the hash set (insert, erase, rehash, ...), the strong exception guarantee \n * is only guaranteed when the expression `std::is_nothrow_swappable<Key>::value &&\n * std::is_nothrow_move_constructible<Key>::value` is true, otherwise if an exception\n * is thrown during the swap or the move, the hash set may end up in a undefined state. 
Per the standard\n * a `Key` with a noexcept copy constructor and no move constructor also satisfies the \n * `std::is_nothrow_move_constructible<Key>::value` criterion (and will thus guarantee the \n * strong exception for the set).\n * \n * When `StoreHash` is true, 32 bits of the hash are stored alongside the values. It can improve \n * the performance during lookups if the `KeyEqual` function takes time (or engenders a cache-miss for example) \n * as we then compare the stored hashes before comparing the keys. When `tsl::rh::power_of_two_growth_policy` is used\n * as `GrowthPolicy`, it may also speed-up the rehash process as we can avoid to recalculate the hash. \n * When it is detected that storing the hash will not incur any memory penalty due to alignment (i.e. \n * `sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, true>) == \n * sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, false>)`) and `tsl::rh::power_of_two_growth_policy` is\n * used, the hash will be stored even if `StoreHash` is false so that we can speed-up the rehash (but it will\n * not be used on lookups unless `StoreHash` is true).\n * \n * `GrowthPolicy` defines how the set grows and consequently how a hash value is mapped to a bucket. \n * By default the set uses `tsl::rh::power_of_two_growth_policy`. 
This policy keeps the number of buckets \n * to a power of two and uses a mask to set the hash to a bucket instead of the slow modulo.\n * Other growth policies are available and you may define your own growth policy, \n * check `tsl::rh::power_of_two_growth_policy` for the interface.\n * \n * `Key` must be swappable.\n * \n * `Key` must be copy and/or move constructible.\n * \n * If the destructor of `Key` throws an exception, the behaviour of the class is undefined.\n * \n * Iterators invalidation:\n *  - clear, operator=, reserve, rehash: always invalidate the iterators.\n *  - insert, emplace, emplace_hint, operator[]: if there is an effective insert, invalidate the iterators.\n *  - erase: always invalidate the iterators.\n */\ntemplate<class Key, \n         class Hash = std::hash<Key>,\n         class KeyEqual = std::equal_to<Key>,\n         class Allocator = std::allocator<Key>,\n         bool StoreHash = false,\n         class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>>\nclass robin_set {\nprivate:\n    template<typename U>\n    using has_is_transparent = tsl::detail_robin_hash::has_is_transparent<U>;\n    \n    class KeySelect {\n    public:\n        using key_type = Key;\n        \n        const key_type& operator()(const Key& key) const noexcept {\n            return key;\n        }\n        \n        key_type& operator()(Key& key) noexcept {\n            return key;\n        }\n    };\n    \n    using ht = detail_robin_hash::robin_hash<Key, KeySelect, void,\n                                             Hash, KeyEqual, Allocator, StoreHash, GrowthPolicy>;\n            \npublic:\n    using key_type = typename ht::key_type;\n    using value_type = typename ht::value_type;\n    using size_type = typename ht::size_type;\n    using difference_type = typename ht::difference_type;\n    using hasher = typename ht::hasher;\n    using key_equal = typename ht::key_equal;\n    using allocator_type = typename ht::allocator_type;\n    using reference = 
typename ht::reference;\n    using const_reference = typename ht::const_reference;\n    using pointer = typename ht::pointer;\n    using const_pointer = typename ht::const_pointer;\n    using iterator = typename ht::iterator;\n    using const_iterator = typename ht::const_iterator;\n\n    \n    /*\n     * Constructors\n     */\n    robin_set(): robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE) {\n    }\n    \n    explicit robin_set(size_type bucket_count, \n                       const Hash& hash = Hash(),\n                       const KeyEqual& equal = KeyEqual(),\n                       const Allocator& alloc = Allocator()): \n                    m_ht(bucket_count, hash, equal, alloc)\n    {\n    }\n    \n    robin_set(size_type bucket_count,\n              const Allocator& alloc): robin_set(bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    robin_set(size_type bucket_count,\n              const Hash& hash,\n              const Allocator& alloc): robin_set(bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n    \n    explicit robin_set(const Allocator& alloc): robin_set(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {\n    }\n    \n    template<class InputIt>\n    robin_set(InputIt first, InputIt last,\n              size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n              const Hash& hash = Hash(),\n              const KeyEqual& equal = KeyEqual(),\n              const Allocator& alloc = Allocator()): robin_set(bucket_count, hash, equal, alloc)\n    {\n        insert(first, last);\n    }\n    \n    template<class InputIt>\n    robin_set(InputIt first, InputIt last,\n              size_type bucket_count,\n              const Allocator& alloc): robin_set(first, last, bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    template<class InputIt>\n    robin_set(InputIt first, InputIt last,\n              size_type bucket_count,\n              const Hash& hash,\n              const Allocator& alloc): robin_set(first, last, bucket_count, hash, 
KeyEqual(), alloc)\n    {\n    }\n\n    robin_set(std::initializer_list<value_type> init,\n              size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n              const Hash& hash = Hash(),\n              const KeyEqual& equal = KeyEqual(),\n              const Allocator& alloc = Allocator()): \n          robin_set(init.begin(), init.end(), bucket_count, hash, equal, alloc)\n    {\n    }\n\n    robin_set(std::initializer_list<value_type> init,\n              size_type bucket_count,\n              const Allocator& alloc): \n          robin_set(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n\n    robin_set(std::initializer_list<value_type> init,\n              size_type bucket_count,\n              const Hash& hash,\n              const Allocator& alloc): \n          robin_set(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n\n    \n    robin_set& operator=(std::initializer_list<value_type> ilist) {\n        m_ht.clear();\n        \n        m_ht.reserve(ilist.size());\n        m_ht.insert(ilist.begin(), ilist.end());\n        \n        return *this;\n    }\n    \n    allocator_type get_allocator() const { return m_ht.get_allocator(); }\n    \n    \n    /*\n     * Iterators\n     */\n    iterator begin() noexcept { return m_ht.begin(); }\n    const_iterator begin() const noexcept { return m_ht.begin(); }\n    const_iterator cbegin() const noexcept { return m_ht.cbegin(); }\n    \n    iterator end() noexcept { return m_ht.end(); }\n    const_iterator end() const noexcept { return m_ht.end(); }\n    const_iterator cend() const noexcept { return m_ht.cend(); }\n    \n    \n    /*\n     * Capacity\n     */\n    bool empty() const noexcept { return m_ht.empty(); }\n    size_type size() const noexcept { return m_ht.size(); }\n    size_type max_size() const noexcept { return m_ht.max_size(); }\n    \n    /*\n     * Modifiers\n     */\n    void clear() noexcept { m_ht.clear(); }\n    \n    \n    \n 
   \n    std::pair<iterator, bool> insert(const value_type& value) { \n        return m_ht.insert(value); \n    }\n    \n    std::pair<iterator, bool> insert(value_type&& value) { \n        return m_ht.insert(std::move(value)); \n    }\n    \n    iterator insert(const_iterator hint, const value_type& value) { \n        return m_ht.insert_hint(hint, value); \n    }\n    \n    iterator insert(const_iterator hint, value_type&& value) { \n        return m_ht.insert_hint(hint, std::move(value)); \n    }\n    \n    template<class InputIt>\n    void insert(InputIt first, InputIt last) { \n        m_ht.insert(first, last);\n    }\n    \n    void insert(std::initializer_list<value_type> ilist) { \n        m_ht.insert(ilist.begin(), ilist.end()); \n    }\n\n    \n    \n    \n    /**\n     * Due to the way elements are stored, emplace will need to move or copy the key-value once.\n     * The method is equivalent to insert(value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    std::pair<iterator, bool> emplace(Args&&... args) { \n        return m_ht.emplace(std::forward<Args>(args)...); \n    }\n    \n    \n    \n    /**\n     * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once.\n     * The method is equivalent to insert(hint, value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    iterator emplace_hint(const_iterator hint, Args&&... 
args) {\n        return m_ht.emplace_hint(hint, std::forward<Args>(args)...);\n    }\n    \n    \n    \n    iterator erase(iterator pos) { return m_ht.erase(pos); }\n    iterator erase(const_iterator pos) { return m_ht.erase(pos); }\n    iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); }\n    size_type erase(const key_type& key) { return m_ht.erase(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash.\n     */    \n    size_type erase(const key_type& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key) { return m_ht.erase(key); }\n    \n    /**\n     * @copydoc erase(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup to the value if you already have the hash.\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    \n    \n    void swap(robin_set& other) { other.m_ht.swap(m_ht); }\n    \n    \n    \n    /*\n     * Lookup\n     */\n    size_type count(const Key& key) const { return m_ht.count(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. 
The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    size_type count(const Key& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type count(const K& key) const { return m_ht.count(key); }\n    \n    /**\n     * @copydoc count(const K& key) const\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type count(const K& key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); }\n    \n    \n    \n    \n    iterator find(const Key& key) { return m_ht.find(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup if you already have the hash.\n     */\n    iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    const_iterator find(const Key& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const Key& key, std::size_t precalculated_hash)\n     */\n    const_iterator find(const Key& key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key) { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    /**\n     * @copydoc find(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); }\n    \n    \n    \n    \n    bool contains(const Key& key) const { return m_ht.contains(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    bool contains(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.contains(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    bool contains(const K& key) const { return m_ht.contains(key); }\n    \n    /**\n     * @copydoc contains(const K& key) const\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Useful to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    bool contains(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.contains(key, precalculated_hash); \n    }\n    \n    \n    \n    \n    std::pair<iterator, iterator> equal_range(const Key& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup if you already have the hash.\n     */\n    std::pair<iterator, iterator> equal_range(const Key& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    std::pair<const_iterator, const_iterator> equal_range(const Key& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const Key& key, std::size_t precalculated_hash)\n     */\n    std::pair<const_iterator, const_iterator> equal_range(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<iterator, iterator> equal_range(const K& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Useful to speed-up the lookup if you already have the hash.\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<iterator, iterator> equal_range(const K& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<const_iterator, const_iterator> equal_range(const K& key) const { return m_ht.equal_range(key); }\n\n    /**\n     * @copydoc equal_range(const K& key, std::size_t precalculated_hash)\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<const_iterator, const_iterator> equal_range(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    \n    \n\n    /*\n     * Bucket interface \n     */\n    size_type bucket_count() const { return m_ht.bucket_count(); }\n    size_type max_bucket_count() const { return m_ht.max_bucket_count(); }\n    \n    \n    /*\n     *  Hash policy \n     */\n    float load_factor() const { return m_ht.load_factor(); }\n    \n    float min_load_factor() const { return m_ht.min_load_factor(); }\n    float max_load_factor() const { return m_ht.max_load_factor(); }\n    \n    /**\n     * Set the `min_load_factor` to `ml`. When the `load_factor` of the set goes\n     * below `min_load_factor` after some erase operations, the set will be\n     * shrunk when an insertion occurs. 
The erase method itself never shrinks\n     * the set.\n     * \n     * The default value of `min_load_factor` is 0.0f, the set never shrinks by default.\n     */\n    void min_load_factor(float ml) { m_ht.min_load_factor(ml); }\n    void max_load_factor(float ml) { m_ht.max_load_factor(ml); }\n    \n    void rehash(size_type count) { m_ht.rehash(count); }\n    void reserve(size_type count) { m_ht.reserve(count); }\n    \n    \n    /*\n     * Observers\n     */\n    hasher hash_function() const { return m_ht.hash_function(); }\n    key_equal key_eq() const { return m_ht.key_eq(); }\n    \n    \n    /*\n     * Other\n     */\n    \n    /**\n     * Convert a const_iterator to an iterator.\n     */\n    iterator mutable_iterator(const_iterator pos) {\n        return m_ht.mutable_iterator(pos);\n    }\n    \n    friend bool operator==(const robin_set& lhs, const robin_set& rhs) {\n        if(lhs.size() != rhs.size()) {\n            return false;\n        }\n        \n        for(const auto& element_lhs: lhs) {\n            const auto it_element_rhs = rhs.find(element_lhs);\n            if(it_element_rhs == rhs.cend()) {\n                return false;\n            }\n        }\n        \n        return true;\n    }\n\n    friend bool operator!=(const robin_set& lhs, const robin_set& rhs) {\n        return !operator==(lhs, rhs);\n    }\n\n    friend void swap(robin_set& lhs, robin_set& rhs) {\n        lhs.swap(rhs);\n    }\n    \nprivate:\n    ht m_ht;    \n};\n\n\n/**\n * Same as `tsl::robin_set<Key, Hash, KeyEqual, Allocator, StoreHash, tsl::rh::prime_growth_policy>`.\n */\ntemplate<class Key, \n         class Hash = std::hash<Key>,\n         class KeyEqual = std::equal_to<Key>,\n         class Allocator = std::allocator<Key>,\n         bool StoreHash = false>\nusing robin_pg_set = robin_set<Key, Hash, KeyEqual, Allocator, StoreHash, tsl::rh::prime_growth_policy>;\n\n} // end namespace tsl\n\n#endif\n \n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.8)\n\nproject(tsl_robin_map_tests)\n\nadd_executable(tsl_robin_map_tests \"main.cpp\" \n                                   \"custom_allocator_tests.cpp\"\n                                   \"policy_tests.cpp\"\n                                   \"robin_map_tests.cpp\" \n                                   \"robin_set_tests.cpp\")\n\ntarget_compile_features(tsl_robin_map_tests PRIVATE cxx_std_11)\n                                    \nif(CMAKE_CXX_COMPILER_ID MATCHES \"Clang\" OR CMAKE_CXX_COMPILER_ID MATCHES \"GNU\")\n    target_compile_options(tsl_robin_map_tests PRIVATE -Werror -Wall -Wextra -Wold-style-cast -DTSL_DEBUG -UNDEBUG)\nelseif(CMAKE_CXX_COMPILER_ID MATCHES \"MSVC\")\n    target_compile_options(tsl_robin_map_tests PRIVATE /bigobj /WX /W3 /DTSL_DEBUG /UNDEBUG)\nendif()\n\n# Boost::unit_test_framework\nfind_package(Boost 1.54.0 REQUIRED COMPONENTS unit_test_framework)\ntarget_link_libraries(tsl_robin_map_tests PRIVATE Boost::unit_test_framework)   \n\n# tsl::robin_map\nadd_subdirectory(../ ${CMAKE_CURRENT_BINARY_DIR}/tsl)\ntarget_link_libraries(tsl_robin_map_tests PRIVATE tsl::robin_map)  \n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/custom_allocator_tests.cpp",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#define BOOST_TEST_DYN_LINK\n\n\n#include <boost/test/unit_test.hpp>\n#include <cstddef>\n#include <cstdlib>\n#include <functional>\n#include <limits>\n#include <stdexcept>\n#include <type_traits>\n#include <utility>\n\n#include <tsl/robin_map.h>\n#include \"utils.h\"\n\n\nstatic std::size_t nb_custom_allocs = 0;\n\ntemplate<typename T>\nclass custom_allocator {\npublic:\n    using value_type = T;\n    using pointer = T*;\n    using const_pointer = const T*;\n    using reference = T&;\n    using const_reference = const T&;\n    using size_type = std::size_t;\n    using difference_type = std::ptrdiff_t;\n    using propagate_on_container_move_assignment = std::true_type;\n\n\n    template<typename U> \n    struct rebind { \n        using other = custom_allocator<U>;\n    
};\n    \n    custom_allocator() = default;\n    custom_allocator(const custom_allocator&) = default;\n    \n    template<typename U> \n    custom_allocator(const custom_allocator<U>&) {\n    }\n\n    pointer address(reference x) const noexcept {\n        return &x;\n    }\n    \n    const_pointer address(const_reference x) const noexcept {\n        return &x;\n    }\n\n    pointer allocate(size_type n, const void* /*hint*/ = 0) {\n        nb_custom_allocs++;\n        \n        pointer ptr = static_cast<pointer>(std::malloc(n * sizeof(T)));\n        if(ptr == nullptr) {\n#ifdef TSL_RH_NO_EXCEPTIONS\n            std::abort();\n#else\n            throw std::bad_alloc();\n#endif\n        }\n        \n        return ptr;\n    }\n\n    void deallocate(T* p, size_type /*n*/) {\n        std::free(p);\n    }\n    \n    size_type max_size() const noexcept {\n        return std::numeric_limits<size_type>::max()/sizeof(value_type);\n    }\n\n    template<typename U, typename... Args>\n    void construct(U* p, Args&&... 
args) {\n        ::new(static_cast<void *>(p)) U(std::forward<Args>(args)...);\n    }\n\n    template<typename U>\n    void destroy(U* p) {\n        p->~U();\n    }\n};\n\ntemplate <class T, class U>\nbool operator==(const custom_allocator<T>&, const custom_allocator<U>&) { \n    return true; \n}\n\ntemplate <class T, class U>\nbool operator!=(const custom_allocator<T>&, const custom_allocator<U>&) { \n    return false; \n}\n\n\n        \n//TODO Avoid overloading new to check number of global new\n// static std::size_t nb_global_new = 0;\n// void* operator new(std::size_t sz) {\n//     nb_global_new++;\n//     return std::malloc(sz);\n// }\n// \n// void operator delete(void* ptr) noexcept {\n//     std::free(ptr);\n// }\n\nBOOST_AUTO_TEST_SUITE(test_custom_allocator)\n\nBOOST_AUTO_TEST_CASE(test_custom_allocator_1) {\n//    nb_global_new = 0;\n    nb_custom_allocs = 0;\n    \n    tsl::robin_map<int, int, std::hash<int>, std::equal_to<int>, \n                    custom_allocator<std::pair<int, int>>> map;\n    \n    const int nb_elements = 1000;\n    for(int i = 0; i < nb_elements; i++) {\n        map.insert({i, i*2});\n    }\n    \n    BOOST_CHECK_NE(nb_custom_allocs, 0);\n//    BOOST_CHECK_EQUAL(nb_global_new, 0);\n}\n\nBOOST_AUTO_TEST_SUITE_END()\n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/main.cpp",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#define BOOST_TEST_MODULE robin_map_tests\n#define BOOST_TEST_DYN_LINK\n\n\n#include <boost/test/unit_test.hpp>\n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/policy_tests.cpp",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#define BOOST_TEST_DYN_LINK\n\n#include <boost/test/unit_test.hpp>\n#include <boost/mpl/list.hpp> \n#include <cstddef>\n#include <limits>\n#include <ratio>\n#include <stdexcept>\n\n#include <tsl/robin_growth_policy.h>\n#include \"utils.h\"\n\n\nBOOST_AUTO_TEST_SUITE(test_policy)\n\nusing test_types = boost::mpl::list<tsl::rh::power_of_two_growth_policy<2>,\n                                    tsl::rh::power_of_two_growth_policy<4>,\n                                    tsl::rh::prime_growth_policy,\n                                    tsl::rh::mod_growth_policy<>,\n                                    tsl::rh::mod_growth_policy<std::ratio<7,2>>>;\n\n\nBOOST_AUTO_TEST_CASE_TEMPLATE(test_policy, Policy, test_types) {\n    // Call next_bucket_count() on the policy until we 
reach its max_bucket_count()\n    std::size_t bucket_count = 0;\n    Policy policy(bucket_count);\n    \n    BOOST_CHECK_EQUAL(policy.bucket_for_hash(0), 0);\n    BOOST_CHECK_EQUAL(bucket_count, 0);\n    \n#ifndef TSL_RH_NO_EXCEPTIONS\n    bool exception_thrown = false;\n    try {\n        while(true) {\n            const std::size_t previous_bucket_count = bucket_count;\n            \n            bucket_count = policy.next_bucket_count();\n            policy = Policy(bucket_count);\n            \n            BOOST_CHECK_EQUAL(policy.bucket_for_hash(0), 0);\n            BOOST_CHECK(bucket_count > previous_bucket_count);\n        }\n    }\n    catch(const std::length_error& ) {\n        exception_thrown = true;\n    }\n    \n    BOOST_CHECK(exception_thrown);\n#endif\n}\n\nBOOST_AUTO_TEST_CASE_TEMPLATE(test_policy_min_bucket_count, Policy, test_types) {\n    // Check policy when a bucket_count of 0 is asked.\n    std::size_t bucket_count = 0;\n    Policy policy(bucket_count);\n    \n    BOOST_CHECK_EQUAL(policy.bucket_for_hash(0), 0);\n}\n\nBOOST_AUTO_TEST_CASE_TEMPLATE(test_policy_max_bucket_count, Policy, test_types) {\n    // Test a bucket_count equals to the max_bucket_count limit and above\n    std::size_t bucket_count = 0;\n    Policy policy(bucket_count);\n    \n    \n    bucket_count = policy.max_bucket_count();\n    Policy policy2(bucket_count);\n    \n    \n    bucket_count = std::numeric_limits<std::size_t>::max();\n    TSL_RH_CHECK_THROW((Policy(bucket_count)), std::length_error);\n    \n    \n    bucket_count = policy.max_bucket_count() + 1;\n    TSL_RH_CHECK_THROW((Policy(bucket_count)), std::length_error);\n}\n\n\nBOOST_AUTO_TEST_SUITE_END()\n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/robin_map_tests.cpp",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#define BOOST_TEST_DYN_LINK\n\n\n#include <boost/test/unit_test.hpp>\n#include <boost/mpl/list.hpp>\n#include <cstddef>\n#include <cstdint>\n#include <functional>\n#include <iterator>\n#include <memory>\n#include <stdexcept>\n#include <string>\n#include <tuple>\n#include <type_traits>\n#include <utility>\n#include <vector>\n\n#include <tsl/robin_map.h>\n#include \"utils.h\"\n\n\nBOOST_AUTO_TEST_SUITE(test_robin_map)\n\n\n\nusing test_types = boost::mpl::list<\n                        tsl::robin_map<std::int64_t, std::int64_t>,\n                        tsl::robin_map<std::string, std::string>,\n                        // Test with hash having a lot of collisions\n                        tsl::robin_map<std::int64_t, std::int64_t, mod_hash<9>>,\n                        
tsl::robin_map<std::string, std::string, mod_hash<9>>,\n                        tsl::robin_map<move_only_test, move_only_test, mod_hash<9>>,\n                        tsl::robin_map<copy_only_test, copy_only_test, mod_hash<9>>,\n                        tsl::robin_map<self_reference_member_test, self_reference_member_test, mod_hash<9>>,\n                        \n                        // other GrowthPolicy\n                        tsl::robin_map<move_only_test, move_only_test, mod_hash<9>, std::equal_to<move_only_test>,\n                                      std::allocator<std::pair<move_only_test, move_only_test>>, true,\n                                      tsl::rh::power_of_two_growth_policy<4>>,\n                        tsl::robin_pg_map<move_only_test, move_only_test, mod_hash<9>>,\n                        tsl::robin_map<move_only_test, move_only_test, mod_hash<9>, std::equal_to<move_only_test>,\n                                      std::allocator<std::pair<move_only_test, move_only_test>>, false,\n                                      tsl::rh::mod_growth_policy<>>,\n                                      \n                        tsl::robin_map<copy_only_test, copy_only_test, mod_hash<9>, std::equal_to<copy_only_test>,\n                                      std::allocator<std::pair<copy_only_test, copy_only_test>>, false,\n                                      tsl::rh::power_of_two_growth_policy<4>>,\n                        tsl::robin_pg_map<copy_only_test, copy_only_test, mod_hash<9>>,\n                        tsl::robin_map<copy_only_test, copy_only_test, mod_hash<9>, std::equal_to<copy_only_test>,\n                                      std::allocator<std::pair<copy_only_test, copy_only_test>>, true,\n                                      tsl::rh::mod_growth_policy<>>\n                    >;\n                                    \n\n/**\n * insert\n */                                      \nBOOST_AUTO_TEST_CASE_TEMPLATE(test_insert, HMap, test_types) {\n   
 // insert x values, insert them again, check values\n    using key_t = typename HMap::key_type; using value_t = typename HMap:: mapped_type;\n    \n    const std::size_t nb_values = 1000;\n    HMap map(0);\n    BOOST_CHECK_EQUAL(map.bucket_count(), 0);\n    \n    typename HMap::iterator it;\n    bool inserted;\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        std::tie(it, inserted) = map.insert({utils::get_key<key_t>(i), utils::get_value<value_t>(i)});\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<key_t>(i));\n        BOOST_CHECK_EQUAL(it->second, utils::get_value<value_t>(i));\n        BOOST_CHECK(inserted);\n    }\n    BOOST_CHECK_EQUAL(map.size(), nb_values);\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        std::tie(it, inserted) = map.insert({utils::get_key<key_t>(i), utils::get_value<value_t>(i + 1)});\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<key_t>(i));\n        BOOST_CHECK_EQUAL(it->second, utils::get_value<value_t>(i));\n        BOOST_CHECK(!inserted);\n    }\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        it = map.find(utils::get_key<key_t>(i));\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<key_t>(i));\n        BOOST_CHECK_EQUAL(it->second, utils::get_value<value_t>(i));\n    }\n}\n\nBOOST_AUTO_TEST_CASE(test_range_insert) {\n    // create a vector<std::pair> of values to insert, insert part of them in the map, check values\n    const int nb_values = 1000;\n    std::vector<std::pair<int, int>> values_to_insert(nb_values);\n    for(int i = 0; i < nb_values; i++) {\n        values_to_insert[i] = std::make_pair(i, i+1);\n    }\n    \n    \n    tsl::robin_map<int, int> map = {{-1, 1}, {-2, 2}};\n    map.insert(std::next(values_to_insert.begin(), 10), values_to_insert.end() - 5);\n    \n    \n    BOOST_CHECK_EQUAL(map.size(), 987);\n    \n    BOOST_CHECK_EQUAL(map.at(-1), 1);\n    BOOST_CHECK_EQUAL(map.at(-2), 2);\n    \n    for(int i = 10; i < nb_values - 
5; i++) {\n        BOOST_CHECK_EQUAL(map.at(i), i+1);\n    }\n}\n\n\nBOOST_AUTO_TEST_CASE(test_insert_with_hint) {\n    tsl::robin_map<int, int> map{{1, 0}, {2, 1}, {3, 2}};\n    \n    // Wrong hint\n    BOOST_CHECK(map.insert(map.find(2), std::make_pair(3, 4)) == map.find(3));\n    \n    // Good hint\n    BOOST_CHECK(map.insert(map.find(2), std::make_pair(2, 4)) == map.find(2));\n    \n    // end() hint\n    BOOST_CHECK(map.insert(map.find(10), std::make_pair(2, 4)) == map.find(2));\n    \n    BOOST_CHECK_EQUAL(map.size(), 3);\n    \n    \n    // end() hint, new value\n    BOOST_CHECK_EQUAL(map.insert(map.find(10), std::make_pair(4, 3))->first, 4);\n    \n    // Wrong hint, new value\n    BOOST_CHECK_EQUAL(map.insert(map.find(2), std::make_pair(5, 4))->first, 5);\n    \n    BOOST_CHECK_EQUAL(map.size(), 5);\n}\n\n/**\n * emplace_hint\n */\nBOOST_AUTO_TEST_CASE(test_emplace_hint) {\n    tsl::robin_map<int, int> map{{1, 0}, {2, 1}, {3, 2}};\n    \n    // Wrong hint\n    BOOST_CHECK(map.emplace_hint(map.find(2), std::piecewise_construct, \n                                              std::forward_as_tuple(3), std::forward_as_tuple(4)) == map.find(3));\n    \n    // Good hint\n    BOOST_CHECK(map.emplace_hint(map.find(2), std::piecewise_construct, \n                                              std::forward_as_tuple(2), std::forward_as_tuple(4)) == map.find(2));\n    \n    // end() hint\n    BOOST_CHECK(map.emplace_hint(map.find(10), std::piecewise_construct, \n                                               std::forward_as_tuple(2), std::forward_as_tuple(4)) == map.find(2));\n    \n    BOOST_CHECK_EQUAL(map.size(), 3);\n    \n    \n    // end() hint, new value\n    BOOST_CHECK_EQUAL(map.emplace_hint(map.find(10), std::piecewise_construct, \n                                                    std::forward_as_tuple(4), std::forward_as_tuple(3))->first, 4);\n    \n    // Wrong hint, new value\n    BOOST_CHECK_EQUAL(map.emplace_hint(map.find(2), std::piecewise_construct, 
\n                                                    std::forward_as_tuple(5), std::forward_as_tuple(4))->first, 5);\n    \n    BOOST_CHECK_EQUAL(map.size(), 5);\n}\n\n/**\n * emplace\n */\nBOOST_AUTO_TEST_CASE(test_emplace) {\n    tsl::robin_map<std::int64_t, move_only_test> map;\n    tsl::robin_map<std::int64_t, move_only_test>::iterator it;\n    bool inserted;\n    \n    \n    std::tie(it, inserted) = map.emplace(std::piecewise_construct,\n                                         std::forward_as_tuple(10),\n                                         std::forward_as_tuple(1));\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    BOOST_CHECK(inserted);\n    \n    \n    std::tie(it, inserted) = map.emplace(std::piecewise_construct,\n                                         std::forward_as_tuple(10),\n                                         std::forward_as_tuple(3));\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    BOOST_CHECK(!inserted);\n}\n\n\n/**\n * try_emplace\n */\nBOOST_AUTO_TEST_CASE(test_try_emplace) {\n    tsl::robin_map<std::int64_t, move_only_test> map;\n    tsl::robin_map<std::int64_t, move_only_test>::iterator it;\n    bool inserted;\n    \n    \n    std::tie(it, inserted) = map.try_emplace(10, 1);\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    BOOST_CHECK(inserted);\n    \n    \n    std::tie(it, inserted) = map.try_emplace(10, 3);\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    BOOST_CHECK(!inserted);\n}\n\nBOOST_AUTO_TEST_CASE(test_try_emplace_2) {\n    // Insert x values with try_emplace, insert them again, check with find.\n    tsl::robin_map<std::string, move_only_test> map;\n    tsl::robin_map<std::string, move_only_test>::iterator it;\n    bool inserted;\n    \n    const std::size_t nb_values = 1000;\n    for(std::size_t i = 0; i < nb_values; i++) 
{\n        std::tie(it, inserted) = map.try_emplace(utils::get_key<std::string>(i), i);\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<std::string>(i));\n        BOOST_CHECK_EQUAL(it->second, move_only_test(i));\n        BOOST_CHECK(inserted);\n    }\n    BOOST_CHECK_EQUAL(map.size(), nb_values);\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        std::tie(it, inserted) = map.try_emplace(utils::get_key<std::string>(i), i + 1);\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<std::string>(i));\n        BOOST_CHECK_EQUAL(it->second, move_only_test(i));\n        BOOST_CHECK(!inserted);\n    }\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        it = map.find(utils::get_key<std::string>(i));\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<std::string>(i));\n        BOOST_CHECK_EQUAL(it->second, move_only_test(i));\n    }\n}\n\nBOOST_AUTO_TEST_CASE(test_try_emplace_hint) {\n    tsl::robin_map<std::int64_t, move_only_test> map(0);\n    \n    // end() hint, new value\n    auto it = map.try_emplace(map.find(10), 10, 1);\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    \n    // Good hint\n    it = map.try_emplace(map.find(10), 10, 3);\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    \n    // Wrong hint, new value\n    it = map.try_emplace(map.find(10), 1, 3);\n    BOOST_CHECK_EQUAL(it->first, 1);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(3));\n}\n\n\n/**\n * insert_or_assign\n */\nBOOST_AUTO_TEST_CASE(test_insert_or_assign) {\n    tsl::robin_map<std::int64_t, move_only_test> map;\n    tsl::robin_map<std::int64_t, move_only_test>::iterator it;    \n    bool inserted;\n    \n    \n    std::tie(it, inserted) = map.insert_or_assign(10, move_only_test(1));\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    BOOST_CHECK(inserted);\n    \n    \n    
std::tie(it, inserted) = map.insert_or_assign(10, move_only_test(3));\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(3));\n    BOOST_CHECK(!inserted);\n}\n\n\nBOOST_AUTO_TEST_CASE(test_insert_or_assign_hint) {\n    tsl::robin_map<std::int64_t, move_only_test> map(0);\n    \n    // end() hint, new value\n    auto it = map.insert_or_assign(map.find(10), 10, move_only_test(1));\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(1));\n    \n    // Good hint\n    it = map.insert_or_assign(map.find(10), 10, move_only_test(3));\n    BOOST_CHECK_EQUAL(it->first, 10);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(3));\n    \n    // Bad hint, new value\n    it = map.insert_or_assign(map.find(10), 1, move_only_test(3));\n    BOOST_CHECK_EQUAL(it->first, 1);\n    BOOST_CHECK_EQUAL(it->second, move_only_test(3));\n}\n\n\n\n/**\n * erase\n */\nBOOST_AUTO_TEST_CASE(test_range_erase_all) {\n    // insert x values, delete all with iterators\n    using HMap = tsl::robin_map<std::string, std::int64_t>;\n    \n    const std::size_t nb_values = 1000;\n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    \n    auto it = map.erase(map.begin(), map.end());\n    BOOST_CHECK(it == map.end());\n    BOOST_CHECK(map.empty());\n}\n\nBOOST_AUTO_TEST_CASE(test_range_erase) {\n    // insert x values, delete all with iterators except 10 first and 780 last values\n    using HMap = tsl::robin_map<std::string, std::int64_t>;\n    \n    const std::size_t nb_values = 1000;\n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    \n    auto it_first = std::next(map.begin(), 10);\n    auto it_last = std::next(map.begin(), 220);\n    \n    auto it = map.erase(it_first, it_last);\n    BOOST_CHECK_EQUAL(std::distance(it, map.end()), 780);\n    BOOST_CHECK_EQUAL(map.size(), 790);\n    BOOST_CHECK_EQUAL(std::distance(map.begin(), map.end()), 790);\n    \n    for(auto& val: map) {\n        
BOOST_CHECK_EQUAL(map.count(val.first), 1);\n    }\n}\n\nBOOST_AUTO_TEST_CASE_TEMPLATE(test_erase_loop, HMap, test_types) {\n    // insert x values, delete all one by one with iterator\n    std::size_t nb_values = 1000;\n    \n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    HMap map2 = utils::get_filled_hash_map<HMap>(nb_values);\n    \n    auto it = map.begin();\n    // Use second map to check for key after delete as we may not copy the key with move-only types.\n    auto it2 = map2.begin();\n    while(it != map.end()) {\n        it = map.erase(it);\n        --nb_values;\n        \n        BOOST_CHECK_EQUAL(map.count(it2->first), 0);\n        BOOST_CHECK_EQUAL(map.size(), nb_values);\n        ++it2;\n    }\n    \n    BOOST_CHECK(map.empty());\n}\n\nBOOST_AUTO_TEST_CASE_TEMPLATE(test_erase_loop_range, HMap, test_types) {\n    // insert x values, delete all five by five with iterators\n    const std::size_t hop = 5;\n    std::size_t nb_values = 1000;\n    \n    BOOST_REQUIRE_EQUAL(nb_values % hop, 0);\n    \n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    \n    auto it = map.begin();\n    while(it != map.end()) {\n        it = map.erase(it, std::next(it, hop));\n        nb_values -= hop;\n        \n        BOOST_CHECK_EQUAL(map.size(), nb_values);\n    }\n    \n    BOOST_CHECK(map.empty());\n}\n\nBOOST_AUTO_TEST_CASE_TEMPLATE(test_insert_erase_insert, HMap, test_types) {\n    // insert x/2 values, delete x/4 values, insert x/2 values, find each value\n    using key_t = typename HMap::key_type; using value_t = typename HMap:: mapped_type;\n    \n    const std::size_t nb_values = 2000;\n    HMap map(10);\n    typename HMap::iterator it;\n    bool inserted;\n    \n    // Insert nb_values/2\n    for(std::size_t i = 0; i < nb_values/2; i++) {\n        std::tie(it, inserted) = map.insert({utils::get_key<key_t>(i), utils::get_value<value_t>(i)});\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<key_t>(i));\n        
BOOST_CHECK_EQUAL(it->second, utils::get_value<value_t>(i));\n        BOOST_CHECK(inserted);\n    }\n    BOOST_CHECK_EQUAL(map.size(), nb_values/2);\n    \n    \n    // Delete nb_values/4\n    for(std::size_t i = 0; i < nb_values/2; i++) {\n        if(i%2 == 0) {\n            BOOST_CHECK_EQUAL(map.erase(utils::get_key<key_t>(i)), 1);\n        }\n    }\n    BOOST_CHECK_EQUAL(map.size(), nb_values/4);\n    \n    \n    // Insert nb_values/2\n    for(std::size_t i = nb_values/2; i < nb_values; i++) {\n        std::tie(it, inserted) = map.insert({utils::get_key<key_t>(i), utils::get_value<value_t>(i)});\n        \n        BOOST_CHECK_EQUAL(it->first, utils::get_key<key_t>(i));\n        BOOST_CHECK_EQUAL(it->second, utils::get_value<value_t>(i));\n        BOOST_CHECK(inserted);\n    }\n    BOOST_CHECK_EQUAL(map.size(), nb_values-nb_values/4);\n    \n    \n    // Find\n    for(std::size_t i = 0; i < nb_values; i++) {\n        if(i%2 == 0 && i < nb_values/2) {\n            it = map.find(utils::get_key<key_t>(i));\n            \n            BOOST_CHECK(it == map.end());\n        }\n        else {\n            it = map.find(utils::get_key<key_t>(i));\n            \n            BOOST_REQUIRE(it != map.end());\n            BOOST_CHECK_EQUAL(it->first, utils::get_key<key_t>(i));\n            BOOST_CHECK_EQUAL(it->second, utils::get_value<value_t>(i));\n        }\n    }\n}\n\nBOOST_AUTO_TEST_CASE(test_range_erase_same_iterators) {\n    // insert x values, test erase with same iterator as each parameter, check if returned mutable iterator is valid.\n    const std::size_t nb_values = 100;\n    auto map = utils::get_filled_hash_map<tsl::robin_map<std::int64_t, std::int64_t>>(nb_values);\n    \n    tsl::robin_map<std::int64_t, std::int64_t>::const_iterator it_const = map.cbegin();\n    std::advance(it_const, 10);\n    \n    tsl::robin_map<std::int64_t, std::int64_t>::iterator it_mutable = map.erase(it_const, it_const);\n    BOOST_CHECK(it_const == it_mutable);\n    
BOOST_CHECK(map.mutable_iterator(it_const) == it_mutable);\n    BOOST_CHECK_EQUAL(map.size(), 100);\n    \n    it_mutable.value() = -100;\n    BOOST_CHECK_EQUAL(it_const.value(), -100);\n}\n\n/**\n * max_load_factor\n */\nBOOST_AUTO_TEST_CASE(test_max_load_factor_extreme_factors) {\n    tsl::robin_map<std::int64_t, std::int64_t> map;\n    \n    map.max_load_factor(0.0f);\n    BOOST_CHECK_GT(map.max_load_factor(), 0.0f);\n    \n    map.max_load_factor(10.0f);\n    BOOST_CHECK_LT(map.max_load_factor(), 1.0f);\n}\n\n/**\n * min_load_factor\n */\nBOOST_AUTO_TEST_CASE(test_min_load_factor_extreme_factors) {\n    tsl::robin_map<std::int64_t, std::int64_t> map;\n    \n    BOOST_CHECK_EQUAL(map.min_load_factor(), 0.0f);\n    BOOST_CHECK_LT(map.min_load_factor(), map.max_load_factor());\n    \n    map.min_load_factor(-10.0f);\n    BOOST_CHECK_EQUAL(map.min_load_factor(), 0.0f);\n    \n    map.min_load_factor(0.9f);\n    map.max_load_factor(0.1f);\n    \n    // max_load_factor should always be > min_load_factor. 
\n    // Factors should have been clamped.\n    BOOST_CHECK_LT(map.min_load_factor(), map.max_load_factor());\n}\n\nBOOST_AUTO_TEST_CASE(test_min_load_factor) {\n    // set min_load_factor to 0.15 and max_load_factor to 0.5.\n    // rehash to 100 buckets, insert 50 elements, erase until load_factor() < min_load_factor(), \n    // insert an element, check if map has shrinked.\n    const std::size_t nb_values = 50;\n    tsl::robin_map<std::int64_t, std::int64_t> map;\n    \n    map.min_load_factor(0.15f);\n    BOOST_CHECK_EQUAL(map.min_load_factor(), 0.15f);\n    \n    map.max_load_factor(0.5f);\n    BOOST_CHECK_EQUAL(map.max_load_factor(), 0.5f);\n    \n    \n    map.rehash(nb_values*2);\n    for(std::size_t i = 0; i < nb_values; i++) {\n        map.insert({utils::get_key<std::int64_t>(i), utils::get_value<std::int64_t>(i)});\n    }\n    BOOST_CHECK_GT(map.load_factor(), map.min_load_factor());\n    \n    \n    while(map.load_factor() >= map.min_load_factor()) {\n        map.erase(map.begin());\n    }\n    \n    // Shrink is done on insert.\n    map.insert({utils::get_key<std::int64_t>(map.bucket_count()), \n                utils::get_value<std::int64_t>(map.bucket_count())});\n    BOOST_CHECK_GT(map.load_factor(), map.min_load_factor());\n}\n\nBOOST_AUTO_TEST_CASE(test_min_load_factor_range_erase) {\n    // set min_load_factor to 0.15 and max_load_factor to 0.5.\n    // rehash to 100 buckets, insert 50 elements, erase 40 with range erase, insert an element, \n    // check if map has shrinked.\n    const std::size_t nb_values = 50;\n    const std::size_t nb_values_erase = 40;\n    tsl::robin_map<std::int64_t, std::int64_t> map;\n    \n    map.min_load_factor(0.15f);\n    BOOST_CHECK_EQUAL(map.min_load_factor(), 0.15f);\n    \n    map.max_load_factor(0.5f);\n    BOOST_CHECK_EQUAL(map.max_load_factor(), 0.5f);\n    \n    \n    map.rehash(nb_values*2);\n    for(std::size_t i = 0; i < nb_values; i++) {\n        map.insert({utils::get_key<std::int64_t>(i), 
utils::get_value<std::int64_t>(i)});\n    }\n    BOOST_CHECK_GT(map.load_factor(), map.min_load_factor());\n    \n    \n    map.erase(std::next(map.begin(), nb_values - nb_values_erase) , map.end());\n    \n    // Shrink is done on insert.\n    map.insert({utils::get_key<std::int64_t>(map.bucket_count()), \n                utils::get_value<std::int64_t>(map.bucket_count())});\n    BOOST_CHECK_GT(map.load_factor(), map.min_load_factor());\n    BOOST_CHECK_LT(map.bucket_count(), nb_values*2);\n}\n\n/**\n * rehash\n */\nBOOST_AUTO_TEST_CASE(test_rehash_empty) {\n    // test rehash(0), test find/erase/insert on map.\n    const std::size_t nb_values = 100;\n    auto map = utils::get_filled_hash_map<tsl::robin_map<std::int64_t, std::int64_t>>(nb_values);\n    \n    const std::size_t bucket_count = map.bucket_count();\n    BOOST_CHECK(bucket_count >= nb_values);\n    \n    map.clear();\n    BOOST_CHECK_EQUAL(map.bucket_count(), bucket_count);\n    BOOST_CHECK(map.empty());\n    \n    map.rehash(0);\n    BOOST_CHECK_EQUAL(map.bucket_count(), 0);\n    BOOST_CHECK(map.empty());\n    \n    \n    BOOST_CHECK(map.find(1) == map.end());\n    BOOST_CHECK_EQUAL(map.erase(1), 0);\n    BOOST_CHECK(map.insert({1, 10}).second);\n    BOOST_CHECK_EQUAL(map.at(1), 10);\n}\n\n\n/**\n * operator== and operator!=\n */\nBOOST_AUTO_TEST_CASE(test_compare) {\n    const tsl::robin_map<std::string, std::int64_t> map1 = {{\"a\", 1}, {\"e\", 5}, {\"d\", 4}, {\"c\", 3}, {\"b\", 2}};\n    const tsl::robin_map<std::string, std::int64_t> map1_copy = {{\"e\", 5}, {\"c\", 3}, {\"b\", 2}, {\"a\", 1}, {\"d\", 4}};\n    const tsl::robin_map<std::string, std::int64_t> map2 = {{\"e\", 5}, {\"c\", 3}, {\"b\", 2}, {\"a\", 1}, {\"d\", 4}, {\"f\", 6}};\n    const tsl::robin_map<std::string, std::int64_t> map3 = {{\"e\", 5}, {\"c\", 3}, {\"b\", 2}, {\"a\", 1}};\n    const tsl::robin_map<std::string, std::int64_t> map4 = {{\"a\", 1}, {\"e\", 5}, {\"d\", 4}, {\"c\", 3}, {\"b\", 26}};\n    const 
tsl::robin_map<std::string, std::int64_t> map5 = {{\"a\", 1}, {\"e\", 5}, {\"d\", 4}, {\"c\", 3}, {\"z\", 2}};\n    \n    BOOST_CHECK(map1 == map1_copy);\n    BOOST_CHECK(map1_copy == map1);\n    \n    BOOST_CHECK(map1 != map2);\n    BOOST_CHECK(map2 != map1);\n    \n    BOOST_CHECK(map1 != map3);\n    BOOST_CHECK(map3 != map1);\n    \n    BOOST_CHECK(map1 != map4);\n    BOOST_CHECK(map4 != map1);\n    \n    BOOST_CHECK(map1 != map5);\n    BOOST_CHECK(map5 != map1);\n    \n    BOOST_CHECK(map2 != map3);\n    BOOST_CHECK(map3 != map2);\n    \n    BOOST_CHECK(map2 != map4);\n    BOOST_CHECK(map4 != map2);\n    \n    BOOST_CHECK(map2 != map5);\n    BOOST_CHECK(map5 != map2);\n    \n    BOOST_CHECK(map3 != map4);\n    BOOST_CHECK(map4 != map3);\n    \n    BOOST_CHECK(map3 != map5);\n    BOOST_CHECK(map5 != map3);\n    \n    BOOST_CHECK(map4 != map5);\n    BOOST_CHECK(map5 != map4);\n}\n\n\n\n\n/**\n * clear\n */\nBOOST_AUTO_TEST_CASE(test_clear) {\n    // insert x values, clear map, test insert\n    using HMap = tsl::robin_map<std::int64_t, std::int64_t>;\n    \n    const std::size_t nb_values = 1000;\n    auto map = utils::get_filled_hash_map<HMap>(nb_values);\n    \n    map.clear();\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    BOOST_CHECK_EQUAL(std::distance(map.begin(), map.end()), 0);\n    \n    map.insert({5, -5});\n    map.insert({{1, -1}, {2, -1}, {4, -4}, {3, -3}});\n    \n    BOOST_CHECK(map == (HMap({{5, -5}, {1, -1}, {2, -1}, {4, -4}, {3, -3}})));\n}\n\nBOOST_AUTO_TEST_CASE(test_clear_with_min_load_factor) {\n    // insert x values, clear map, test insert\n    using HMap = tsl::robin_map<std::int64_t, std::int64_t>;\n    \n    const std::size_t nb_values = 1000;\n    auto map = utils::get_filled_hash_map<HMap>(nb_values);\n    map.min_load_factor(0.1f);\n    \n    map.clear();\n    BOOST_CHECK_EQUAL(map.bucket_count(), 0);\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    BOOST_CHECK_EQUAL(std::distance(map.begin(), map.end()), 0);\n    \n    map.insert({5, -5});\n 
   map.insert({{1, -1}, {2, -1}, {4, -4}, {3, -3}});\n    \n    BOOST_CHECK(map == (HMap({{5, -5}, {1, -1}, {2, -1}, {4, -4}, {3, -3}})));\n}\n\n\n/**\n * iterator.value()\n */\nBOOST_AUTO_TEST_CASE(test_modify_value_through_iterator) {\n    // insert x values, modify value of even keys with iterators, check values\n    const std::size_t nb_values = 100;\n    auto map = utils::get_filled_hash_map<tsl::robin_map<std::int64_t, std::int64_t>>(nb_values);\n    \n    for(auto it = map.begin(); it != map.end(); it++) {\n        if(it.key() % 2 == 0) {\n            it.value() = -1;\n        }\n    }\n    \n    for(auto& val : map) {\n        if(val.first % 2 == 0) {\n            BOOST_CHECK_EQUAL(val.second, -1);\n        }\n        else {\n            BOOST_CHECK_NE(val.second, -1);\n        }\n    }\n}\n\nBOOST_AUTO_TEST_CASE(test_modify_value_through_iterator_with_const_qualifier) {\n    tsl::robin_map<int, int> map = {{0, 1}};\n    const auto it = map.begin();\n\n    BOOST_CHECK_EQUAL(it->second, 1);\n    it.value() += 10;\n    BOOST_CHECK_EQUAL(it->second, 11);\n}\n\n/**\n * constructor\n */\nBOOST_AUTO_TEST_CASE(test_extreme_bucket_count_value_construction) {\n    TSL_RH_CHECK_THROW((tsl::robin_map<int, int, std::hash<int>, std::equal_to<int>, \n                                      std::allocator<std::pair<int, int>>, false,\n                                      tsl::rh::power_of_two_growth_policy<2>>\n                            (std::numeric_limits<std::size_t>::max())), std::length_error);\n    \n    TSL_RH_CHECK_THROW((tsl::robin_map<int, int, std::hash<int>, std::equal_to<int>, \n                                      std::allocator<std::pair<int, int>>, false, \n                                      tsl::rh::power_of_two_growth_policy<2>>\n                            (std::numeric_limits<std::size_t>::max()/2 + 1)), std::length_error);\n    \n    \n    \n    TSL_RH_CHECK_THROW((tsl::robin_map<int, int, std::hash<int>, std::equal_to<int>, \n                    
                  std::allocator<std::pair<int, int>>, false, \n                                      tsl::rh::prime_growth_policy>\n                            (std::numeric_limits<std::size_t>::max())), std::length_error);\n    \n    TSL_RH_CHECK_THROW((tsl::robin_map<int, int, std::hash<int>, std::equal_to<int>, \n                                      std::allocator<std::pair<int, int>>, false, \n                                      tsl::rh::prime_growth_policy>\n                            (std::numeric_limits<std::size_t>::max()/2)), std::length_error);\n    \n    \n    \n    TSL_RH_CHECK_THROW((tsl::robin_map<int, int, std::hash<int>, std::equal_to<int>, \n                                      std::allocator<std::pair<int, int>>, false, \n                                      tsl::rh::mod_growth_policy<>>\n                            (std::numeric_limits<std::size_t>::max())), std::length_error);\n}\n\nBOOST_AUTO_TEST_CASE(test_range_construct) {\n    tsl::robin_map<int, int> map = {{2, 1}, {1, 0}, {3, 2}};\n    \n    tsl::robin_map<int, int> map2(map.begin(), map.end());\n    tsl::robin_map<int, int> map3(map.cbegin(), map.cend());\n}\n\n\n/**\n * operator=(std::initializer_list)\n */\nBOOST_AUTO_TEST_CASE(test_assign_operator) {\n    tsl::robin_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};\n    BOOST_CHECK_EQUAL(map.size(), 2);\n    \n    map = {{1, 3}, {2, 4}};\n    BOOST_CHECK_EQUAL(map.size(), 2);\n    BOOST_CHECK_EQUAL(map.at(1), 3);\n    BOOST_CHECK_EQUAL(map.at(2), 4);\n    BOOST_CHECK(map.find(0) == map.end());\n    \n    map = {};\n    BOOST_CHECK(map.empty());\n}\n\n\n/**\n * move/copy constructor/operator\n */\nBOOST_AUTO_TEST_CASE(test_move_constructor) {\n    // insert x values in map, move map into map_move with move constructor, check map and map_move, \n    // insert additional values in map_move, check map_move\n    using HMap = tsl::robin_map<std::string, move_only_test>;\n    \n    const std::size_t nb_values = 100;\n    HMap 
map = utils::get_filled_hash_map<HMap>(nb_values);\n    HMap map_move(std::move(map));\n    \n    BOOST_CHECK(map_move == utils::get_filled_hash_map<HMap>(nb_values));\n    BOOST_CHECK(map == (HMap()));\n\n    \n    \n    for(std::size_t i = nb_values; i < nb_values*2; i++) {\n        map_move.insert({utils::get_key<std::string>(i), utils::get_value<move_only_test>(i)});\n    }\n    \n    BOOST_CHECK_EQUAL(map_move.size(), nb_values*2);\n    BOOST_CHECK(map_move == utils::get_filled_hash_map<HMap>(nb_values*2));\n}\n\nBOOST_AUTO_TEST_CASE(test_move_constructor_empty) {\n    tsl::robin_map<std::string, move_only_test> map(0);\n    tsl::robin_map<std::string, move_only_test> map_move(std::move(map));\n    \n    BOOST_CHECK(map.empty());\n    BOOST_CHECK(map_move.empty());\n    \n    BOOST_CHECK(map.find(\"\") == map.end());\n    BOOST_CHECK(map_move.find(\"\") == map_move.end());\n}\n\nBOOST_AUTO_TEST_CASE(test_move_operator) {\n    // insert x values in map, move map into map_move with move operator, check map and map_move, \n    // insert additional values in map_move, check map_move\n    using HMap = tsl::robin_map<std::string, move_only_test>;\n    \n    const std::size_t nb_values = 100;\n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    HMap map_move = utils::get_filled_hash_map<HMap>(1);\n    map_move = std::move(map);\n    \n    BOOST_CHECK(map_move == utils::get_filled_hash_map<HMap>(nb_values));\n    BOOST_CHECK(map == (HMap()));\n\n    \n    \n    for(std::size_t i = nb_values; i < nb_values*2; i++) {\n        map_move.insert({utils::get_key<std::string>(i), utils::get_value<move_only_test>(i)});\n    }\n    \n    BOOST_CHECK_EQUAL(map_move.size(), nb_values*2);\n    BOOST_CHECK(map_move == utils::get_filled_hash_map<HMap>(nb_values*2));\n}\n\nBOOST_AUTO_TEST_CASE(test_move_operator_empty) {\n    tsl::robin_map<std::string, move_only_test> map(0);\n    tsl::robin_map<std::string, move_only_test> map_move;\n    map_move = (std::move(map));\n  
  \n    BOOST_CHECK(map.empty());\n    BOOST_CHECK(map_move.empty());\n    \n    BOOST_CHECK(map.find(\"\") == map.end());\n    BOOST_CHECK(map_move.find(\"\") == map_move.end());\n}\n\nBOOST_AUTO_TEST_CASE(test_reassign_moved_object_move_constructor) {\n    using HMap = tsl::robin_map<std::string, std::string>;\n    \n    HMap map = {{\"Key1\", \"Value1\"}, {\"Key2\", \"Value2\"}, {\"Key3\", \"Value3\"}};\n    HMap map_move(std::move(map));\n    \n    BOOST_CHECK_EQUAL(map_move.size(), 3);\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    \n    map = {{\"Key4\", \"Value4\"}, {\"Key5\", \"Value5\"}};\n    BOOST_CHECK(map == (HMap({{\"Key4\", \"Value4\"}, {\"Key5\", \"Value5\"}})));\n}\n\nBOOST_AUTO_TEST_CASE(test_reassign_moved_object_move_operator) {\n    using HMap = tsl::robin_map<std::string, std::string>;\n    \n    HMap map = {{\"Key1\", \"Value1\"}, {\"Key2\", \"Value2\"}, {\"Key3\", \"Value3\"}};\n    HMap map_move = std::move(map);\n    \n    BOOST_CHECK_EQUAL(map_move.size(), 3);\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    \n    map = {{\"Key4\", \"Value4\"}, {\"Key5\", \"Value5\"}};\n    BOOST_CHECK(map == (HMap({{\"Key4\", \"Value4\"}, {\"Key5\", \"Value5\"}})));\n}\n\nBOOST_AUTO_TEST_CASE(test_use_after_move_constructor) {\n    using HMap = tsl::robin_map<std::string, move_only_test>;\n    \n    const std::size_t nb_values = 100;\n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    HMap map_move(std::move(map));\n    \n    \n    BOOST_CHECK(map == (HMap()));\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    BOOST_CHECK_EQUAL(map.bucket_count(), 0);\n    BOOST_CHECK_EQUAL(map.erase(\"a\"), 0);\n    BOOST_CHECK(map.find(\"a\") == map.end());\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        map.insert({utils::get_key<std::string>(i), utils::get_value<move_only_test>(i)});\n    }\n    \n    BOOST_CHECK_EQUAL(map.size(), nb_values);\n    BOOST_CHECK(map == map_move);\n}\n\nBOOST_AUTO_TEST_CASE(test_use_after_move_operator) {\n    using HMap 
= tsl::robin_map<std::string, move_only_test>;\n    \n    const std::size_t nb_values = 100;\n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    HMap map_move(0);\n    map_move = std::move(map);\n    \n    \n    BOOST_CHECK(map == (HMap()));\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    BOOST_CHECK_EQUAL(map.bucket_count(), 0);\n    BOOST_CHECK_EQUAL(map.erase(\"a\"), 0);\n    BOOST_CHECK(map.find(\"a\") == map.end());\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        map.insert({utils::get_key<std::string>(i), utils::get_value<move_only_test>(i)});\n    }\n    \n    BOOST_CHECK_EQUAL(map.size(), nb_values);\n    BOOST_CHECK(map == map_move);\n}\n\nBOOST_AUTO_TEST_CASE(test_copy_constructor_and_operator) {\n    using HMap = tsl::robin_map<std::string, std::string, mod_hash<9>>;\n    \n    \n    const std::size_t nb_values = 100;\n    HMap map = utils::get_filled_hash_map<HMap>(nb_values);\n    \n    HMap map_copy = map;\n    HMap map_copy2(map);\n    HMap map_copy3 = utils::get_filled_hash_map<HMap>(1);\n    map_copy3 = map;\n    \n    BOOST_CHECK(map == map_copy);\n    map.clear();\n    \n    BOOST_CHECK(map_copy == map_copy2);\n    BOOST_CHECK(map_copy == map_copy3);\n}\n\nBOOST_AUTO_TEST_CASE(test_copy_constructor_empty) {\n    tsl::robin_map<std::string, int> map(0);\n    tsl::robin_map<std::string, int> map_copy(map);\n    \n    BOOST_CHECK(map.empty());\n    BOOST_CHECK(map_copy.empty());\n    \n    BOOST_CHECK(map.find(\"\") == map.end());\n    BOOST_CHECK(map_copy.find(\"\") == map_copy.end());\n}\n\nBOOST_AUTO_TEST_CASE(test_copy_operator_empty) {\n    tsl::robin_map<std::string, int> map(0);\n    tsl::robin_map<std::string, int> map_copy(16);\n    map_copy = map;\n    \n    BOOST_CHECK(map.empty());\n    BOOST_CHECK(map_copy.empty());\n    \n    BOOST_CHECK(map.find(\"\") == map.end());\n    BOOST_CHECK(map_copy.find(\"\") == map_copy.end());\n}\n\n\n/**\n * at\n */\nBOOST_AUTO_TEST_CASE(test_at) {\n    // insert x values, use 
at for known and unknown values.\n    const tsl::robin_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};\n    \n    BOOST_CHECK_EQUAL(map.at(0), 10);\n    BOOST_CHECK_EQUAL(map.at(-2), 20);\n    TSL_RH_CHECK_THROW(map.at(1), std::out_of_range);\n}\n\n/**\n * contains\n */\nBOOST_AUTO_TEST_CASE(test_contains) {\n    tsl::robin_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};\n    \n    BOOST_CHECK(map.contains(0));\n    BOOST_CHECK(map.contains(-2));\n    BOOST_CHECK(!map.contains(-3));\n}\n\n/**\n * equal_range\n */\nBOOST_AUTO_TEST_CASE(test_equal_range) {\n    const tsl::robin_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};\n    \n    auto it_pair = map.equal_range(0);\n    BOOST_REQUIRE_EQUAL(std::distance(it_pair.first, it_pair.second), 1);\n    BOOST_CHECK_EQUAL(it_pair.first->second, 10);\n    \n    it_pair = map.equal_range(1);\n    BOOST_CHECK(it_pair.first == it_pair.second);\n    BOOST_CHECK(it_pair.first == map.end());\n}\n\n\n/**\n * operator[]\n */\nBOOST_AUTO_TEST_CASE(test_access_operator) {\n    // insert x values, use at for known and unknown values.\n    tsl::robin_map<std::int64_t, std::int64_t> map = {{0, 10}, {-2, 20}};\n    \n    BOOST_CHECK_EQUAL(map[0], 10);\n    BOOST_CHECK_EQUAL(map[-2], 20);\n    BOOST_CHECK_EQUAL(map[2], std::int64_t());\n    \n    BOOST_CHECK_EQUAL(map.size(), 3);\n}\n\n\n\n/**\n * swap\n */\nBOOST_AUTO_TEST_CASE(test_swap) {\n    tsl::robin_map<std::int64_t, std::int64_t> map = {{1, 10}, {8, 80}, {3, 30}};\n    tsl::robin_map<std::int64_t, std::int64_t> map2 = {{4, 40}, {5, 50}};\n    \n    using std::swap;\n    swap(map, map2);\n    \n    BOOST_CHECK(map == (tsl::robin_map<std::int64_t, std::int64_t>{{4, 40}, {5, 50}}));\n    BOOST_CHECK(map2 == (tsl::robin_map<std::int64_t, std::int64_t>{{1, 10}, {8, 80}, {3, 30}}));\n    \n    map.insert({6, 60});\n    map2.insert({4, 40});\n    \n    BOOST_CHECK(map == (tsl::robin_map<std::int64_t, std::int64_t>{{4, 40}, {5, 50}, {6, 60}}));\n    
BOOST_CHECK(map2 == (tsl::robin_map<std::int64_t, std::int64_t>{{1, 10}, {8, 80}, {3, 30}, {4, 40}}));\n}\n\nBOOST_AUTO_TEST_CASE(test_swap_empty) {\n    tsl::robin_map<std::int64_t, std::int64_t> map = {{1, 10}, {8, 80}, {3, 30}};\n    tsl::robin_map<std::int64_t, std::int64_t> map2;\n    \n    using std::swap;\n    swap(map, map2);\n    \n    BOOST_CHECK(map == (tsl::robin_map<std::int64_t, std::int64_t>{}));\n    BOOST_CHECK(map2 == (tsl::robin_map<std::int64_t, std::int64_t>{{1, 10}, {8, 80}, {3, 30}}));\n    \n    map.insert({6, 60});\n    map2.insert({4, 40});\n    \n    BOOST_CHECK(map == (tsl::robin_map<std::int64_t, std::int64_t>{{6, 60}}));\n    BOOST_CHECK(map2 == (tsl::robin_map<std::int64_t, std::int64_t>{{1, 10}, {8, 80}, {3, 30}, {4, 40}}));\n}\n\n/**\n * KeyEqual\n */\nBOOST_AUTO_TEST_CASE(test_key_equal) {\n    // Use a KeyEqual and Hash where any odd unsigned number 'x' is equal to 'x-1'.\n    // Make sure that KeyEqual is called (and not ==).\n    struct hash {\n        std::size_t operator()(std::uint64_t v) const {\n            if(v%2u == 1u) {\n                return std::hash<std::uint64_t>()(v-1);\n            }\n            else {\n                return std::hash<std::uint64_t>()(v);\n            }\n        }\n    };\n    \n    struct key_equal {\n        bool operator()(std::uint64_t lhs, std::uint64_t rhs) const {\n            if(lhs%2u == 1u) {\n                lhs--;\n            }\n            \n            if(rhs%2u == 1u) {\n                rhs--;\n            }\n            \n            return lhs == rhs;\n        }\n    };\n    \n    tsl::robin_map<std::uint64_t, std::uint64_t, hash, key_equal> map;\n    BOOST_CHECK(map.insert({2, 10}).second);\n    BOOST_CHECK_EQUAL(map.at(2), 10);\n    BOOST_CHECK_EQUAL(map.at(3), 10);\n    BOOST_CHECK(!map.insert({3, 10}).second);\n    \n    BOOST_CHECK_EQUAL(map.size(), 1);\n}\n\n\n\n/**\n * other\n */\nBOOST_AUTO_TEST_CASE(test_heterogeneous_lookups) {\n    struct hash_ptr {\n        
std::size_t operator()(const std::unique_ptr<int>& p) const {\n            return std::hash<std::uintptr_t>()(reinterpret_cast<std::uintptr_t>(p.get()));\n        }\n        \n        std::size_t operator()(std::uintptr_t p) const {\n            return std::hash<std::uintptr_t>()(p);\n        }\n        \n        std::size_t operator()(const int* const& p) const {\n            return std::hash<std::uintptr_t>()(reinterpret_cast<std::uintptr_t>(p));\n        }\n    };\n    \n    struct equal_to_ptr {\n        using is_transparent = std::true_type;\n        \n        bool operator()(const std::unique_ptr<int>& p1, const std::unique_ptr<int>& p2) const {\n            return p1 == p2;\n        }\n        \n        bool operator()(const std::unique_ptr<int>& p1, std::uintptr_t p2) const {\n            return reinterpret_cast<std::uintptr_t>(p1.get()) == p2;\n        }\n        \n        bool operator()(std::uintptr_t p1, const std::unique_ptr<int>& p2) const {\n            return p1 == reinterpret_cast<std::uintptr_t>(p2.get());\n        }\n        \n        bool operator()(const std::unique_ptr<int>& p1, const int* const& p2) const {\n            return p1.get() == p2;\n        }\n        \n        bool operator()(const int* const& p1, const std::unique_ptr<int>& p2) const {\n            return p1 == p2.get();\n        }\n    };\n    \n    std::unique_ptr<int> ptr1(new int(1));\n    std::unique_ptr<int> ptr2(new int(2));\n    std::unique_ptr<int> ptr3(new int(3));\n    int other = -1;\n    \n    const std::uintptr_t addr1 = reinterpret_cast<std::uintptr_t>(ptr1.get());\n    const int* const addr2 = ptr2.get();\n    const int* const addr_unknown = &other;\n     \n    tsl::robin_map<std::unique_ptr<int>, int, hash_ptr, equal_to_ptr> map;\n    map.insert({std::move(ptr1), 4});\n    map.insert({std::move(ptr2), 5});\n    map.insert({std::move(ptr3), 6});\n    \n    BOOST_CHECK_EQUAL(map.size(), 3);\n    \n    \n    BOOST_CHECK_EQUAL(map.at(addr1), 4);\n    
BOOST_CHECK_EQUAL(map.at(addr2), 5);\n    TSL_RH_CHECK_THROW(map.at(addr_unknown), std::out_of_range);\n    \n    \n    BOOST_REQUIRE(map.find(addr1) != map.end());\n    BOOST_CHECK_EQUAL(*map.find(addr1)->first, 1);\n    \n    BOOST_REQUIRE(map.find(addr2) != map.end());\n    BOOST_CHECK_EQUAL(*map.find(addr2)->first, 2);\n    \n    BOOST_CHECK(map.find(addr_unknown) == map.end());\n    \n    \n    BOOST_CHECK_EQUAL(map.count(addr1), 1);\n    BOOST_CHECK_EQUAL(map.count(addr2), 1);\n    BOOST_CHECK_EQUAL(map.count(addr_unknown), 0);\n    \n    \n    BOOST_CHECK_EQUAL(map.erase(addr1), 1);\n    BOOST_CHECK_EQUAL(map.erase(addr2), 1);\n    BOOST_CHECK_EQUAL(map.erase(addr_unknown), 0);\n    \n    \n    BOOST_CHECK_EQUAL(map.size(), 1);\n}\n\n\n\n/**\n * Various operations on empty map\n */\nBOOST_AUTO_TEST_CASE(test_empty_map) {\n    tsl::robin_map<std::string, int> map(0);\n    \n    BOOST_CHECK_EQUAL(map.bucket_count(), 0);\n    BOOST_CHECK_EQUAL(map.size(), 0);\n    BOOST_CHECK_EQUAL(map.load_factor(), 0);\n    BOOST_CHECK(map.empty());\n    \n    BOOST_CHECK(map.begin() == map.end());\n    BOOST_CHECK(map.begin() == map.cend());\n    BOOST_CHECK(map.cbegin() == map.cend());\n    \n    BOOST_CHECK(map.find(\"\") == map.end());\n    BOOST_CHECK(map.find(\"test\") == map.end());\n    \n    BOOST_CHECK_EQUAL(map.count(\"\"), 0);\n    BOOST_CHECK_EQUAL(map.count(\"test\"), 0);\n    \n    BOOST_CHECK(!map.contains(\"\"));\n    BOOST_CHECK(!map.contains(\"test\"));\n    \n    TSL_RH_CHECK_THROW(map.at(\"\"), std::out_of_range);\n    TSL_RH_CHECK_THROW(map.at(\"test\"), std::out_of_range);\n    \n    auto range = map.equal_range(\"test\");\n    BOOST_CHECK(range.first == range.second);\n    \n    BOOST_CHECK_EQUAL(map.erase(\"test\"), 0);\n    BOOST_CHECK(map.erase(map.begin(), map.end()) == map.end());\n    \n    BOOST_CHECK_EQUAL(map[\"new value\"], int{});\n}\n\n/**\n * Test precalculated hash\n */\nBOOST_AUTO_TEST_CASE(test_precalculated_hash) {\n    
tsl::robin_map<int, int, identity_hash<int>> map = {{1, -1}, {2, -2}, {3, -3}, {4, -4}, {5, -5}, {6, -6}};\n    const tsl::robin_map<int, int, identity_hash<int>> map_const = map;\n    \n    /**\n     * find\n     */\n    BOOST_REQUIRE(map.find(3, map.hash_function()(3)) != map.end());\n    BOOST_CHECK_EQUAL(map.find(3, map.hash_function()(3))->second, -3);\n    \n    BOOST_REQUIRE(map_const.find(3, map_const.hash_function()(3)) != map_const.end());\n    BOOST_CHECK_EQUAL(map_const.find(3, map_const.hash_function()(3))->second, -3);\n    \n    BOOST_REQUIRE_NE(map.hash_function()(2), map.hash_function()(3));\n    BOOST_CHECK(map.find(3, map.hash_function()(2)) == map.end());\n    \n    /**\n     * at\n     */\n    BOOST_CHECK_EQUAL(map.at(3, map.hash_function()(3)), -3);\n    BOOST_CHECK_EQUAL(map_const.at(3, map_const.hash_function()(3)), -3);\n    \n    BOOST_REQUIRE_NE(map.hash_function()(2), map.hash_function()(3));\n    TSL_RH_CHECK_THROW(map.at(3, map.hash_function()(2)), std::out_of_range);\n    \n    /**\n     * contains\n     */\n    BOOST_CHECK(map.contains(3, map.hash_function()(3)));\n    BOOST_CHECK(map_const.contains(3, map_const.hash_function()(3)));\n    \n    BOOST_REQUIRE_NE(map.hash_function()(2), map.hash_function()(3));\n    BOOST_CHECK(!map.contains(3, map.hash_function()(2)));\n    \n    /**\n     * count\n     */\n    BOOST_CHECK_EQUAL(map.count(3, map.hash_function()(3)), 1);\n    BOOST_CHECK_EQUAL(map_const.count(3, map_const.hash_function()(3)), 1);\n    \n    BOOST_REQUIRE_NE(map.hash_function()(2), map.hash_function()(3));\n    BOOST_CHECK_EQUAL(map.count(3, map.hash_function()(2)), 0);\n    \n    /**\n     * equal_range\n     */\n    auto it_range = map.equal_range(3, map.hash_function()(3));\n    BOOST_REQUIRE_EQUAL(std::distance(it_range.first, it_range.second), 1);\n    BOOST_CHECK_EQUAL(it_range.first->second, -3);\n    \n    auto it_range_const = map_const.equal_range(3, map_const.hash_function()(3));\n    
BOOST_REQUIRE_EQUAL(std::distance(it_range_const.first, it_range_const.second), 1);\n    BOOST_CHECK_EQUAL(it_range_const.first->second, -3);\n    \n    it_range = map.equal_range(3, map.hash_function()(2));\n    BOOST_REQUIRE_NE(map.hash_function()(2), map.hash_function()(3));\n    BOOST_CHECK_EQUAL(std::distance(it_range.first, it_range.second), 0);\n    \n    /**\n     * erase\n     */\n    BOOST_CHECK_EQUAL(map.erase(3, map.hash_function()(3)), 1);\n    \n    BOOST_REQUIRE_NE(map.hash_function()(2), map.hash_function()(4));\n    BOOST_CHECK_EQUAL(map.erase(4, map.hash_function()(2)), 0);\n}\n\n\nBOOST_AUTO_TEST_SUITE_END()\n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/robin_set_tests.cpp",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#define BOOST_TEST_DYN_LINK\n\n\n#include <boost/test/unit_test.hpp>\n#include <boost/mpl/list.hpp>\n#include <cstddef>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <string>\n#include <tuple>\n#include <utility>\n\n#include <tsl/robin_set.h>\n#include \"utils.h\"\n\n\nBOOST_AUTO_TEST_SUITE(test_robin_set)\n\nusing test_types = boost::mpl::list<tsl::robin_set<std::int64_t>,\n                                    tsl::robin_set<std::string>,\n                                    tsl::robin_set<self_reference_member_test>,\n                                    tsl::robin_set<move_only_test>,\n                                    tsl::robin_pg_set<self_reference_member_test>,\n                                    tsl::robin_set<move_only_test, \n           
                                        std::hash<move_only_test>,\n                                                   std::equal_to<move_only_test>, \n                                                   std::allocator<move_only_test>,\n                                                   true,\n                                                   tsl::rh::prime_growth_policy>,\n                                    tsl::robin_set<self_reference_member_test, \n                                                   std::hash<self_reference_member_test>,\n                                                   std::equal_to<self_reference_member_test>, \n                                                   std::allocator<self_reference_member_test>,\n                                                   true,\n                                                   tsl::rh::mod_growth_policy<>>,\n                                    tsl::robin_set<move_only_test, \n                                                   std::hash<move_only_test>,\n                                                   std::equal_to<move_only_test>, \n                                                   std::allocator<move_only_test>,\n                                                   false,\n                                                   tsl::rh::mod_growth_policy<>>\n                                    >;\n                                    \n                                    \n                                    \nBOOST_AUTO_TEST_CASE_TEMPLATE(test_insert, HSet, test_types) {\n    // insert x values, insert them again, check values\n    using key_t = typename HSet::key_type;\n    \n    const std::size_t nb_values = 1000;\n    HSet set;\n    typename HSet::iterator it;\n    bool inserted;\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        std::tie(it, inserted) = set.insert(utils::get_key<key_t>(i));\n        \n        BOOST_CHECK_EQUAL(*it, utils::get_key<key_t>(i));\n        
BOOST_CHECK(inserted);\n    }\n    BOOST_CHECK_EQUAL(set.size(), nb_values);\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        std::tie(it, inserted) = set.insert(utils::get_key<key_t>(i));\n        \n        BOOST_CHECK_EQUAL(*it, utils::get_key<key_t>(i));\n        BOOST_CHECK(!inserted);\n    }\n    \n    for(std::size_t i = 0; i < nb_values; i++) {\n        it = set.find(utils::get_key<key_t>(i));\n        \n        BOOST_CHECK_EQUAL(*it, utils::get_key<key_t>(i));\n    }\n}\n\nBOOST_AUTO_TEST_CASE(test_compare) {\n    const tsl::robin_set<std::string> set1 = {\"a\", \"e\", \"d\", \"c\", \"b\"};\n    const tsl::robin_set<std::string> set1_copy = {\"e\", \"c\", \"b\", \"a\", \"d\"};\n    const tsl::robin_set<std::string> set2 = {\"e\", \"c\", \"b\", \"a\", \"d\", \"f\"};\n    const tsl::robin_set<std::string> set3 = {\"e\", \"c\", \"b\", \"a\"};\n    const tsl::robin_set<std::string> set4 = {\"a\", \"e\", \"d\", \"c\", \"z\"};\n    \n    BOOST_CHECK(set1 == set1_copy);\n    BOOST_CHECK(set1_copy == set1);\n    \n    BOOST_CHECK(set1 != set2);\n    BOOST_CHECK(set2 != set1);\n    \n    BOOST_CHECK(set1 != set3);\n    BOOST_CHECK(set3 != set1);\n    \n    BOOST_CHECK(set1 != set4);\n    BOOST_CHECK(set4 != set1);\n    \n    BOOST_CHECK(set2 != set3);\n    BOOST_CHECK(set3 != set2);\n    \n    BOOST_CHECK(set2 != set4);\n    BOOST_CHECK(set4 != set2);\n    \n    BOOST_CHECK(set3 != set4);\n    BOOST_CHECK(set4 != set3);\n}\n\nBOOST_AUTO_TEST_CASE(test_insert_pointer) {\n    // Test added mainly to be sure that the code compiles with MSVC due to a bug in the compiler.\n    // See robin_hash::insert_value_impl for details.\n    std::string value;\n    std::string* value_ptr = &value;\n\n    tsl::robin_set<std::string*> set;\n    set.insert(value_ptr);\n    set.emplace(value_ptr);\n\n    BOOST_CHECK_EQUAL(set.size(), 1);\n    BOOST_CHECK_EQUAL(**set.begin(), value);\n}\n\nBOOST_AUTO_TEST_SUITE_END()\n"
  },
  {
    "path": "Thirdparty/tessil-src/tests/utils.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Thibaut Goetghebuer-Planchon <tessil@gmx.com>\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_UTILS_H\n#define TSL_UTILS_H\n\n\n#include <cstddef>\n#include <cstdint>\n#include <functional>\n#include <memory>\n#include <ostream>\n#include <string>\n#include <utility>\n\n#include <tsl/robin_hash.h>\n\n#ifdef TSL_RH_NO_EXCEPTIONS\n#define TSL_RH_CHECK_THROW(S, E)\n#else\n#define TSL_RH_CHECK_THROW(S, E) BOOST_CHECK_THROW(S, E)\n#endif\n\ntemplate<typename T>\nclass identity_hash {\npublic:    \n    std::size_t operator()(const T& value) const {\n        return static_cast<std::size_t>(value);\n    }\n};\n\ntemplate<unsigned int MOD>\nclass mod_hash {\npublic:   \n    template<typename T>\n    std::size_t operator()(const T& value) const {\n        return std::hash<T>()(value) % MOD;\n    }\n};\n\nclass self_reference_member_test {\npublic:\n    
self_reference_member_test() : m_value(std::to_string(-1)), m_value_ptr(&m_value) {\n    }\n    \n    explicit self_reference_member_test(std::int64_t value) : m_value(std::to_string(value)), m_value_ptr(&m_value) {\n    }\n    \n    self_reference_member_test(const self_reference_member_test& other) : m_value(*other.m_value_ptr), m_value_ptr(&m_value) {\n    }\n    \n    self_reference_member_test(self_reference_member_test&& other) : m_value(*other.m_value_ptr), m_value_ptr(&m_value) {\n    }\n    \n    self_reference_member_test& operator=(const self_reference_member_test& other) {\n        m_value = *other.m_value_ptr;\n        m_value_ptr = &m_value;\n        \n        return *this;\n    }\n    \n    self_reference_member_test& operator=(self_reference_member_test&& other) {\n        m_value = *other.m_value_ptr;\n        m_value_ptr = &m_value;\n        \n        return *this;\n    }\n    \n    std::string value() const {\n        return *m_value_ptr;\n    }\n\n    friend std::ostream& operator<<(std::ostream& stream, const self_reference_member_test& value) {\n        stream << *value.m_value_ptr;\n        \n        return stream;\n    }\n    \n    friend bool operator==(const self_reference_member_test& lhs, const self_reference_member_test& rhs) { \n        return *lhs.m_value_ptr == *rhs.m_value_ptr;\n    }\n    \n    friend bool operator!=(const self_reference_member_test& lhs, const self_reference_member_test& rhs) { \n        return !(lhs == rhs); \n    }\n    \n    friend bool operator<(const self_reference_member_test& lhs, const self_reference_member_test& rhs) {\n        return *lhs.m_value_ptr < *rhs.m_value_ptr;\n    }\nprivate:    \n    std::string  m_value;\n    std::string* m_value_ptr;\n};\n\n\nclass move_only_test {\npublic:\n    explicit move_only_test(std::int64_t value) : m_value(new std::string(std::to_string(value))) {\n    }\n    \n    move_only_test(const move_only_test&) = delete;\n    move_only_test(move_only_test&&) = default;\n    
move_only_test& operator=(const move_only_test&) = delete;\n    move_only_test& operator=(move_only_test&&) = default;\n    \n    friend std::ostream& operator<<(std::ostream& stream, const move_only_test& value) {\n        if(value.m_value == nullptr) {\n            stream << \"null\";\n        }\n        else {\n            stream << *value.m_value;\n        }\n        \n        return stream;\n    }\n    \n    friend bool operator==(const move_only_test& lhs, const move_only_test& rhs) { \n        if(lhs.m_value == nullptr || rhs.m_value == nullptr) {\n            return lhs.m_value == nullptr && rhs.m_value == nullptr;\n        }\n        else {\n            return *lhs.m_value == *rhs.m_value; \n        }\n    }\n    \n    friend bool operator!=(const move_only_test& lhs, const move_only_test& rhs) { \n        return !(lhs == rhs); \n    }\n    \n    friend bool operator<(const move_only_test& lhs, const move_only_test& rhs) {\n        if(lhs.m_value == nullptr && rhs.m_value == nullptr) {\n            return false;\n        }\n        else if(lhs.m_value == nullptr) {\n            return true;\n        }\n        else if(rhs.m_value == nullptr) {\n            return false;\n        }\n        else {\n            return *lhs.m_value < *rhs.m_value; \n        }\n    }\n    \n    const std::string& value() const {\n        return *m_value;\n    }\n    \nprivate:    \n    std::unique_ptr<std::string> m_value;\n};\n\n\nclass copy_only_test {\npublic:\n    explicit copy_only_test(std::int64_t value): m_value(std::to_string(value)) {\n    }\n    \n    copy_only_test(const copy_only_test& other): m_value(other.m_value) {\n    }\n    \n    copy_only_test& operator=(const copy_only_test& other) {\n        m_value = other.m_value;\n        \n        return *this;\n    }\n    \n    ~copy_only_test() {\n    }\n    \n    \n    friend std::ostream& operator<<(std::ostream& stream, const copy_only_test& value) {\n        stream << value.m_value;\n        \n        return 
stream;\n    }\n    \n    friend bool operator==(const copy_only_test& lhs, const copy_only_test& rhs) { \n        return lhs.m_value == rhs.m_value; \n    }\n    \n    friend bool operator!=(const copy_only_test& lhs, const copy_only_test& rhs) { \n        return !(lhs == rhs); \n    }\n    \n    friend bool operator<(const copy_only_test& lhs, const copy_only_test& rhs) {\n        return lhs.m_value < rhs.m_value; \n    }\n    \n    std::string value() const {\n        return m_value;\n    }\n    \nprivate:    \n    std::string m_value;\n};\n\n\n\nnamespace std {\n    template<>\n    struct hash<self_reference_member_test> {\n        std::size_t operator()(const self_reference_member_test& val) const {\n            return std::hash<std::string>()(val.value());\n        }\n    };\n    \n    template<>\n    struct hash<move_only_test> {\n        std::size_t operator()(const move_only_test& val) const {\n            return std::hash<std::string>()(val.value());\n        }\n    };\n    \n    template<>\n    struct hash<copy_only_test> {\n        std::size_t operator()(const copy_only_test& val) const {\n            return std::hash<std::string>()(val.value());\n        }\n    };\n}\n\n\nclass utils {\npublic:\n    template<typename T>\n    static T get_key(std::size_t counter);\n    \n    template<typename T>\n    static T get_value(std::size_t counter);\n    \n    template<typename HMap>\n    static HMap get_filled_hash_map(std::size_t nb_elements);\n};\n\n\n\ntemplate<>\ninline std::int64_t utils::get_key<std::int64_t>(std::size_t counter) {\n    return tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter);\n}\n\ntemplate<>\ninline self_reference_member_test utils::get_key<self_reference_member_test>(std::size_t counter) {\n    return self_reference_member_test(tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter));\n}\n\ntemplate<>\ninline std::string utils::get_key<std::string>(std::size_t counter) {\n    return \"Key \" + 
std::to_string(counter);\n}\n\ntemplate<>\ninline move_only_test utils::get_key<move_only_test>(std::size_t counter) {\n    return move_only_test(tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter));\n}\n\ntemplate<>\ninline copy_only_test utils::get_key<copy_only_test>(std::size_t counter) {\n    return copy_only_test(tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter));\n}\n\n\n\n\ntemplate<>\ninline std::int64_t utils::get_value<std::int64_t>(std::size_t counter) {\n    return tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter*2);\n}\n\ntemplate<>\ninline self_reference_member_test utils::get_value<self_reference_member_test>(std::size_t counter) {\n    return self_reference_member_test(tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter*2));\n}\n\ntemplate<>\ninline std::string utils::get_value<std::string>(std::size_t counter) {\n    return \"Value \" + std::to_string(counter);\n}\n\ntemplate<>\ninline move_only_test utils::get_value<move_only_test>(std::size_t counter) {\n    return move_only_test(tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter*2));\n}\n\ntemplate<>\ninline copy_only_test utils::get_value<copy_only_test>(std::size_t counter) {\n    return copy_only_test(tsl::detail_robin_hash::numeric_cast<std::int64_t>(counter*2));\n}\n\n\n\ntemplate<typename HMap>\ninline HMap utils::get_filled_hash_map(std::size_t nb_elements) {\n    using key_t = typename HMap::key_type; using value_t = typename HMap:: mapped_type;\n    \n    HMap map;\n    map.reserve(nb_elements);\n    \n    for(std::size_t i = 0; i < nb_elements; i++) {\n        map.insert({utils::get_key<key_t>(i), utils::get_value<value_t>(i)});\n    }\n    \n    return map;\n}\n\n#endif\n"
  },
  {
    "path": "Thirdparty/tessil-src/tsl-robin-map.natvis",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<AutoVisualizer xmlns=\"http://schemas.microsoft.com/vstudio/debugger/natvis/2010\">\n    <!-- Written in VC 2017 15.7 but is expected to be compatible with VC 2015 -->\n\n    <!-- Visualization that shows the index in the name column and the key-value pair in the value column -->\n    <Type Name=\"tsl::robin_map&lt;*&gt;\" Priority=\"Medium\">\n        <AlternativeType Name=\"tsl::robin_set&lt;*&gt;\"/>\n        <DisplayString>{{ size={m_ht.m_nb_elements} }}</DisplayString>\n        <Expand>\n            <Item Name=\"[bucket_count]\" IncludeView=\"detailed\">m_ht.m_buckets_data._Mypair._Myval2._Mylast - m_ht.m_buckets_data._Mypair._Myval2._Myfirst</Item>\n            <Item Name=\"[load_factor]\" Condition=\"m_ht.m_buckets_data._Mypair._Myval2._Myfirst != m_ht.m_buckets_data._Mypair._Myval2._Mylast\" IncludeView=\"detailed\">\n                ((float)m_ht.m_nb_elements) / ((float)(m_ht.m_buckets_data._Mypair._Myval2._Mylast - m_ht.m_buckets_data._Mypair._Myval2._Myfirst))\n            </Item>\n            <Item Name=\"[load_factor]\" Condition=\"m_ht.m_buckets_data._Mypair._Myval2._Myfirst == m_ht.m_buckets_data._Mypair._Myval2._Mylast\" IncludeView=\"detailed\">\n                0\n            </Item>\n            <Item Name=\"[max_load_factor]\" IncludeView=\"detailed\">m_ht.m_max_load_factor</Item>\n            <CustomListItems>\n                <Variable Name=\"bucket\" InitialValue=\"m_ht.m_buckets\"/>\n\n                <Size>m_ht.m_nb_elements</Size>\n                <Loop>\n                    <Item Condition=\"bucket-&gt;m_dist_from_ideal_bucket != -1\">*bucket</Item>\n                    <Break Condition=\"bucket-&gt;m_last_bucket\"/>\n                    <Exec>++bucket</Exec>\n                </Loop>\n            </CustomListItems>\n        </Expand>\n    </Type>\n\n    <!-- Visualization that shows the key in the name column and the key-value pair in the value column -->\n    <Type 
Name=\"tsl::robin_map&lt;*&gt;\" ExcludeView=\"ShowElementsByIndex\" Priority=\"MediumHigh\">\n        <DisplayString>{{ size={m_ht.m_nb_elements} }}</DisplayString>\n        <Expand>\n            <Item Name=\"[bucket_count]\" IncludeView=\"detailed\">m_ht.m_buckets_data._Mypair._Myval2._Mylast - m_ht.m_buckets_data._Mypair._Myval2._Myfirst</Item>\n            <Item Name=\"[load_factor]\" Condition=\"m_ht.m_buckets_data._Mypair._Myval2._Myfirst != m_ht.m_buckets_data._Mypair._Myval2._Mylast\" IncludeView=\"detailed\">\n                ((float)m_ht.m_nb_elements) / ((float)(m_ht.m_buckets_data._Mypair._Myval2._Mylast - m_ht.m_buckets_data._Mypair._Myval2._Myfirst))\n            </Item>\n            <Item Name=\"[load_factor]\" Condition=\"m_ht.m_buckets_data._Mypair._Myval2._Myfirst == m_ht.m_buckets_data._Mypair._Myval2._Mylast\" IncludeView=\"detailed\">\n                0\n            </Item>\n            <Item Name=\"[max_load_factor]\" IncludeView=\"detailed\">m_ht.m_max_load_factor</Item>\n            <CustomListItems>\n                <Variable Name=\"bucket\" InitialValue=\"m_ht.m_buckets\"/>\n\n                <Size>m_ht.m_nb_elements</Size>\n                <Loop>\n                    <Item Condition=\"bucket-&gt;m_dist_from_ideal_bucket != -1\" Name=\"[{reinterpret_cast&lt;std::pair&lt;$T1,$T2&gt;*&gt;(&amp;bucket->m_value)->first}]\">*bucket</Item>\n                    <Break Condition=\"bucket-&gt;m_last_bucket\"/>\n                    <Exec>++bucket</Exec>\n                </Loop>\n            </CustomListItems>\n        </Expand>\n    </Type>\n\n    <Type Name=\"tsl::detail_robin_hash::robin_hash&lt;*&gt;::robin_iterator&lt;*&gt;\">\n        <DisplayString>{*m_bucket}</DisplayString>\n        <Expand>\n            <ExpandedItem>*m_bucket</ExpandedItem>\n        </Expand>\n    </Type>\n\n    <Type Name=\"tsl::detail_robin_hash::bucket_entry&lt;*&gt;\">\n        <DisplayString Condition=\"m_dist_from_ideal_bucket == -1\">empty</DisplayString>\n        
<DisplayString Condition=\"m_dist_from_ideal_bucket != -1\">{*reinterpret_cast&lt;$T1*&gt;(&amp;m_value)}</DisplayString>\n        <Expand>\n            <ExpandedItem Condition=\"m_dist_from_ideal_bucket != -1\">*reinterpret_cast&lt;$T1*&gt;(&amp;m_value)</ExpandedItem>\n        </Expand>\n    </Type>\n\n    <Type Name=\"tsl::detail_robin_hash::bucket_entry&lt;*&gt;\" IncludeView=\"MapHelper\">\n        <DisplayString Condition=\"m_dist_from_ideal_bucket == -1\">empty</DisplayString>\n        <DisplayString Condition=\"m_dist_from_ideal_bucket != -1\">{reinterpret_cast&lt;$T1*&gt;(&amp;m_value)->second}</DisplayString>\n        <Expand>\n            <ExpandedItem Condition=\"m_dist_from_ideal_bucket != -1\">*reinterpret_cast&lt;$T1*&gt;(&amp;m_value)</ExpandedItem>\n        </Expand>\n    </Type>\n</AutoVisualizer>\n"
  },
  {
    "path": "config/avia.yaml",
    "content": "common:\n    lid_topic:  \"/livox/lidar\"\n\npreprocess:\n    lidar_type: 1               # Livox series LiDAR\n    feature_extract_en: false\n    scan_line: 6\n    blind: 2\n    point_filter_num: 2\n\ninitialization:\n    cut_frame: false\n    cut_frame_num: 5           # must be positive integer\n    cut_frame_init_num: 5\n\nmapping:\n    filter_size_surf: 0.05\n    filter_size_map: 0.15\n    gyr_cov: 50\n    acc_cov: 2\n    det_range: 450.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 7\n    cube_side_length: 2000\n\npublish:\n    path_en:  true\n    scan_publish_en:  true       # false: close all the point cloud output\n    dense_publish_en: true       # false: low down the points number in a global-frame point clouds scan.\n    scan_bodyframe_pub_en: true  # true: output the point cloud scans in IMU-body-frame\n\npcd_save:\n    pcd_save_en: false\n    interval: -1                 # how many LiDAR frames saved in each pcd file; \n                                 # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames.\n"
  },
  {
    "path": "config/horizon.yaml",
    "content": "common:\n    lid_topic:  \"/livox/lidar\"\n\npreprocess:\n    lidar_type: 1                # Livox series LiDAR\n    feature_extract_en: false\n    scan_line: 6\n    blind: 1\n    point_filter_num: 3\n\ninitialization:\n    cut_frame: true\n    cut_frame_num: 5\n    cut_frame_init_num: 5\n\nmapping:\n    filter_size_surf: 0.3\n    filter_size_map: 0.3\n    gyr_cov: 1.01\n    acc_cov: 1.01\n    det_range: 100.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 5\n    cube_side_length: 2000\n\npublish:\n    path_en:  true\n    scan_publish_en:  true       # false: close all the point cloud output\n    dense_publish_en: true       # false: low down the points number in a global-frame point clouds scan.\n    scan_bodyframe_pub_en: false  # true: output the point cloud scans in IMU-body-frame\n\npcd_save:\n    pcd_save_en: false\n    interval: -1                 # how many LiDAR frames saved in each pcd file; \n                                 # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames.\n"
  },
  {
    "path": "config/mid360.yaml",
    "content": "common:\n    lid_topic:  \"/livox/lidar\"\n\npreprocess:\n    lidar_type: 1                # Livox\n    feature_extract_en: false\n    scan_line: 6\n    blind: 1\n    point_filter_num: 3\n\ninitialization:\n    cut_frame: false\n    cut_frame_num: 6\n    cut_frame_init_num: 5\n\nmapping:\n    filter_size_surf: 0.3\n    filter_size_map: 0.3\n    gyr_cov: 0.01\n    acc_cov: 0.01\n    det_range: 100.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 5\n    cube_side_length: 2000\n\nadaptive_cov:\n    use: false\n    K: 100.0\n    a: 50\n    b: 10.0\n\npublish:\n    path_en:  true\n    scan_publish_en:  true\n    dense_publish_en: true\n    scan_bodyframe_pub_en: false\n\npcd_save:\n    pcd_save_en: false\n    interval: -1\n\n"
  },
  {
    "path": "config/ouster.yaml",
    "content": "common:\n    lid_topic:  \"/os_cloud_node/points\"\n\npreprocess:\n    lidar_type: 3                # Ouster LiDAR\n    scan_line: 128\n    blind: 4\n    feature_extract_en: false\n    point_filter_num: 5\n\ninitialization:\n    cut_frame: true\n    cut_frame_num: 3 # must be positive integer\n    cut_frame_init_num: 20\n\nmapping:\n    filter_size_surf: 0.5\n    filter_size_map: 0.5\n    gyr_cov: 0.01\n    acc_cov: 0.01\n    det_range: 150.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 3\n    cube_side_length: 2000\n\npublish:\n    path_en:  true\n    scan_publish_en:  true       # false: close all the point cloud output\n    dense_publish_en: true       # false: low down the points number in a global-frame point clouds scan.\n    scan_bodyframe_pub_en: true  # true: output the point cloud scans in IMU-body-frame\n\npcd_save:\n    pcd_save_en: false\n    interval: -1                 # how many LiDAR frames saved in each pcd file; \n                                 # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames.\n"
  },
  {
    "path": "config/pandar.yaml",
    "content": "common:\n    lid_topic:  \"/hesai/pandar\"\n\npreprocess:\n    lidar_type: 5                # Hesai PandarXT\n    scan_line: 32\n    blind: 3\n    feature_extract_en: false\n    point_filter_num: 2\n\ninitialization:\n    cut_frame_num: 4 # must be positive integer\n    orig_odom_freq: 10\n    cut_frame_init_num: 20\n\nmapping:\n    filter_size_surf: 0.05\n    filter_size_map: 0.15\n    gyr_cov: 20\n    acc_cov: 2\n    det_range: 120.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 3\n    cube_side_length: 2000\n\npublish:\n    path_en:  true\n    scan_publish_en:  true       # false: close all the point cloud output\n    dense_publish_en: true       # false: low down the points number in a global-frame point clouds scan.\n    scan_bodyframe_pub_en: true  # true: output the point cloud scans in IMU-body-frame\n\npcd_save:\n    pcd_save_en: false\n    interval: -1                 # how many LiDAR frames saved in each pcd file; \n                                 # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames.\n"
  },
  {
    "path": "config/robosense.yaml",
    "content": "common:\n    lid_topic:  \"/rslidar_points\"\n\npreprocess:\n    lidar_type: 6                # Robosense LiDAR\n    scan_line: 128\n    blind: 2\n    feature_extract_en: false\n    point_filter_num: 2\n\ninitialization:\n    cut_frame_num: 3 # must be positive integer\n    orig_odom_freq: 10\n    cut_frame_init_num: 20\n\nmapping:\n    filter_size_surf: 0.05\n    filter_size_map: 0.15\n    gyr_cov: 0.5\n    acc_cov: 0.5\n    det_range: 100.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 3\n    cube_side_length: 2000\n\npublish:\n    path_en:  true\n    scan_publish_en:  true       # false: close all the point cloud output\n    dense_publish_en: true       # false: low down the points number in a global-frame point clouds scan.\n    scan_bodyframe_pub_en: true  # true: output the point cloud scans in IMU-body-frame\n\npcd_save:\n    pcd_save_en: false\n    interval: -1                 # how many LiDAR frames saved in each pcd file; \n                                 # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames.\n"
  },
  {
    "path": "config/velodyne.yaml",
    "content": "common:\n    lid_topic:  \"/velodyne_points_0\"\n\npreprocess:\n    lidar_type: 2                # Velodyne LiDAR\n    scan_line: 32\n    blind: 2\n    feature_extract_en: false\n    point_filter_num: 2\n\ninitialization:\n    cut_frame: false\n    cut_frame_num: 2 # must be positive integer\n    cut_frame_init_num: 20\n\nmapping:\n    filter_size_surf: 0.5\n    filter_size_map: 0.5\n    gyr_cov: 0.1\n    acc_cov: 0.1\n    det_range: 100.0\n    cov_lidar: 0.001\n    max_iteration: 20\n    max_undistort: 3\n    cube_side_length: 2000\n\npublish:\n    path_en:  true\n    scan_publish_en:  true       # false: close all the point cloud output\n    dense_publish_en: true       # false: low down the points number in a global-frame point clouds scan.\n    scan_bodyframe_pub_en: true  # true: output the point cloud scans in IMU-body-frame\n\npcd_save:\n    pcd_save_en: false\n    interval: -1                 # how many LiDAR frames saved in each pcd file; \n                                 # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames.\n"
  },
  {
    "path": "include/color.h",
    "content": "#ifndef COLOR_H\n#define COLOR_H\n\n#define RESET       \"\\033[0m\"\n#define BLACK       \"\\033[30m\"             /* Black */\n#define RED         \"\\033[31m\"             /* Red */\n#define GREEN       \"\\033[32m\"             /* Green */\n#define YELLOW      \"\\033[33m\"             /* Yellow */\n#define BLUE        \"\\033[34m\"             /* Blue */\n#define MAGENTA     \"\\033[35m\"             /* Magenta */\n#define CYAN        \"\\033[36m\"             /* Cyan */\n#define WHITE       \"\\033[37m\"             /* White */\n#define REDPURPLE   \"\\033[95m\"             /* Red Purple */\n#define BOLDBLACK   \"\\033[1m\\033[30m\"      /* Bold Black */\n#define BOLDRED     \"\\033[1m\\033[31m\"      /* Bold Red */\n#define BOLDGREEN   \"\\033[1m\\033[32m\"      /* Bold Green */\n#define BOLDYELLOW  \"\\033[1m\\033[33m\"      /* Bold Yellow */\n#define BOLDBLUE    \"\\033[1m\\033[34m\"      /* Bold Blue */\n#define BOLDMAGENTA \"\\033[1m\\033[35m\"      /* Bold Magenta */\n#define BOLDCYAN    \"\\033[1m\\033[36m\"      /* Bold Cyan */\n#define BOLDWHITE   \"\\033[1m\\033[37m\"      /* Bold White */\n#define BOLDREDPURPLE   \"\\033[1m\\033[95m\"  /* Bold Red Purple */\n\n#endif\n"
  },
  {
    "path": "include/common_lib.h",
    "content": "#ifndef COMMON_LIB_H\n#define COMMON_LIB_H\n\n#include <so3_math.h>\n#include <Eigen/Eigen>\n#include <pcl/point_types.h>\n#include <pcl/point_cloud.h>\n#include <i2ekf_lo/States.h>\n#include <i2ekf_lo/Pose6D.h>\n#include <sensor_msgs/Imu.h>\n#include <nav_msgs/Odometry.h>\n#include <tf/transform_broadcaster.h>\n#include <eigen_conversions/eigen_msg.h>\n#include <color.h>\n#include <scope_timer.hpp>\n\n#include <tr1/unordered_map>\n// robin map\n#include \"tsl/robin_map.h\"\n\n#include \"sophus/se3.hpp\"\n#include \"sophus/interpolate.hpp\"\n\nusing namespace std;\nusing namespace Eigen;\n\n#define PBSTR \"||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\"\n#define PBWIDTH 30\n#define PI_M (3.14159265358)\n#define G_m_s2 (9.81)         // Gravity const in GuangDong/China\n#define DIM_STATE (24)      // Dimension of states (Let Dim(SO(3)) = 3)\n\n#define LIDAR_SP_LEN    (2)\n#define INIT_COV   (1)\n#define NUM_MATCH_POINTS    (5)\n\n#define VEC_FROM_ARRAY(v)        v[0],v[1],v[2]\n#define MAT_FROM_ARRAY(v)        v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8]\n\n#define DEBUG_FILE_DIR(name)     (string(string(ROOT_DIR) + \"Log/\"+ name))\n#define RESULT_FILE_DIR(name)    (string(string(ROOT_DIR) + \"result/\"+ name))\n\ntypedef i2ekf_lo::Pose6D     Pose6D;\ntypedef pcl::PointXYZINormal PointType;\ntypedef pcl::PointXYZRGB     PointTypeRGB;\ntypedef pcl::PointCloud<PointType>    PointCloudXYZI;\ntypedef pcl::PointCloud<PointTypeRGB> PointCloudXYZRGB;\ntypedef vector<PointType, Eigen::aligned_allocator<PointType>>  PointVector;\ntypedef Vector3d V3D;\ntypedef Matrix3d M3D;\ntypedef Vector3f V3F;\n\n#define MD(a,b)  Matrix<double, (a), (b)>\n#define VD(a)    Matrix<double, (a), 1>\n\nconst M3D Eye3d(M3D::Identity());\nconst V3D Zero3d(0, 0, 0);\n\n// Vector3d Lidar_offset_to_IMU(0.05512, 0.02226, -0.0297); // Horizon\n// Vector3d Lidar_offset_to_IMU(0.04165, 0.02326, -0.0284); // Avia\n\nenum LID_TYPE{AVIA = 1, VELO, OUSTER, L515, PANDAR, 
ROBOSENSE}; //{1, 2, 3}\n\nenum POINT_TYPE{RAW = 1, UNDISTORT, WORLD};\n\nstruct StatesGroup\n{\n    StatesGroup() {\n\t\tthis->rot_end = M3D::Identity();\n\t\tthis->pos_end = Zero3d;\n        this->offset_R_L_I = M3D::Identity();\n        this->offset_T_L_I = Zero3d;\n        this->vel_end = Zero3d;\n        this->bias_g  = Zero3d;\n        this->bias_a  = Zero3d;\n        this->gravity = Zero3d;\n        this->cov     = MD(DIM_STATE,DIM_STATE)::Identity() * INIT_COV;\n        this->cov.block<9,9>(15,15) = MD(9,9)::Identity() * 0.00001;\n\t};\n\n    StatesGroup(const StatesGroup& b) {\n\t\tthis->rot_end = b.rot_end;\n\t\tthis->pos_end = b.pos_end;\n        this->offset_R_L_I = b.offset_R_L_I;\n        this->offset_T_L_I = b.offset_T_L_I;\n        this->vel_end = b.vel_end;\n        this->bias_g  = b.bias_g;\n        this->bias_a  = b.bias_a;\n        this->gravity = b.gravity;\n        this->cov     = b.cov;\n\t};\n\n    StatesGroup& operator=(const StatesGroup& b)\n\t{\n        this->rot_end = b.rot_end;\n\t\tthis->pos_end = b.pos_end;\n        this->offset_R_L_I = b.offset_R_L_I;\n        this->offset_T_L_I = b.offset_T_L_I;\n        this->vel_end = b.vel_end;\n        this->bias_g  = b.bias_g;\n        this->bias_a  = b.bias_a;\n        this->gravity = b.gravity;\n        this->cov     = b.cov;\n        return *this;\n\t};\n\n    StatesGroup operator+(const Matrix<double, DIM_STATE, 1> &state_add)\n\t{\n        StatesGroup a;\n\t\ta.rot_end = this->rot_end * Exp(state_add(0,0), state_add(1,0), state_add(2,0));\n\t\ta.pos_end = this->pos_end + state_add.block<3,1>(3,0);\n        a.offset_R_L_I = this->offset_R_L_I *  Exp(state_add(6,0), state_add(7,0), state_add(8,0));\n        a.offset_T_L_I = this->offset_T_L_I + state_add.block<3,1>(9,0);\n        a.vel_end = this->vel_end + state_add.block<3,1>(12,0);\n        a.bias_g  = this->bias_g  + state_add.block<3,1>(15,0);\n        a.bias_a  = this->bias_a  + state_add.block<3,1>(18,0);\n        a.gravity = 
this->gravity + state_add.block<3,1>(21,0);\n        a.cov     = this->cov;\n\t\treturn a;\n\t};\n\n    StatesGroup& operator+=(const Matrix<double, DIM_STATE, 1> &state_add)\n\t{\n        this->rot_end = this->rot_end * Exp(state_add(0,0), state_add(1,0), state_add(2,0));\n\t\tthis->pos_end += state_add.block<3,1>(3,0);\n        this->offset_R_L_I = this->offset_R_L_I * Exp(state_add(6,0), state_add(7,0), state_add(8,0));\n        this->offset_T_L_I += state_add.block<3,1>(9,0);\n        this->vel_end += state_add.block<3,1>(12,0);\n        this->bias_g  += state_add.block<3,1>(15,0);\n        this->bias_a  += state_add.block<3,1>(18,0);\n        this->gravity += state_add.block<3,1>(21,0);\n\t\treturn *this;\n\t};\n\n    Matrix<double, DIM_STATE, 1> operator-(const StatesGroup& b)\n\t{\n        Matrix<double, DIM_STATE, 1> a;\n        M3D rotd(b.rot_end.transpose() * this->rot_end);\n        a.block<3,1>(0,0)  = Log(rotd);\n        a.block<3,1>(3,0)  = this->pos_end - b.pos_end;\n        M3D offsetd(b.offset_R_L_I.transpose() * this->offset_R_L_I);\n        a.block<3,1>(6,0) = Log(offsetd);\n        a.block<3,1>(9,0) = this->offset_T_L_I - b.offset_T_L_I;\n        a.block<3,1>(12,0)  = this->vel_end - b.vel_end;\n        a.block<3,1>(15,0)  = this->bias_g  - b.bias_g;\n        a.block<3,1>(18,0) = this->bias_a  - b.bias_a;\n        a.block<3,1>(21,0) = this->gravity - b.gravity;\n\t\treturn a;\n\t};\n\n    void resetpose()\n    {\n        this->rot_end = M3D::Identity();\n\t\tthis->pos_end = Zero3d;\n        this->vel_end = Zero3d;\n    }\n\n\tM3D rot_end;      // the estimated attitude (rotation matrix) at the end lidar point\n    V3D pos_end;      // the estimated position at the end lidar point (world frame)\n    M3D offset_R_L_I; // Rotation from Lidar frame L to IMU frame I\n    V3D offset_T_L_I; // Translation from Lidar frame L to IMU frame I\n    V3D vel_end;      // the estimated velocity at the end lidar point (world frame)\n    V3D bias_g;       // 
gyroscope bias\n    V3D bias_a;       // accelerator bias\n    V3D gravity;      // the estimated gravity acceleration\n    Matrix<double, DIM_STATE, DIM_STATE>  cov;     // states covariance\n};\n\ntemplate<typename T>\nT rad2deg(T radians)\n{\n  return radians * 180.0 / PI_M;\n}\n\ntemplate<typename T>\nT deg2rad(T degrees)\n{\n  return degrees * PI_M / 180.0;\n}\n\ntemplate<typename T>\nauto set_pose6d(const double t, const Matrix<T, 3, 1> &a, const Matrix<T, 3, 1> &g, \\\n                const Matrix<T, 3, 1> &v, const Matrix<T, 3, 1> &p, const Matrix<T, 3, 3> &R)\n{\n    Pose6D rot_kp;\n    rot_kp.offset_time = t;\n    for (int i = 0; i < 3; i++)\n    {\n        rot_kp.acc[i] = a(i);\n        rot_kp.gyr[i] = g(i);\n        rot_kp.vel[i] = v(i);\n        rot_kp.pos[i] = p(i);\n        for (int j = 0; j < 3; j++)  rot_kp.rot[i*3+j] = R(i,j);\n    }\n    // Map<M3D>(rot_kp.rot, 3,3) = R;\n    return move(rot_kp);\n}\n\n/* comment\nplane equation: Ax + By + Cz + D = 0\nconvert to: A/D*x + B/D*y + C/D*z = -1\nsolve: A0*x0 = b0\nwhere A0_i = [x_i, y_i, z_i], x0 = [A/D, B/D, C/D]^T, b0 = [-1, ..., -1]^T\nnormvec:  normalized x0\n*/\ntemplate<typename T>\nbool esti_normvector(Matrix<T, 3, 1> &normvec, const PointVector &point, const T &threshold, const int &point_num)\n{\n    MatrixXf A(point_num, 3);\n    MatrixXf b(point_num, 1);\n    b.setOnes();\n    b *= -1.0f;\n\n    for (int j = 0; j < point_num; j++)\n    {\n        A(j,0) = point[j].x;\n        A(j,1) = point[j].y;\n        A(j,2) = point[j].z;\n    }\n    normvec = A.colPivHouseholderQr().solve(b);\n    \n    for (int j = 0; j < point_num; j++)\n    {\n        if (fabs(normvec(0) * point[j].x + normvec(1) * point[j].y + normvec(2) * point[j].z + 1.0f) > threshold)\n        {\n            return false;\n        }\n    }\n\n    normvec.normalize();\n    return true;\n}\n\ntemplate<typename T>\nbool esti_plane(Matrix<T, 4, 1> &pca_result, const PointVector &point, const T &threshold)\n{\n    Matrix<T, 
NUM_MATCH_POINTS, 3> A;\n    Matrix<T, NUM_MATCH_POINTS, 1> b;\n    A.setZero();\n    b.setOnes();\n    b *= -1.0f;\n\n    for (int j = 0; j < NUM_MATCH_POINTS; j++)\n    {\n        A(j,0) = point[j].x;\n        A(j,1) = point[j].y;\n        A(j,2) = point[j].z;\n    }\n\n    Matrix<T, 3, 1> normvec = A.colPivHouseholderQr().solve(b);\n\n    T n = normvec.norm();\n    pca_result(0) = normvec(0) / n;\n    pca_result(1) = normvec(1) / n;\n    pca_result(2) = normvec(2) / n;\n    pca_result(3) = 1.0 / n;\n\n    for (int j = 0; j < NUM_MATCH_POINTS; j++)\n    {\n        if (fabs(pca_result(0) * point[j].x + pca_result(1) * point[j].y + pca_result(2) * point[j].z + pca_result(3)) > threshold)\n        {\n            return false;\n        }\n    }\n\n    return true;\n}\n\nstruct Point3D\n{\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n    Eigen::Vector3d raw_point;\n    Eigen::Vector3d undistort_lidar_point;\n    Eigen::Vector3d world_point;\n    float relative_time = 0.0;\n    double scale = 1.0;\n    double intensity = 0.0;\n    Point3D() = default;\n};\n\nstruct MeasureGroup     // Lidar data and imu dates for the curent process\n{\n    MeasureGroup()\n    {\n        lidar_beg_time = 0.0;\n        this->lidar.reset(new PointCloudXYZI());\n    };\n    double lidar_beg_time;\n    PointCloudXYZI::Ptr lidar;\n    deque<sensor_msgs::Imu::ConstPtr> imu;\n    std::vector<Point3D> points;\n};\n\nstruct voxel\n{\n\n    voxel() = default;\n\n    voxel(short x, short y, short z) : x(x), y(y), z(z) {}\n\n    bool operator==(const voxel &vox) const { return x == vox.x && y == vox.y && z == vox.z; }\n\n    inline bool operator<(const voxel &vox) const\n    {\n        return x < vox.x || (x == vox.x && y < vox.y) || (x == vox.x && y == vox.y && z < vox.z);\n    }\n\n    inline static voxel coordinates(const Eigen::Vector3d &point, double voxel_size)\n    {\n        return {short(point.x() / voxel_size),\n                short(point.y() / voxel_size),\n                short(point.z() / 
voxel_size)};\n    }\n\n    short x;\n    short y;\n    short z;\n};\n\nstruct voxelBlock\n{\n\n    explicit voxelBlock(int num_points_ = 20) : num_points(num_points_) { points.reserve(num_points_); }\n\n    std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> points;\n\n    bool IsFull() const { return num_points == points.size(); }\n\n    void AddPoint(const Eigen::Vector3d &point) {\n        assert(num_points > points.size());\n        points.push_back(point);\n    }\n\n    inline int NumPoints() const { return points.size(); }\n\n    inline int Capacity() { return num_points; }\n\nprivate:\n    int num_points;\n};\n\ntypedef tsl::robin_map<voxel, voxelBlock> voxelHashMap;\n\nnamespace std\n{\n\n    template<> struct hash<voxel>\n    {\n        std::size_t operator()(const voxel &vox) const\n        {\n            const size_t kP1 = 73856093;\n            const size_t kP2 = 19349669;\n            const size_t kP3 = 83492791;\n            return vox.x * kP1 + vox.y * kP2 + vox.z * kP3;\n        }\n    };\n}\n\n#endif"
  },
  {
    "path": "include/ikd-Tree/README.md",
    "content": "# ikd-Tree\nikd-Tree is an incremental k-d tree for robotic applications.\n"
  },
  {
    "path": "include/ikd-Tree/ikd_Tree.cpp",
    "content": "#include \"ikd_Tree.h\"\n\n/*\nDescription: ikd-Tree: an incremental k-d tree for robotic applications \nAuthor: Yixi Cai\nemail: yixicai@connect.hku.hk\n*/\n\nKD_TREE::KD_TREE(float delete_param, float balance_param, float box_length) {\n    delete_criterion_param = delete_param;\n    balance_criterion_param = balance_param;\n    downsample_size = box_length;\n    Rebuild_Logger.clear();           \n    termination_flag = false;\n    start_thread();\n}\n\nKD_TREE::~KD_TREE()\n{\n    stop_thread();\n    Delete_Storage_Disabled = true;\n    delete_tree_nodes(&Root_Node);\n    PointVector ().swap(PCL_Storage);\n    Rebuild_Logger.clear();           \n}\n\nvoid KD_TREE::Set_delete_criterion_param(float delete_param){\n    delete_criterion_param = delete_param;\n}\n\nvoid KD_TREE::Set_balance_criterion_param(float balance_param){\n    balance_criterion_param = balance_param;\n}\n\nvoid KD_TREE::set_downsample_param(float downsample_param){\n    downsample_size = downsample_param;\n}\n\nvoid KD_TREE::InitializeKDTree(float delete_param, float balance_param, float box_length){\n    Set_delete_criterion_param(delete_param);\n    Set_balance_criterion_param(balance_param);\n    set_downsample_param(box_length);\n}\n\nvoid KD_TREE::InitTreeNode(KD_TREE_NODE * root){\n    root->point.x = 0.0f;\n    root->point.y = 0.0f;\n    root->point.z = 0.0f;       \n    root->node_range_x[0] = 0.0f;\n    root->node_range_x[1] = 0.0f;\n    root->node_range_y[0] = 0.0f;\n    root->node_range_y[1] = 0.0f;    \n    root->node_range_z[0] = 0.0f;\n    root->node_range_z[1] = 0.0f;     \n    root->division_axis = 0;\n    root->father_ptr = nullptr;\n    root->left_son_ptr = nullptr;\n    root->right_son_ptr = nullptr;\n    root->TreeSize = 0;\n    root->invalid_point_num = 0;\n    root->down_del_num = 0;\n    root->point_deleted = false;\n    root->tree_deleted = false;\n    root->need_push_down_to_left = false;\n    root->need_push_down_to_right = false;\n    
root->point_downsample_deleted = false;\n    root->working_flag = false;\n    pthread_mutex_init(&(root->push_down_mutex_lock),NULL);\n}   \n\nint KD_TREE::size(){\n    int s = 0;\n    if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){\n        if (Root_Node != nullptr) {\n            return Root_Node->TreeSize;\n        } else {\n            return 0;\n        }\n    } else {\n        if (!pthread_mutex_trylock(&working_flag_mutex)){\n            s = Root_Node->TreeSize;\n            pthread_mutex_unlock(&working_flag_mutex);\n            return s;\n        } else {\n            return Treesize_tmp;\n        }\n    }\n}\n\nBoxPointType KD_TREE::tree_range(){\n    BoxPointType range;\n    if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){\n        if (Root_Node != nullptr) {\n            range.vertex_min[0] = Root_Node->node_range_x[0];\n            range.vertex_min[1] = Root_Node->node_range_y[0];\n            range.vertex_min[2] = Root_Node->node_range_z[0];\n            range.vertex_max[0] = Root_Node->node_range_x[1];\n            range.vertex_max[1] = Root_Node->node_range_y[1];\n            range.vertex_max[2] = Root_Node->node_range_z[1];\n        } else {\n            memset(&range, 0, sizeof(range));\n        }\n    } else {\n        if (!pthread_mutex_trylock(&working_flag_mutex)){\n            range.vertex_min[0] = Root_Node->node_range_x[0];\n            range.vertex_min[1] = Root_Node->node_range_y[0];\n            range.vertex_min[2] = Root_Node->node_range_z[0];\n            range.vertex_max[0] = Root_Node->node_range_x[1];\n            range.vertex_max[1] = Root_Node->node_range_y[1];\n            range.vertex_max[2] = Root_Node->node_range_z[1];\n            pthread_mutex_unlock(&working_flag_mutex);\n        } else {\n            memset(&range, 0, sizeof(range));\n        }\n    }\n    return range;\n}\n\nint KD_TREE::validnum(){\n    int s = 0;\n    if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){\n        if (Root_Node != 
nullptr)\n            return (Root_Node->TreeSize - Root_Node->invalid_point_num);\n        else \n            return 0;\n    } else {\n        if (!pthread_mutex_trylock(&working_flag_mutex)){\n            s = Root_Node->TreeSize-Root_Node->invalid_point_num;\n            pthread_mutex_unlock(&working_flag_mutex);\n            return s;\n        } else {\n            return -1;\n        }\n    }\n}\n\nvoid KD_TREE::root_alpha(float &alpha_bal, float &alpha_del){\n    if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){\n        alpha_bal = Root_Node->alpha_bal;\n        alpha_del = Root_Node->alpha_del;\n        return;\n    } else {\n        if (!pthread_mutex_trylock(&working_flag_mutex)){\n            alpha_bal = Root_Node->alpha_bal;\n            alpha_del = Root_Node->alpha_del;\n            pthread_mutex_unlock(&working_flag_mutex);\n            return;\n        } else {\n            alpha_bal = alpha_bal_tmp;\n            alpha_del = alpha_del_tmp;      \n            return;\n        }\n    }    \n}\n\nvoid KD_TREE::start_thread(){\n    pthread_mutex_init(&termination_flag_mutex_lock, NULL);   \n    pthread_mutex_init(&rebuild_ptr_mutex_lock, NULL);     \n    pthread_mutex_init(&rebuild_logger_mutex_lock, NULL);\n    pthread_mutex_init(&points_deleted_rebuild_mutex_lock, NULL); \n    pthread_mutex_init(&working_flag_mutex, NULL);\n    pthread_mutex_init(&search_flag_mutex, NULL);\n    pthread_create(&rebuild_thread, NULL, multi_thread_ptr, (void*) this);\n    printf(\"Multi thread started \\n\");    \n}\n\nvoid KD_TREE::stop_thread(){\n    pthread_mutex_lock(&termination_flag_mutex_lock);\n    termination_flag = true;\n    pthread_mutex_unlock(&termination_flag_mutex_lock);\n    if (rebuild_thread) pthread_join(rebuild_thread, NULL);\n    pthread_mutex_destroy(&termination_flag_mutex_lock);\n    pthread_mutex_destroy(&rebuild_logger_mutex_lock);\n    pthread_mutex_destroy(&rebuild_ptr_mutex_lock);\n    
pthread_mutex_destroy(&points_deleted_rebuild_mutex_lock);\n    pthread_mutex_destroy(&working_flag_mutex);\n    pthread_mutex_destroy(&search_flag_mutex);     \n}\n\nvoid * KD_TREE::multi_thread_ptr(void * arg){\n    KD_TREE * handle = (KD_TREE*) arg;\n    handle->multi_thread_rebuild();\n    return nullptr;    \n}    \n\nvoid KD_TREE::multi_thread_rebuild(){\n    bool terminated = false;\n    KD_TREE_NODE * father_ptr, ** new_node_ptr;\n    pthread_mutex_lock(&termination_flag_mutex_lock);\n    terminated = termination_flag;\n    pthread_mutex_unlock(&termination_flag_mutex_lock);\n    while (!terminated){\n        pthread_mutex_lock(&rebuild_ptr_mutex_lock);\n        pthread_mutex_lock(&working_flag_mutex);\n        if (Rebuild_Ptr != nullptr ){                    \n            /* Traverse and copy */\n            if (!Rebuild_Logger.empty()){\n                printf(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n ERROR!!! \\n\\n\\n\\n\\n\\n\\n\\n\\n\");\n            }\n            rebuild_flag = true;\n            if (*Rebuild_Ptr == Root_Node) {\n                Treesize_tmp = Root_Node->TreeSize;\n                Validnum_tmp = Root_Node->TreeSize - Root_Node->invalid_point_num;\n                alpha_bal_tmp = Root_Node->alpha_bal;\n                alpha_del_tmp = Root_Node->alpha_del;\n            }\n            KD_TREE_NODE * old_root_node = (*Rebuild_Ptr);                            \n            father_ptr = (*Rebuild_Ptr)->father_ptr;  \n            PointVector ().swap(Rebuild_PCL_Storage);\n            // Lock Search \n            pthread_mutex_lock(&search_flag_mutex);\n            while (search_mutex_counter != 0){\n                pthread_mutex_unlock(&search_flag_mutex);\n                usleep(1);             \n                pthread_mutex_lock(&search_flag_mutex);\n            }\n            search_mutex_counter = -1;\n            pthread_mutex_unlock(&search_flag_mutex);\n            // Lock deleted points cache\n            
pthread_mutex_lock(&points_deleted_rebuild_mutex_lock);    \n            flatten(*Rebuild_Ptr, Rebuild_PCL_Storage, MULTI_THREAD_REC);\n            // Unlock deleted points cache\n            pthread_mutex_unlock(&points_deleted_rebuild_mutex_lock);\n            // Unlock Search\n            pthread_mutex_lock(&search_flag_mutex);\n            search_mutex_counter = 0;\n            pthread_mutex_unlock(&search_flag_mutex);              \n            pthread_mutex_unlock(&working_flag_mutex);   \n            /* Rebuild and update missed operations*/\n            Operation_Logger_Type Operation;\n            KD_TREE_NODE * new_root_node = nullptr;  \n            if (int(Rebuild_PCL_Storage.size()) > 0){\n                BuildTree(&new_root_node, 0, Rebuild_PCL_Storage.size()-1, Rebuild_PCL_Storage);\n                // Rebuild has been done. Updates the blocked operations into the new tree\n                pthread_mutex_lock(&working_flag_mutex);\n                pthread_mutex_lock(&rebuild_logger_mutex_lock);\n                int tmp_counter = 0;\n                while (!Rebuild_Logger.empty()){\n                    Operation = Rebuild_Logger.front();\n                    max_queue_size = max(max_queue_size, Rebuild_Logger.size());\n                    Rebuild_Logger.pop();\n                    pthread_mutex_unlock(&rebuild_logger_mutex_lock);                  \n                    pthread_mutex_unlock(&working_flag_mutex);\n                    run_operation(&new_root_node, Operation);\n                    tmp_counter ++;\n                    if (tmp_counter % 10 == 0) usleep(1);\n                    pthread_mutex_lock(&working_flag_mutex);\n                    pthread_mutex_lock(&rebuild_logger_mutex_lock);               \n                }   \n               pthread_mutex_unlock(&rebuild_logger_mutex_lock);\n            }  \n            /* Replace to original tree*/          \n            // pthread_mutex_lock(&working_flag_mutex);\n            
pthread_mutex_lock(&search_flag_mutex);\n            while (search_mutex_counter != 0){\n                pthread_mutex_unlock(&search_flag_mutex);\n                usleep(1);             \n                pthread_mutex_lock(&search_flag_mutex);\n            }\n            search_mutex_counter = -1;\n            pthread_mutex_unlock(&search_flag_mutex);\n            if (father_ptr->left_son_ptr == *Rebuild_Ptr) {\n                father_ptr->left_son_ptr = new_root_node;\n            } else if (father_ptr->right_son_ptr == *Rebuild_Ptr){             \n                father_ptr->right_son_ptr = new_root_node;\n            } else {\n                throw \"Error: Father ptr incompatible with current node\\n\";\n            }\n            if (new_root_node != nullptr) new_root_node->father_ptr = father_ptr;\n            (*Rebuild_Ptr) = new_root_node;\n            int valid_old = old_root_node->TreeSize-old_root_node->invalid_point_num;\n            int valid_new = new_root_node->TreeSize-new_root_node->invalid_point_num;\n            if (father_ptr == STATIC_ROOT_NODE) Root_Node = STATIC_ROOT_NODE->left_son_ptr;\n            KD_TREE_NODE * update_root = *Rebuild_Ptr;\n            while (update_root != nullptr && update_root != Root_Node){\n                update_root = update_root->father_ptr;\n                if (update_root->working_flag) break;\n                if (update_root == update_root->father_ptr->left_son_ptr && update_root->father_ptr->need_push_down_to_left) break;\n                if (update_root == update_root->father_ptr->right_son_ptr && update_root->father_ptr->need_push_down_to_right) break;\n                Update(update_root);\n            }\n            pthread_mutex_lock(&search_flag_mutex);\n            search_mutex_counter = 0;\n            pthread_mutex_unlock(&search_flag_mutex);\n            Rebuild_Ptr = nullptr;\n            pthread_mutex_unlock(&working_flag_mutex);\n            rebuild_flag = false;                     \n            /* 
Delete discarded tree nodes */\n            delete_tree_nodes(&old_root_node);\n        } else {\n            pthread_mutex_unlock(&working_flag_mutex);             \n        }\n        pthread_mutex_unlock(&rebuild_ptr_mutex_lock);         \n        pthread_mutex_lock(&termination_flag_mutex_lock);\n        terminated = termination_flag;\n        pthread_mutex_unlock(&termination_flag_mutex_lock);\n        usleep(100); \n    }\n    printf(\"Rebuild thread terminated normally\\n\");    \n}\n\nvoid KD_TREE::run_operation(KD_TREE_NODE ** root, Operation_Logger_Type operation){\n    switch (operation.op)\n    {\n    case ADD_POINT:      \n        Add_by_point(root, operation.point, false, (*root)->division_axis);          \n        break;\n    case ADD_BOX:\n        Add_by_range(root, operation.boxpoint, false);\n        break;\n    case DELETE_POINT:\n        Delete_by_point(root, operation.point, false);\n        break;\n    case DELETE_BOX:\n        Delete_by_range(root, operation.boxpoint, false, false);\n        break;\n    case DOWNSAMPLE_DELETE:\n        Delete_by_range(root, operation.boxpoint, false, true);\n        break;\n    case PUSH_DOWN:\n        (*root)->tree_downsample_deleted |= operation.tree_downsample_deleted;\n        (*root)->point_downsample_deleted |= operation.tree_downsample_deleted;\n        (*root)->tree_deleted = operation.tree_deleted || (*root)->tree_downsample_deleted;\n        (*root)->point_deleted = (*root)->tree_deleted || (*root)->point_downsample_deleted;\n        if (operation.tree_downsample_deleted) (*root)->down_del_num = (*root)->TreeSize;\n        if (operation.tree_deleted) (*root)->invalid_point_num = (*root)->TreeSize;\n            else (*root)->invalid_point_num = (*root)->down_del_num;\n        (*root)->need_push_down_to_left = true;\n        (*root)->need_push_down_to_right = true;     \n        break;\n    default:\n        break;\n    }\n}\n\nvoid KD_TREE::Build(PointVector point_cloud){\n    if (Root_Node != 
nullptr){\n        delete_tree_nodes(&Root_Node);\n    }\n    if (point_cloud.size() == 0) return;\n    STATIC_ROOT_NODE = new KD_TREE_NODE;\n    InitTreeNode(STATIC_ROOT_NODE); \n    BuildTree(&STATIC_ROOT_NODE->left_son_ptr, 0, point_cloud.size()-1, point_cloud);\n    Update(STATIC_ROOT_NODE);\n    STATIC_ROOT_NODE->TreeSize = 0;\n    Root_Node = STATIC_ROOT_NODE->left_son_ptr;    \n}\n\nvoid KD_TREE::Nearest_Search(PointType point, int k_nearest, PointVector& Nearest_Points, vector<float> & Point_Distance, double max_dist){   \n    MANUAL_HEAP q(2*k_nearest);\n    q.clear();\n    vector<float> ().swap(Point_Distance);\n    if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){\n        Search(Root_Node, k_nearest, point, q, max_dist);\n    } else {\n        pthread_mutex_lock(&search_flag_mutex);\n        while (search_mutex_counter == -1)\n        {\n            pthread_mutex_unlock(&search_flag_mutex);\n            usleep(1);\n            pthread_mutex_lock(&search_flag_mutex);\n        }\n        search_mutex_counter += 1;\n        pthread_mutex_unlock(&search_flag_mutex);  \n        Search(Root_Node, k_nearest, point, q, max_dist);  \n        pthread_mutex_lock(&search_flag_mutex);\n        search_mutex_counter -= 1;\n        pthread_mutex_unlock(&search_flag_mutex);      \n    }\n    int k_found = min(k_nearest,int(q.size()));\n    PointVector ().swap(Nearest_Points);\n    vector<float> ().swap(Point_Distance);\n    for (int i=0;i < k_found;i++){\n        Nearest_Points.insert(Nearest_Points.begin(), q.top().point);\n        Point_Distance.insert(Point_Distance.begin(), q.top().dist);\n        q.pop();\n    }\n    return;\n}\n\nint KD_TREE::Add_Points(PointVector & PointToAdd, bool downsample_on){\n    int NewPointSize = PointToAdd.size();\n    int tree_size = size();\n    BoxPointType Box_of_Point;\n    PointType downsample_result, mid_point;\n    bool downsample_switch = downsample_on && DOWNSAMPLE_SWITCH;\n    float min_dist, tmp_dist;\n    int 
tmp_counter = 0;\n    for (int i=0; i<PointToAdd.size();i++){\n        if (downsample_switch){\n            Box_of_Point.vertex_min[0] = floor(PointToAdd[i].x/downsample_size)*downsample_size;\n            Box_of_Point.vertex_max[0] = Box_of_Point.vertex_min[0]+downsample_size;\n            Box_of_Point.vertex_min[1] = floor(PointToAdd[i].y/downsample_size)*downsample_size;\n            Box_of_Point.vertex_max[1] = Box_of_Point.vertex_min[1]+downsample_size; \n            Box_of_Point.vertex_min[2] = floor(PointToAdd[i].z/downsample_size)*downsample_size;\n            Box_of_Point.vertex_max[2] = Box_of_Point.vertex_min[2]+downsample_size;   \n            mid_point.x = Box_of_Point.vertex_min[0] + (Box_of_Point.vertex_max[0]-Box_of_Point.vertex_min[0])/2.0;\n            mid_point.y = Box_of_Point.vertex_min[1] + (Box_of_Point.vertex_max[1]-Box_of_Point.vertex_min[1])/2.0;\n            mid_point.z = Box_of_Point.vertex_min[2] + (Box_of_Point.vertex_max[2]-Box_of_Point.vertex_min[2])/2.0;\n            PointVector ().swap(Downsample_Storage);\n            Search_by_range(Root_Node, Box_of_Point, Downsample_Storage);\n            min_dist = calc_dist(PointToAdd[i],mid_point);\n            downsample_result = PointToAdd[i];                \n            for (int index = 0; index < Downsample_Storage.size(); index++){\n                tmp_dist = calc_dist(Downsample_Storage[index], mid_point);\n                if (tmp_dist < min_dist){\n                    min_dist = tmp_dist;\n                    downsample_result = Downsample_Storage[index];\n                }\n            }\n            if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){  \n                if (Downsample_Storage.size() > 1 || same_point(PointToAdd[i], downsample_result)){\n                    if (Downsample_Storage.size() > 0) Delete_by_range(&Root_Node, Box_of_Point, true, true);\n                    Add_by_point(&Root_Node, downsample_result, true, Root_Node->division_axis);\n                   
 tmp_counter ++;                      \n                }\n            } else {\n                if (Downsample_Storage.size() > 1 || same_point(PointToAdd[i], downsample_result)){\n                    Operation_Logger_Type  operation_delete, operation;\n                    operation_delete.boxpoint = Box_of_Point;\n                    operation_delete.op = DOWNSAMPLE_DELETE;\n                    operation.point = downsample_result;\n                    operation.op = ADD_POINT;\n                    pthread_mutex_lock(&working_flag_mutex);\n                    if (Downsample_Storage.size() > 0) Delete_by_range(&Root_Node, Box_of_Point, false , true);                                      \n                    Add_by_point(&Root_Node, downsample_result, false, Root_Node->division_axis);\n                    tmp_counter ++;\n                    if (rebuild_flag){\n                        pthread_mutex_lock(&rebuild_logger_mutex_lock);\n                        if (Downsample_Storage.size() > 0) Rebuild_Logger.push(operation_delete);\n                        Rebuild_Logger.push(operation);\n                        pthread_mutex_unlock(&rebuild_logger_mutex_lock);\n                    }\n                    pthread_mutex_unlock(&working_flag_mutex);\n                };\n            }\n        } else {\n            if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){\n                Add_by_point(&Root_Node, PointToAdd[i], true, Root_Node->division_axis);     \n            } else {\n                Operation_Logger_Type operation;\n                operation.point = PointToAdd[i];\n                operation.op = ADD_POINT;                \n                pthread_mutex_lock(&working_flag_mutex);\n                Add_by_point(&Root_Node, PointToAdd[i], false, Root_Node->division_axis);\n                if (rebuild_flag){\n                    pthread_mutex_lock(&rebuild_logger_mutex_lock);\n                    Rebuild_Logger.push(operation);\n                    
pthread_mutex_unlock(&rebuild_logger_mutex_lock);
                }
                pthread_mutex_unlock(&working_flag_mutex);       
            }
        }
    }
    return tmp_counter;
}

// Re-activates (un-deletes) every point lying inside each given axis-aligned
// box.  If a background rebuild currently owns the root, the operation is
// applied under working_flag_mutex with rebuilding disabled, and mirrored into
// Rebuild_Logger so the rebuild thread can replay it on its private copy.
void KD_TREE::Add_Point_Boxes(vector<BoxPointType> & BoxPoints){     
    for (int i=0;i < BoxPoints.size();i++){
        if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){
            Add_by_range(&Root_Node ,BoxPoints[i], true);
        } else {
            // Root subtree is being rebuilt concurrently: log the op for replay.
            Operation_Logger_Type operation;
            operation.boxpoint = BoxPoints[i];
            operation.op = ADD_BOX;
            pthread_mutex_lock(&working_flag_mutex);
            Add_by_range(&Root_Node ,BoxPoints[i], false);
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(operation);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);
            }               
            pthread_mutex_unlock(&working_flag_mutex);
        }    
    } 
    return;
}

// Lazily deletes the listed points (marks them deleted in place; physical
// removal happens later during rebuild/flatten).  Same rebuild-logger
// mirroring as above when the root is under concurrent rebuild.
void KD_TREE::Delete_Points(PointVector & PointToDel){        
    for (int i=0;i<PointToDel.size();i++){
        if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){               
            Delete_by_point(&Root_Node, PointToDel[i], true);
        } else {
            Operation_Logger_Type operation;
            operation.point = PointToDel[i];
            operation.op = DELETE_POINT;
            pthread_mutex_lock(&working_flag_mutex);        
            Delete_by_point(&Root_Node, PointToDel[i], false);
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(operation);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);
            }
            pthread_mutex_unlock(&working_flag_mutex);
        }      
    }      
    return;
}

// Lazily deletes all points inside each box.  Returns the number of
// previously-valid points that were invalidated by this call.
int KD_TREE::Delete_Point_Boxes(vector<BoxPointType> & BoxPoints){
    int tmp_counter = 0;
    for (int i=0;i < BoxPoints.size();i++){ 
        if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != Root_Node){               
            tmp_counter += Delete_by_range(&Root_Node ,BoxPoints[i], true, false);
        } else {
            Operation_Logger_Type operation;
            operation.boxpoint = BoxPoints[i];
            operation.op = DELETE_BOX;     
            pthread_mutex_lock(&working_flag_mutex); 
            tmp_counter += Delete_by_range(&Root_Node ,BoxPoints[i], false, false);
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(operation);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);
            }                
            pthread_mutex_unlock(&working_flag_mutex);
        }
    } 
    return tmp_counter;
}

// Drains both removed-point buffers (points recorded by normal deletions and
// points recorded by the multi-thread rebuild path) into removed_points,
// then clears them.  Guarded by points_deleted_rebuild_mutex_lock.
void KD_TREE::acquire_removed_points(PointVector & removed_points){
    pthread_mutex_lock(&points_deleted_rebuild_mutex_lock); 
    for (int i = 0; i < Points_deleted.size();i++){
        removed_points.push_back(Points_deleted[i]);
    }
    for (int i = 0; i < Multithread_Points_deleted.size();i++){
        removed_points.push_back(Multithread_Points_deleted[i]);
    }
    Points_deleted.clear();
    Multithread_Points_deleted.clear();
    pthread_mutex_unlock(&points_deleted_rebuild_mutex_lock);   
    return;
}

// Recursively builds a balanced subtree from Storage[l..r] (both inclusive):
// picks the dimension with the largest spread as the split axis, median-splits
// with nth_element, and recurses on both halves.
void KD_TREE::BuildTree(KD_TREE_NODE ** root, int l, int r, PointVector & Storage){
    if (l>r) return;
    *root = new KD_TREE_NODE;
    InitTreeNode(*root);
    int mid = (l+r)>>1;
    int div_axis = 0;
    int i;
    // Find the best division Axis
    float min_value[3] = {INFINITY, INFINITY, INFINITY};
    float max_value[3] = {-INFINITY, -INFINITY, -INFINITY};
    float dim_range[3] = {0,0,0};
    for (i=l;i<=r;i++){
        min_value[0] = min(min_value[0], Storage[i].x);
        min_value[1] = min(min_value[1], Storage[i].y);
        min_value[2] = min(min_value[2], 
Storage[i].z);
        max_value[0] = max(max_value[0], Storage[i].x);
        max_value[1] = max(max_value[1], Storage[i].y);
        max_value[2] = max(max_value[2], Storage[i].z);
    }
    // Select the longest dimension as division axis
    for (i=0;i<3;i++) dim_range[i] = max_value[i] - min_value[i];
    for (i=1;i<3;i++) if (dim_range[i] > dim_range[div_axis]) div_axis = i;
    // Divide by the division axis and recursively build.
    (*root)->division_axis = div_axis;
    switch (div_axis)
    {
    case 0:
        nth_element(begin(Storage)+l, begin(Storage)+mid, begin(Storage)+r+1, point_cmp_x);
        break;
    case 1:
        nth_element(begin(Storage)+l, begin(Storage)+mid, begin(Storage)+r+1, point_cmp_y);
        break;
    case 2:
        nth_element(begin(Storage)+l, begin(Storage)+mid, begin(Storage)+r+1, point_cmp_z);
        break;
    default:
        nth_element(begin(Storage)+l, begin(Storage)+mid, begin(Storage)+r+1, point_cmp_x);
        break;
    }  
    (*root)->point = Storage[mid]; 
    KD_TREE_NODE * left_son = nullptr, * right_son = nullptr;
    BuildTree(&left_son, l, mid-1, Storage);
    BuildTree(&right_son, mid+1, r, Storage);  
    (*root)->left_son_ptr = left_son;
    (*root)->right_son_ptr = right_son;
    Update((*root));  
    return;
}

// Rebalances the subtree at *root.  Large subtrees (>= Multi_Thread_Rebuild_
// Point_Num) are only *scheduled* by publishing Rebuild_Ptr for the rebuild
// thread; small subtrees are flattened (recording deleted points) and rebuilt
// in place on the calling thread.
void KD_TREE::Rebuild(KD_TREE_NODE ** root){    
    KD_TREE_NODE * father_ptr;
    if ((*root)->TreeSize >= Multi_Thread_Rebuild_Point_Num) { 
        // Non-blocking: if the lock is contended, simply skip scheduling.
        if (!pthread_mutex_trylock(&rebuild_ptr_mutex_lock)){     
            // Prefer the largest pending subtree for the background rebuild.
            if (Rebuild_Ptr == nullptr || ((*root)->TreeSize > (*Rebuild_Ptr)->TreeSize)) {
                Rebuild_Ptr = root;          
            }
            pthread_mutex_unlock(&rebuild_ptr_mutex_lock);
        }
    } else {
        father_ptr = (*root)->father_ptr;
        int size_rec = (*root)->TreeSize;  // NOTE(review): recorded but unused below.
        PCL_Storage.clear();
        flatten(*root, PCL_Storage, DELETE_POINTS_REC);
        delete_tree_nodes(root);
        BuildTree(root, 0, PCL_Storage.size()-1, PCL_Storage);
        if (*root != nullptr) (*root)->father_ptr = father_ptr;
        // Keep the static sentinel root pointing at the fresh real root.
        if (*root == Root_Node) STATIC_ROOT_NODE->left_son_ptr = *root;
    } 
    return;
}

// Lazily deletes every point of the subtree that lies inside boxpoint
// (half-open box test: min inclusive, max exclusive).  Returns how many
// previously-valid points were invalidated.  is_downsample additionally marks
// the downsample-deleted flags so Add_by_range cannot resurrect them.
int KD_TREE::Delete_by_range(KD_TREE_NODE ** root,  BoxPointType boxpoint, bool allow_rebuild, bool is_downsample){   
    if ((*root) == nullptr || (*root)->tree_deleted) return 0;
    (*root)->working_flag = true;
    Push_Down(*root);
    int tmp_counter = 0;
    // NOTE(review): these three early returns leave working_flag == true on
    // this node (it is only cleared at the end of the function) — confirm
    // whether that is intentional for the concurrent rebuild protocol.
    if (boxpoint.vertex_max[0] <= (*root)->node_range_x[0] || boxpoint.vertex_min[0] > (*root)->node_range_x[1]) return 0;
    if (boxpoint.vertex_max[1] <= (*root)->node_range_y[0] || boxpoint.vertex_min[1] > (*root)->node_range_y[1]) return 0;
    if (boxpoint.vertex_max[2] <= (*root)->node_range_z[0] || boxpoint.vertex_min[2] > (*root)->node_range_z[1]) return 0;
    // Box fully contains this subtree's bounding range: delete it wholesale.
    if (boxpoint.vertex_min[0] <= (*root)->node_range_x[0] && boxpoint.vertex_max[0] > (*root)->node_range_x[1] && boxpoint.vertex_min[1] <= (*root)->node_range_y[0] && boxpoint.vertex_max[1] > (*root)->node_range_y[1] && boxpoint.vertex_min[2] <= (*root)->node_range_z[0] && boxpoint.vertex_max[2] > (*root)->node_range_z[1]){
        (*root)->tree_deleted = true;
        (*root)->point_deleted = true;
        (*root)->need_push_down_to_left = true;
        (*root)->need_push_down_to_right = true;
        tmp_counter = (*root)->TreeSize - (*root)->invalid_point_num;
        (*root)->invalid_point_num = (*root)->TreeSize;
        if (is_downsample){
            (*root)->tree_downsample_deleted = true;
            (*root)->point_downsample_deleted = true;
            (*root)->down_del_num = (*root)->TreeSize;
        }
        return tmp_counter;
    }
    // Partial overlap: test this node's own point, then recurse.
    if (!(*root)->point_deleted && boxpoint.vertex_min[0] <= (*root)->point.x && boxpoint.vertex_max[0] > (*root)->point.x && boxpoint.vertex_min[1] <= (*root)->point.y && boxpoint.vertex_max[1] > (*root)->point.y && boxpoint.vertex_min[2] <= (*root)->point.z && boxpoint.vertex_max[2] > (*root)->point.z){
        (*root)->point_deleted = true;
        tmp_counter += 1;
        if (is_downsample) (*root)->point_downsample_deleted = true;
    }
    Operation_Logger_Type delete_box_log;
    struct timespec Timeout;    // NOTE(review): unused here.
    if (is_downsample) delete_box_log.op = DOWNSAMPLE_DELETE;
        else delete_box_log.op = DELETE_BOX;
    delete_box_log.boxpoint = boxpoint;
    if ((Rebuild_Ptr == nullptr) || (*root)->left_son_ptr != *Rebuild_Ptr){
        tmp_counter += Delete_by_range(&((*root)->left_son_ptr), boxpoint, allow_rebuild, is_downsample);
    } else {
        // Left child is being rebuilt: apply under lock and log for replay.
        pthread_mutex_lock(&working_flag_mutex);
        tmp_counter += Delete_by_range(&((*root)->left_son_ptr), boxpoint, false, is_downsample);
        if (rebuild_flag){
            pthread_mutex_lock(&rebuild_logger_mutex_lock);
            Rebuild_Logger.push(delete_box_log);
            pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
        }
        pthread_mutex_unlock(&working_flag_mutex);
    }
    if ((Rebuild_Ptr == nullptr) || (*root)->right_son_ptr != *Rebuild_Ptr){
        tmp_counter += Delete_by_range(&((*root)->right_son_ptr), boxpoint, allow_rebuild, is_downsample);
    } else {
        pthread_mutex_lock(&working_flag_mutex);
        tmp_counter += Delete_by_range(&((*root)->right_son_ptr), boxpoint, false, is_downsample);
        if (rebuild_flag){
            pthread_mutex_lock(&rebuild_logger_mutex_lock);
            Rebuild_Logger.push(delete_box_log);
            pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
        }
        pthread_mutex_unlock(&working_flag_mutex);
    }    
    Update(*root);     
    if (Rebuild_Ptr != nullptr && *Rebuild_Ptr == *root && (*root)->TreeSize < Multi_Thread_Rebuild_Point_Num) Rebuild_Ptr = nullptr; 
    // NOTE: bitwise & on two bools — same value as &&, but Criterion_Check is
    // always evaluated (no short circuit).
    bool need_rebuild = allow_rebuild & Criterion_Check((*root));
    if (need_rebuild) Rebuild(root);
    if ((*root) != nullptr) (*root)->working_flag = false;
    return tmp_counter;
}

// Lazily deletes one exact point (matched via same_point).  Descends a single
// branch chosen by the node's division axis; mirrors the op into
// Rebuild_Logger when descending into the subtree under concurrent rebuild.
void KD_TREE::Delete_by_point(KD_TREE_NODE ** root, PointType point, bool allow_rebuild){   
    if ((*root) == nullptr || (*root)->tree_deleted) return;
    (*root)->working_flag = true;
    Push_Down(*root);
    if (same_point((*root)->point, point) && !(*root)->point_deleted) {          
        (*root)->point_deleted = true;
        (*root)->invalid_point_num += 1;
        if ((*root)->invalid_point_num == (*root)->TreeSize) (*root)->tree_deleted = true;    
        return;
    }
    Operation_Logger_Type delete_log;
    struct timespec Timeout;    // NOTE(review): unused here.
    delete_log.op = DELETE_POINT;
    delete_log.point = point;     
    if (((*root)->division_axis == 0 && point.x < (*root)->point.x) || ((*root)->division_axis == 1 && point.y < (*root)->point.y) || ((*root)->division_axis == 2 && point.z < (*root)->point.z)){           
        if ((Rebuild_Ptr == nullptr) || (*root)->left_son_ptr != *Rebuild_Ptr){          
            Delete_by_point(&(*root)->left_son_ptr, point, allow_rebuild);         
        } else {
            pthread_mutex_lock(&working_flag_mutex);
            Delete_by_point(&(*root)->left_son_ptr, point,false);
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(delete_log);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
            }
            pthread_mutex_unlock(&working_flag_mutex);
        }
    } else {       
        if ((Rebuild_Ptr == nullptr) || (*root)->right_son_ptr != *Rebuild_Ptr){         
            Delete_by_point(&(*root)->right_son_ptr, point, allow_rebuild);         
        } else {
            pthread_mutex_lock(&working_flag_mutex); 
            Delete_by_point(&(*root)->right_son_ptr, point, false);
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(delete_log);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
            }
            pthread_mutex_unlock(&working_flag_mutex);
        }        
    }
    Update(*root);
    if (Rebuild_Ptr != nullptr && *Rebuild_Ptr == *root && (*root)->TreeSize < Multi_Thread_Rebuild_Point_Num) Rebuild_Ptr = nullptr; 
    bool need_rebuild = allow_rebuild & Criterion_Check((*root));
    if (need_rebuild) Rebuild(root);
    if ((*root) != nullptr) (*root)->working_flag = false;   
    return;
}

// Re-activates every point of the subtree inside boxpoint, except points that
// were downsample-deleted (those stay deleted).  Mirror image of
// Delete_by_range.
void KD_TREE::Add_by_range(KD_TREE_NODE ** root, BoxPointType boxpoint, bool allow_rebuild){
    if ((*root) == nullptr) return;
    (*root)->working_flag = true;
    Push_Down(*root);       
    if (boxpoint.vertex_max[0] <= (*root)->node_range_x[0] || boxpoint.vertex_min[0] > (*root)->node_range_x[1]) return;
    if (boxpoint.vertex_max[1] <= (*root)->node_range_y[0] || boxpoint.vertex_min[1] > (*root)->node_range_y[1]) return;
    if (boxpoint.vertex_max[2] <= (*root)->node_range_z[0] || boxpoint.vertex_min[2] > (*root)->node_range_z[1]) return;
    // Box fully contains the subtree: clear deletion flags wholesale, but
    // keep downsample-deleted state sticky.
    if (boxpoint.vertex_min[0] <= (*root)->node_range_x[0] && boxpoint.vertex_max[0] > (*root)->node_range_x[1] && boxpoint.vertex_min[1] <= (*root)->node_range_y[0] && boxpoint.vertex_max[1]> (*root)->node_range_y[1] && boxpoint.vertex_min[2] <= (*root)->node_range_z[0] && boxpoint.vertex_max[2] > (*root)->node_range_z[1]){
        (*root)->tree_deleted = false || (*root)->tree_downsample_deleted;
        (*root)->point_deleted = false || (*root)->point_downsample_deleted;
        (*root)->need_push_down_to_left = true;
        (*root)->need_push_down_to_right = true;
        (*root)->invalid_point_num = (*root)->down_del_num; 
        return;
    }
    if (boxpoint.vertex_min[0] <= (*root)->point.x && boxpoint.vertex_max[0] > (*root)->point.x && boxpoint.vertex_min[1] <= (*root)->point.y && 
boxpoint.vertex_max[1] > (*root)->point.y && boxpoint.vertex_min[2] <= (*root)->point.z && boxpoint.vertex_max[2] > (*root)->point.z){
        (*root)->point_deleted = (*root)->point_downsample_deleted;
    }
    Operation_Logger_Type add_box_log;
    struct timespec Timeout;    // NOTE(review): unused here.
    add_box_log.op = ADD_BOX;
    add_box_log.boxpoint = boxpoint;
    if ((Rebuild_Ptr == nullptr) || (*root)->left_son_ptr != *Rebuild_Ptr){
        Add_by_range(&((*root)->left_son_ptr), boxpoint, allow_rebuild);
    } else {
        // Left child under concurrent rebuild: apply under lock, log for replay.
        pthread_mutex_lock(&working_flag_mutex);
        Add_by_range(&((*root)->left_son_ptr), boxpoint, false);
        if (rebuild_flag){
            pthread_mutex_lock(&rebuild_logger_mutex_lock);
            Rebuild_Logger.push(add_box_log);
            pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
        }        
        pthread_mutex_unlock(&working_flag_mutex);
    }
    if ((Rebuild_Ptr == nullptr) || (*root)->right_son_ptr != *Rebuild_Ptr){
        Add_by_range(&((*root)->right_son_ptr), boxpoint, allow_rebuild);
    } else {
        pthread_mutex_lock(&working_flag_mutex);
        Add_by_range(&((*root)->right_son_ptr), boxpoint, false);
        if (rebuild_flag){
            pthread_mutex_lock(&rebuild_logger_mutex_lock);
            Rebuild_Logger.push(add_box_log);
            pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
        }
        pthread_mutex_unlock(&working_flag_mutex);
    }
    Update(*root);
    if (Rebuild_Ptr != nullptr && *Rebuild_Ptr == *root && (*root)->TreeSize < Multi_Thread_Rebuild_Point_Num) Rebuild_Ptr = nullptr; 
    // Bitwise & on bools: same value as &&, Criterion_Check always evaluated.
    bool need_rebuild = allow_rebuild & Criterion_Check((*root));
    if (need_rebuild) Rebuild(root);
    if ((*root) != nullptr) (*root)->working_flag = false;   
    return;
}

// Inserts one point.  A null slot becomes a fresh leaf whose division axis
// cycles from the father's axis; otherwise descend the branch selected by the
// current node's axis, logging the op when entering the subtree that is being
// rebuilt concurrently.
void KD_TREE::Add_by_point(KD_TREE_NODE ** root, PointType point, bool allow_rebuild, int father_axis){     
    if (*root == nullptr){
        *root = new KD_TREE_NODE;
        InitTreeNode(*root);
        (*root)->point = point;
        (*root)->division_axis = (father_axis + 1) % 3;
        Update(*root);
        return;
    }
    (*root)->working_flag = true;
    Operation_Logger_Type add_log;
    struct timespec Timeout;    // NOTE(review): unused here.
    add_log.op = ADD_POINT;
    add_log.point = point;
    Push_Down(*root);
    if (((*root)->division_axis == 0 && point.x < (*root)->point.x) || ((*root)->division_axis == 1 && point.y < (*root)->point.y) || ((*root)->division_axis == 2 && point.z < (*root)->point.z)){
        if ((Rebuild_Ptr == nullptr) || (*root)->left_son_ptr != *Rebuild_Ptr){          
            Add_by_point(&(*root)->left_son_ptr, point, allow_rebuild, (*root)->division_axis);
        } else {
            pthread_mutex_lock(&working_flag_mutex);
            Add_by_point(&(*root)->left_son_ptr, point, false,(*root)->division_axis);
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(add_log);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
            }
            pthread_mutex_unlock(&working_flag_mutex);            
        }
    } else {  
        if ((Rebuild_Ptr == nullptr) || (*root)->right_son_ptr != *Rebuild_Ptr){         
            Add_by_point(&(*root)->right_son_ptr, point, allow_rebuild,(*root)->division_axis);
        } else {
            pthread_mutex_lock(&working_flag_mutex);
            Add_by_point(&(*root)->right_son_ptr, point, false,(*root)->division_axis);       
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(add_log);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);                 
            }
            pthread_mutex_unlock(&working_flag_mutex); 
        }
    }
    Update(*root);   
    if (Rebuild_Ptr != nullptr && *Rebuild_Ptr == *root && (*root)->TreeSize < Multi_Thread_Rebuild_Point_Num) Rebuild_Ptr = nullptr; 
    bool need_rebuild = allow_rebuild & Criterion_Check((*root));
    if (need_rebuild) Rebuild(root); 
    if ((*root) != nullptr) (*root)->working_flag = false;   
    return;
}

// k-nearest-neighbour search with an optional radius cap (max_dist).
// Results accumulate in the bounded max-heap q; branches are pruned by the
// box-to-query distance.  When a child is the subtree under concurrent
// rebuild, the search registers itself via search_mutex_counter (negative
// value means the rebuild thread holds exclusive access).
void KD_TREE::Search(KD_TREE_NODE * root, int k_nearest, PointType point, MANUAL_HEAP &q, double max_dist){
    if (root == nullptr || root->tree_deleted) return;   
    // calc_box_dist is compared against max_dist^2, so it presumably returns
    // a squared distance — TODO confirm against its definition.
    double cur_dist = calc_box_dist(root, point);
    if (cur_dist > max_dist * max_dist) return;    
    int retval; 
    if (root->need_push_down_to_left || root->need_push_down_to_right) {
        retval = pthread_mutex_trylock(&(root->push_down_mutex_lock));
        if (retval == 0){
            Push_Down(root);
            pthread_mutex_unlock(&(root->push_down_mutex_lock));
        } else {
            // Another thread is pushing down: wait for it to finish.
            pthread_mutex_lock(&(root->push_down_mutex_lock));
            pthread_mutex_unlock(&(root->push_down_mutex_lock));
        }
    }
    if (!root->point_deleted){
        float dist = calc_dist(point, root->point);
        if (dist <= max_dist && (q.size() < k_nearest || dist < q.top().dist)){
            if (q.size() >= k_nearest) q.pop();
            PointType_CMP current_point{root->point, dist};                    
            q.push(current_point);            
        }
    }  
    int cur_search_counter;  // NOTE(review): unused here.
    float dist_left_node = calc_box_dist(root->left_son_ptr, point);
    float dist_right_node = calc_box_dist(root->right_son_ptr, point);
    // Precedence: reads as (size < k) || (left < top && right < top).
    if (q.size()< k_nearest || dist_left_node < q.top().dist && dist_right_node < q.top().dist){
        // Visit the nearer child first, then the other if it can still win.
        if (dist_left_node <= dist_right_node) {
            if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->left_son_ptr){
                Search(root->left_son_ptr, k_nearest, point, q, max_dist);                       
            } else {
                pthread_mutex_lock(&search_flag_mutex);
                while (search_mutex_counter == -1)
                {
                    // Rebuild thread holds exclusive access: spin-wait.
                    pthread_mutex_unlock(&search_flag_mutex);
                    usleep(1);
                    pthread_mutex_lock(&search_flag_mutex);
                }
                search_mutex_counter += 1;
                pthread_mutex_unlock(&search_flag_mutex);
                Search(root->left_son_ptr, k_nearest, point, q, max_dist);  
                pthread_mutex_lock(&search_flag_mutex);
                search_mutex_counter -= 1;
                pthread_mutex_unlock(&search_flag_mutex);
            }
            if (q.size() < k_nearest || dist_right_node < q.top().dist) {
                if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->right_son_ptr){
                    Search(root->right_son_ptr, k_nearest, point, q, max_dist);                       
                } else {
                    pthread_mutex_lock(&search_flag_mutex);
                    while (search_mutex_counter == -1)
                    {
                        pthread_mutex_unlock(&search_flag_mutex);
                        usleep(1);
                        pthread_mutex_lock(&search_flag_mutex);
                    }
                    search_mutex_counter += 1;
                    pthread_mutex_unlock(&search_flag_mutex);                    
                    Search(root->right_son_ptr, k_nearest, point, q, max_dist);  
                    pthread_mutex_lock(&search_flag_mutex);
                    search_mutex_counter -= 1;
                    pthread_mutex_unlock(&search_flag_mutex);
                }                
            }
        } else {
            // Right child is nearer: same logic, mirrored.
            if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->right_son_ptr){
                Search(root->right_son_ptr, k_nearest, point, q, max_dist);                       
            } else {
                pthread_mutex_lock(&search_flag_mutex);
                while (search_mutex_counter == -1)
                {
                    pthread_mutex_unlock(&search_flag_mutex);
                    usleep(1);
                    pthread_mutex_lock(&search_flag_mutex);
                }
                search_mutex_counter += 1;
                pthread_mutex_unlock(&search_flag_mutex);                   
                Search(root->right_son_ptr, k_nearest, point, q, max_dist);  
                pthread_mutex_lock(&search_flag_mutex);
                search_mutex_counter -= 1;
                pthread_mutex_unlock(&search_flag_mutex);
            }
            if (q.size() < k_nearest || dist_left_node < q.top().dist) {            
                if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->left_son_ptr){
                    Search(root->left_son_ptr, k_nearest, point, q, max_dist);                       
                } else {
                    pthread_mutex_lock(&search_flag_mutex);
                    while (search_mutex_counter == -1)
                    {
                        pthread_mutex_unlock(&search_flag_mutex);
                        usleep(1);
                        pthread_mutex_lock(&search_flag_mutex);
                    }
                    search_mutex_counter += 1;
                    pthread_mutex_unlock(&search_flag_mutex);  
                    Search(root->left_son_ptr, k_nearest, point, q, max_dist);  
                    pthread_mutex_lock(&search_flag_mutex);
                    search_mutex_counter -= 1;
                    pthread_mutex_unlock(&search_flag_mutex);
                }
            }
        }
    } else {
        // Heap already full: visit a child only if it can beat the current worst.
        if (dist_left_node < q.top().dist) {        
            if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->left_son_ptr){
                Search(root->left_son_ptr, k_nearest, point, q, max_dist);                       
            } else {
                pthread_mutex_lock(&search_flag_mutex);
                while (search_mutex_counter == -1)
                {
                    pthread_mutex_unlock(&search_flag_mutex);
                    usleep(1);
                    pthread_mutex_lock(&search_flag_mutex);
                }
                search_mutex_counter += 1;
                pthread_mutex_unlock(&search_flag_mutex);  
                Search(root->left_son_ptr, k_nearest, point, q, max_dist);  
                pthread_mutex_lock(&search_flag_mutex);
                search_mutex_counter -= 1;
                pthread_mutex_unlock(&search_flag_mutex);
            }
        }
        if (dist_right_node < q.top().dist) {
            if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->right_son_ptr){
                Search(root->right_son_ptr, k_nearest, point, q, max_dist);                       
            } else {
                pthread_mutex_lock(&search_flag_mutex);
                while (search_mutex_counter == -1)
                {
                    pthread_mutex_unlock(&search_flag_mutex);
                    usleep(1);
                    pthread_mutex_lock(&search_flag_mutex);
                }
                search_mutex_counter += 1;
                pthread_mutex_unlock(&search_flag_mutex);  
                Search(root->right_son_ptr, k_nearest, point, q, max_dist);
                pthread_mutex_lock(&search_flag_mutex);
                search_mutex_counter -= 1;
                pthread_mutex_unlock(&search_flag_mutex);
            }
        }
    }
    return;
}

// Collects all non-deleted points of the subtree that lie inside boxpoint
// (min inclusive, max exclusive) into Storage.  Fully-contained subtrees are
// flattened wholesale without recording removals.
void KD_TREE::Search_by_range(KD_TREE_NODE *root, BoxPointType boxpoint, PointVector & Storage){
    if (root == nullptr) return;
    Push_Down(root);       
    if (boxpoint.vertex_max[0] <= root->node_range_x[0] || boxpoint.vertex_min[0] > root->node_range_x[1]) return;
    if (boxpoint.vertex_max[1] <= root->node_range_y[0] || boxpoint.vertex_min[1] > root->node_range_y[1]) return;
    if (boxpoint.vertex_max[2] <= root->node_range_z[0] || boxpoint.vertex_min[2] > root->node_range_z[1]) return;
    if 
(boxpoint.vertex_min[0] <= root->node_range_x[0] && boxpoint.vertex_max[0] > root->node_range_x[1] && boxpoint.vertex_min[1] <= root->node_range_y[0] && boxpoint.vertex_max[1] > root->node_range_y[1] && boxpoint.vertex_min[2] <= root->node_range_z[0] && boxpoint.vertex_max[2] > root->node_range_z[1]){
        // Box fully contains the subtree: dump every valid point at once.
        flatten(root, Storage, NOT_RECORD);
        return;
    }
    if (boxpoint.vertex_min[0] <= root->point.x && boxpoint.vertex_max[0] > root->point.x && boxpoint.vertex_min[1] <= root->point.y && boxpoint.vertex_max[1] > root->point.y && boxpoint.vertex_min[2] <= root->point.z && boxpoint.vertex_max[2] > root->point.z){
        if (!root->point_deleted) Storage.push_back(root->point);
    }
    // Serialize with the rebuild thread when descending into its subtree.
    if ((Rebuild_Ptr == nullptr) || root->left_son_ptr != *Rebuild_Ptr){
        Search_by_range(root->left_son_ptr, boxpoint, Storage);
    } else {
        pthread_mutex_lock(&search_flag_mutex);
        Search_by_range(root->left_son_ptr, boxpoint, Storage);
        pthread_mutex_unlock(&search_flag_mutex);
    }
    if ((Rebuild_Ptr == nullptr) || root->right_son_ptr != *Rebuild_Ptr){
        Search_by_range(root->right_son_ptr, boxpoint, Storage);
    } else {
        pthread_mutex_lock(&search_flag_mutex);
        Search_by_range(root->right_son_ptr, boxpoint, Storage);
        pthread_mutex_unlock(&search_flag_mutex);
    }
    return;    
}

// Decides whether the subtree at root needs a rebuild: too many lazily
// deleted points (delete criterion) or too lopsided a child (balance
// criterion).  Small subtrees are never rebuilt.
bool KD_TREE::Criterion_Check(KD_TREE_NODE * root){
    if (root->TreeSize <= Minimal_Unbalanced_Tree_Size){
        return false;
    }
    float balance_evaluation = 0.0f;
    float delete_evaluation = 0.0f;
    KD_TREE_NODE * son_ptr = root->left_son_ptr;
    if (son_ptr == nullptr) son_ptr = root->right_son_ptr;
    delete_evaluation = float(root->invalid_point_num)/ root->TreeSize;
    balance_evaluation = float(son_ptr->TreeSize) / (root->TreeSize-1);  
    if (delete_evaluation > delete_criterion_param){
        return true;
    }
    if (balance_evaluation > balance_criterion_param || balance_evaluation < 1-balance_criterion_param){
        return true;
    } 
    return false;
}

// Propagates this node's lazy deletion / downsample-deletion flags one level
// down to its children (classic lazy "push down").  When a child is the
// subtree under concurrent rebuild, the update is applied under
// working_flag_mutex and logged as a PUSH_DOWN op for replay.
void KD_TREE::Push_Down(KD_TREE_NODE *root){
    if (root == nullptr) return;
    Operation_Logger_Type operation;
    operation.op = PUSH_DOWN;
    operation.tree_deleted = root->tree_deleted;
    operation.tree_downsample_deleted = root->tree_downsample_deleted;
    if (root->need_push_down_to_left && root->left_son_ptr != nullptr){
        if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->left_son_ptr){
            root->left_son_ptr->tree_downsample_deleted |= root->tree_downsample_deleted;
            root->left_son_ptr->point_downsample_deleted |= root->tree_downsample_deleted;
            root->left_son_ptr->tree_deleted = root->tree_deleted || root->left_son_ptr->tree_downsample_deleted;
            root->left_son_ptr->point_deleted = root->left_son_ptr->tree_deleted || root->left_son_ptr->point_downsample_deleted;
            if (root->tree_downsample_deleted) root->left_son_ptr->down_del_num = root->left_son_ptr->TreeSize;
            if (root->tree_deleted) root->left_son_ptr->invalid_point_num = root->left_son_ptr->TreeSize;
                else root->left_son_ptr->invalid_point_num = root->left_son_ptr->down_del_num;
            root->left_son_ptr->need_push_down_to_left = true;
            root->left_son_ptr->need_push_down_to_right = true;
            root->need_push_down_to_left = false;                
        } else {
            // Left child is being rebuilt: same flag propagation, locked + logged.
            pthread_mutex_lock(&working_flag_mutex);
            root->left_son_ptr->tree_downsample_deleted |= root->tree_downsample_deleted;
            root->left_son_ptr->point_downsample_deleted |= root->tree_downsample_deleted;
            root->left_son_ptr->tree_deleted = root->tree_deleted || root->left_son_ptr->tree_downsample_deleted;
            root->left_son_ptr->point_deleted = root->left_son_ptr->tree_deleted || root->left_son_ptr->point_downsample_deleted;
            if 
(root->tree_downsample_deleted) root->left_son_ptr->down_del_num = root->left_son_ptr->TreeSize;
            if (root->tree_deleted) root->left_son_ptr->invalid_point_num = root->left_son_ptr->TreeSize;
                else root->left_son_ptr->invalid_point_num = root->left_son_ptr->down_del_num;            
            root->left_son_ptr->need_push_down_to_left = true;
            root->left_son_ptr->need_push_down_to_right = true;
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(operation);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);
            }
            root->need_push_down_to_left = false;
            pthread_mutex_unlock(&working_flag_mutex);            
        }
    }
    // Right child: identical propagation, mirrored.
    if (root->need_push_down_to_right && root->right_son_ptr != nullptr){
        if (Rebuild_Ptr == nullptr || *Rebuild_Ptr != root->right_son_ptr){
            root->right_son_ptr->tree_downsample_deleted |= root->tree_downsample_deleted;
            root->right_son_ptr->point_downsample_deleted |= root->tree_downsample_deleted;
            root->right_son_ptr->tree_deleted = root->tree_deleted || root->right_son_ptr->tree_downsample_deleted;
            root->right_son_ptr->point_deleted = root->right_son_ptr->tree_deleted || root->right_son_ptr->point_downsample_deleted;
            if (root->tree_downsample_deleted) root->right_son_ptr->down_del_num = root->right_son_ptr->TreeSize;
            if (root->tree_deleted) root->right_son_ptr->invalid_point_num = root->right_son_ptr->TreeSize;
                else root->right_son_ptr->invalid_point_num = root->right_son_ptr->down_del_num;
            root->right_son_ptr->need_push_down_to_left = true;
            root->right_son_ptr->need_push_down_to_right = true;
            root->need_push_down_to_right = false;
        } else {
            pthread_mutex_lock(&working_flag_mutex);
            root->right_son_ptr->tree_downsample_deleted |= root->tree_downsample_deleted;
            root->right_son_ptr->point_downsample_deleted |= root->tree_downsample_deleted;
            root->right_son_ptr->tree_deleted = root->tree_deleted || root->right_son_ptr->tree_downsample_deleted;
            root->right_son_ptr->point_deleted = root->right_son_ptr->tree_deleted || root->right_son_ptr->point_downsample_deleted;
            if (root->tree_downsample_deleted) root->right_son_ptr->down_del_num = root->right_son_ptr->TreeSize;
            if (root->tree_deleted) root->right_son_ptr->invalid_point_num = root->right_son_ptr->TreeSize;
                else root->right_son_ptr->invalid_point_num = root->right_son_ptr->down_del_num;            
            root->right_son_ptr->need_push_down_to_left = true;
            root->right_son_ptr->need_push_down_to_right = true;
            if (rebuild_flag){
                pthread_mutex_lock(&rebuild_logger_mutex_lock);
                Rebuild_Logger.push(operation);
                pthread_mutex_unlock(&rebuild_logger_mutex_lock);
            }            
            root->need_push_down_to_right = false;
            pthread_mutex_unlock(&working_flag_mutex);
        }
    }
    return;
}

// Recomputes this node's cached aggregates (subtree size, invalid/downsample
// counters, deletion flags, bounding range) from its children.
void KD_TREE::Update(KD_TREE_NODE * root){
    KD_TREE_NODE * left_son_ptr = root->left_son_ptr;
    KD_TREE_NODE * right_son_ptr = root->right_son_ptr;
    float tmp_range_x[2] = {INFINITY, -INFINITY};
    float tmp_range_y[2] = {INFINITY, -INFINITY};
    float tmp_range_z[2] = {INFINITY, -INFINITY};
    // Update Tree Size   
    if (left_son_ptr != nullptr && right_son_ptr != nullptr){
        root->TreeSize = left_son_ptr->TreeSize + right_son_ptr->TreeSize + 1;
        root->invalid_point_num = left_son_ptr->invalid_point_num + right_son_ptr->invalid_point_num + (root->point_deleted? 
1:0);\n        root->down_del_num = left_son_ptr->down_del_num + right_son_ptr->down_del_num + (root->point_downsample_deleted? 1:0);\n        root->tree_downsample_deleted = left_son_ptr->tree_downsample_deleted & right_son_ptr->tree_downsample_deleted & root->point_downsample_deleted;\n        root->tree_deleted = left_son_ptr->tree_deleted && right_son_ptr->tree_deleted && root->point_deleted;\n        if (root->tree_deleted || (!left_son_ptr->tree_deleted && !right_son_ptr->tree_deleted && !root->point_deleted)){\n            tmp_range_x[0] = min(min(left_son_ptr->node_range_x[0],right_son_ptr->node_range_x[0]),root->point.x);\n            tmp_range_x[1] = max(max(left_son_ptr->node_range_x[1],right_son_ptr->node_range_x[1]),root->point.x);\n            tmp_range_y[0] = min(min(left_son_ptr->node_range_y[0],right_son_ptr->node_range_y[0]),root->point.y);\n            tmp_range_y[1] = max(max(left_son_ptr->node_range_y[1],right_son_ptr->node_range_y[1]),root->point.y);\n            tmp_range_z[0] = min(min(left_son_ptr->node_range_z[0],right_son_ptr->node_range_z[0]),root->point.z);\n            tmp_range_z[1] = max(max(left_son_ptr->node_range_z[1],right_son_ptr->node_range_z[1]),root->point.z);\n        } else {\n            if (!left_son_ptr->tree_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], left_son_ptr->node_range_x[0]);\n                tmp_range_x[1] = max(tmp_range_x[1], left_son_ptr->node_range_x[1]);\n                tmp_range_y[0] = min(tmp_range_y[0], left_son_ptr->node_range_y[0]);\n                tmp_range_y[1] = max(tmp_range_y[1], left_son_ptr->node_range_y[1]);\n                tmp_range_z[0] = min(tmp_range_z[0], left_son_ptr->node_range_z[0]);\n                tmp_range_z[1] = max(tmp_range_z[1], left_son_ptr->node_range_z[1]);\n            }\n            if (!right_son_ptr->tree_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], right_son_ptr->node_range_x[0]);\n                tmp_range_x[1] = 
max(tmp_range_x[1], right_son_ptr->node_range_x[1]);\n                tmp_range_y[0] = min(tmp_range_y[0], right_son_ptr->node_range_y[0]);\n                tmp_range_y[1] = max(tmp_range_y[1], right_son_ptr->node_range_y[1]);\n                tmp_range_z[0] = min(tmp_range_z[0], right_son_ptr->node_range_z[0]);\n                tmp_range_z[1] = max(tmp_range_z[1], right_son_ptr->node_range_z[1]);                \n            }\n            if (!root->point_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], root->point.x);\n                tmp_range_x[1] = max(tmp_range_x[1], root->point.x);\n                tmp_range_y[0] = min(tmp_range_y[0], root->point.y);\n                tmp_range_y[1] = max(tmp_range_y[1], root->point.y);\n                tmp_range_z[0] = min(tmp_range_z[0], root->point.z);\n                tmp_range_z[1] = max(tmp_range_z[1], root->point.z);                 \n            }\n        }\n    } else if (left_son_ptr != nullptr){\n        root->TreeSize = left_son_ptr->TreeSize + 1;\n        root->invalid_point_num = left_son_ptr->invalid_point_num + (root->point_deleted?1:0);\n        root->down_del_num = left_son_ptr->down_del_num + (root->point_downsample_deleted?1:0);\n        root->tree_downsample_deleted = left_son_ptr->tree_downsample_deleted & root->point_downsample_deleted;\n        root->tree_deleted = left_son_ptr->tree_deleted && root->point_deleted;\n        if (root->tree_deleted || (!left_son_ptr->tree_deleted && !root->point_deleted)){\n            tmp_range_x[0] = min(left_son_ptr->node_range_x[0],root->point.x);\n            tmp_range_x[1] = max(left_son_ptr->node_range_x[1],root->point.x);\n            tmp_range_y[0] = min(left_son_ptr->node_range_y[0],root->point.y);\n            tmp_range_y[1] = max(left_son_ptr->node_range_y[1],root->point.y); \n            tmp_range_z[0] = min(left_son_ptr->node_range_z[0],root->point.z);\n            tmp_range_z[1] = max(left_son_ptr->node_range_z[1],root->point.z);  \n       
 } else {\n            if (!left_son_ptr->tree_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], left_son_ptr->node_range_x[0]);\n                tmp_range_x[1] = max(tmp_range_x[1], left_son_ptr->node_range_x[1]);\n                tmp_range_y[0] = min(tmp_range_y[0], left_son_ptr->node_range_y[0]);\n                tmp_range_y[1] = max(tmp_range_y[1], left_son_ptr->node_range_y[1]);\n                tmp_range_z[0] = min(tmp_range_z[0], left_son_ptr->node_range_z[0]);\n                tmp_range_z[1] = max(tmp_range_z[1], left_son_ptr->node_range_z[1]);                \n            }\n            if (!root->point_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], root->point.x);\n                tmp_range_x[1] = max(tmp_range_x[1], root->point.x);\n                tmp_range_y[0] = min(tmp_range_y[0], root->point.y);\n                tmp_range_y[1] = max(tmp_range_y[1], root->point.y);\n                tmp_range_z[0] = min(tmp_range_z[0], root->point.z);\n                tmp_range_z[1] = max(tmp_range_z[1], root->point.z);                 \n            }            \n        }\n\n    } else if (right_son_ptr != nullptr){\n        root->TreeSize = right_son_ptr->TreeSize + 1;\n        root->invalid_point_num = right_son_ptr->invalid_point_num + (root->point_deleted? 1:0);\n        root->down_del_num = right_son_ptr->down_del_num + (root->point_downsample_deleted? 
1:0);        \n        root->tree_downsample_deleted = right_son_ptr->tree_downsample_deleted & root->point_downsample_deleted;\n        root->tree_deleted = right_son_ptr->tree_deleted && root->point_deleted;\n        if (root->tree_deleted || (!right_son_ptr->tree_deleted && !root->point_deleted)){\n            tmp_range_x[0] = min(right_son_ptr->node_range_x[0],root->point.x);\n            tmp_range_x[1] = max(right_son_ptr->node_range_x[1],root->point.x);\n            tmp_range_y[0] = min(right_son_ptr->node_range_y[0],root->point.y);\n            tmp_range_y[1] = max(right_son_ptr->node_range_y[1],root->point.y); \n            tmp_range_z[0] = min(right_son_ptr->node_range_z[0],root->point.z);\n            tmp_range_z[1] = max(right_son_ptr->node_range_z[1],root->point.z); \n        } else {\n            if (!right_son_ptr->tree_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], right_son_ptr->node_range_x[0]);\n                tmp_range_x[1] = max(tmp_range_x[1], right_son_ptr->node_range_x[1]);\n                tmp_range_y[0] = min(tmp_range_y[0], right_son_ptr->node_range_y[0]);\n                tmp_range_y[1] = max(tmp_range_y[1], right_son_ptr->node_range_y[1]);\n                tmp_range_z[0] = min(tmp_range_z[0], right_son_ptr->node_range_z[0]);\n                tmp_range_z[1] = max(tmp_range_z[1], right_son_ptr->node_range_z[1]);                \n            }\n            if (!root->point_deleted){\n                tmp_range_x[0] = min(tmp_range_x[0], root->point.x);\n                tmp_range_x[1] = max(tmp_range_x[1], root->point.x);\n                tmp_range_y[0] = min(tmp_range_y[0], root->point.y);\n                tmp_range_y[1] = max(tmp_range_y[1], root->point.y);\n                tmp_range_z[0] = min(tmp_range_z[0], root->point.z);\n                tmp_range_z[1] = max(tmp_range_z[1], root->point.z);                 \n            }            \n        }\n    } else {\n        root->TreeSize = 1;\n        root->invalid_point_num 
= (root->point_deleted? 1:0);\n        root->down_del_num = (root->point_downsample_deleted? 1:0);\n        root->tree_downsample_deleted = root->point_downsample_deleted;\n        root->tree_deleted = root->point_deleted;\n        tmp_range_x[0] = root->point.x;\n        tmp_range_x[1] = root->point.x;        \n        tmp_range_y[0] = root->point.y;\n        tmp_range_y[1] = root->point.y; \n        tmp_range_z[0] = root->point.z;\n        tmp_range_z[1] = root->point.z;                 \n    }\n    memcpy(root->node_range_x,tmp_range_x,sizeof(tmp_range_x));\n    memcpy(root->node_range_y,tmp_range_y,sizeof(tmp_range_y));\n    memcpy(root->node_range_z,tmp_range_z,sizeof(tmp_range_z));\n    if (left_son_ptr != nullptr) left_son_ptr -> father_ptr = root;\n    if (right_son_ptr != nullptr) right_son_ptr -> father_ptr = root;\n    if (root == Root_Node && root->TreeSize > 3){\n        KD_TREE_NODE * son_ptr = root->left_son_ptr;\n        if (son_ptr == nullptr) son_ptr = root->right_son_ptr;\n        float tmp_bal = float(son_ptr->TreeSize) / (root->TreeSize-1);\n        root->alpha_del = float(root->invalid_point_num)/ root->TreeSize;\n        root->alpha_bal = (tmp_bal>=0.5-EPSS)?tmp_bal:1-tmp_bal;\n    }   \n    return;\n}\n\nvoid KD_TREE::flatten(KD_TREE_NODE * root, PointVector &Storage, delete_point_storage_set storage_type){\n    if (root == nullptr) return;\n    Push_Down(root);\n    if (!root->point_deleted) {\n        Storage.push_back(root->point);\n    }\n    flatten(root->left_son_ptr, Storage, storage_type);\n    flatten(root->right_son_ptr, Storage, storage_type);\n    switch (storage_type)\n    {\n    case NOT_RECORD:\n        break;\n    case DELETE_POINTS_REC:\n        if (root->point_deleted && !root->point_downsample_deleted) {\n            Points_deleted.push_back(root->point);\n        }       \n        break;\n    case MULTI_THREAD_REC:\n        if (root->point_deleted  && !root->point_downsample_deleted) {\n            
Multithread_Points_deleted.push_back(root->point);\n        }\n        break;\n    default:\n        break;\n    }     \n    return;\n}\n\nvoid KD_TREE::delete_tree_nodes(KD_TREE_NODE ** root){ \n    if (*root == nullptr) return;\n    Push_Down(*root);    \n    delete_tree_nodes(&(*root)->left_son_ptr);\n    delete_tree_nodes(&(*root)->right_son_ptr);  \n    pthread_mutex_destroy( &(*root)->push_down_mutex_lock);              \n    delete *root;\n    *root = nullptr;                    \n\n    return;\n}\n\nbool KD_TREE::same_point(PointType a, PointType b){\n    return (fabs(a.x-b.x) < EPSS && fabs(a.y-b.y) < EPSS && fabs(a.z-b.z) < EPSS );\n}\n\nfloat KD_TREE::calc_dist(PointType a, PointType b){\n    float dist = 0.0f;\n    dist = (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z);\n    return dist;\n}\n\nfloat KD_TREE::calc_box_dist(KD_TREE_NODE * node, PointType point){\n    if (node == nullptr) return INFINITY;\n    float min_dist = 0.0;\n    if (point.x < node->node_range_x[0]) min_dist += (point.x - node->node_range_x[0])*(point.x - node->node_range_x[0]);\n    if (point.x > node->node_range_x[1]) min_dist += (point.x - node->node_range_x[1])*(point.x - node->node_range_x[1]);\n    if (point.y < node->node_range_y[0]) min_dist += (point.y - node->node_range_y[0])*(point.y - node->node_range_y[0]);\n    if (point.y > node->node_range_y[1]) min_dist += (point.y - node->node_range_y[1])*(point.y - node->node_range_y[1]);\n    if (point.z < node->node_range_z[0]) min_dist += (point.z - node->node_range_z[0])*(point.z - node->node_range_z[0]);\n    if (point.z > node->node_range_z[1]) min_dist += (point.z - node->node_range_z[1])*(point.z - node->node_range_z[1]);\n    return min_dist;\n}\n\nbool KD_TREE::point_cmp_x(PointType a, PointType b) { return a.x < b.x;}\nbool KD_TREE::point_cmp_y(PointType a, PointType b) { return a.y < b.y;}\nbool KD_TREE::point_cmp_z(PointType a, PointType b) { return a.z < b.z;}\n\n// Manual 
heap\nMANUAL_HEAP::MANUAL_HEAP(int max_capacity){\n    cap = max_capacity;\n    heap = new PointType_CMP[max_capacity];\n    heap_size = 0;\n}\n\nMANUAL_HEAP::~MANUAL_HEAP(){\n    delete[] heap;\n}\n\nvoid MANUAL_HEAP::pop(){\n    if (heap_size == 0) return;\n    heap[0] = heap[heap_size-1];\n    heap_size--;\n    MoveDown(0);\n    return;\n}\n        \nPointType_CMP MANUAL_HEAP::top(){\n    return heap[0];\n}\n        \nvoid MANUAL_HEAP::push(PointType_CMP point){\n    if (heap_size >= cap) return;\n    heap[heap_size] = point;\n    FloatUp(heap_size);\n    heap_size++;\n    return;\n}\n        \nint MANUAL_HEAP::size(){\n    return heap_size;\n}\n        \nvoid MANUAL_HEAP::clear(){\n    heap_size = 0;\n    return;\n}\n\nvoid MANUAL_HEAP::MoveDown(int heap_index){\n    int l = heap_index * 2 + 1;\n    PointType_CMP tmp = heap[heap_index];\n    while (l < heap_size){\n        if (l + 1 < heap_size && heap[l] < heap[l+1]) l++;\n        if (tmp < heap[l]){\n            heap[heap_index] = heap[l];\n            heap_index = l;\n            l = heap_index * 2 + 1;\n        } else break;\n    }\n    heap[heap_index] = tmp;\n    return;\n}\n        \nvoid MANUAL_HEAP::FloatUp(int heap_index){\n    int ancestor = (heap_index-1)/2;\n    PointType_CMP tmp = heap[heap_index];\n    while (heap_index > 0){\n        if (heap[ancestor] < tmp){\n            heap[heap_index] = heap[ancestor];\n            heap_index = ancestor;\n            ancestor = (heap_index-1)/2;\n        } else break;\n    }\n    heap[heap_index] = tmp;\n    return;\n}\n\n// manual queue\nvoid MANUAL_Q::clear(){\n    head = 0;\n    tail = 0;\n    counter = 0;\n    is_empty = true;\n    return;\n}\n\nvoid MANUAL_Q::pop(){\n    if (counter == 0) return;\n    head ++;\n    head %= Q_LEN;\n    counter --;\n    if (counter == 0) is_empty = true;\n    return;\n}\n\nOperation_Logger_Type MANUAL_Q::front(){\n    return q[head];\n}\n\nOperation_Logger_Type MANUAL_Q::back(){\n    return q[tail];\n}\n\nvoid 
MANUAL_Q::push(Operation_Logger_Type op){\n    q[tail] = op;\n    counter ++;\n    if (is_empty) is_empty = false;\n    tail ++;\n    tail %= Q_LEN;\n}\n\nbool MANUAL_Q::empty(){\n    return is_empty;\n}\n\nint MANUAL_Q::size(){\n    return counter;\n}\n\n\n"
  },
  {
    "path": "include/ikd-Tree/ikd_Tree.h",
    "content": "#pragma once\n#include <pcl/point_types.h>\n#include <Eigen/StdVector>\n#include <Eigen/Geometry>\n#include <stdio.h>\n#include <queue>\n#include <pthread.h>\n#include <chrono>\n#include <time.h>\n\n\n#define EPSS 1e-6\n#define Minimal_Unbalanced_Tree_Size 10\n#define Multi_Thread_Rebuild_Point_Num 1500\n#define DOWNSAMPLE_SWITCH true\n#define ForceRebuildPercentage 0.2\n#define Q_LEN 1000000\n\nusing namespace std;\n\ntypedef pcl::PointXYZINormal PointType;\ntypedef vector<PointType, Eigen::aligned_allocator<PointType>>  PointVector;\n\nconst PointType ZeroP;\n\nstruct KD_TREE_NODE\n{\n    PointType point;\n    int division_axis;  \n    int TreeSize = 1;\n    int invalid_point_num = 0;\n    int down_del_num = 0;\n    bool point_deleted = false;\n    bool tree_deleted = false; \n    bool point_downsample_deleted = false;\n    bool tree_downsample_deleted = false;\n    bool need_push_down_to_left = false;\n    bool need_push_down_to_right = false;\n    bool working_flag = false;\n    pthread_mutex_t push_down_mutex_lock;\n    float node_range_x[2], node_range_y[2], node_range_z[2];   \n    KD_TREE_NODE *left_son_ptr = nullptr;\n    KD_TREE_NODE *right_son_ptr = nullptr;\n    KD_TREE_NODE *father_ptr = nullptr;\n    // For paper data record\n    float alpha_del;\n    float alpha_bal;\n};\n\nstruct PointType_CMP{\n    PointType point;\n    float dist = 0.0;\n    PointType_CMP (PointType p = ZeroP, float d = INFINITY){\n        this->point = p;\n        this->dist = d;\n    };\n    bool operator < (const PointType_CMP &a)const{\n        if (fabs(dist - a.dist) < 1e-10) return point.x < a.point.x;\n          else return dist < a.dist;\n    }    \n};\n\nstruct BoxPointType{\n    float vertex_min[3];\n    float vertex_max[3];\n};\n\nenum operation_set {ADD_POINT, DELETE_POINT, DELETE_BOX, ADD_BOX, DOWNSAMPLE_DELETE, PUSH_DOWN};\n\nenum delete_point_storage_set {NOT_RECORD, DELETE_POINTS_REC, MULTI_THREAD_REC};\n\nstruct Operation_Logger_Type{\n    
PointType point;\n    BoxPointType boxpoint;\n    bool tree_deleted, tree_downsample_deleted;\n    operation_set op;\n};\n\nclass MANUAL_Q{\n    private:\n        int head = 0,tail = 0, counter = 0;\n        Operation_Logger_Type q[Q_LEN];\n        bool is_empty;\n    public:\n        void pop();\n        Operation_Logger_Type front();\n        Operation_Logger_Type back();\n        void clear();\n        void push(Operation_Logger_Type op);\n        bool empty();\n        int size();\n};\n\nclass MANUAL_HEAP\n{\n    public:\n        MANUAL_HEAP(int max_capacity = 100);\n        ~MANUAL_HEAP();\n        void pop();\n        PointType_CMP top();\n        void push(PointType_CMP point);\n        int size();\n        void clear();\n    private:\n        PointType_CMP * heap;\n        void MoveDown(int heap_index);\n        void FloatUp(int heap_index);\n        int heap_size = 0;\n        int cap = 0;\n};\n\n\nclass KD_TREE\n{\nprivate:\n    // Multi-thread Tree Rebuild\n    bool termination_flag = false;\n    bool rebuild_flag = false;\n    pthread_t rebuild_thread;\n    pthread_mutex_t termination_flag_mutex_lock, rebuild_ptr_mutex_lock, working_flag_mutex, search_flag_mutex;\n    pthread_mutex_t rebuild_logger_mutex_lock, points_deleted_rebuild_mutex_lock;\n    // queue<Operation_Logger_Type> Rebuild_Logger;\n    MANUAL_Q Rebuild_Logger;    \n    PointVector Rebuild_PCL_Storage;\n    KD_TREE_NODE ** Rebuild_Ptr = nullptr;\n    int search_mutex_counter = 0;\n    static void * multi_thread_ptr(void *arg);\n    void multi_thread_rebuild();\n    void start_thread();\n    void stop_thread();\n    void run_operation(KD_TREE_NODE ** root, Operation_Logger_Type operation);\n    // KD Tree Functions and augmented variables\n    int Treesize_tmp = 0, Validnum_tmp = 0;\n    float alpha_bal_tmp = 0.5, alpha_del_tmp = 0.0;\n    float delete_criterion_param = 0.5f;\n    float balance_criterion_param = 0.7f;\n    float downsample_size = 0.2f;\n    bool Delete_Storage_Disabled = 
false;\n    KD_TREE_NODE * STATIC_ROOT_NODE = nullptr;\n    PointVector Points_deleted;\n    PointVector Downsample_Storage;\n    PointVector Multithread_Points_deleted;\n    void InitTreeNode(KD_TREE_NODE * root);\n    void Test_Lock_States(KD_TREE_NODE *root);\n    void BuildTree(KD_TREE_NODE ** root, int l, int r, PointVector & Storage);\n    void Rebuild(KD_TREE_NODE ** root);\n    int Delete_by_range(KD_TREE_NODE ** root, BoxPointType boxpoint, bool allow_rebuild, bool is_downsample);\n    void Delete_by_point(KD_TREE_NODE ** root, PointType point, bool allow_rebuild);\n    void Add_by_point(KD_TREE_NODE ** root, PointType point, bool allow_rebuild, int father_axis);\n    void Add_by_range(KD_TREE_NODE ** root, BoxPointType boxpoint, bool allow_rebuild);\n    void Search(KD_TREE_NODE * root, int k_nearest, PointType point, MANUAL_HEAP &q, double max_dist);//priority_queue<PointType_CMP>\n    void Search_by_range(KD_TREE_NODE *root, BoxPointType boxpoint, PointVector &Storage);\n    bool Criterion_Check(KD_TREE_NODE * root);\n    void Push_Down(KD_TREE_NODE * root);\n    void Update(KD_TREE_NODE * root); \n    void delete_tree_nodes(KD_TREE_NODE ** root);\n    void downsample(KD_TREE_NODE ** root);\n    bool same_point(PointType a, PointType b);\n    float calc_dist(PointType a, PointType b);\n    float calc_box_dist(KD_TREE_NODE * node, PointType point);    \n    static bool point_cmp_x(PointType a, PointType b); \n    static bool point_cmp_y(PointType a, PointType b); \n    static bool point_cmp_z(PointType a, PointType b); \n\npublic:\n    KD_TREE(float delete_param = 0.5, float balance_param = 0.6 , float box_length = 0.2);\n    ~KD_TREE();\n    void Set_delete_criterion_param(float delete_param);\n    void Set_balance_criterion_param(float balance_param);\n    void set_downsample_param(float box_length);\n    void InitializeKDTree(float delete_param = 0.5, float balance_param = 0.7, float box_length = 0.2); \n    int size();\n    int validnum();\n    void 
root_alpha(float &alpha_bal, float &alpha_del);\n    void Build(PointVector point_cloud);\n    void Nearest_Search(PointType point, int k_nearest, PointVector &Nearest_Points, vector<float> & Point_Distance, double max_dist = INFINITY);\n    int Add_Points(PointVector & PointToAdd, bool downsample_on);\n    void Add_Point_Boxes(vector<BoxPointType> & BoxPoints);\n    void Delete_Points(PointVector & PointToDel);\n    int Delete_Point_Boxes(vector<BoxPointType> & BoxPoints);\n    void flatten(KD_TREE_NODE * root, PointVector &Storage, delete_point_storage_set storage_type);\n    void acquire_removed_points(PointVector & removed_points);\n    BoxPointType tree_range();\n    PointVector PCL_Storage;     \n    KD_TREE_NODE * Root_Node = nullptr;\n    int max_queue_size = 0;\n};\n"
  },
  {
    "path": "include/matplotlibcpp.h",
    "content": "#pragma once\n\n// Python headers must be included before any system headers, since\n// they define _POSIX_C_SOURCE\n#include <Python.h>\n\n#include <vector>\n#include <map>\n#include <array>\n#include <numeric>\n#include <algorithm>\n#include <stdexcept>\n#include <iostream>\n#include <cstdint> // <cstdint> requires c++11 support\n#include <functional>\n\n#ifndef WITHOUT_NUMPY\n#  define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n#  include <numpy/arrayobject.h>\n\n#  ifdef WITH_OPENCV\n#    include <opencv2/opencv.hpp>\n#  endif // WITH_OPENCV\n\n/*\n * A bunch of constants were removed in OpenCV 4 in favour of enum classes, so\n * define the ones we need here.\n */\n#  if CV_MAJOR_VERSION > 3\n#    define CV_BGR2RGB cv::COLOR_BGR2RGB\n#    define CV_BGRA2RGBA cv::COLOR_BGRA2RGBA\n#  endif\n#endif // WITHOUT_NUMPY\n\n#if PY_MAJOR_VERSION >= 3\n#  define PyString_FromString PyUnicode_FromString\n#  define PyInt_FromLong PyLong_FromLong\n#  define PyString_FromString PyUnicode_FromString\n#endif\n\n\nnamespace matplotlibcpp {\nnamespace detail {\n\nstatic std::string s_backend;\n\nstruct _interpreter {\n    PyObject* s_python_function_arrow;\n    PyObject *s_python_function_show;\n    PyObject *s_python_function_close;\n    PyObject *s_python_function_draw;\n    PyObject *s_python_function_pause;\n    PyObject *s_python_function_save;\n    PyObject *s_python_function_figure;\n    PyObject *s_python_function_fignum_exists;\n    PyObject *s_python_function_plot;\n    PyObject *s_python_function_quiver;\n    PyObject* s_python_function_contour;\n    PyObject *s_python_function_semilogx;\n    PyObject *s_python_function_semilogy;\n    PyObject *s_python_function_loglog;\n    PyObject *s_python_function_fill;\n    PyObject *s_python_function_fill_between;\n    PyObject *s_python_function_hist;\n    PyObject *s_python_function_imshow;\n    PyObject *s_python_function_scatter;\n    PyObject *s_python_function_boxplot;\n    PyObject 
*s_python_function_subplot;\n    PyObject *s_python_function_subplot2grid;\n    PyObject *s_python_function_legend;\n    PyObject *s_python_function_xlim;\n    PyObject *s_python_function_ion;\n    PyObject *s_python_function_ginput;\n    PyObject *s_python_function_ylim;\n    PyObject *s_python_function_title;\n    PyObject *s_python_function_axis;\n    PyObject *s_python_function_axvline;\n    PyObject *s_python_function_axvspan;\n    PyObject *s_python_function_xlabel;\n    PyObject *s_python_function_ylabel;\n    PyObject *s_python_function_gca;\n    PyObject *s_python_function_xticks;\n    PyObject *s_python_function_yticks;\n    PyObject* s_python_function_margins;\n    PyObject *s_python_function_tick_params;\n    PyObject *s_python_function_grid;\n    PyObject* s_python_function_cla;\n    PyObject *s_python_function_clf;\n    PyObject *s_python_function_errorbar;\n    PyObject *s_python_function_annotate;\n    PyObject *s_python_function_tight_layout;\n    PyObject *s_python_colormap;\n    PyObject *s_python_empty_tuple;\n    PyObject *s_python_function_stem;\n    PyObject *s_python_function_xkcd;\n    PyObject *s_python_function_text;\n    PyObject *s_python_function_suptitle;\n    PyObject *s_python_function_bar;\n    PyObject *s_python_function_colorbar;\n    PyObject *s_python_function_subplots_adjust;\n\n\n    /* For now, _interpreter is implemented as a singleton since its currently not possible to have\n       multiple independent embedded python interpreters without patching the python source code\n       or starting a separate process for each.\n        http://bytes.com/topic/python/answers/793370-multiple-independent-python-interpreters-c-c-program\n       */\n\n    static _interpreter& get() {\n        static _interpreter ctx;\n        return ctx;\n    }\n\n    PyObject* safe_import(PyObject* module, std::string fname) {\n        PyObject* fn = PyObject_GetAttrString(module, fname.c_str());\n\n        if (!fn)\n            throw 
std::runtime_error(std::string(\"Couldn't find required function: \") + fname);\n\n        if (!PyFunction_Check(fn))\n            throw std::runtime_error(fname + std::string(\" is unexpectedly not a PyFunction.\"));\n\n        return fn;\n    }\n\nprivate:\n\n#ifndef WITHOUT_NUMPY\n#  if PY_MAJOR_VERSION >= 3\n\n    void *import_numpy() {\n        import_array(); // initialize C-API\n        return NULL;\n    }\n\n#  else\n\n    void import_numpy() {\n        import_array(); // initialize C-API\n    }\n\n#  endif\n#endif\n\n    _interpreter() {\n\n        // optional but recommended\n#if PY_MAJOR_VERSION >= 3\n        wchar_t name[] = L\"plotting\";\n#else\n        char name[] = \"plotting\";\n#endif\n        Py_SetProgramName(name);\n        Py_Initialize();\n\n#ifndef WITHOUT_NUMPY\n        import_numpy(); // initialize numpy C-API\n#endif\n\n        PyObject* matplotlibname = PyString_FromString(\"matplotlib\");\n        PyObject* pyplotname = PyString_FromString(\"matplotlib.pyplot\");\n        PyObject* cmname  = PyString_FromString(\"matplotlib.cm\");\n        PyObject* pylabname  = PyString_FromString(\"pylab\");\n        if (!pyplotname || !pylabname || !matplotlibname || !cmname) {\n            throw std::runtime_error(\"couldnt create string\");\n        }\n\n        PyObject* matplotlib = PyImport_Import(matplotlibname);\n        Py_DECREF(matplotlibname);\n        if (!matplotlib) {\n            PyErr_Print();\n            throw std::runtime_error(\"Error loading module matplotlib!\");\n        }\n\n        // matplotlib.use() must be called *before* pylab, matplotlib.pyplot,\n        // or matplotlib.backends is imported for the first time\n        if (!s_backend.empty()) {\n            PyObject_CallMethod(matplotlib, const_cast<char*>(\"use\"), const_cast<char*>(\"s\"), s_backend.c_str());\n        }\n\n        PyObject* pymod = PyImport_Import(pyplotname);\n        Py_DECREF(pyplotname);\n        if (!pymod) { throw std::runtime_error(\"Error 
loading module matplotlib.pyplot!\"); }\n\n        s_python_colormap = PyImport_Import(cmname);\n        Py_DECREF(cmname);\n        if (!s_python_colormap) { throw std::runtime_error(\"Error loading module matplotlib.cm!\"); }\n\n        PyObject* pylabmod = PyImport_Import(pylabname);\n        Py_DECREF(pylabname);\n        if (!pylabmod) { throw std::runtime_error(\"Error loading module pylab!\"); }\n\n        s_python_function_arrow = safe_import(pymod, \"arrow\");\n        s_python_function_show = safe_import(pymod, \"show\");\n        s_python_function_close = safe_import(pymod, \"close\");\n        s_python_function_draw = safe_import(pymod, \"draw\");\n        s_python_function_pause = safe_import(pymod, \"pause\");\n        s_python_function_figure = safe_import(pymod, \"figure\");\n        s_python_function_fignum_exists = safe_import(pymod, \"fignum_exists\");\n        s_python_function_plot = safe_import(pymod, \"plot\");\n        s_python_function_quiver = safe_import(pymod, \"quiver\");\n        s_python_function_contour = safe_import(pymod, \"contour\");\n        s_python_function_semilogx = safe_import(pymod, \"semilogx\");\n        s_python_function_semilogy = safe_import(pymod, \"semilogy\");\n        s_python_function_loglog = safe_import(pymod, \"loglog\");\n        s_python_function_fill = safe_import(pymod, \"fill\");\n        s_python_function_fill_between = safe_import(pymod, \"fill_between\");\n        s_python_function_hist = safe_import(pymod,\"hist\");\n        s_python_function_scatter = safe_import(pymod,\"scatter\");\n        s_python_function_boxplot = safe_import(pymod,\"boxplot\");\n        s_python_function_subplot = safe_import(pymod, \"subplot\");\n        s_python_function_subplot2grid = safe_import(pymod, \"subplot2grid\");\n        s_python_function_legend = safe_import(pymod, \"legend\");\n        s_python_function_ylim = safe_import(pymod, \"ylim\");\n        s_python_function_title = safe_import(pymod, \"title\");\n        
s_python_function_axis = safe_import(pymod, \"axis\");\n        s_python_function_axvline = safe_import(pymod, \"axvline\");\n        s_python_function_axvspan = safe_import(pymod, \"axvspan\");\n        s_python_function_xlabel = safe_import(pymod, \"xlabel\");\n        s_python_function_ylabel = safe_import(pymod, \"ylabel\");\n        s_python_function_gca = safe_import(pymod, \"gca\");\n        s_python_function_xticks = safe_import(pymod, \"xticks\");\n        s_python_function_yticks = safe_import(pymod, \"yticks\");\n        s_python_function_margins = safe_import(pymod, \"margins\");\n        s_python_function_tick_params = safe_import(pymod, \"tick_params\");\n        s_python_function_grid = safe_import(pymod, \"grid\");\n        s_python_function_xlim = safe_import(pymod, \"xlim\");\n        s_python_function_ion = safe_import(pymod, \"ion\");\n        s_python_function_ginput = safe_import(pymod, \"ginput\");\n        s_python_function_save = safe_import(pylabmod, \"savefig\");\n        s_python_function_annotate = safe_import(pymod,\"annotate\");\n        s_python_function_cla = safe_import(pymod, \"cla\");\n        s_python_function_clf = safe_import(pymod, \"clf\");\n        s_python_function_errorbar = safe_import(pymod, \"errorbar\");\n        s_python_function_tight_layout = safe_import(pymod, \"tight_layout\");\n        s_python_function_stem = safe_import(pymod, \"stem\");\n        s_python_function_xkcd = safe_import(pymod, \"xkcd\");\n        s_python_function_text = safe_import(pymod, \"text\");\n        s_python_function_suptitle = safe_import(pymod, \"suptitle\");\n        s_python_function_bar = safe_import(pymod,\"bar\");\n        s_python_function_colorbar = PyObject_GetAttrString(pymod, \"colorbar\");\n        s_python_function_subplots_adjust = safe_import(pymod,\"subplots_adjust\");\n#ifndef WITHOUT_NUMPY\n        s_python_function_imshow = safe_import(pymod, \"imshow\");\n#endif\n        s_python_empty_tuple = PyTuple_New(0);\n    
}\n\n    ~_interpreter() {\n        Py_Finalize();\n    }\n};\n\n} // end namespace detail\n\n/// Select the backend\n///\n/// **NOTE:** This must be called before the first plot command to have\n/// any effect.\n///\n/// Mainly useful to select the non-interactive 'Agg' backend when running\n/// matplotlibcpp in headless mode, for example on a machine with no display.\n///\n/// See also: https://matplotlib.org/2.0.2/api/matplotlib_configuration_api.html#matplotlib.use\ninline void backend(const std::string& name)\n{\n    detail::s_backend = name;\n}\n\ninline bool annotate(std::string annotation, double x, double y)\n{\n    detail::_interpreter::get();\n\n    PyObject * xy = PyTuple_New(2);\n    PyObject * str = PyString_FromString(annotation.c_str());\n\n    PyTuple_SetItem(xy,0,PyFloat_FromDouble(x));\n    PyTuple_SetItem(xy,1,PyFloat_FromDouble(y));\n\n    PyObject* kwargs = PyDict_New();\n    PyDict_SetItemString(kwargs, \"xy\", xy);\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, str);\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_annotate, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\nnamespace detail {\n\n#ifndef WITHOUT_NUMPY\n// Type selector for numpy array conversion\ntemplate <typename T> struct select_npy_type { const static NPY_TYPES type = NPY_NOTYPE; }; //Default\ntemplate <> struct select_npy_type<double> { const static NPY_TYPES type = NPY_DOUBLE; };\ntemplate <> struct select_npy_type<float> { const static NPY_TYPES type = NPY_FLOAT; };\ntemplate <> struct select_npy_type<bool> { const static NPY_TYPES type = NPY_BOOL; };\ntemplate <> struct select_npy_type<int8_t> { const static NPY_TYPES type = NPY_INT8; };\ntemplate <> struct select_npy_type<int16_t> { const static NPY_TYPES type = NPY_SHORT; };\ntemplate <> struct select_npy_type<int32_t> { const static NPY_TYPES type = NPY_INT; };\ntemplate <> struct 
select_npy_type<int64_t> { const static NPY_TYPES type = NPY_INT64; };\ntemplate <> struct select_npy_type<uint8_t> { const static NPY_TYPES type = NPY_UINT8; };\ntemplate <> struct select_npy_type<uint16_t> { const static NPY_TYPES type = NPY_USHORT; };\ntemplate <> struct select_npy_type<uint32_t> { const static NPY_TYPES type = NPY_ULONG; };\ntemplate <> struct select_npy_type<uint64_t> { const static NPY_TYPES type = NPY_UINT64; };\n\n// Sanity checks; comment them out or change the numpy type below if you're compiling on\n// a platform where they don't apply\nstatic_assert(sizeof(long long) == 8);\ntemplate <> struct select_npy_type<long long> { const static NPY_TYPES type = NPY_INT64; };\nstatic_assert(sizeof(unsigned long long) == 8);\ntemplate <> struct select_npy_type<unsigned long long> { const static NPY_TYPES type = NPY_UINT64; };\n\ntemplate<typename Numeric>\nPyObject* get_array(const std::vector<Numeric>& v)\n{\n    npy_intp vsize = v.size();\n    NPY_TYPES type = select_npy_type<Numeric>::type;\n    if (type == NPY_NOTYPE) {\n        size_t memsize = v.size()*sizeof(double);\n        double* dp = static_cast<double*>(::malloc(memsize));\n        for (size_t i=0; i<v.size(); ++i)\n            dp[i] = v[i];\n        PyObject* varray = PyArray_SimpleNewFromData(1, &vsize, NPY_DOUBLE, dp);\n        PyArray_UpdateFlags(reinterpret_cast<PyArrayObject*>(varray), NPY_ARRAY_OWNDATA);\n        return varray;\n    }\n    \n    PyObject* varray = PyArray_SimpleNewFromData(1, &vsize, type, (void*)(v.data()));\n    return varray;\n}\n\n\ntemplate<typename Numeric>\nPyObject* get_2darray(const std::vector<::std::vector<Numeric>>& v)\n{\n    if (v.size() < 1) throw std::runtime_error(\"get_2d_array v too small\");\n\n    npy_intp vsize[2] = {static_cast<npy_intp>(v.size()),\n                         static_cast<npy_intp>(v[0].size())};\n\n    PyArrayObject *varray =\n        (PyArrayObject *)PyArray_SimpleNew(2, vsize, NPY_DOUBLE);\n\n    double *vd_begin = 
static_cast<double *>(PyArray_DATA(varray));\n\n    for (const ::std::vector<Numeric> &v_row : v) {\n      if (v_row.size() != static_cast<size_t>(vsize[1]))\n        throw std::runtime_error(\"Missmatched array size\");\n      std::copy(v_row.begin(), v_row.end(), vd_begin);\n      vd_begin += vsize[1];\n    }\n\n    return reinterpret_cast<PyObject *>(varray);\n}\n\n#else // fallback if we don't have numpy: copy every element of the given vector\n\ntemplate<typename Numeric>\nPyObject* get_array(const std::vector<Numeric>& v)\n{\n    PyObject* list = PyList_New(v.size());\n    for(size_t i = 0; i < v.size(); ++i) {\n        PyList_SetItem(list, i, PyFloat_FromDouble(v.at(i)));\n    }\n    return list;\n}\n\n#endif // WITHOUT_NUMPY\n\n// sometimes, for labels and such, we need string arrays\ninline PyObject * get_array(const std::vector<std::string>& strings)\n{\n  PyObject* list = PyList_New(strings.size());\n  for (std::size_t i = 0; i < strings.size(); ++i) {\n    PyList_SetItem(list, i, PyString_FromString(strings[i].c_str()));\n  }\n  return list;\n}\n\n// not all matplotlib need 2d arrays, some prefer lists of lists\ntemplate<typename Numeric>\nPyObject* get_listlist(const std::vector<std::vector<Numeric>>& ll)\n{\n  PyObject* listlist = PyList_New(ll.size());\n  for (std::size_t i = 0; i < ll.size(); ++i) {\n    PyList_SetItem(listlist, i, get_array(ll[i]));\n  }\n  return listlist;\n}\n\n} // namespace detail\n\n/// Plot a line through the given x and y data points..\n/// \n/// See: https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html\ntemplate<typename Numeric>\nbool plot(const std::vector<Numeric> &x, const std::vector<Numeric> &y, const std::map<std::string, std::string>& keywords)\n{\n    assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    // using numpy arrays\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    // construct positional args\n    PyObject* args = 
PyTuple_New(2);\n    PyTuple_SetItem(args, 0, xarray);\n    PyTuple_SetItem(args, 1, yarray);\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n    {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_plot, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\n// a non-numpy alternative for `detail::get_2darray()`.\n#ifndef WITHOUT_NUMPY\ntemplate <typename Numeric>\nvoid plot_surface(const std::vector<::std::vector<Numeric>> &x,\n                  const std::vector<::std::vector<Numeric>> &y,\n                  const std::vector<::std::vector<Numeric>> &z,\n                  const std::map<std::string, std::string> &keywords =\n                      std::map<std::string, std::string>())\n{\n  detail::_interpreter::get();\n\n  // We lazily load the modules here the first time this function is called\n  // because I'm not sure that we can assume \"matplotlib installed\" implies\n  // \"mpl_toolkits installed\" on all platforms, and we don't want to require\n  // it for people who don't need 3d plots.\n  static PyObject *mpl_toolkitsmod = nullptr, *axis3dmod = nullptr;\n  if (!mpl_toolkitsmod) {\n    detail::_interpreter::get();\n\n    PyObject* mpl_toolkits = PyString_FromString(\"mpl_toolkits\");\n    PyObject* axis3d = PyString_FromString(\"mpl_toolkits.mplot3d\");\n    if (!mpl_toolkits || !axis3d) { throw std::runtime_error(\"couldnt create string\"); }\n\n    mpl_toolkitsmod = PyImport_Import(mpl_toolkits);\n    Py_DECREF(mpl_toolkits);\n    if (!mpl_toolkitsmod) { throw std::runtime_error(\"Error loading module mpl_toolkits!\"); }\n\n    axis3dmod = PyImport_Import(axis3d);\n    Py_DECREF(axis3d);\n    if (!axis3dmod) { throw 
std::runtime_error(\"Error loading module mpl_toolkits.mplot3d!\"); }\n  }\n\n  assert(x.size() == y.size());\n  assert(y.size() == z.size());\n\n  // using numpy arrays\n  PyObject *xarray = detail::get_2darray(x);\n  PyObject *yarray = detail::get_2darray(y);\n  PyObject *zarray = detail::get_2darray(z);\n\n  // construct positional args\n  PyObject *args = PyTuple_New(3);\n  PyTuple_SetItem(args, 0, xarray);\n  PyTuple_SetItem(args, 1, yarray);\n  PyTuple_SetItem(args, 2, zarray);\n\n  // Build up the kw args.\n  PyObject *kwargs = PyDict_New();\n  PyDict_SetItemString(kwargs, \"rstride\", PyInt_FromLong(1));\n  PyDict_SetItemString(kwargs, \"cstride\", PyInt_FromLong(1));\n\n  PyObject *python_colormap_coolwarm = PyObject_GetAttrString(\n      detail::_interpreter::get().s_python_colormap, \"coolwarm\");\n\n  PyDict_SetItemString(kwargs, \"cmap\", python_colormap_coolwarm);\n\n  for (std::map<std::string, std::string>::const_iterator it = keywords.begin();\n       it != keywords.end(); ++it) {\n    PyDict_SetItemString(kwargs, it->first.c_str(),\n                         PyString_FromString(it->second.c_str()));\n  }\n\n\n  PyObject *fig =\n      PyObject_CallObject(detail::_interpreter::get().s_python_function_figure,\n                          detail::_interpreter::get().s_python_empty_tuple);\n  if (!fig) throw std::runtime_error(\"Call to figure() failed.\");\n\n  PyObject *gca_kwargs = PyDict_New();\n  PyDict_SetItemString(gca_kwargs, \"projection\", PyString_FromString(\"3d\"));\n\n  PyObject *gca = PyObject_GetAttrString(fig, \"gca\");\n  if (!gca) throw std::runtime_error(\"No gca\");\n  Py_INCREF(gca);\n  PyObject *axis = PyObject_Call(\n      gca, detail::_interpreter::get().s_python_empty_tuple, gca_kwargs);\n\n  if (!axis) throw std::runtime_error(\"No axis\");\n  Py_INCREF(axis);\n\n  Py_DECREF(gca);\n  Py_DECREF(gca_kwargs);\n\n  PyObject *plot_surface = PyObject_GetAttrString(axis, \"plot_surface\");\n  if (!plot_surface) throw 
std::runtime_error(\"No surface\");\n  Py_INCREF(plot_surface);\n  PyObject *res = PyObject_Call(plot_surface, args, kwargs);\n  if (!res) throw std::runtime_error(\"failed surface\");\n  Py_DECREF(plot_surface);\n\n  Py_DECREF(axis);\n  Py_DECREF(args);\n  Py_DECREF(kwargs);\n  if (res) Py_DECREF(res);\n}\n#endif // WITHOUT_NUMPY\n\ntemplate <typename Numeric>\nvoid plot3(const std::vector<Numeric> &x,\n                  const std::vector<Numeric> &y,\n                  const std::vector<Numeric> &z,\n                  const std::map<std::string, std::string> &keywords =\n                      std::map<std::string, std::string>())\n{\n  detail::_interpreter::get();\n\n  // Same as with plot_surface: We lazily load the modules here the first time \n  // this function is called because I'm not sure that we can assume \"matplotlib \n  // installed\" implies \"mpl_toolkits installed\" on all platforms, and we don't \n  // want to require it for people who don't need 3d plots.\n  static PyObject *mpl_toolkitsmod = nullptr, *axis3dmod = nullptr;\n  if (!mpl_toolkitsmod) {\n    detail::_interpreter::get();\n\n    PyObject* mpl_toolkits = PyString_FromString(\"mpl_toolkits\");\n    PyObject* axis3d = PyString_FromString(\"mpl_toolkits.mplot3d\");\n    if (!mpl_toolkits || !axis3d) { throw std::runtime_error(\"couldnt create string\"); }\n\n    mpl_toolkitsmod = PyImport_Import(mpl_toolkits);\n    Py_DECREF(mpl_toolkits);\n    if (!mpl_toolkitsmod) { throw std::runtime_error(\"Error loading module mpl_toolkits!\"); }\n\n    axis3dmod = PyImport_Import(axis3d);\n    Py_DECREF(axis3d);\n    if (!axis3dmod) { throw std::runtime_error(\"Error loading module mpl_toolkits.mplot3d!\"); }\n  }\n\n  assert(x.size() == y.size());\n  assert(y.size() == z.size());\n\n  PyObject *xarray = detail::get_array(x);\n  PyObject *yarray = detail::get_array(y);\n  PyObject *zarray = detail::get_array(z);\n\n  // construct positional args\n  PyObject *args = PyTuple_New(3);\n  
PyTuple_SetItem(args, 0, xarray);\n  PyTuple_SetItem(args, 1, yarray);\n  PyTuple_SetItem(args, 2, zarray);\n\n  // Build up the kw args.\n  PyObject *kwargs = PyDict_New();\n\n  for (std::map<std::string, std::string>::const_iterator it = keywords.begin();\n       it != keywords.end(); ++it) {\n    PyDict_SetItemString(kwargs, it->first.c_str(),\n                         PyString_FromString(it->second.c_str()));\n  }\n\n  PyObject *fig =\n      PyObject_CallObject(detail::_interpreter::get().s_python_function_figure,\n                          detail::_interpreter::get().s_python_empty_tuple);\n  if (!fig) throw std::runtime_error(\"Call to figure() failed.\");\n\n  PyObject *gca_kwargs = PyDict_New();\n  PyDict_SetItemString(gca_kwargs, \"projection\", PyString_FromString(\"3d\"));\n\n  PyObject *gca = PyObject_GetAttrString(fig, \"gca\");\n  if (!gca) throw std::runtime_error(\"No gca\");\n  Py_INCREF(gca);\n  PyObject *axis = PyObject_Call(\n      gca, detail::_interpreter::get().s_python_empty_tuple, gca_kwargs);\n\n  if (!axis) throw std::runtime_error(\"No axis\");\n  Py_INCREF(axis);\n\n  Py_DECREF(gca);\n  Py_DECREF(gca_kwargs);\n\n  PyObject *plot3 = PyObject_GetAttrString(axis, \"plot\");\n  if (!plot3) throw std::runtime_error(\"No 3D line plot\");\n  Py_INCREF(plot3);\n  PyObject *res = PyObject_Call(plot3, args, kwargs);\n  if (!res) throw std::runtime_error(\"Failed 3D line plot\");\n  Py_DECREF(plot3);\n\n  Py_DECREF(axis);\n  Py_DECREF(args);\n  Py_DECREF(kwargs);\n  if (res) Py_DECREF(res);\n}\n\ntemplate<typename Numeric>\nbool stem(const std::vector<Numeric> &x, const std::vector<Numeric> &y, const std::map<std::string, std::string>& keywords)\n{\n    assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    // using numpy arrays\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    // construct positional args\n    PyObject* args = PyTuple_New(2);\n    PyTuple_SetItem(args, 0, 
xarray);\n    PyTuple_SetItem(args, 1, yarray);\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for (std::map<std::string, std::string>::const_iterator it =\n            keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(),\n                PyString_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(\n            detail::_interpreter::get().s_python_function_stem, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    if (res)\n        Py_DECREF(res);\n\n    return res;\n}\n\ntemplate< typename Numeric >\nbool fill(const std::vector<Numeric>& x, const std::vector<Numeric>& y, const std::map<std::string, std::string>& keywords)\n{\n    assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    // using numpy arrays\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    // construct positional args\n    PyObject* args = PyTuple_New(2);\n    PyTuple_SetItem(args, 0, xarray);\n    PyTuple_SetItem(args, 1, yarray);\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for (auto it = keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_fill, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n\n    if (res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate< typename Numeric >\nbool fill_between(const std::vector<Numeric>& x, const std::vector<Numeric>& y1, const std::vector<Numeric>& y2, const std::map<std::string, std::string>& keywords)\n{\n    assert(x.size() == y1.size());\n    assert(x.size() == y2.size());\n\n    detail::_interpreter::get();\n\n    // using numpy arrays\n    PyObject* xarray = detail::get_array(x);\n    PyObject* y1array = detail::get_array(y1);\n    PyObject* 
y2array = detail::get_array(y2);\n\n    // construct positional args\n    PyObject* args = PyTuple_New(3);\n    PyTuple_SetItem(args, 0, xarray);\n    PyTuple_SetItem(args, 1, y1array);\n    PyTuple_SetItem(args, 2, y2array);\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_fill_between, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate <typename Numeric>\nbool arrow(Numeric x, Numeric y, Numeric end_x, Numeric end_y, const std::string& fc = \"r\",\n           const std::string ec = \"k\", Numeric head_length = 0.25, Numeric head_width = 0.1625) {\n    PyObject* obj_x = PyFloat_FromDouble(x);\n    PyObject* obj_y = PyFloat_FromDouble(y);\n    PyObject* obj_end_x = PyFloat_FromDouble(end_x);\n    PyObject* obj_end_y = PyFloat_FromDouble(end_y);\n\n    PyObject* kwargs = PyDict_New();\n    PyDict_SetItemString(kwargs, \"fc\", PyString_FromString(fc.c_str()));\n    PyDict_SetItemString(kwargs, \"ec\", PyString_FromString(ec.c_str()));\n    PyDict_SetItemString(kwargs, \"head_width\", PyFloat_FromDouble(head_width));\n    PyDict_SetItemString(kwargs, \"head_length\", PyFloat_FromDouble(head_length));\n\n    PyObject* plot_args = PyTuple_New(4);\n    PyTuple_SetItem(plot_args, 0, obj_x);\n    PyTuple_SetItem(plot_args, 1, obj_y);\n    PyTuple_SetItem(plot_args, 2, obj_end_x);\n    PyTuple_SetItem(plot_args, 3, obj_end_y);\n\n    PyObject* res =\n            PyObject_Call(detail::_interpreter::get().s_python_function_arrow, plot_args, kwargs);\n\n    Py_DECREF(plot_args);\n    Py_DECREF(kwargs);\n    if (res)\n        Py_DECREF(res);\n\n    return res;\n}\n\ntemplate< typename 
Numeric>\nbool hist(const std::vector<Numeric>& y, long bins=10,std::string color=\"b\",\n          double alpha=1.0, bool cumulative=false)\n{\n    detail::_interpreter::get();\n\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* kwargs = PyDict_New();\n    PyDict_SetItemString(kwargs, \"bins\", PyLong_FromLong(bins));\n    PyDict_SetItemString(kwargs, \"color\", PyString_FromString(color.c_str()));\n    PyDict_SetItemString(kwargs, \"alpha\", PyFloat_FromDouble(alpha));\n    PyDict_SetItemString(kwargs, \"cumulative\", cumulative ? Py_True : Py_False);\n\n    PyObject* plot_args = PyTuple_New(1);\n\n    PyTuple_SetItem(plot_args, 0, yarray);\n\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_hist, plot_args, kwargs);\n\n\n    Py_DECREF(plot_args);\n    Py_DECREF(kwargs);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\n#ifndef WITHOUT_NUMPY\nnamespace detail {\n\ninline void imshow(void *ptr, const NPY_TYPES type, const int rows, const int columns, const int colors, const std::map<std::string, std::string> &keywords, PyObject** out)\n{\n    assert(type == NPY_UINT8 || type == NPY_FLOAT);\n    assert(colors == 1 || colors == 3 || colors == 4);\n\n    detail::_interpreter::get();\n\n    // construct args\n    npy_intp dims[3] = { rows, columns, colors };\n    PyObject *args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, PyArray_SimpleNewFromData(colors == 1 ? 
2 : 3, dims, type, ptr));\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n    {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject *res = PyObject_Call(detail::_interpreter::get().s_python_function_imshow, args, kwargs);\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    if (!res)\n        throw std::runtime_error(\"Call to imshow() failed\");\n    if (out)\n        *out = res;\n    else\n        Py_DECREF(res);\n}\n\n} // namespace detail\n\ninline void imshow(const unsigned char *ptr, const int rows, const int columns, const int colors, const std::map<std::string, std::string> &keywords = {}, PyObject** out = nullptr)\n{\n    detail::imshow((void *) ptr, NPY_UINT8, rows, columns, colors, keywords, out);\n}\n\ninline void imshow(const float *ptr, const int rows, const int columns, const int colors, const std::map<std::string, std::string> &keywords = {}, PyObject** out = nullptr)\n{\n    detail::imshow((void *) ptr, NPY_FLOAT, rows, columns, colors, keywords, out);\n}\n\n#ifdef WITH_OPENCV\nvoid imshow(const cv::Mat &image, const std::map<std::string, std::string> &keywords = {})\n{\n    // Convert underlying type of matrix, if needed\n    cv::Mat image2;\n    NPY_TYPES npy_type = NPY_UINT8;\n    switch (image.type() & CV_MAT_DEPTH_MASK) {\n    case CV_8U:\n        image2 = image;\n        break;\n    case CV_32F:\n        image2 = image;\n        npy_type = NPY_FLOAT;\n        break;\n    default:\n        image.convertTo(image2, CV_MAKETYPE(CV_8U, image.channels()));\n    }\n\n    // If color image, convert from BGR to RGB\n    switch (image2.channels()) {\n    case 3:\n        cv::cvtColor(image2, image2, CV_BGR2RGB);\n        break;\n    case 4:\n        cv::cvtColor(image2, image2, CV_BGRA2RGBA);\n    }\n\n    detail::imshow(image2.data, npy_type, 
image2.rows, image2.cols, image2.channels(), keywords);\n}\n#endif // WITH_OPENCV\n#endif // WITHOUT_NUMPY\n\ntemplate<typename NumericX, typename NumericY>\nbool scatter(const std::vector<NumericX>& x,\n             const std::vector<NumericY>& y,\n             const double s=1.0, // The marker size in points**2\n             const std::map<std::string, std::string> & keywords = {})\n{\n    detail::_interpreter::get();\n\n    assert(x.size() == y.size());\n\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* kwargs = PyDict_New();\n    PyDict_SetItemString(kwargs, \"s\", PyLong_FromLong(s));\n    for (const auto& it : keywords)\n    {\n        PyDict_SetItemString(kwargs, it.first.c_str(), PyString_FromString(it.second.c_str()));\n    }\n\n    PyObject* plot_args = PyTuple_New(2);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_scatter, plot_args, kwargs);\n\n    Py_DECREF(plot_args);\n    Py_DECREF(kwargs);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename Numeric>\nbool boxplot(const std::vector<std::vector<Numeric>>& data,\n             const std::vector<std::string>& labels = {},\n             const std::map<std::string, std::string> & keywords = {})\n{\n    detail::_interpreter::get();\n\n    PyObject* listlist = detail::get_listlist(data);\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, listlist);\n\n    PyObject* kwargs = PyDict_New();\n\n    // kwargs needs the labels, if there are (the correct number of) labels\n    if (!labels.empty() && labels.size() == data.size()) {\n        PyDict_SetItemString(kwargs, \"labels\", detail::get_array(labels));\n    }\n\n    // take care of the remaining keywords\n    for (const auto& it : keywords)\n    {\n        PyDict_SetItemString(kwargs, it.first.c_str(), 
PyString_FromString(it.second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_boxplot, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename Numeric>\nbool boxplot(const std::vector<Numeric>& data,\n             const std::map<std::string, std::string> & keywords = {})\n{\n    detail::_interpreter::get();\n\n    PyObject* vector = detail::get_array(data);\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, vector);\n\n    PyObject* kwargs = PyDict_New();\n    for (const auto& it : keywords)\n    {\n        PyDict_SetItemString(kwargs, it.first.c_str(), PyString_FromString(it.second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_boxplot, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate <typename Numeric>\nbool bar(const std::vector<Numeric> &               x,\n         const std::vector<Numeric> &               y,\n         std::string                                ec       = \"black\",\n         std::string                                ls       = \"-\",\n         double                                     lw       = 1.0,\n         const std::map<std::string, std::string> & keywords = {})\n{\n  detail::_interpreter::get();\n\n  PyObject * xarray = detail::get_array(x);\n  PyObject * yarray = detail::get_array(y);\n\n  PyObject * kwargs = PyDict_New();\n\n  PyDict_SetItemString(kwargs, \"ec\", PyString_FromString(ec.c_str()));\n  PyDict_SetItemString(kwargs, \"ls\", PyString_FromString(ls.c_str()));\n  PyDict_SetItemString(kwargs, \"lw\", PyFloat_FromDouble(lw));\n\n  for (std::map<std::string, std::string>::const_iterator it =\n         keywords.begin();\n       it != keywords.end();\n       ++it) {\n    PyDict_SetItemString(\n      kwargs, it->first.c_str(), 
PyUnicode_FromString(it->second.c_str()));\n  }\n\n  PyObject * plot_args = PyTuple_New(2);\n  PyTuple_SetItem(plot_args, 0, xarray);\n  PyTuple_SetItem(plot_args, 1, yarray);\n\n  PyObject * res = PyObject_Call(\n    detail::_interpreter::get().s_python_function_bar, plot_args, kwargs);\n\n  Py_DECREF(plot_args);\n  Py_DECREF(kwargs);\n  if (res) Py_DECREF(res);\n\n  return res;\n}\n\ntemplate <typename Numeric>\nbool bar(const std::vector<Numeric> &               y,\n         std::string                                ec       = \"black\",\n         std::string                                ls       = \"-\",\n         double                                     lw       = 1.0,\n         const std::map<std::string, std::string> & keywords = {})\n{\n  using T = typename std::remove_reference<decltype(y)>::type::value_type;\n\n  detail::_interpreter::get();\n\n  std::vector<T> x;\n  for (std::size_t i = 0; i < y.size(); i++) { x.push_back(i); }\n\n  return bar(x, y, ec, ls, lw, keywords);\n}\n\ninline bool subplots_adjust(const std::map<std::string, double>& keywords = {})\n{\n    detail::_interpreter::get();\n\n    PyObject* kwargs = PyDict_New();\n    for (std::map<std::string, double>::const_iterator it =\n            keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(),\n                             PyFloat_FromDouble(it->second));\n    }\n\n\n    PyObject* plot_args = PyTuple_New(0);\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_subplots_adjust, plot_args, kwargs);\n\n    Py_DECREF(plot_args);\n    Py_DECREF(kwargs);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate< typename Numeric>\nbool named_hist(std::string label,const std::vector<Numeric>& y, long bins=10, std::string color=\"b\", double alpha=1.0)\n{\n    detail::_interpreter::get();\n\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* kwargs = PyDict_New();\n    PyDict_SetItemString(kwargs, 
\"label\", PyString_FromString(label.c_str()));\n    PyDict_SetItemString(kwargs, \"bins\", PyLong_FromLong(bins));\n    PyDict_SetItemString(kwargs, \"color\", PyString_FromString(color.c_str()));\n    PyDict_SetItemString(kwargs, \"alpha\", PyFloat_FromDouble(alpha));\n\n\n    PyObject* plot_args = PyTuple_New(1);\n    PyTuple_SetItem(plot_args, 0, yarray);\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_hist, plot_args, kwargs);\n\n    Py_DECREF(plot_args);\n    Py_DECREF(kwargs);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename NumericX, typename NumericY>\nbool plot(const std::vector<NumericX>& x, const std::vector<NumericY>& y, const std::string& s = \"\")\n{\n    assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* pystring = PyString_FromString(s.c_str());\n\n    PyObject* plot_args = PyTuple_New(3);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, pystring);\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_plot, plot_args);\n\n    Py_DECREF(plot_args);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate <typename NumericX, typename NumericY, typename NumericZ>\nbool contour(const std::vector<NumericX>& x, const std::vector<NumericY>& y,\n             const std::vector<NumericZ>& z,\n             const std::map<std::string, std::string>& keywords = {}) {\n    assert(x.size() == y.size() && x.size() == z.size());\n\n    PyObject* xarray = get_array(x);\n    PyObject* yarray = get_array(y);\n    PyObject* zarray = get_array(z);\n\n    PyObject* plot_args = PyTuple_New(3);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, zarray);\n\n    // construct keyword args\n    PyObject* kwargs = 
PyDict_New();\n    for (std::map<std::string, std::string>::const_iterator it = keywords.begin();\n         it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject* res =\n            PyObject_Call(detail::_interpreter::get().s_python_function_contour, plot_args, kwargs);\n\n    Py_DECREF(kwargs);\n    Py_DECREF(plot_args);\n    if (res)\n        Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename NumericX, typename NumericY, typename NumericU, typename NumericW>\nbool quiver(const std::vector<NumericX>& x, const std::vector<NumericY>& y, const std::vector<NumericU>& u, const std::vector<NumericW>& w, const std::map<std::string, std::string>& keywords = {})\n{\n    assert(x.size() == y.size() && x.size() == u.size() && u.size() == w.size());\n\n    detail::_interpreter::get();\n\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n    PyObject* uarray = detail::get_array(u);\n    PyObject* warray = detail::get_array(w);\n\n    PyObject* plot_args = PyTuple_New(4);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, uarray);\n    PyTuple_SetItem(plot_args, 3, warray);\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n    {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(\n            detail::_interpreter::get().s_python_function_quiver, plot_args, kwargs);\n\n    Py_DECREF(kwargs);\n    Py_DECREF(plot_args);\n    if (res)\n        Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename NumericX, typename NumericY>\nbool stem(const std::vector<NumericX>& x, const std::vector<NumericY>& y, const std::string& s = \"\")\n{\n    
assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* pystring = PyString_FromString(s.c_str());\n\n    PyObject* plot_args = PyTuple_New(3);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, pystring);\n\n    PyObject* res = PyObject_CallObject(\n            detail::_interpreter::get().s_python_function_stem, plot_args);\n\n    Py_DECREF(plot_args);\n    if (res)\n        Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename NumericX, typename NumericY>\nbool semilogx(const std::vector<NumericX>& x, const std::vector<NumericY>& y, const std::string& s = \"\")\n{\n    assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* pystring = PyString_FromString(s.c_str());\n\n    PyObject* plot_args = PyTuple_New(3);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, pystring);\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_semilogx, plot_args);\n\n    Py_DECREF(plot_args);\n    if(res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename NumericX, typename NumericY>\nbool semilogy(const std::vector<NumericX>& x, const std::vector<NumericY>& y, const std::string& s = \"\")\n{\n    assert(x.size() == y.size());\n\n    detail::_interpreter::get();\n\n    PyObject* xarray = detail::get_array(x);\n    PyObject* yarray = detail::get_array(y);\n\n    PyObject* pystring = PyString_FromString(s.c_str());\n\n    PyObject* plot_args = PyTuple_New(3);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, pystring);\n\n    PyObject* res = 
PyObject_CallObject(detail::_interpreter::get().s_python_function_semilogy, plot_args);

    Py_DECREF(plot_args);
    if(res) Py_DECREF(res);

    return res;
}

// Plot y versus x with logarithmic x and y axes (plt.loglog); `s` is an
// optional matplotlib format string. Returns true on success.
template<typename NumericX, typename NumericY>
bool loglog(const std::vector<NumericX>& x, const std::vector<NumericY>& y, const std::string& s = "")
{
    assert(x.size() == y.size());

    detail::_interpreter::get();

    PyObject* xarray = detail::get_array(x);
    PyObject* yarray = detail::get_array(y);

    PyObject* pystring = PyString_FromString(s.c_str());

    PyObject* plot_args = PyTuple_New(3);
    PyTuple_SetItem(plot_args, 0, xarray);
    PyTuple_SetItem(plot_args, 1, yarray);
    PyTuple_SetItem(plot_args, 2, pystring);

    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_loglog, plot_args);

    Py_DECREF(plot_args);
    if(res) Py_DECREF(res);

    return res;
}

// Plot y versus x with symmetric vertical error bars `yerr` (plt.errorbar).
// NOTE(review): yerr.size() is not validated against x/y here — presumably
// it must match; confirm at call sites.
template<typename NumericX, typename NumericY>
bool errorbar(const std::vector<NumericX> &x, const std::vector<NumericY> &y, const std::vector<NumericX> &yerr, const std::map<std::string, std::string> &keywords = {})
{
    assert(x.size() == y.size());

    detail::_interpreter::get();

    PyObject* xarray = detail::get_array(x);
    PyObject* yarray = detail::get_array(y);
    PyObject* yerrarray = detail::get_array(yerr);

    // construct keyword args
    PyObject* kwargs = PyDict_New();
    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)
    {
        PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));
    }

    // NOTE(review): PyDict_SetItemString does NOT steal the reference, so
    // yerrarray is never released here (pre-existing leak, left as-is).
    PyDict_SetItemString(kwargs, "yerr", yerrarray);

    PyObject *plot_args = PyTuple_New(2);
    PyTuple_SetItem(plot_args, 0, xarray);
    PyTuple_SetItem(plot_args, 1, yarray);

    PyObject *res = PyObject_Call(detail::_interpreter::get().s_python_function_errorbar, plot_args, kwargs);

    Py_DECREF(kwargs);
Py_DECREF(plot_args);

    if (res)
        Py_DECREF(res);
    else
        throw std::runtime_error("Call to errorbar() failed.");

    return res;
}

// Plot y against its implicit indices, registering `name` as the legend
// label (kwarg "label") so a later legend() call shows it.
template<typename Numeric>
bool named_plot(const std::string& name, const std::vector<Numeric>& y, const std::string& format = "")
{
    detail::_interpreter::get();

    PyObject* kwargs = PyDict_New();
    PyDict_SetItemString(kwargs, "label", PyString_FromString(name.c_str()));

    PyObject* yarray = detail::get_array(y);

    PyObject* pystring = PyString_FromString(format.c_str());

    PyObject* plot_args = PyTuple_New(2);

    PyTuple_SetItem(plot_args, 0, yarray);
    PyTuple_SetItem(plot_args, 1, pystring);

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_plot, plot_args, kwargs);

    Py_DECREF(kwargs);
    Py_DECREF(plot_args);
    if (res) Py_DECREF(res);

    return res;
}

// Plot y versus x with a legend label.
template<typename Numeric>
bool named_plot(const std::string& name, const std::vector<Numeric>& x, const std::vector<Numeric>& y, const std::string& format = "")
{
    detail::_interpreter::get();

    PyObject* kwargs = PyDict_New();
    PyDict_SetItemString(kwargs, "label", PyString_FromString(name.c_str()));

    PyObject* xarray = detail::get_array(x);
    PyObject* yarray = detail::get_array(y);

    PyObject* pystring = PyString_FromString(format.c_str());

    PyObject* plot_args = PyTuple_New(3);
    PyTuple_SetItem(plot_args, 0, xarray);
    PyTuple_SetItem(plot_args, 1, yarray);
    PyTuple_SetItem(plot_args, 2, pystring);

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_plot, plot_args, kwargs);

    Py_DECREF(kwargs);
    Py_DECREF(plot_args);
    if (res) Py_DECREF(res);

    return res;
}

// semilogx variant carrying a legend label.
template<typename Numeric>
bool named_semilogx(const std::string& name, const std::vector<Numeric>& x, const std::vector<Numeric>& y, const std::string& format = "")
{
detail::_interpreter::get();

    PyObject* kwargs = PyDict_New();
    PyDict_SetItemString(kwargs, "label", PyString_FromString(name.c_str()));

    PyObject* xarray = detail::get_array(x);
    PyObject* yarray = detail::get_array(y);

    PyObject* pystring = PyString_FromString(format.c_str());

    PyObject* plot_args = PyTuple_New(3);
    PyTuple_SetItem(plot_args, 0, xarray);
    PyTuple_SetItem(plot_args, 1, yarray);
    PyTuple_SetItem(plot_args, 2, pystring);

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_semilogx, plot_args, kwargs);

    Py_DECREF(kwargs);
    Py_DECREF(plot_args);
    if (res) Py_DECREF(res);

    return res;
}

// semilogy variant carrying a legend label.
template<typename Numeric>
bool named_semilogy(const std::string& name, const std::vector<Numeric>& x, const std::vector<Numeric>& y, const std::string& format = "")
{
    detail::_interpreter::get();

    PyObject* kwargs = PyDict_New();
    PyDict_SetItemString(kwargs, "label", PyString_FromString(name.c_str()));

    PyObject* xarray = detail::get_array(x);
    PyObject* yarray = detail::get_array(y);

    PyObject* pystring = PyString_FromString(format.c_str());

    PyObject* plot_args = PyTuple_New(3);
    PyTuple_SetItem(plot_args, 0, xarray);
    PyTuple_SetItem(plot_args, 1, yarray);
    PyTuple_SetItem(plot_args, 2, pystring);

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_semilogy, plot_args, kwargs);

    Py_DECREF(kwargs);
    Py_DECREF(plot_args);
    if (res) Py_DECREF(res);

    return res;
}

// loglog variant carrying a legend label.
template<typename Numeric>
bool named_loglog(const std::string& name, const std::vector<Numeric>& x, const std::vector<Numeric>& y, const std::string& format = "")
{
    detail::_interpreter::get();

    PyObject* kwargs = PyDict_New();
    PyDict_SetItemString(kwargs, "label", PyString_FromString(name.c_str()));

    PyObject* xarray = detail::get_array(x);
    PyObject* yarray =
detail::get_array(y);\n\n    PyObject* pystring = PyString_FromString(format.c_str());\n\n    PyObject* plot_args = PyTuple_New(3);\n    PyTuple_SetItem(plot_args, 0, xarray);\n    PyTuple_SetItem(plot_args, 1, yarray);\n    PyTuple_SetItem(plot_args, 2, pystring);\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_loglog, plot_args, kwargs);\n\n    Py_DECREF(kwargs);\n    Py_DECREF(plot_args);\n    if (res) Py_DECREF(res);\n\n    return res;\n}\n\ntemplate<typename Numeric>\nbool plot(const std::vector<Numeric>& y, const std::string& format = \"\")\n{\n    std::vector<Numeric> x(y.size());\n    for(size_t i=0; i<x.size(); ++i) x.at(i) = i;\n    return plot(x,y,format);\n}\n\ntemplate<typename Numeric>\nbool plot(const std::vector<Numeric>& y, const std::map<std::string, std::string>& keywords)\n{\n    std::vector<Numeric> x(y.size());\n    for(size_t i=0; i<x.size(); ++i) x.at(i) = i;\n    return plot(x,y,keywords);\n}\n\ntemplate<typename Numeric>\nbool stem(const std::vector<Numeric>& y, const std::string& format = \"\")\n{\n    std::vector<Numeric> x(y.size());\n    for (size_t i = 0; i < x.size(); ++i) x.at(i) = i;\n    return stem(x, y, format);\n}\n\ntemplate<typename Numeric>\nvoid text(Numeric x, Numeric y, const std::string& s = \"\")\n{\n    detail::_interpreter::get();\n\n    PyObject* args = PyTuple_New(3);\n    PyTuple_SetItem(args, 0, PyFloat_FromDouble(x));\n    PyTuple_SetItem(args, 1, PyFloat_FromDouble(y));\n    PyTuple_SetItem(args, 2, PyString_FromString(s.c_str()));\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_text, args);\n    if(!res) throw std::runtime_error(\"Call to text() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void colorbar(PyObject* mappable = NULL, const std::map<std::string, float>& keywords = {})\n{\n    if (mappable == NULL)\n        throw std::runtime_error(\"Must call colorbar with PyObject* returned from an image, contour, 
surface, etc.\");\n\n    detail::_interpreter::get();\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, mappable);\n\n    PyObject* kwargs = PyDict_New();\n    for(std::map<std::string, float>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n    {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyFloat_FromDouble(it->second));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_colorbar, args, kwargs);\n    if(!res) throw std::runtime_error(\"Call to colorbar() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    Py_DECREF(res);\n}\n\n\ninline long figure(long number = -1)\n{\n    detail::_interpreter::get();\n\n    PyObject *res;\n    if (number == -1)\n        res = PyObject_CallObject(detail::_interpreter::get().s_python_function_figure, detail::_interpreter::get().s_python_empty_tuple);\n    else {\n        assert(number > 0);\n\n        // Make sure interpreter is initialised\n        detail::_interpreter::get();\n\n        PyObject *args = PyTuple_New(1);\n        PyTuple_SetItem(args, 0, PyLong_FromLong(number));\n        res = PyObject_CallObject(detail::_interpreter::get().s_python_function_figure, args);\n        Py_DECREF(args);\n    }\n\n    if(!res) throw std::runtime_error(\"Call to figure() failed.\");\n\n    PyObject* num = PyObject_GetAttrString(res, \"number\");\n    if (!num) throw std::runtime_error(\"Could not get number attribute of figure object\");\n    const long figureNumber = PyLong_AsLong(num);\n\n    Py_DECREF(num);\n    Py_DECREF(res);\n\n    return figureNumber;\n}\n\ninline bool fignum_exists(long number)\n{\n    detail::_interpreter::get();\n\n    PyObject *args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, PyLong_FromLong(number));\n    PyObject *res = PyObject_CallObject(detail::_interpreter::get().s_python_function_fignum_exists, args);\n    if(!res) throw std::runtime_error(\"Call to fignum_exists() failed.\");\n\n    bool ret = 
PyObject_IsTrue(res);\n    Py_DECREF(res);\n    Py_DECREF(args);\n\n    return ret;\n}\n\ninline void figure_size(size_t w, size_t h)\n{\n    detail::_interpreter::get();\n\n    const size_t dpi = 100;\n    PyObject* size = PyTuple_New(2);\n    PyTuple_SetItem(size, 0, PyFloat_FromDouble((double)w / dpi));\n    PyTuple_SetItem(size, 1, PyFloat_FromDouble((double)h / dpi));\n\n    PyObject* kwargs = PyDict_New();\n    PyDict_SetItemString(kwargs, \"figsize\", size);\n    PyDict_SetItemString(kwargs, \"dpi\", PyLong_FromSize_t(dpi));\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_figure,\n            detail::_interpreter::get().s_python_empty_tuple, kwargs);\n\n    Py_DECREF(kwargs);\n\n    if(!res) throw std::runtime_error(\"Call to figure_size() failed.\");\n    Py_DECREF(res);\n}\n\ninline void legend()\n{\n    detail::_interpreter::get();\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_legend, detail::_interpreter::get().s_python_empty_tuple);\n    if(!res) throw std::runtime_error(\"Call to legend() failed.\");\n\n    Py_DECREF(res);\n}\n\ninline void legend(const std::map<std::string, std::string>& keywords)\n{\n  detail::_interpreter::get();\n\n  // construct keyword args\n  PyObject* kwargs = PyDict_New();\n  for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n  {\n    PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));\n  }\n\n  PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_legend, detail::_interpreter::get().s_python_empty_tuple, kwargs);\n  if(!res) throw std::runtime_error(\"Call to legend() failed.\");\n\n  Py_DECREF(kwargs);\n  Py_DECREF(res);  \n}\n\ntemplate<typename Numeric>\nvoid ylim(Numeric left, Numeric right)\n{\n    detail::_interpreter::get();\n\n    PyObject* list = PyList_New(2);\n    PyList_SetItem(list, 0, PyFloat_FromDouble(left));\n    
PyList_SetItem(list, 1, PyFloat_FromDouble(right));\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, list);\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_ylim, args);\n    if(!res) throw std::runtime_error(\"Call to ylim() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ntemplate<typename Numeric>\nvoid xlim(Numeric left, Numeric right)\n{\n    detail::_interpreter::get();\n\n    PyObject* list = PyList_New(2);\n    PyList_SetItem(list, 0, PyFloat_FromDouble(left));\n    PyList_SetItem(list, 1, PyFloat_FromDouble(right));\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, list);\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_xlim, args);\n    if(!res) throw std::runtime_error(\"Call to xlim() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\n\ninline double* xlim()\n{\n    detail::_interpreter::get();\n\n    PyObject* args = PyTuple_New(0);\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_xlim, args);\n    PyObject* left = PyTuple_GetItem(res,0);\n    PyObject* right = PyTuple_GetItem(res,1);\n\n    double* arr = new double[2];\n    arr[0] = PyFloat_AsDouble(left);\n    arr[1] = PyFloat_AsDouble(right);\n\n    if(!res) throw std::runtime_error(\"Call to xlim() failed.\");\n\n    Py_DECREF(res);\n    return arr;\n}\n\n\ninline double* ylim()\n{\n    detail::_interpreter::get();\n\n    PyObject* args = PyTuple_New(0);\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_ylim, args);\n    PyObject* left = PyTuple_GetItem(res,0);\n    PyObject* right = PyTuple_GetItem(res,1);\n\n    double* arr = new double[2];\n    arr[0] = PyFloat_AsDouble(left);\n    arr[1] = PyFloat_AsDouble(right);\n\n    if(!res) throw std::runtime_error(\"Call to ylim() failed.\");\n\n    Py_DECREF(res);\n    return arr;\n}\n\ntemplate<typename Numeric>\ninline void 
xticks(const std::vector<Numeric> &ticks, const std::vector<std::string> &labels = {}, const std::map<std::string, std::string>& keywords = {})
{
    // Set x-axis tick positions and (optionally) their labels; `keywords`
    // are forwarded as string options (e.g. rotation).
    assert(labels.size() == 0 || ticks.size() == labels.size());

    detail::_interpreter::get();

    // using numpy array
    PyObject* ticksarray = detail::get_array(ticks);

    PyObject* args;
    if(labels.size() == 0) {
        // construct positional args
        args = PyTuple_New(1);
        PyTuple_SetItem(args, 0, ticksarray);
    } else {
        // make tuple of tick labels
        PyObject* labelstuple = PyTuple_New(labels.size());
        for (size_t i = 0; i < labels.size(); i++)
            PyTuple_SetItem(labelstuple, i, PyUnicode_FromString(labels[i].c_str()));

        // construct positional args
        args = PyTuple_New(2);
        PyTuple_SetItem(args, 0, ticksarray);
        PyTuple_SetItem(args, 1, labelstuple);
    }

    // construct keyword args
    PyObject* kwargs = PyDict_New();
    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)
    {
        PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));
    }

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_xticks, args, kwargs);

    Py_DECREF(args);
    Py_DECREF(kwargs);
    if(!res) throw std::runtime_error("Call to xticks() failed");

    Py_DECREF(res);
}

// Keyword-only variant: tick positions plus options, no labels.
template<typename Numeric>
inline void xticks(const std::vector<Numeric> &ticks, const std::map<std::string, std::string>& keywords)
{
    xticks(ticks, {}, keywords);
}

// Set y-axis tick positions and (optionally) labels; mirrors xticks().
template<typename Numeric>
inline void yticks(const std::vector<Numeric> &ticks, const std::vector<std::string> &labels = {}, const std::map<std::string, std::string>& keywords = {})
{
    assert(labels.size() == 0 || ticks.size() == labels.size());

    detail::_interpreter::get();

    // using numpy array
    PyObject* ticksarray =
detail::get_array(ticks);

    PyObject* args;
    if(labels.size() == 0) {
        // construct positional args
        args = PyTuple_New(1);
        PyTuple_SetItem(args, 0, ticksarray);
    } else {
        // make tuple of tick labels
        PyObject* labelstuple = PyTuple_New(labels.size());
        for (size_t i = 0; i < labels.size(); i++)
            PyTuple_SetItem(labelstuple, i, PyUnicode_FromString(labels[i].c_str()));

        // construct positional args
        args = PyTuple_New(2);
        PyTuple_SetItem(args, 0, ticksarray);
        PyTuple_SetItem(args, 1, labelstuple);
    }

    // construct keyword args
    PyObject* kwargs = PyDict_New();
    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)
    {
        PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));
    }

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_yticks, args, kwargs);

    Py_DECREF(args);
    Py_DECREF(kwargs);
    if(!res) throw std::runtime_error("Call to yticks() failed");

    Py_DECREF(res);
}

// Keyword-only variant of yticks().
template<typename Numeric>
inline void yticks(const std::vector<Numeric> &ticks, const std::map<std::string, std::string>& keywords)
{
    yticks(ticks, {}, keywords);
}

// Set identical x/y autoscale padding (plt.margins(margin)).
template <typename Numeric> inline void margins(Numeric margin)
{
    // construct positional args
    PyObject* args = PyTuple_New(1);
    PyTuple_SetItem(args, 0, PyFloat_FromDouble(margin));

    PyObject* res =
            PyObject_CallObject(detail::_interpreter::get().s_python_function_margins, args);
    if (!res)
        throw std::runtime_error("Call to margins() failed.");

    Py_DECREF(args);
    Py_DECREF(res);
}

// Set separate x/y autoscale padding (plt.margins(x, y)).
template <typename Numeric> inline void margins(Numeric margin_x, Numeric margin_y)
{
    // construct positional args
    PyObject* args = PyTuple_New(2);
    PyTuple_SetItem(args, 0,
PyFloat_FromDouble(margin_x));\n    PyTuple_SetItem(args, 1, PyFloat_FromDouble(margin_y));\n\n    PyObject* res =\n            PyObject_CallObject(detail::_interpreter::get().s_python_function_margins, args);\n    if (!res)\n        throw std::runtime_error(\"Call to margins() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\n\ninline void tick_params(const std::map<std::string, std::string>& keywords, const std::string axis = \"both\")\n{\n  detail::_interpreter::get();\n\n  // construct positional args\n  PyObject* args;\n  args = PyTuple_New(1);\n  PyTuple_SetItem(args, 0, PyString_FromString(axis.c_str()));\n\n  // construct keyword args\n  PyObject* kwargs = PyDict_New();\n  for (std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n  {\n    PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));\n  }\n\n\n  PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_tick_params, args, kwargs);\n\n  Py_DECREF(args);\n  Py_DECREF(kwargs);\n  if (!res) throw std::runtime_error(\"Call to tick_params() failed\");\n\n  Py_DECREF(res);\n}\n\ninline void subplot(long nrows, long ncols, long plot_number)\n{\n    detail::_interpreter::get();\n    \n    // construct positional args\n    PyObject* args = PyTuple_New(3);\n    PyTuple_SetItem(args, 0, PyFloat_FromDouble(nrows));\n    PyTuple_SetItem(args, 1, PyFloat_FromDouble(ncols));\n    PyTuple_SetItem(args, 2, PyFloat_FromDouble(plot_number));\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_subplot, args);\n    if(!res) throw std::runtime_error(\"Call to subplot() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void subplot2grid(long nrows, long ncols, long rowid=0, long colid=0, long rowspan=1, long colspan=1)\n{\n    detail::_interpreter::get();\n\n    PyObject* shape = PyTuple_New(2);\n    PyTuple_SetItem(shape, 0, PyLong_FromLong(nrows));\n  
  PyTuple_SetItem(shape, 1, PyLong_FromLong(ncols));\n\n    PyObject* loc = PyTuple_New(2);\n    PyTuple_SetItem(loc, 0, PyLong_FromLong(rowid));\n    PyTuple_SetItem(loc, 1, PyLong_FromLong(colid));\n\n    PyObject* args = PyTuple_New(4);\n    PyTuple_SetItem(args, 0, shape);\n    PyTuple_SetItem(args, 1, loc);\n    PyTuple_SetItem(args, 2, PyLong_FromLong(rowspan));\n    PyTuple_SetItem(args, 3, PyLong_FromLong(colspan));\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_subplot2grid, args);\n    if(!res) throw std::runtime_error(\"Call to subplot2grid() failed.\");\n\n    Py_DECREF(shape);\n    Py_DECREF(loc);\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void title(const std::string &titlestr, const std::map<std::string, std::string> &keywords = {})\n{\n    detail::_interpreter::get();\n\n    PyObject* pytitlestr = PyString_FromString(titlestr.c_str());\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, pytitlestr);\n\n    PyObject* kwargs = PyDict_New();\n    for (auto it = keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_title, args, kwargs);\n    if(!res) throw std::runtime_error(\"Call to title() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    Py_DECREF(res);\n}\n\ninline void suptitle(const std::string &suptitlestr, const std::map<std::string, std::string> &keywords = {})\n{\n    detail::_interpreter::get();\n    \n    PyObject* pysuptitlestr = PyString_FromString(suptitlestr.c_str());\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, pysuptitlestr);\n\n    PyObject* kwargs = PyDict_New();\n    for (auto it = keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    
PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_suptitle, args, kwargs);\n    if(!res) throw std::runtime_error(\"Call to suptitle() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    Py_DECREF(res);\n}\n\ninline void axis(const std::string &axisstr)\n{\n    detail::_interpreter::get();\n\n    PyObject* str = PyString_FromString(axisstr.c_str());\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, str);\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_axis, args);\n    if(!res) throw std::runtime_error(\"Call to title() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void axvline(double x, double ymin = 0., double ymax = 1., const std::map<std::string, std::string>& keywords = std::map<std::string, std::string>())\n{\n    detail::_interpreter::get();\n\n    // construct positional args\n    PyObject* args = PyTuple_New(3);\n    PyTuple_SetItem(args, 0, PyFloat_FromDouble(x));\n    PyTuple_SetItem(args, 1, PyFloat_FromDouble(ymin));\n    PyTuple_SetItem(args, 2, PyFloat_FromDouble(ymax));\n\n    // construct keyword args\n    PyObject* kwargs = PyDict_New();\n    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)\n    {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));\n    }\n\n    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_axvline, args, kwargs);\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n\n    if(res) Py_DECREF(res);\n}\n\ninline void axvspan(double xmin, double xmax, double ymin = 0., double ymax = 1., const std::map<std::string, std::string>& keywords = std::map<std::string, std::string>())\n{\n    // construct positional args\n    PyObject* args = PyTuple_New(4);\n    PyTuple_SetItem(args, 0, PyFloat_FromDouble(xmin));\n    PyTuple_SetItem(args, 1, PyFloat_FromDouble(xmax));\n    PyTuple_SetItem(args, 
2, PyFloat_FromDouble(ymin));
    PyTuple_SetItem(args, 3, PyFloat_FromDouble(ymax));

    // construct keyword args
    PyObject* kwargs = PyDict_New();
    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)
    {
        // numeric-valued options must be passed to Python as floats, not strings
        if (it->first == "linewidth" || it->first == "alpha")
            PyDict_SetItemString(kwargs, it->first.c_str(), PyFloat_FromDouble(std::stod(it->second)));
        else
            PyDict_SetItemString(kwargs, it->first.c_str(), PyString_FromString(it->second.c_str()));
    }

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_axvspan, args, kwargs);
    Py_DECREF(args);
    Py_DECREF(kwargs);

    if(res) Py_DECREF(res);
}

// Set the x-axis label; keywords forwarded to plt.xlabel().
inline void xlabel(const std::string &str, const std::map<std::string, std::string> &keywords = {})
{
    detail::_interpreter::get();

    PyObject* pystr = PyString_FromString(str.c_str());
    PyObject* args = PyTuple_New(1);
    PyTuple_SetItem(args, 0, pystr);

    PyObject* kwargs = PyDict_New();
    for (auto it = keywords.begin(); it != keywords.end(); ++it) {
        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));
    }

    PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_xlabel, args, kwargs);
    if(!res) throw std::runtime_error("Call to xlabel() failed.");

    Py_DECREF(args);
    Py_DECREF(kwargs);
    Py_DECREF(res);
}

// Set the y-axis label; keywords forwarded to plt.ylabel().
inline void ylabel(const std::string &str, const std::map<std::string, std::string>& keywords = {})
{
    detail::_interpreter::get();

    PyObject* pystr = PyString_FromString(str.c_str());
    PyObject* args = PyTuple_New(1);
    PyTuple_SetItem(args, 0, pystr);

    PyObject* kwargs = PyDict_New();
    for (auto it = keywords.begin(); it != keywords.end(); ++it) {
        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));
    }

    PyObject* res =
PyObject_Call(detail::_interpreter::get().s_python_function_ylabel, args, kwargs);\n    if(!res) throw std::runtime_error(\"Call to ylabel() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    Py_DECREF(res);\n}\n\ninline void set_zlabel(const std::string &str, const std::map<std::string, std::string>& keywords = {})\n{\n    detail::_interpreter::get();\n\n    // Same as with plot_surface: We lazily load the modules here the first time \n    // this function is called because I'm not sure that we can assume \"matplotlib \n    // installed\" implies \"mpl_toolkits installed\" on all platforms, and we don't \n    // want to require it for people who don't need 3d plots.\n    static PyObject *mpl_toolkitsmod = nullptr, *axis3dmod = nullptr;\n    if (!mpl_toolkitsmod) {\n        PyObject* mpl_toolkits = PyString_FromString(\"mpl_toolkits\");\n        PyObject* axis3d = PyString_FromString(\"mpl_toolkits.mplot3d\");\n        if (!mpl_toolkits || !axis3d) { throw std::runtime_error(\"couldnt create string\"); }\n\n        mpl_toolkitsmod = PyImport_Import(mpl_toolkits);\n        Py_DECREF(mpl_toolkits);\n        if (!mpl_toolkitsmod) { throw std::runtime_error(\"Error loading module mpl_toolkits!\"); }\n\n        axis3dmod = PyImport_Import(axis3d);\n        Py_DECREF(axis3d);\n        if (!axis3dmod) { throw std::runtime_error(\"Error loading module mpl_toolkits.mplot3d!\"); }\n    }\n\n    PyObject* pystr = PyString_FromString(str.c_str());\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, pystr);\n\n    PyObject* kwargs = PyDict_New();\n    for (auto it = keywords.begin(); it != keywords.end(); ++it) {\n        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));\n    }\n\n    PyObject *ax =\n    PyObject_CallObject(detail::_interpreter::get().s_python_function_gca,\n      detail::_interpreter::get().s_python_empty_tuple);\n    if (!ax) throw std::runtime_error(\"Call to gca() failed.\");\n    
Py_INCREF(ax);\n\n    PyObject *zlabel = PyObject_GetAttrString(ax, \"set_zlabel\");\n    if (!zlabel) throw std::runtime_error(\"Attribute set_zlabel not found.\");\n    Py_INCREF(zlabel);\n\n    PyObject *res = PyObject_Call(zlabel, args, kwargs);\n    if (!res) throw std::runtime_error(\"Call to set_zlabel() failed.\");\n    Py_DECREF(zlabel);\n\n    Py_DECREF(ax);\n    Py_DECREF(args);\n    Py_DECREF(kwargs);\n    if (res) Py_DECREF(res);\n}\n\ninline void grid(bool flag)\n{\n    detail::_interpreter::get();\n\n    PyObject* pyflag = flag ? Py_True : Py_False;\n    Py_INCREF(pyflag);\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, pyflag);\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_grid, args);\n    if(!res) throw std::runtime_error(\"Call to grid() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void show(const bool block = true)\n{\n    detail::_interpreter::get();\n\n    PyObject* res;\n    if(block)\n    {\n        res = PyObject_CallObject(\n                detail::_interpreter::get().s_python_function_show,\n                detail::_interpreter::get().s_python_empty_tuple);\n    }\n    else\n    {\n        PyObject *kwargs = PyDict_New();\n        PyDict_SetItemString(kwargs, \"block\", Py_False);\n        res = PyObject_Call( detail::_interpreter::get().s_python_function_show, detail::_interpreter::get().s_python_empty_tuple, kwargs);\n       Py_DECREF(kwargs);\n    }\n\n\n    if (!res) throw std::runtime_error(\"Call to show() failed.\");\n\n    Py_DECREF(res);\n}\n\ninline void close()\n{\n    detail::_interpreter::get();\n\n    PyObject* res = PyObject_CallObject(\n            detail::_interpreter::get().s_python_function_close,\n            detail::_interpreter::get().s_python_empty_tuple);\n\n    if (!res) throw std::runtime_error(\"Call to close() failed.\");\n\n    Py_DECREF(res);\n}\n\ninline void xkcd() {\n    detail::_interpreter::get();\n\n    PyObject* 
res;\n    PyObject *kwargs = PyDict_New();\n\n    res = PyObject_Call(detail::_interpreter::get().s_python_function_xkcd,\n            detail::_interpreter::get().s_python_empty_tuple, kwargs);\n\n    Py_DECREF(kwargs);\n\n    if (!res)\n        throw std::runtime_error(\"Call to show() failed.\");\n\n    Py_DECREF(res);\n}\n\ninline void draw()\n{\n    detail::_interpreter::get();\n\n    PyObject* res = PyObject_CallObject(\n        detail::_interpreter::get().s_python_function_draw,\n        detail::_interpreter::get().s_python_empty_tuple);\n\n    if (!res) throw std::runtime_error(\"Call to draw() failed.\");\n\n    Py_DECREF(res);\n}\n\ntemplate<typename Numeric>\ninline void pause(Numeric interval)\n{\n    detail::_interpreter::get();\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, PyFloat_FromDouble(interval));\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_pause, args);\n    if(!res) throw std::runtime_error(\"Call to pause() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void save(const std::string& filename)\n{\n    detail::_interpreter::get();\n\n    PyObject* pyfilename = PyString_FromString(filename.c_str());\n\n    PyObject* args = PyTuple_New(1);\n    PyTuple_SetItem(args, 0, pyfilename);\n    std::cout<<\"args:\"<<filename.c_str()<<std::endl;\n\n    PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_save, args);\n    if (!res) throw std::runtime_error(\"Call to save() failed.\");\n\n    Py_DECREF(args);\n    Py_DECREF(res);\n}\n\ninline void clf() {\n    detail::_interpreter::get();\n\n    PyObject *res = PyObject_CallObject(\n        detail::_interpreter::get().s_python_function_clf,\n        detail::_interpreter::get().s_python_empty_tuple);\n\n    if (!res) throw std::runtime_error(\"Call to clf() failed.\");\n\n    Py_DECREF(res);\n}\n\ninline void cla() {\n    detail::_interpreter::get();\n\n    PyObject* res = 
PyObject_CallObject(detail::_interpreter::get().s_python_function_cla,
                                        detail::_interpreter::get().s_python_empty_tuple);

    if (!res)
        throw std::runtime_error("Call to cla() failed.");

    Py_DECREF(res);
}

// Turn on interactive mode (plt.ion()).
inline void ion() {
    detail::_interpreter::get();

    PyObject *res = PyObject_CallObject(
        detail::_interpreter::get().s_python_function_ion,
        detail::_interpreter::get().s_python_empty_tuple);

    if (!res) throw std::runtime_error("Call to ion() failed.");

    Py_DECREF(res);
}

// Block until the user clicks `numClicks` times; returns the clicked (x, y)
// positions. `keywords` are forwarded to plt.ginput() as string options.
inline std::vector<std::array<double, 2>> ginput(const int numClicks = 1, const std::map<std::string, std::string>& keywords = {})
{
    detail::_interpreter::get();

    PyObject *args = PyTuple_New(1);
    PyTuple_SetItem(args, 0, PyLong_FromLong(numClicks));

    // construct keyword args
    PyObject* kwargs = PyDict_New();
    for(std::map<std::string, std::string>::const_iterator it = keywords.begin(); it != keywords.end(); ++it)
    {
        PyDict_SetItemString(kwargs, it->first.c_str(), PyUnicode_FromString(it->second.c_str()));
    }

    PyObject* res = PyObject_Call(
        detail::_interpreter::get().s_python_function_ginput, args, kwargs);

    Py_DECREF(kwargs);
    Py_DECREF(args);
    if (!res) throw std::runtime_error("Call to ginput() failed.");

    const size_t len = PyList_Size(res);
    std::vector<std::array<double, 2>> out;
    out.reserve(len);
    for (size_t i = 0; i < len; i++) {
        // PyList_GetItem / PyTuple_GetItem return borrowed references
        PyObject *current = PyList_GetItem(res, i);
        std::array<double, 2> position;
        position[0] = PyFloat_AsDouble(PyTuple_GetItem(current, 0));
        position[1] = PyFloat_AsDouble(PyTuple_GetItem(current, 1));
        out.push_back(position);
    }
    Py_DECREF(res);

    return out;
}

// Actually, is there any reason not to call this automatically for every plot?
inline void tight_layout() {
detail::_interpreter::get();\n\n    PyObject *res = PyObject_CallObject(\n        detail::_interpreter::get().s_python_function_tight_layout,\n        detail::_interpreter::get().s_python_empty_tuple);\n\n    if (!res) throw std::runtime_error(\"Call to tight_layout() failed.\");\n\n    Py_DECREF(res);\n}\n\n// Support for variadic plot() and initializer lists:\n\nnamespace detail {\n\ntemplate<typename T>\nusing is_function = typename std::is_function<std::remove_pointer<std::remove_reference<T>>>::type;\n\ntemplate<bool obj, typename T>\nstruct is_callable_impl;\n\ntemplate<typename T>\nstruct is_callable_impl<false, T>\n{\n    typedef is_function<T> type;\n}; // a non-object is callable iff it is a function\n\ntemplate<typename T>\nstruct is_callable_impl<true, T>\n{\n    struct Fallback { void operator()(); };\n    struct Derived : T, Fallback { };\n\n    template<typename U, U> struct Check;\n\n    template<typename U>\n    static std::true_type test( ... ); // use a variadic function to make sure (1) it accepts everything and (2) its always the worst match\n\n    template<typename U>\n    static std::false_type test( Check<void(Fallback::*)(), &U::operator()>* );\n\npublic:\n    typedef decltype(test<Derived>(nullptr)) type;\n    typedef decltype(&Fallback::operator()) dtype;\n    static constexpr bool value = type::value;\n}; // an object is callable iff it defines operator()\n\ntemplate<typename T>\nstruct is_callable\n{\n    // dispatch to is_callable_impl<true, T> or is_callable_impl<false, T> depending on whether T is of class type or not\n    typedef typename is_callable_impl<std::is_class<T>::value, T>::type type;\n};\n\ntemplate<typename IsYDataCallable>\nstruct plot_impl { };\n\ntemplate<>\nstruct plot_impl<std::false_type>\n{\n    template<typename IterableX, typename IterableY>\n    bool operator()(const IterableX& x, const IterableY& y, const std::string& format)\n    {\n        // 2-phase lookup for distance, begin, end\n        using 
std::distance;\n        using std::begin;\n        using std::end;\n\n        auto xs = distance(begin(x), end(x));\n        auto ys = distance(begin(y), end(y));\n        assert(xs == ys && \"x and y data must have the same number of elements!\");\n\n        PyObject* xlist = PyList_New(xs);\n        PyObject* ylist = PyList_New(ys);\n        PyObject* pystring = PyString_FromString(format.c_str());\n\n        auto itx = begin(x), ity = begin(y);\n        for(size_t i = 0; i < xs; ++i) {\n            PyList_SetItem(xlist, i, PyFloat_FromDouble(*itx++));\n            PyList_SetItem(ylist, i, PyFloat_FromDouble(*ity++));\n        }\n\n        PyObject* plot_args = PyTuple_New(3);\n        PyTuple_SetItem(plot_args, 0, xlist);\n        PyTuple_SetItem(plot_args, 1, ylist);\n        PyTuple_SetItem(plot_args, 2, pystring);\n\n        PyObject* res = PyObject_CallObject(detail::_interpreter::get().s_python_function_plot, plot_args);\n\n        Py_DECREF(plot_args);\n        if(res) Py_DECREF(res);\n\n        return res;\n    }\n};\n\ntemplate<>\nstruct plot_impl<std::true_type>\n{\n    template<typename Iterable, typename Callable>\n    bool operator()(const Iterable& ticks, const Callable& f, const std::string& format)\n    {\n        if(begin(ticks) == end(ticks)) return true;\n\n        // We could use additional meta-programming to deduce the correct element type of y,\n        // but all values have to be convertible to double anyways\n        std::vector<double> y;\n        for(auto x : ticks) y.push_back(f(x));\n        return plot_impl<std::false_type>()(ticks,y,format);\n    }\n};\n\n} // end namespace detail\n\n// recursion stop for the above\ntemplate<typename... Args>\nbool plot() { return true; }\n\ntemplate<typename A, typename B, typename... Args>\nbool plot(const A& a, const B& b, const std::string& format, Args... 
args)\n{\n    return detail::plot_impl<typename detail::is_callable<B>::type>()(a,b,format) && plot(args...);\n}\n\n/*\n * This group of plot() functions is needed to support initializer lists, i.e. calling\n *    plot( {1,2,3,4} )\n */\ninline bool plot(const std::vector<double>& x, const std::vector<double>& y, const std::string& format = \"\") {\n    return plot<double,double>(x,y,format);\n}\n\ninline bool plot(const std::vector<double>& y, const std::string& format = \"\") {\n    return plot<double>(y,format);\n}\n\ninline bool plot(const std::vector<double>& x, const std::vector<double>& y, const std::map<std::string, std::string>& keywords) {\n    return plot<double>(x,y,keywords);\n}\n\n/*\n * This class allows dynamic plots, ie changing the plotted data without clearing and re-plotting\n */\nclass Plot\n{\npublic:\n    // default initialization with plot label, some data and format\n    template<typename Numeric>\n    Plot(const std::string& name, const std::vector<Numeric>& x, const std::vector<Numeric>& y, const std::string& format = \"\") {\n        detail::_interpreter::get();\n\n        assert(x.size() == y.size());\n\n        PyObject* kwargs = PyDict_New();\n        if(name != \"\")\n            PyDict_SetItemString(kwargs, \"label\", PyString_FromString(name.c_str()));\n\n        PyObject* xarray = detail::get_array(x);\n        PyObject* yarray = detail::get_array(y);\n\n        PyObject* pystring = PyString_FromString(format.c_str());\n\n        PyObject* plot_args = PyTuple_New(3);\n        PyTuple_SetItem(plot_args, 0, xarray);\n        PyTuple_SetItem(plot_args, 1, yarray);\n        PyTuple_SetItem(plot_args, 2, pystring);\n\n        PyObject* res = PyObject_Call(detail::_interpreter::get().s_python_function_plot, plot_args, kwargs);\n\n        Py_DECREF(kwargs);\n        Py_DECREF(plot_args);\n\n        if(res)\n        {\n            line= PyList_GetItem(res, 0);\n\n            if(line)\n                set_data_fct = 
PyObject_GetAttrString(line,\"set_data\");\n            else\n                Py_DECREF(line);\n            Py_DECREF(res);\n        }\n    }\n\n    // shorter initialization with name or format only\n    // basically calls line, = plot([], [])\n    Plot(const std::string& name = \"\", const std::string& format = \"\")\n        : Plot(name, std::vector<double>(), std::vector<double>(), format) {}\n\n    template<typename Numeric>\n    bool update(const std::vector<Numeric>& x, const std::vector<Numeric>& y) {\n        assert(x.size() == y.size());\n        if(set_data_fct)\n        {\n            PyObject* xarray = detail::get_array(x);\n            PyObject* yarray = detail::get_array(y);\n\n            PyObject* plot_args = PyTuple_New(2);\n            PyTuple_SetItem(plot_args, 0, xarray);\n            PyTuple_SetItem(plot_args, 1, yarray);\n\n            PyObject* res = PyObject_CallObject(set_data_fct, plot_args);\n            if (res) Py_DECREF(res);\n            return res;\n        }\n        return false;\n    }\n\n    // clears the plot but keep it available\n    bool clear() {\n        return update(std::vector<double>(), std::vector<double>());\n    }\n\n    // definitely remove this line\n    void remove() {\n        if(line)\n        {\n            auto remove_fct = PyObject_GetAttrString(line,\"remove\");\n            PyObject* args = PyTuple_New(0);\n            PyObject* res = PyObject_CallObject(remove_fct, args);\n            if (res) Py_DECREF(res);\n        }\n        decref();\n    }\n\n    ~Plot() {\n        decref();\n    }\nprivate:\n\n    void decref() {\n        if(line)\n            Py_DECREF(line);\n        if(set_data_fct)\n            Py_DECREF(set_data_fct);\n    }\n\n\n    PyObject* line = nullptr;\n    PyObject* set_data_fct = nullptr;\n};\n\n} // end namespace matplotlibcpp\n"
  },
  {
    "path": "include/scope_timer.hpp",
    "content": "//\n// Created by yunfan on 2021/3/19.\n// Version: 1.0.0\n//\n\n\n#ifndef SCROPE_TIMER_HPP//SRC_POLY_VISUAL_UTILS_HPP\n#define SCROPE_TIMER_HPP\n\n#include <chrono>\n#include \"color.h\"\n#include \"cstring\"\n\nusing namespace std;\n\nclass TimeConsuming {\npublic:\n    TimeConsuming();\n\n    TimeConsuming(string msg, int repeat_time) {\n        repeat_time_ = repeat_time;\n        msg_ = msg;\n        tc_start = std::chrono::high_resolution_clock::now();\n        has_shown = false;\n    }\n\n    TimeConsuming(string msg) {\n        msg_ = msg;\n        repeat_time_ = 1;\n        tc_start = std::chrono::high_resolution_clock::now();\n        has_shown = false;\n    }\n\n    ~TimeConsuming() {\n        if (!has_shown && enable_) {\n            tc_end = std::chrono::high_resolution_clock::now();\n            double dt = std::chrono::duration_cast<std::chrono::duration<double>>(tc_end - tc_start).count();\n            double t_us = (double) dt * 1e6 / repeat_time_;\n            if (t_us < 1) {\n                t_us *= 1000;\n                printf(\"[TIMER] %s time consuming \\033[32m %lf ns\\033[0m\\n\", msg_.c_str(), t_us);\n            } else if (t_us > 1e6) {\n                t_us /= 1e6;\n                printf(\"[TIMER] %s time consuming \\033[32m %lf s\\033[0m\\n\", msg_.c_str(), t_us);\n            } else if (t_us > 1e3) {\n                t_us /= 1e3;\n                printf(\"[TIMER] %s time consuming \\033[32m %lf ms\\033[0m\\n\", msg_.c_str(), t_us);\n            }else\n                printf(\"[TIMER] %s time consuming \\033[32m %lf us\\033[0m\\n\", msg_.c_str(), t_us);\n        }\n    }\n\n    void set_enbale(bool enable){\n        enable_ = enable;\n    }\n\n    void start() {\n        tc_start = std::chrono::high_resolution_clock::now();\n    }\n\n    void stop() {\n        if(!enable_){return;}\n        has_shown = true;\n        tc_end = std::chrono::high_resolution_clock::now();\n        double dt = 
std::chrono::duration_cast<std::chrono::duration<double>>(tc_end - tc_start).count();\n        //            ROS_WARN(\"%s time consuming %lf us.\",msg_.c_str(),(double)(end_t - start_t).toNSec()/ 1e3);\n        double t_us = (double) dt * 1e6 / repeat_time_;\n        if (t_us < 1) {\n            t_us *= 1000;\n            printf(\" -- [TIMER] %s time consuming \\033[32m %lf ns\\033[0m\\n\", msg_.c_str(), t_us);\n        } else if (t_us > 1e6) {\n            t_us /= 1e6;\n            printf(\" -- [TIMER] %s time consuming \\033[32m %lf s\\033[0m\\n\", msg_.c_str(), t_us);\n        } else if (t_us > 1e3) {\n            t_us /= 1e3;\n            printf(\" -- [TIMER] %s time consuming \\033[32m %lf ms\\033[0m\\n\", msg_.c_str(), t_us);\n        }else\n            printf(\" -- [TIMER] %s time consuming \\033[32m %lf us\\033[0m\\n\", msg_.c_str(), t_us);\n    }\n\nprivate:\n    std::chrono::high_resolution_clock::time_point tc_start, tc_end;\n    string msg_;\n    int repeat_time_;\n    bool has_shown = false;\n    bool enable_{true};\n};\n\n#endif //SRC_POLY_VISUAL_UTILS_HPP\n"
  },
  {
    "path": "include/so3_math.h",
    "content": "#ifndef SO3_MATH_H\n#define SO3_MATH_H\n\n#include <math.h>\n#include <Eigen/Core>\n\n\n#define SKEW_SYM_MATRX(v) 0.0,-v[2],v[1],v[2],0.0,-v[0],-v[1],v[0],0.0\n\ntemplate<typename T>\nEigen::Matrix<T, 3, 3> skew_sym_mat(const Eigen::Matrix<T, 3, 1> &v)\n{\n    Eigen::Matrix<T, 3, 3> skew_sym_mat;\n    skew_sym_mat<<0.0,-v[2],v[1],v[2],0.0,-v[0],-v[1],v[0],0.0;\n    return skew_sym_mat;\n}\n\ntemplate<typename T>\nEigen::Matrix<T, 3, 3> Exp(const Eigen::Matrix<T, 3, 1> &ang)\n{\n    T ang_norm = ang.norm();\n    Eigen::Matrix<T, 3, 3> Eye3 = Eigen::Matrix<T, 3, 3>::Identity();\n    if (ang_norm > 0.0000001)\n    {\n        Eigen::Matrix<T, 3, 1> r_axis = ang / ang_norm;\n        Eigen::Matrix<T, 3, 3> K;\n        K << SKEW_SYM_MATRX(r_axis);\n        /// Roderigous Tranformation\n        return Eye3 + std::sin(ang_norm) * K + (1.0 - std::cos(ang_norm)) * K * K;\n    }\n    else\n    {\n        return Eye3;\n    }\n}\n\ntemplate<typename T, typename Ts>\nEigen::Matrix<T, 3, 3> Exp(const Eigen::Matrix<T, 3, 1> &ang_vel, const Ts &dt)\n{\n    T ang_vel_norm = ang_vel.norm();\n    Eigen::Matrix<T, 3, 3> Eye3 = Eigen::Matrix<T, 3, 3>::Identity();\n\n    if (ang_vel_norm > 0.0000001)\n    {\n        Eigen::Matrix<T, 3, 1> r_axis = ang_vel / ang_vel_norm;\n        Eigen::Matrix<T, 3, 3> K;\n\n        K << SKEW_SYM_MATRX(r_axis);\n\n        T r_ang = ang_vel_norm * dt;\n\n        /// Roderigous Tranformation\n        return Eye3 + std::sin(r_ang) * K + (1.0 - std::cos(r_ang)) * K * K;\n    }\n    else\n    {\n        return Eye3;\n    }\n}\n\ntemplate<typename T>\nEigen::Matrix<T, 3, 3> Exp(const T &v1, const T &v2, const T &v3)\n{\n    T &&norm = sqrt(v1 * v1 + v2 * v2 + v3 * v3);\n    Eigen::Matrix<T, 3, 3> Eye3 = Eigen::Matrix<T, 3, 3>::Identity();\n    if (norm > 0.00001)\n    {\n        T r_ang[3] = {v1 / norm, v2 / norm, v3 / norm};\n        Eigen::Matrix<T, 3, 3> K;\n        K << SKEW_SYM_MATRX(r_ang);\n\n        /// Roderigous Tranformation\n        
return Eye3 + std::sin(norm) * K + (1.0 - std::cos(norm)) * K * K;\n    }\n    else\n    {\n        return Eye3;\n    }\n}\n\n\ntemplate<typename T>\nEigen::Matrix<T, 3, 3> A_cal(const Eigen::Matrix<T, 3, 1> & ang_vel)\n{\n    T norm = ang_vel.norm();\n    Eigen::Matrix<T, 3, 3> Eye3 = Eigen::Matrix<T, 3, 3>::Identity();\n    if (norm > 0.00001)\n    {\n        Eigen::Matrix<T, 3, 1> r_ang = ang_vel / norm;\n        Eigen::Matrix<T, 3, 3> K;\n        K << SKEW_SYM_MATRX(r_ang);\n        return Eye3  + (1.0 - std::cos(norm)/norm) * K + (1.0 - std::sin(norm)/norm) * K * K ;\n    }\n    else\n    {\n        return Eye3;\n    }\n}\n\n/* Logrithm of a Rotation Matrix */\ntemplate<typename T>\nEigen::Matrix<T,3,1> Log(const Eigen::Matrix<T, 3, 3> &R)\n{\n    T theta = (R.trace() > 3.0 - 1e-6) ? 0.0 : std::acos(0.5 * (R.trace() - 1));\n    Eigen::Matrix<T,3,1> K(R(2,1) - R(1,2), R(0,2) - R(2,0), R(1,0) - R(0,1));\n    return (std::abs(theta) < 0.001) ? (0.5 * K) : (0.5 * theta / std::sin(theta) * K);\n}\n\ntemplate<typename T>\nEigen::Matrix<T, 3, 1> RotMtoEuler(const Eigen::Matrix<T, 3, 3> &rot)\n{\n    T sy = sqrt(rot(0,0)*rot(0,0) + rot(1,0)*rot(1,0));\n    bool singular = sy < 1e-6;\n    T x, y, z;\n    if(!singular)\n    {\n        x = atan2(rot(2, 1), rot(2, 2));\n        y = atan2(-rot(2, 0), sy);   \n        z = atan2(rot(1, 0), rot(0, 0));  \n    }\n    else\n    {    \n        x = atan2(-rot(1, 2), rot(1, 1));    \n        y = atan2(-rot(2, 0), sy);    \n        z = 0;\n    }\n    Eigen::Matrix<T, 3, 1> ang(x, y, z);\n    return ang;\n}\n\n\n#endif\n"
  },
  {
    "path": "launch/hesai_pandarXT.launch",
    "content": "<launch>\n    <!-- Launch file for Hesai PandarXT LiDAR -->\n\n    <arg name=\"rviz\" default=\"true\" />\n\n    <rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/pandar.yaml\" />\n\n    <node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\"/>\n\n    <group if=\"$(arg rviz)\">\n      <node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/spinning.rviz\" />\n    </group>\n\n    launch-prefix=\"gdb -ex run --args\"\n\n</launch>\n"
  },
  {
    "path": "launch/livox_avia.launch",
    "content": "<launch>\n<!-- Launch file for Livox AVIA LiDAR -->\n\n\t<arg name=\"rviz\" default=\"true\" />\n\n\t<rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/avia.yaml\" />\n\n\t<node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\" />\n\n\t<group if=\"$(arg rviz)\">\n\t<node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/fast_lo.rviz\" />\n\t</group>\n\n\tlaunch-prefix=\"gdb -ex run --args\"\n\n</launch>\n"
  },
  {
    "path": "launch/livox_horizon.launch",
    "content": "<launch>\n<!-- Launch file for Livox Horizon LiDAR -->\n\n\t<arg name=\"rviz\" default=\"true\" />\n\n\t<rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/horizon.yaml\" />\n\n\t<node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\"  />\n\n\t<group if=\"$(arg rviz)\">\n\t<node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/fast_lo.rviz\" />\n\t</group>\n\n</launch>\n"
  },
  {
    "path": "launch/livox_mid360.launch",
    "content": "<launch>\n<!-- Launch file for Livox AVIA LiDAR -->\n\n\t<arg name=\"rviz\" default=\"true\" />\n\n\t<rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/mid360.yaml\" />\n\n\t<node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\" />\n\n\t<group if=\"$(arg rviz)\">\n\t<node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/fast_lo.rviz\" />\n\t</group>\n\n\tlaunch-prefix=\"gdb -ex run --args\"\n\n</launch>\n"
  },
  {
    "path": "launch/ouster.launch",
    "content": "<launch>\n    <!-- Launch file for ouster Ouster LiDAR -->\n\n    <arg name=\"rviz\" default=\"true\" />\n\n    <rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/ouster.yaml\" />\n\n    <node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\"/>\n\n\n    <group if=\"$(arg rviz)\">\n      <node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/spinning.rviz\" />\n    </group>\n\n    launch-prefix=\"gdb -ex run --args\"\n\n</launch>\n"
  },
  {
    "path": "launch/robosense.launch",
    "content": "<launch>\n  <!-- Launch file for velodyne LiDAR -->\n\n    <arg name=\"rviz\" default=\"true\" />\n\n    <rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/robosense.yaml\" />\n\n    <node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\"/>\n\n    <group if=\"$(arg rviz)\">\n    <node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/spinning.rviz\" />\n    </group>\n   launch-prefix=\"gdb -ex run --args\" \n</launch>\n"
  },
  {
    "path": "launch/velodyne.launch",
    "content": "<launch>\n  <!-- Launch file for velodyne LiDAR -->\n\n    <arg name=\"rviz\" default=\"true\" />\n\n    <rosparam command=\"load\" file=\"$(find i2ekf_lo)/config/velodyne.yaml\" />\n\n    <node pkg=\"i2ekf_lo\" type=\"i2ekf_lo_mapping\" name=\"i2ekf_lo_mapping\" output=\"screen\"/>\n\n    <group if=\"$(arg rviz)\">\n    <node launch-prefix=\"nice\" pkg=\"rviz\" type=\"rviz\" name=\"rviz\" args=\"-d $(find i2ekf_lo)/rviz_cfg/spinning.rviz\" />\n    </group>\n   launch-prefix=\"gdb -ex run --args\" \n</launch>\n"
  },
  {
    "path": "msg/Pose6D.msg",
    "content": "# the preintegrated Lidar states at the time of IMU measurements in a frame\nfloat64  offset_time # the offset time of IMU measurement w.r.t the first lidar point\nfloat64[3] acc       # the preintegrated total acceleration (global frame) at the Lidar origin\nfloat64[3] gyr       # the unbiased angular velocity (body frame) at the Lidar origin\nfloat64[3] vel       # the preintegrated velocity (global frame) at the Lidar origin\nfloat64[3] pos       # the preintegrated position (global frame) at the Lidar origin\nfloat64[9] rot       # the preintegrated rotation (global frame) at the Lidar origin"
  },
  {
    "path": "msg/States.msg",
    "content": "Header header          # timestamp of the first lidar in a frame\nfloat64[] rot_end      # the estimated attitude (rotation matrix) at the end lidar point\nfloat64[] pos_end      # the estimated position at the end lidar point (world frame)\nfloat64[] vel_end      # the estimated velocity at the end lidar point (world frame)\nfloat64[] bias_gyr     # gyroscope bias\nfloat64[] bias_acc     # accelerator bias\nfloat64[] gravity      # the estimated gravity acceleration\nfloat64[] cov          # states covariance\n# Pose6D[] IMUpose        # 6D pose at each imu measurements"
  },
  {
    "path": "package.xml",
    "content": "<?xml version=\"1.0\"?>\n<package>\n  <name>i2ekf_lo</name>\n  <version>0.0.0</version>\n\n  <description>\n    This is a modified version of LOAM which is original algorithm\n    is described in the following paper:\n    J. Zhang and S. Singh. LOAM: Lidar Odometry and Mapping in Real-time.\n      Robotics: Science and Systems Conference (RSS). Berkeley, CA, July 2014.\n  </description>\n\n  <maintainer email=\"dev@livoxtech.com\">claydergc</maintainer>\n\n  <license>BSD</license>\n\n  <author email=\"zhangji@cmu.edu\">Ji Zhang</author>\n  \n  <buildtool_depend>catkin</buildtool_depend>\n  <build_depend>geometry_msgs</build_depend>\n  <build_depend>nav_msgs</build_depend>\n  <build_depend>roscpp</build_depend>\n  <build_depend>rospy</build_depend>\n  <build_depend>std_msgs</build_depend>\n  <build_depend>sensor_msgs</build_depend>\n  <build_depend>tf</build_depend>\n  <build_depend>pcl_ros</build_depend>\n  <build_depend>livox_ros_driver</build_depend>\n  <build_depend>message_generation</build_depend>\n\n  <run_depend>geometry_msgs</run_depend>\n  <run_depend>nav_msgs</run_depend>\n  <run_depend>sensor_msgs</run_depend>\n  <run_depend>roscpp</run_depend>\n  <run_depend>rospy</run_depend>\n  <run_depend>std_msgs</run_depend>\n  <run_depend>tf</run_depend>\n  <run_depend>pcl_ros</run_depend>\n  <run_depend>livox_ros_driver</run_depend>\n  <run_depend>message_runtime</run_depend>\n\n  <test_depend>rostest</test_depend>\n  <test_depend>rosbag</test_depend>\n\n  <export>\n  </export>\n</package>\n"
  },
  {
    "path": "rviz_cfg/.gitignore",
    "content": ""
  },
  {
    "path": "rviz_cfg/fast_lo.rviz",
    "content": "Panels:\n  - Class: rviz/Displays\n    Help Height: 0\n    Name: Displays\n    Property Tree Widget:\n      Expanded:\n        - /mapping1\n        - /mapping1/surround1\n        - /mapping1/currPoints1\n        - /mapping1/currPoints1/Autocompute Value Bounds1\n        - /Odometry1/Odometry1/Shape1\n      Splitter Ratio: 0.6432291865348816\n    Tree Height: 874\n  - Class: rviz/Selection\n    Name: Selection\n  - Class: rviz/Tool Properties\n    Expanded:\n      - /2D Pose Estimate1\n      - /2D Nav Goal1\n      - /Publish Point1\n    Name: Tool Properties\n    Splitter Ratio: 0.5886790156364441\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: Views\n    Splitter Ratio: 0.5\n  - Class: rviz/Time\n    Experimental: false\n    Name: Time\n    SyncMode: 0\n    SyncSource: surround\nPreferences:\n  PromptSaveOnExit: true\nToolbars:\n  toolButtonStyle: 2\nVisualization Manager:\n  Class: \"\"\n  Displays:\n    - Alpha: 1\n      Cell Size: 1\n      Class: rviz/Grid\n      Color: 160; 160; 164\n      Enabled: false\n      Line Style:\n        Line Width: 0.029999999329447746\n        Value: Lines\n      Name: Grid\n      Normal Cell Count: 0\n      Offset:\n        X: 0\n        Y: 0\n        Z: 0\n      Plane: XY\n      Plane Cell Count: 100\n      Reference Frame: <Fixed Frame>\n      Value: false\n    - Alpha: 1\n      Class: rviz/Axes\n      Enabled: false\n      Length: 0.699999988079071\n      Name: Axes\n      Radius: 0.05999999865889549\n      Reference Frame: <Fixed Frame>\n      Show Trail: false\n      Value: false\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10.069751739501953\n            Min Value: -2.178016424179077\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 238; 238; 236\n          Color Transformer: 
Intensity\n          Decay Time: 0\n          Enabled: true\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: surround\n          Position Transformer: XYZ\n          Queue Size: 1\n          Selectable: false\n          Size (Pixels): 3\n          Size (m): 0.05000000074505806\n          Style: Points\n          Topic: /cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n        - Alpha: 0.10000000149011612\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 7\n            Min Value: -1\n            Value: false\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: Intensity\n          Decay Time: 1000\n          Enabled: true\n          Invert Rainbow: true\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: currPoints\n          Position Transformer: XYZ\n          Queue Size: 100000\n          Selectable: true\n          Size (Pixels): 1\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 10\n            Min Value: -10\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 0; 0\n          Color Transformer: FlatColor\n          Decay Time: 0\n          Enabled: false\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: PointCloud2\n          Position Transformer: XYZ\n          Queue Size: 10\n          
Selectable: true\n          Size (Pixels): 3\n          Size (m): 0.10000000149011612\n          Style: Flat Squares\n          Topic: /Laser_map\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: false\n      Enabled: true\n      Name: mapping\n    - Class: rviz/Group\n      Displays:\n        - Angle Tolerance: 0.009999999776482582\n          Class: rviz/Odometry\n          Covariance:\n            Orientation:\n              Alpha: 0.5\n              Color: 255; 255; 127\n              Color Style: Unique\n              Frame: Local\n              Offset: 1\n              Scale: 1\n              Value: true\n            Position:\n              Alpha: 0.30000001192092896\n              Color: 204; 51; 204\n              Scale: 1\n              Value: true\n            Value: true\n          Enabled: true\n          Keep: 1\n          Name: Odometry\n          Position Tolerance: 0.0010000000474974513\n          Queue Size: 10\n          Shape:\n            Alpha: 1\n            Axes Length: 0.699999988079071\n            Axes Radius: 0.10000000149011612\n            Color: 255; 85; 0\n            Head Length: 0\n            Head Radius: 0\n            Shaft Length: 0.05000000074505806\n            Shaft Radius: 0.05000000074505806\n            Value: Axes\n          Topic: /aft_mapped_to_init\n          Unreliable: false\n          Value: true\n      Enabled: true\n      Name: Odometry\n    - Alpha: 1\n      Class: rviz/Axes\n      Enabled: true\n      Length: 0.699999988079071\n      Name: Axes\n      Radius: 0.10000000149011612\n      Reference Frame: <Fixed Frame>\n      Show Trail: false\n      Value: true\n    - Alpha: 0\n      Buffer Length: 2\n      Class: rviz/Path\n      Color: 25; 255; 255\n      Enabled: true\n      Head Diameter: 0\n      Head Length: 0\n      Length: 0.6000000238418579\n      Line Style: Billboards\n      Line Width: 0.05000000074505806\n      Name: Path\n      Offset:\n      
  X: 0\n        Y: 0\n        Z: 0\n      Pose Color: 25; 255; 255\n      Pose Style: None\n      Queue Size: 10\n      Radius: 0.07999999821186066\n      Shaft Diameter: 0.4000000059604645\n      Shaft Length: 0.4000000059604645\n      Topic: /path\n      Unreliable: false\n      Value: true\n    - Alpha: 1\n      Autocompute Intensity Bounds: false\n      Autocompute Value Bounds:\n        Max Value: 10\n        Min Value: -10\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 255; 255; 255\n      Color Transformer: Intensity\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 239; 41; 41\n      Max Intensity: 0\n      Min Color: 239; 41; 41\n      Min Intensity: 0\n      Name: PointCloud2\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 4\n      Size (m): 0.05000000074505806\n      Style: Spheres\n      Topic: /cloud_effected\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: false\n    - Alpha: 1\n      Autocompute Intensity Bounds: true\n      Autocompute Value Bounds:\n        Max Value: 13.139549255371094\n        Min Value: -32.08251953125\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 138; 226; 52\n      Color Transformer: FlatColor\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 138; 226; 52\n      Min Color: 138; 226; 52\n      Name: PointCloud2\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 3\n      Size (m): 0.10000000149011612\n      Style: Flat Squares\n      Topic: /Laser_map\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: false\n    - Alpha: 1\n      Autocompute Intensity Bounds: true\n      Autocompute Value Bounds:\n        Max Value: 10\n        Min 
Value: -10\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 255; 255; 255\n      Color Transformer: Intensity\n      Decay Time: 0\n      Enabled: true\n      Invert Rainbow: false\n      Max Color: 255; 255; 255\n      Min Color: 0; 0; 0\n      Name: PointCloud2\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 3\n      Size (m): 0.10000000149011612\n      Style: Flat Squares\n      Topic: /points_raw\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: true\n    - Class: rviz/Marker\n      Enabled: true\n      Marker Topic: /planner_normal\n      Name: Marker\n      Namespaces:\n        {}\n      Queue Size: 100\n      Value: true\n    - Alpha: 0.699999988079071\n      Class: rviz/Map\n      Color Scheme: map\n      Draw Behind: false\n      Enabled: true\n      Name: Map\n      Topic: /projected_map\n      Unreliable: false\n      Use Timestamp: false\n      Value: true\n  Enabled: true\n  Global Options:\n    Background Color: 0; 0; 0\n    Default Light: true\n    Fixed Frame: camera_init\n    Frame Rate: 10\n  Name: root\n  Tools:\n    - Class: rviz/Interact\n      Hide Inactive Objects: true\n    - Class: rviz/MoveCamera\n    - Class: rviz/Select\n    - Class: rviz/FocusCamera\n    - Class: rviz/Measure\n    - Class: rviz/SetInitialPose\n      Theta std deviation: 0.2617993950843811\n      Topic: /initialpose\n      X std deviation: 0.5\n      Y std deviation: 0.5\n    - Class: rviz/SetGoal\n      Topic: /move_base_simple/goal\n    - Class: rviz/PublishPoint\n      Single click: true\n      Topic: /clicked_point\n  Value: true\n  Views:\n    Current:\n      Class: rviz/ThirdPersonFollower\n      Distance: 70.7806625366211\n      Enable Stereo Rendering:\n        Stereo Eye Separation: 0.05999999865889549\n        Stereo Focal Distance: 1\n        Swap Stereo Eyes: false\n        Value: false\n      Field of 
View: 0.7853981852531433\n      Focal Point:\n        X: -38.91015625\n        Y: -13.405705451965332\n        Z: 3.5762786865234375e-06\n      Focal Shape Fixed Size: true\n      Focal Shape Size: 0.05000000074505806\n      Invert Z Axis: false\n      Name: Current View\n      Near Clip Distance: 0.009999999776482582\n      Pitch: 1.249796748161316\n      Target Frame: camera_init\n      Yaw: 2.609495162963867\n    Saved: ~\nWindow Geometry:\n  Displays:\n    collapsed: false\n  Height: 1025\n  Hide Left Dock: false\n  Hide Right Dock: true\n  QMainWindow State: 000000ff00000000fd000000040000000000000156000003a7fc020000000dfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed0000025900000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000003a7000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650000000297000001dc0000000000000000fb0000000a0049006d0061006700650000000394000001600000000000000000fb0000000a0049006d00610067006501000002c5000000c70000000000000000fb0000000a0049006d00610067006501000002c5000000c70000000000000000fb0000000a0049006d00610067006501000002c5000000c700000000000000000000000100000152000003a7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000003a7000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a0056006900
6500770073030000004e00000080000002e100000197000000030000073d00000039fc0100000002fb0000000800540069006d006500000000000000073d000004f300fffffffb0000000800540069006d00650100000000000004500000000000000000000005e1000003a700000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000\n  Selection:\n    collapsed: false\n  Time:\n    collapsed: false\n  Tool Properties:\n    collapsed: false\n  Views:\n    collapsed: true\n  Width: 1853\n  X: 3967\n  Y: 176\n"
  },
  {
    "path": "rviz_cfg/spinning.rviz",
    "content": "Panels:\n  - Class: rviz/Displays\n    Help Height: 0\n    Name: Displays\n    Property Tree Widget:\n      Expanded:\n        - /mapping1\n        - /mapping1/surround1\n        - /mapping1/currPoints1/Autocompute Value Bounds1\n        - /Odometry1/Odometry1/Shape1\n      Splitter Ratio: 0.6432291865348816\n    Tree Height: 985\n  - Class: rviz/Selection\n    Name: Selection\n  - Class: rviz/Tool Properties\n    Expanded:\n      - /2D Pose Estimate1\n      - /2D Nav Goal1\n      - /Publish Point1\n    Name: Tool Properties\n    Splitter Ratio: 0.5886790156364441\n  - Class: rviz/Views\n    Expanded:\n      - /Current View1\n    Name: Views\n    Splitter Ratio: 0.5\n  - Class: rviz/Time\n    Name: Time\n    SyncMode: 0\n    SyncSource: currPoints\nPreferences:\n  PromptSaveOnExit: true\nToolbars:\n  toolButtonStyle: 2\nVisualization Manager:\n  Class: \"\"\n  Displays:\n    - Alpha: 1\n      Cell Size: 1\n      Class: rviz/Grid\n      Color: 160; 160; 164\n      Enabled: false\n      Line Style:\n        Line Width: 0.029999999329447746\n        Value: Lines\n      Name: Grid\n      Normal Cell Count: 0\n      Offset:\n        X: 0\n        Y: 0\n        Z: 0\n      Plane: XY\n      Plane Cell Count: 100\n      Reference Frame: <Fixed Frame>\n      Value: false\n    - Alpha: 1\n      Class: rviz/Axes\n      Enabled: false\n      Length: 0.699999988079071\n      Name: Axes\n      Radius: 0.05999999865889549\n      Reference Frame: <Fixed Frame>\n      Show Trail: false\n      Value: false\n    - Class: rviz/Group\n      Displays:\n        - Alpha: 1\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 9.486757278442383\n            Min Value: -1.5193465948104858\n            Value: true\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 238; 238; 236\n          Color Transformer: AxisColor\n          Decay Time: 0.10000000149011612\n         
 Enabled: true\n          Invert Rainbow: false\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: surround\n          Position Transformer: XYZ\n          Queue Size: 1\n          Selectable: false\n          Size (Pixels): 3\n          Size (m): 0.05000000074505806\n          Style: Points\n          Topic: /cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n        - Alpha: 0.10000000149011612\n          Autocompute Intensity Bounds: true\n          Autocompute Value Bounds:\n            Max Value: 7\n            Min Value: -1\n            Value: false\n          Axis: Z\n          Channel Name: intensity\n          Class: rviz/PointCloud2\n          Color: 255; 255; 255\n          Color Transformer: AxisColor\n          Decay Time: 1000\n          Enabled: true\n          Invert Rainbow: true\n          Max Color: 255; 255; 255\n          Min Color: 0; 0; 0\n          Name: currPoints\n          Position Transformer: XYZ\n          Queue Size: 100000\n          Selectable: true\n          Size (Pixels): 1\n          Size (m): 0.009999999776482582\n          Style: Points\n          Topic: /cloud_registered\n          Unreliable: false\n          Use Fixed Frame: true\n          Use rainbow: true\n          Value: true\n      Enabled: true\n      Name: mapping\n    - Class: rviz/Group\n      Displays:\n        - Angle Tolerance: 0.009999999776482582\n          Class: rviz/Odometry\n          Covariance:\n            Orientation:\n              Alpha: 0.5\n              Color: 255; 255; 127\n              Color Style: Unique\n              Frame: Local\n              Offset: 1\n              Scale: 1\n              Value: true\n            Position:\n              Alpha: 0.30000001192092896\n              Color: 204; 51; 204\n              Scale: 1\n              Value: true\n            Value: true\n          Enabled: true\n          Keep: 1\n         
 Name: Odometry\n          Position Tolerance: 0.0010000000474974513\n          Queue Size: 10\n          Shape:\n            Alpha: 1\n            Axes Length: 0.699999988079071\n            Axes Radius: 0.10000000149011612\n            Color: 255; 85; 0\n            Head Length: 0\n            Head Radius: 0\n            Shaft Length: 0.05000000074505806\n            Shaft Radius: 0.05000000074505806\n            Value: Axes\n          Topic: /aft_mapped_to_init\n          Unreliable: false\n          Value: true\n      Enabled: true\n      Name: Odometry\n    - Alpha: 1\n      Class: rviz/Axes\n      Enabled: true\n      Length: 0.699999988079071\n      Name: Axes\n      Radius: 0.10000000149011612\n      Reference Frame: <Fixed Frame>\n      Show Trail: false\n      Value: true\n    - Alpha: 0\n      Buffer Length: 2\n      Class: rviz/Path\n      Color: 25; 255; 255\n      Enabled: true\n      Head Diameter: 0\n      Head Length: 0\n      Length: 0.6000000238418579\n      Line Style: Billboards\n      Line Width: 0.05000000074505806\n      Name: Path\n      Offset:\n        X: 0\n        Y: 0\n        Z: 0\n      Pose Color: 25; 255; 255\n      Pose Style: None\n      Queue Size: 10\n      Radius: 0.07999999821186066\n      Shaft Diameter: 0.4000000059604645\n      Shaft Length: 0.4000000059604645\n      Topic: /path\n      Unreliable: false\n      Value: true\n    - Alpha: 1\n      Autocompute Intensity Bounds: false\n      Autocompute Value Bounds:\n        Max Value: 10\n        Min Value: -10\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 255; 255; 255\n      Color Transformer: Intensity\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 239; 41; 41\n      Max Intensity: 0\n      Min Color: 239; 41; 41\n      Min Intensity: 0\n      Name: PointCloud2\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 4\n  
    Size (m): 0.05000000074505806\n      Style: Spheres\n      Topic: /cloud_effected\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: false\n    - Alpha: 1\n      Autocompute Intensity Bounds: true\n      Autocompute Value Bounds:\n        Max Value: 13.139549255371094\n        Min Value: -32.08251953125\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 138; 226; 52\n      Color Transformer: FlatColor\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 138; 226; 52\n      Min Color: 138; 226; 52\n      Name: PointCloud2\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 3\n      Size (m): 0.10000000149011612\n      Style: Flat Squares\n      Topic: /Laser_map\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: false\n    - Alpha: 1\n      Autocompute Intensity Bounds: true\n      Autocompute Value Bounds:\n        Max Value: 10\n        Min Value: -10\n        Value: true\n      Axis: Z\n      Channel Name: intensity\n      Class: rviz/PointCloud2\n      Color: 255; 255; 255\n      Color Transformer: Intensity\n      Decay Time: 0\n      Enabled: false\n      Invert Rainbow: false\n      Max Color: 255; 255; 255\n      Min Color: 0; 0; 0\n      Name: PointCloud2\n      Position Transformer: XYZ\n      Queue Size: 10\n      Selectable: true\n      Size (Pixels): 3\n      Size (m): 0.10000000149011612\n      Style: Flat Squares\n      Topic: /points_raw\n      Unreliable: false\n      Use Fixed Frame: true\n      Use rainbow: true\n      Value: false\n    - Class: rviz/Marker\n      Enabled: true\n      Marker Topic: /planner_normal\n      Name: Marker\n      Namespaces:\n        {}\n      Queue Size: 100\n      Value: true\n    - Alpha: 0.699999988079071\n      Class: rviz/Map\n      Color Scheme: map\n      Draw Behind: false\n      
Enabled: true\n      Name: Map\n      Topic: /projected_map\n      Unreliable: false\n      Use Timestamp: false\n      Value: true\n  Enabled: true\n  Global Options:\n    Background Color: 0; 0; 0\n    Default Light: true\n    Fixed Frame: camera_init\n    Frame Rate: 50\n  Name: root\n  Tools:\n    - Class: rviz/Interact\n      Hide Inactive Objects: true\n    - Class: rviz/MoveCamera\n    - Class: rviz/Select\n    - Class: rviz/FocusCamera\n    - Class: rviz/Measure\n    - Class: rviz/SetInitialPose\n      Theta std deviation: 0.2617993950843811\n      Topic: /initialpose\n      X std deviation: 0.5\n      Y std deviation: 0.5\n    - Class: rviz/SetGoal\n      Topic: /move_base_simple/goal\n    - Class: rviz/PublishPoint\n      Single click: true\n      Topic: /clicked_point\n  Value: true\n  Views:\n    Current:\n      Class: rviz/ThirdPersonFollower\n      Distance: 11.642395973205566\n      Enable Stereo Rendering:\n        Stereo Eye Separation: 0.05999999865889549\n        Stereo Focal Distance: 1\n        Swap Stereo Eyes: false\n        Value: false\n      Field of View: 0.7853981852531433\n      Focal Point:\n        X: 0.9479854106903076\n        Y: 4.469108581542969\n        Z: -8.34069624033873e-07\n      Focal Shape Fixed Size: true\n      Focal Shape Size: 0.05000000074505806\n      Invert Z Axis: false\n      Name: Current View\n      Near Clip Distance: 0.009999999776482582\n      Pitch: 0.6647981405258179\n      Target Frame: camera_init\n      Yaw: 1.3131378889083862\n    Saved: ~\nWindow Geometry:\n  Displays:\n    collapsed: false\n  Height: 1136\n  Hide Left Dock: false\n  Hide Right Dock: true\n  QMainWindow State: 
000000ff00000000fd00000004000000000000015600000416fc020000000dfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000416000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650000000297000001dc0000000000000000fb0000000a0049006d0061006700650000000394000001600000000000000000fb0000000a0049006d00610067006501000002c5000000c70000000000000000fb0000000a0049006d00610067006501000002c5000000c70000000000000000fb0000000a0049006d00610067006501000002c5000000c700000000000000000000000100000152000003a7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000003a7000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d00000039fc0100000002fb0000000800540069006d006500000000000000073d0000041800fffffffb0000000800540069006d00650100000000000004500000000000000000000005dc0000041600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000\n  Selection:\n    collapsed: false\n  Time:\n    collapsed: false\n  Tool Properties:\n    collapsed: false\n  Views:\n    collapsed: true\n  Width: 1848\n  X: 72\n  Y: 27\n"
  },
  {
    "path": "src/IMU_Processing.hpp",
    "content": "#ifndef _IMU_PROCESSING_HPP\n#define _IMU_PROCESSING_HPP\n\n#include <cmath>\n#include <math.h>\n#include <deque>\n#include <mutex>\n#include <thread>\n#include <fstream>\n#include <csignal>\n#include <ros/ros.h>\n#include <so3_math.h>\n#include <Eigen/Eigen>\n#include <common_lib.h>\n#include <pcl/common/io.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <condition_variable>\n#include <nav_msgs/Odometry.h>\n#include <pcl/common/transforms.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <tf/transform_broadcaster.h>\n#include <eigen_conversions/eigen_msg.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <sensor_msgs/Imu.h>\n#include <sensor_msgs/PointCloud2.h>\n#include <geometry_msgs/Vector3.h>\n\n/// *************Preconfiguration\n\n#define MAX_INI_COUNT (200)\n\nconst bool time_list(PointType &x, PointType &y) {return (x.curvature < y.curvature);};\n\n/// *************IMU Process and undistortion\nclass ImuProcess\n{\n public:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  ImuProcess();\n  ~ImuProcess();\n  \n  void Reset();\n  void Reset(double start_timestamp, const sensor_msgs::ImuConstPtr &lastimu);\n  void set_R_LI_cov(const V3D &R_LI_cov);\n  void set_T_LI_cov(const V3D &T_LI_cov);\n  void set_gyr_cov(const V3D &scaler);\n  void set_acc_cov(const V3D &scaler);\n  void set_mean_acc_norm(const double &mean_acc_norm);\n  void set_gyr_bias_cov(const V3D &b_g);\n  void set_acc_bias_cov(const V3D &b_a);\n  void Process(const MeasureGroup &meas, StatesGroup &state, PointCloudXYZI::Ptr pcl_un_);\n  void undistort_without_imu(StatesGroup &state_inout, PointCloudXYZI &pcl_out);\n  void Reforward_propagation_without_imu(StatesGroup& last_state, StatesGroup &state_inout, V3D& cov_v, V3D& cov_omega);\n\n\n//  ros::NodeHandle nh;\n  ofstream fout_imu;\n  V3D cov_acc;\n  V3D cov_gyr;\n  V3D cov_R_LI;\n  V3D cov_T_LI;\n  V3D cov_acc_scale;\n  V3D cov_gyr_scale;\n  V3D cov_bias_gyr;\n  V3D cov_bias_acc;\n  double 
first_lidar_time;\n  int    lidar_type;\n  bool   imu_en;\n  bool LI_init_done = false;\n  double IMU_mean_acc_norm;\n  double frame_dt = 0.0;\n  double frame_end_time = 0.0;\n  double dt = 0.0;\n\n\nprivate:\n  void IMU_init(const MeasureGroup &meas, StatesGroup &state, int &N);\n  void propagation_and_undist(const MeasureGroup &meas, StatesGroup &state_inout, PointCloudXYZI &pcl_in_out);\n  void Forward_propagation_without_imu(const MeasureGroup &meas, StatesGroup &state_inout, PointCloudXYZI &pcl_out);\n  PointCloudXYZI::Ptr cur_pcl_un_;\n  sensor_msgs::ImuConstPtr last_imu_;\n  deque<sensor_msgs::ImuConstPtr> v_imu_;\n  vector<Pose6D> IMUpose;\n  V3D mean_acc;\n  V3D mean_gyr;\n  V3D angvel_last;\n  V3D acc_s_last;\n  double last_lidar_end_time_;\n  double time_last_scan;\n  int    init_iter_num = 1;\n  bool   b_first_frame_ = true;\n  bool   imu_need_init_ = true;\n};\n\nImuProcess::ImuProcess()\n    : b_first_frame_(true), imu_need_init_(true)\n{\n  imu_en = true;\n  init_iter_num = 1;\n  cov_acc         = V3D(0.1, 0.1, 0.1);\n  cov_gyr         = V3D(0.1, 0.1, 0.1);\n  cov_R_LI        = V3D(0.00001, 0.00001, 0.00001);\n  cov_T_LI        = V3D(0.0001, 0.0001, 0.0001);\n  cov_bias_gyr    = V3D(0.0001, 0.0001, 0.0001);\n  cov_bias_acc    = V3D(0.0001, 0.0001, 0.0001);\n  mean_acc        = V3D(0, 0, -1.0);\n  mean_gyr        = V3D(0, 0, 0);\n  angvel_last     = Zero3d;\n  last_imu_.reset(new sensor_msgs::Imu());\n  fout_imu.open(DEBUG_FILE_DIR(\"imu.txt\"),ios::out);\n}\n\nImuProcess::~ImuProcess() {}\n\nvoid ImuProcess::Reset() \n{\n  ROS_WARN(\"Reset ImuProcess\");\n  mean_acc      = V3D(0, 0, -1.0);\n  mean_gyr      = V3D(0, 0, 0);\n  angvel_last       = Zero3d;\n  imu_need_init_    = true;\n  init_iter_num     = 1;\n  v_imu_.clear();\n  IMUpose.clear();\n  last_imu_.reset(new sensor_msgs::Imu());\n  cur_pcl_un_.reset(new PointCloudXYZI());\n}\n\n\nvoid ImuProcess::set_gyr_cov(const V3D &scaler)\n{\n  cov_gyr_scale = scaler;\n}\n\nvoid 
ImuProcess::set_acc_cov(const V3D &scaler)\n{\n  cov_acc_scale = scaler;\n}\n\nvoid ImuProcess::set_R_LI_cov(const V3D &R_LI_cov)\n{\n    cov_R_LI = R_LI_cov;\n}\n\nvoid ImuProcess::set_T_LI_cov(const V3D &T_LI_cov)\n{\n    cov_T_LI = T_LI_cov;\n}\n\nvoid ImuProcess::set_mean_acc_norm(const double &mean_acc_norm){\n    IMU_mean_acc_norm = mean_acc_norm;\n}\n\nvoid ImuProcess::set_gyr_bias_cov(const V3D &b_g)\n{\n  cov_bias_gyr = b_g;\n}\n\nvoid ImuProcess::set_acc_bias_cov(const V3D &b_a)\n{\n  cov_bias_acc = b_a;\n}\n\nvoid ImuProcess::IMU_init(const MeasureGroup &meas, StatesGroup &state_inout, int &N)\n{\n  /** 1. initializing the gravity, gyro bias, acc and gyro covariance\n   ** 2. normalize the acceleration measurements to unit gravity **/\n  ROS_INFO(\"IMU Initializing: %.1f %%\", double(N) / MAX_INI_COUNT * 100);\n  V3D cur_acc, cur_gyr;\n  \n  if (b_first_frame_)\n  {\n    Reset();\n    N = 1;\n    b_first_frame_ = false;\n    const auto &imu_acc = meas.imu.front()->linear_acceleration;\n    const auto &gyr_acc = meas.imu.front()->angular_velocity;\n    mean_acc << imu_acc.x, imu_acc.y, imu_acc.z;\n    mean_gyr << gyr_acc.x, gyr_acc.y, gyr_acc.z;\n    first_lidar_time = meas.lidar_beg_time;\n  }\n\n  for (const auto &imu : meas.imu)\n  {\n    const auto &imu_acc = imu->linear_acceleration;\n    const auto &gyr_acc = imu->angular_velocity;\n    cur_acc << imu_acc.x, imu_acc.y, imu_acc.z;\n    cur_gyr << gyr_acc.x, gyr_acc.y, gyr_acc.z;\n\n    mean_acc      += (cur_acc - mean_acc) / N;\n    mean_gyr      += (cur_gyr - mean_gyr) / N;\n\n    cov_acc = cov_acc * (N - 1.0) / N + (cur_acc - mean_acc).cwiseProduct(cur_acc - mean_acc) * (N - 1.0) / (N * N);\n    cov_gyr = cov_gyr * (N - 1.0) / N + (cur_gyr - mean_gyr).cwiseProduct(cur_gyr - mean_gyr) * (N - 1.0) / (N * N);\n\n    N ++;\n  }\n\n  state_inout.gravity = - mean_acc / mean_acc.norm() * G_m_s2;\n  state_inout.rot_end = Eye3d;\n  state_inout.bias_g.setZero();\n  last_imu_ = meas.imu.back();\n}\n\n\n\nvoid 
ImuProcess::Forward_propagation_without_imu(const MeasureGroup &meas, StatesGroup &state_inout,\n                             PointCloudXYZI &pcl_out) {\n    pcl_out = *(meas.lidar);\n    /*** sort point clouds by offset time ***/\n    const double &pcl_beg_time = meas.lidar_beg_time;\n    sort(pcl_out.points.begin(), pcl_out.points.end(), time_list);\n    const double &pcl_end_offset_time = pcl_out.points.back().curvature / double(1000);\n\n    MD(DIM_STATE, DIM_STATE) F_x, cov_w;\n\n    if (b_first_frame_)\n    {\n        dt = 0.1;\n        b_first_frame_ = false;\n        frame_dt = pcl_out.points.back().curvature / double(1000) - pcl_out.points.front().curvature / double(1000);\n        frame_end_time = pcl_out.points.back().curvature;\n    }\n    else\n    {\n        dt = pcl_beg_time - time_last_scan;\n        time_last_scan = pcl_beg_time;\n        frame_dt = pcl_out.points.back().curvature / double(1000) - pcl_out.points.front().curvature / double(1000);\n        frame_end_time = pcl_out.points.back().curvature;\n\n    }\n\n    M3D Exp_f = Exp(state_inout.bias_g, dt);\n    /** Forward propagation of attitude **/\n    state_inout.rot_end = state_inout.rot_end * Exp_f;\n\n    /** Position Propagation **/\n    state_inout.pos_end += state_inout.rot_end * state_inout.vel_end * dt;\n\n    /* covariance propagation */\n    F_x.setIdentity();\n    cov_w.setZero();\n    /** In CV model, bias_g represents angular velocity **/\n    /** In CV model，bias_a represents linear acceleration **/\n    F_x.block<3, 3>(0, 0) = Exp(state_inout.bias_g, -dt);\n    F_x.block<3, 3>(0, 15) = Eye3d * dt;\n    F_x.block<3, 3>(3, 12) = state_inout.rot_end * dt;\n    F_x.block<3, 3>(3, 0) = -state_inout.rot_end * SKEW_SYM_MATRX(state_inout.vel_end) * Exp(state_inout.bias_g, -dt) * dt;\n    F_x.block<3, 3>(3, 15) = -state_inout.rot_end * SKEW_SYM_MATRX(state_inout.vel_end) * dt;\n\n    cov_w.block<3, 3>(15, 15).diagonal() = cov_gyr_scale * dt * dt;\n    cov_w.block<3, 3>(12, 
12).diagonal() = cov_acc_scale * dt * dt;\n    /** Forward propagation of covariance**/\n    state_inout.cov = F_x * state_inout.cov * F_x.transpose() + cov_w;\n}\n\nvoid ImuProcess::Reforward_propagation_without_imu(StatesGroup& last_state, StatesGroup &state_inout, V3D& cov_v, V3D& cov_omega)\n{\n    MD(DIM_STATE, DIM_STATE) F_x, cov_w;\n\n    /* covariance propagation */\n    F_x.setIdentity();\n    cov_w.setZero();\n\n    StatesGroup temp_state;\n    temp_state = last_state;\n    M3D Exp_f = Exp(temp_state.bias_g, dt);\n    temp_state.rot_end = temp_state.rot_end * Exp_f;\n\n    F_x.block<3, 3>(0, 0) = Exp(temp_state.bias_g, -dt);\n    F_x.block<3, 3>(0, 15) = Eye3d * dt;\n    F_x.block<3, 3>(3, 12) = temp_state.rot_end * dt;\n\n    cov_w.block<3, 3>(15, 15).diagonal() = cov_omega * dt * dt;\n    cov_w.block<3, 3>(12, 12).diagonal() = cov_v * dt * dt;\n\n    /** Forward propagation of covariance**/\n    state_inout.cov = F_x * last_state.cov * F_x.transpose() + cov_w;\n}\n\nvoid ImuProcess::propagation_and_undist(const MeasureGroup &meas, StatesGroup &state_inout, PointCloudXYZI &pcl_out)\n{\n  /*** add the imu of the last frame-tail to the current frame-head ***/\n  pcl_out = *(meas.lidar);\n  auto v_imu = meas.imu;\n  v_imu.push_front(last_imu_);\n  double imu_end_time = v_imu.back()->header.stamp.toSec();\n  double pcl_beg_time, pcl_end_time;\n\n  if (lidar_type == L515)\n  {\n    pcl_beg_time = last_lidar_end_time_;\n    pcl_end_time = meas.lidar_beg_time;\n  }\n  else\n  {\n    pcl_beg_time = meas.lidar_beg_time;\n    /*** sort point clouds by offset time ***/\n    sort(pcl_out.points.begin(), pcl_out.points.end(), time_list);\n    pcl_end_time = pcl_beg_time + pcl_out.points.back().curvature / double(1000);\n  }\n\n\n  /*** Initialize IMU pose ***/\n  IMUpose.clear();\n  IMUpose.push_back(set_pose6d(0.0, acc_s_last, angvel_last, state_inout.vel_end, state_inout.pos_end, state_inout.rot_end));\n\n  /*** forward propagation at each imu point ***/\n  V3D 
acc_imu, angvel_avr, acc_avr, vel_imu(state_inout.vel_end), pos_imu(state_inout.pos_end);\n  M3D R_imu(state_inout.rot_end);\n  MD(DIM_STATE, DIM_STATE) F_x, cov_w;\n  \n  for (auto it_imu = v_imu.begin(); it_imu < (v_imu.end() - 1); it_imu++)\n  {\n    auto &&head = *(it_imu);\n    auto &&tail = *(it_imu + 1);\n\n    if (tail->header.stamp.toSec() < last_lidar_end_time_)    continue;\n    \n    angvel_avr<<0.5 * (head->angular_velocity.x + tail->angular_velocity.x),\n                0.5 * (head->angular_velocity.y + tail->angular_velocity.y),\n                0.5 * (head->angular_velocity.z + tail->angular_velocity.z);\n\n\n    acc_avr   <<0.5 * (head->linear_acceleration.x + tail->linear_acceleration.x),\n                0.5 * (head->linear_acceleration.y + tail->linear_acceleration.y),\n                0.5 * (head->linear_acceleration.z + tail->linear_acceleration.z);\n\n      V3D angvel_now(head->angular_velocity.x, head->angular_velocity.y, head->angular_velocity.z);\n      V3D acc_now(head->linear_acceleration.x, head->linear_acceleration.y, head->linear_acceleration.z);\n      fout_imu << setw(10) << head->header.stamp.toSec() << \"  \" << angvel_now.transpose()<< \" \" << acc_now.transpose() << endl;\n\n    angvel_avr -= state_inout.bias_g;\n    acc_avr     = acc_avr / IMU_mean_acc_norm * G_m_s2 - state_inout.bias_a;\n\n    if(head->header.stamp.toSec() < last_lidar_end_time_)\n        dt = tail->header.stamp.toSec() - last_lidar_end_time_;\n    else\n        dt = tail->header.stamp.toSec() - head->header.stamp.toSec();\n    \n    /* covariance propagation */\n    M3D acc_avr_skew;\n    M3D Exp_f   = Exp(angvel_avr, dt);\n    acc_avr_skew<<SKEW_SYM_MATRX(acc_avr);\n\n    F_x.setIdentity();\n    cov_w.setZero();\n\n    F_x.block<3,3>(0,0)  = Exp(angvel_avr, -dt);\n    F_x.block<3,3>(0,15)  = - Eye3d * dt;\n    F_x.block<3,3>(3,12)  = Eye3d * dt;\n    F_x.block<3,3>(12,0)  = - R_imu * acc_avr_skew * dt;\n    F_x.block<3,3>(12,18) = - R_imu * dt;\n    
F_x.block<3,3>(12,21) = Eye3d * dt;\n\n    cov_w.block<3,3>(0,0).diagonal()   = cov_gyr * dt * dt;\n    cov_w.block<3,3>(6,6).diagonal()   = cov_R_LI * dt * dt;\n    cov_w.block<3,3>(9,9).diagonal()   = cov_T_LI * dt * dt;\n    cov_w.block<3,3>(12,12)            = R_imu * cov_acc.asDiagonal() * R_imu.transpose() * dt * dt;\n    cov_w.block<3,3>(15,15).diagonal() = cov_bias_gyr * dt * dt; // bias gyro covariance\n    cov_w.block<3,3>(18,18).diagonal() = cov_bias_acc * dt * dt; // bias acc covariance\n\n    state_inout.cov = F_x * state_inout.cov * F_x.transpose() + cov_w;\n\n    /* propagation of IMU attitude (global frame)*/\n    R_imu = R_imu * Exp_f;\n\n    /* Specific acceleration (global frame) of IMU */\n    acc_imu = R_imu * acc_avr + state_inout.gravity;\n\n    /* propagation of IMU position (global frame)*/\n    pos_imu = pos_imu + vel_imu * dt + 0.5 * acc_imu * dt * dt;\n\n    /* velocity of IMU (global frame)*/\n    vel_imu = vel_imu + acc_imu * dt;\n\n    /* save the poses at each IMU measurements (global frame)*/\n    angvel_last = angvel_avr;\n    acc_s_last  = acc_imu;\n    double &&offs_t = tail->header.stamp.toSec() - pcl_beg_time;\n    IMUpose.push_back(set_pose6d(offs_t, acc_imu, angvel_avr, vel_imu, pos_imu, R_imu));\n  }\n\n  /*** calculated the pos and attitude prediction at the frame-end ***/\n  double note = pcl_end_time > imu_end_time ? 
1.0 : -1.0;\n  dt = note * (pcl_end_time - imu_end_time);\n  state_inout.vel_end = vel_imu + note * acc_imu * dt;\n  state_inout.rot_end = R_imu * Exp(V3D(note * angvel_avr), dt);\n  state_inout.pos_end = pos_imu + note * vel_imu * dt + note * 0.5 * acc_imu * dt * dt;\n\n\n  last_imu_ = meas.imu.back();\n  last_lidar_end_time_ = pcl_end_time;\n\n  if (lidar_type != L515)\n  {\n    #ifdef DEBUG_PRINT\n      cout<<\"[ IMU Process ]: vel \"<<state_inout.vel_end.transpose()<<\" pos \"<<state_inout.pos_end.transpose()<<\" ba\"<<state_inout.bias_a.transpose()<<\" bg \"<<state_inout.bias_g.transpose()<<endl;\n      cout<<\"propagated cov: \"<<state_inout.cov.diagonal().transpose()<<endl;\n    #endif\n    /*** un-distort each lidar point (backward propagation) ***/\n    auto it_pcl = pcl_out.points.end() - 1; //a single point in k-th frame\n    for (auto it_kp = IMUpose.end() - 1; it_kp != IMUpose.begin(); it_kp--)\n    {\n        auto head = it_kp - 1;\n        R_imu << MAT_FROM_ARRAY(head->rot);\n        acc_imu << VEC_FROM_ARRAY(head->acc);\n        // cout<<\"head imu acc: \"<<acc_imu.transpose()<<endl;\n        vel_imu << VEC_FROM_ARRAY(head->vel);\n        pos_imu << VEC_FROM_ARRAY(head->pos);\n        angvel_avr << VEC_FROM_ARRAY(head->gyr);\n        for (; it_pcl->curvature / double(1000) > head->offset_time; it_pcl--) {\n            dt = it_pcl->curvature / double(1000) - head->offset_time; //dt = t_j - t_i > 0\n            /* Transform to the 'scan-end' IMU frame（I_k frame)*/\n            M3D R_i(R_imu * Exp(angvel_avr, dt));\n            V3D P_i = pos_imu + vel_imu * dt + 0.5 * acc_imu * dt * dt;\n            V3D p_in(it_pcl->x, it_pcl->y, it_pcl->z);\n            V3D P_compensate = state_inout.offset_R_L_I.transpose() * (state_inout.rot_end.transpose() * (R_i * (state_inout.offset_R_L_I * p_in + state_inout.offset_T_L_I) + P_i - state_inout.pos_end) - state_inout.offset_T_L_I);\n            /// save Undistorted points\n            it_pcl->x = P_compensate(0);\n 
           it_pcl->y = P_compensate(1);\n            it_pcl->z = P_compensate(2);\n            if (it_pcl == pcl_out.points.begin()) break;\n        }\n    }\n  }\n}\n\n\nvoid ImuProcess::Process(const MeasureGroup &meas, StatesGroup &stat, PointCloudXYZI::Ptr cur_pcl_un_)\n{\n    Forward_propagation_without_imu(meas, stat, *cur_pcl_un_);\n}\n#endif"
  },
  {
    "path": "src/laserMapping.cpp",
    "content": "// This is an advanced implementation of the algorithm described in the\n// following paper:\n//   J. Zhang and S. Singh. LOAM: Lidar Odometry and Mapping in Real-time.\n//     Robotics: Science and Systems Conference (RSS). Berkeley, CA, July 2014.\n\n// Modifier: Livox               dev@livoxtech.com\n\n// Copyright 2013, Ji Zhang, Carnegie Mellon University\n// Further contributions copyright (c) 2016, Southwest Research Institute\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//#include \"ros/package.h\"\n// 1. Redistributions of source code must retain the above copyright notice,\n//    this list of conditions and the following disclaimer.\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n//    this list of conditions and the following disclaimer in the documentation\n//    and/or other materials provided with the distribution.\n// 3. Neither the name of the copyright holder nor the names of its\n//    contributors may be used to endorse or promote products derived from this\n//    software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n// POSSIBILITY OF SUCH DAMAGE.\n#include <omp.h>\n#include \"IMU_Processing.hpp\"\n#include \"ros/package.h\"\n#include <unistd.h>\n#include <Python.h>\n#include <Eigen/Core>\n#include <nav_msgs/Odometry.h>\n#include <nav_msgs/Path.h>\n#include <algorithm>\n#include <pcl_conversions/pcl_conversions.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/io/pcd_io.h>\n#include <tf/transform_datatypes.h>\n#include <tf/transform_broadcaster.h>\n#include <livox_ros_driver/CustomMsg.h>\n#include \"preprocess.h\"\n#include <ikd-Tree/ikd_Tree.h>\n#include <math.h>\n#ifndef DEPLOY\n#include \"matplotlibcpp.h\"\nnamespace plt = matplotlibcpp;\n#endif\n\n#define PUBFRAME_PERIOD     (20)\n\nfloat DET_RANGE = 300.0f;\nconst float MOV_THRESHOLD = 1.5f;\n\nmutex mtx_buffer;\ncondition_variable sig_buffer;\n\nstring root_dir = ROOT_DIR;\nstring map_file_path, lid_topic, imu_topic;\n\nint iterCount = 0, feats_down_size = 0, NUM_MAX_ITERATIONS = 0, laserCloudValidNum = 0, \\\n effect_feat_num = 0, scan_count = 0, publish_count = 0;\n\nint feats_points_size = 0;\n\nint feats_points_full_size = 0;\n\nint NUM_MAX_UNDISTORT = 0;\n\ndouble res_mean_last = 0.05;\ndouble gyr_cov = 0.1, acc_cov = 0.1;\ndouble last_timestamp_lidar = 0, last_timestamp_imu = 0.0;\ndouble filter_size_surf_min = 0, filter_size_map_min = 0;\ndouble cube_len = 0, total_distance = 0, lidar_end_time = 0, first_lidar_time = 0.0;\n\n// Time Log Variables\nint kdtree_size_st = 0, kdtree_size_end = 0, add_point_size = 
0;\n\nint distort_time = 0;\n\nint lidar_type, pcd_save_interval = -1, pcd_index = 0;\nbool lidar_pushed, flg_reset, flg_exit = false, flg_EKF_inited = true;\nbool imu_en = false;\nbool scan_pub_en = false, dense_pub_en = false, scan_body_pub_en = false;\nbool runtime_pos_log = false, pcd_save_en = false,  path_en = true;\n\nbool cut_frame = true;\nint cut_frame_num = 1, frame_num = 0;\ndouble time_lag_IMU_wtr_lidar = 0.0;\nofstream fout_result;\ndouble cov_lidar = 0.001;\n\nbool adaptive_cov = false;\ndouble K_cov = 100;\ndouble a_cov = 5;\ndouble b_cov = 10;\n\nshared_ptr<ImuProcess> p_imu(new ImuProcess());\nStatesGroup state_propagat;\n\nvector<BoxPointType> cub_needrm;\n\ndeque<PointCloudXYZI::Ptr> lidar_buffer;\ndeque<std::vector<Point3D>> lidar_points_buffer;\ndeque<double> time_buffer;\ndeque<sensor_msgs::Imu::Ptr> imu_buffer;\n\nvector<vector<int>> pointSearchInd_surf;\nvector<PointVector> Nearest_Points;\nbool point_selected_surf[100000] = {0};\nfloat res_last[100000] = {0.0};\ndouble total_residual;\n\n//surf feature in map\nPointCloudXYZI::Ptr featsFromMap(new PointCloudXYZI());\nPointCloudXYZI::Ptr feats_undistort(new PointCloudXYZI());\nPointCloudXYZI::Ptr feats_down_body(new PointCloudXYZI());\nPointCloudXYZI::Ptr feats_down_world(new PointCloudXYZI());\nPointCloudXYZI::Ptr normvec(new PointCloudXYZI(100000, 1));\nPointCloudXYZI::Ptr laserCloudOri(new PointCloudXYZI(100000, 1));\nPointCloudXYZI::Ptr corr_normvect(new PointCloudXYZI(100000, 1));\nPointCloudXYZI::Ptr _featsArray;\n\nstd::vector<Point3D> feats_points;\nstd::vector<Point3D> feats_points_full;\n\ndouble current_dt = 0.1;\ndouble current_end_time = 0.1;\n\npcl::VoxelGrid<PointType> downSizeFilterSurf;\npcl::VoxelGrid<PointType> downSizeFilterMap;\n\nKD_TREE ikdtree;\n\nM3D last_rot(M3D::Zero());\nV3F XAxisPoint_body(LIDAR_SP_LEN, 0.0, 0.0);\nV3F XAxisPoint_world(LIDAR_SP_LEN, 0.0, 0.0);\nV3D euler_cur;\nV3D position_last(Zero3d);\nV3D last_odom(Zero3d);\n\n\n//estimator inputs and 
output;\nMeasureGroup Measures;\nStatesGroup state;\nStatesGroup last_state;\n\nPointCloudXYZI::Ptr pcl_wait_save(new PointCloudXYZI());\npcl::PCDWriter pcd_writer;\nstring all_points_dir;\n\nnav_msgs::Path path;\nnav_msgs::Odometry odomAftMapped;\ngeometry_msgs::Quaternion geoQuat;\ngeometry_msgs::PoseStamped msg_body_pose;\n\nshared_ptr<Preprocess> p_pre(new Preprocess());\n\n\nfloat calc_dist(PointType p1, PointType p2) {\n    float d = (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y) + (p1.z - p2.z) * (p1.z - p2.z);\n    return d;\n}\n\nvoid calcBodyVar(Eigen::Vector3d &pb, const float range_inc,\n                 const float degree_inc, Eigen::Matrix3d &var) {\n    float range = sqrt(pb[0] * pb[0] + pb[1] * pb[1] + pb[2] * pb[2]);\n    float range_var = range_inc * range_inc;\n    Eigen::Matrix2d direction_var;\n    direction_var << pow(sin(DEG2RAD(degree_inc)), 2), 0, 0,\n            pow(sin(DEG2RAD(degree_inc)), 2);\n    Eigen::Vector3d direction(pb);\n    direction.normalize();\n    Eigen::Matrix3d direction_hat;\n    direction_hat << 0, -direction(2), direction(1), direction(2), 0,\n            -direction(0), -direction(1), direction(0), 0;\n    Eigen::Vector3d base_vector1(1, 1,\n                                 -(direction(0) + direction(1)) / direction(2));\n    base_vector1.normalize();\n    Eigen::Vector3d base_vector2 = base_vector1.cross(direction);\n    base_vector2.normalize();\n    Eigen::Matrix<double, 3, 2> N;\n    N << base_vector1(0), base_vector2(0), base_vector1(1), base_vector2(1),\n            base_vector1(2), base_vector2(2);\n    Eigen::Matrix<double, 3, 2> A = range * direction_hat * N;\n    var = direction * range_var * direction.transpose() +\n          A * direction_var * A.transpose();\n}\n\nvoid SigHandle(int sig) {\n    if (pcd_save_en && pcd_save_interval < 0){\n        all_points_dir = string(root_dir + \"/PCD/PCD_all\" + string(\".pcd\"));\n        pcd_writer.writeBinary(all_points_dir, *pcl_wait_save);\n    }\n   
 flg_exit = true;\n    ROS_WARN(\"catch sig %d\", sig);\n    sig_buffer.notify_all();\n}\n\ninline void dump_lio_state_to_log(FILE *fp) {\n    V3D rot_ang(Log(state.rot_end));\n    fprintf(fp, \"%lf \", Measures.lidar_beg_time - first_lidar_time);\n    fprintf(fp, \"%lf %lf %lf \", rot_ang(0), rot_ang(1), rot_ang(2));                   // Angle\n    fprintf(fp, \"%lf %lf %lf \", state.pos_end(0), state.pos_end(1), state.pos_end(2)); // Pos\n    fprintf(fp, \"%lf %lf %lf \", state.vel_end(0), state.vel_end(1), state.vel_end(2)); // Vel\n    fprintf(fp, \"%lf %lf %lf \", 0.0, 0.0, 0.0);                                        // omega\n    fprintf(fp, \"%lf %lf %lf \", 0.0, 0.0, 0.0);                                        // Acc\n    fprintf(fp, \"%lf %lf %lf \", state.bias_g(0), state.bias_g(1), state.bias_g(2));    // Bias_g\n    fprintf(fp, \"%lf %lf %lf \", state.bias_a(0), state.bias_a(1), state.bias_a(2));    // Bias_a\n    fprintf(fp, \"%lf %lf %lf \", state.gravity(0), state.gravity(1), state.gravity(2)); // Bias_a  \n    fprintf(fp, \"\\r\\n\");\n    fflush(fp);\n}\n\n\nvoid pointBodyToWorld(PointType const *const pi, PointType *const po) {\n    V3D p_body(pi->x, pi->y, pi->z);\n    V3D p_global(state.rot_end * (state.offset_R_L_I * p_body + state.offset_T_L_I) + state.pos_end);\n\n    po->x = p_global(0);\n    po->y = p_global(1);\n    po->z = p_global(2);\n    po->normal_x = pi->normal_x;\n    po->normal_y = pi->normal_y;\n    po->normal_z = pi->normal_z;\n    po->intensity = pi->intensity;\n}\n\ntemplate<typename T>\nvoid pointBodyToWorld(const Matrix<T, 3, 1> &pi, Matrix<T, 3, 1> &po) {\n    V3D p_body(pi[0], pi[1], pi[2]);\n    V3D p_global(state.rot_end * (state.offset_R_L_I * p_body + state.offset_T_L_I) + state.pos_end);\n    po[0] = p_global(0);\n    po[1] = p_global(1);\n    po[2] = p_global(2);\n}\n\nvoid RGBpointBodyToWorld(PointType const *const pi, PointTypeRGB *const po) {\n    V3D p_body(pi->x, pi->y, pi->z);\n    V3D p_global(state.rot_end * 
(state.offset_R_L_I * p_body + state.offset_T_L_I) + state.pos_end);\n    po->x = p_global(0);\n    po->y = p_global(1);\n    po->z = p_global(2);\n    po->r = pi->normal_x;\n    po->g = pi->normal_y;\n    po->b = pi->normal_z;\n\n    float intensity = pi->intensity;\n    intensity = intensity - floor(intensity);\n\n    int reflection_map = intensity * 10000;\n}\n\nint points_cache_size = 0;\n\nvoid points_cache_collect()\n{\n    PointVector points_history;\n    ikdtree.acquire_removed_points(points_history);\n    points_cache_size = points_history.size();\n    for (int i = 0; i < points_history.size(); i++) _featsArray->push_back(points_history[i]);\n}\n\n\nBoxPointType LocalMap_Points;\nbool Localmap_Initialized = false;\nint kdtree_delete_counter = 0;\nvoid lasermap_fov_segment()\n{\n    cub_needrm.clear();\n\n    pointBodyToWorld(XAxisPoint_body, XAxisPoint_world);\n    V3D pos_LiD = state.pos_end;\n\n    if (!Localmap_Initialized)\n    {\n        for (int i = 0; i < 3; i++)\n        {\n            LocalMap_Points.vertex_min[i] = pos_LiD(i) - cube_len / 2.0;\n            LocalMap_Points.vertex_max[i] = pos_LiD(i) + cube_len / 2.0;\n        }\n        Localmap_Initialized = true;\n        return;\n    }\n\n    float dist_to_map_edge[3][2];\n    bool need_move = false;\n    for (int i = 0; i < 3; i++)\n    {\n        dist_to_map_edge[i][0] = fabs(pos_LiD(i) - LocalMap_Points.vertex_min[i]);\n        dist_to_map_edge[i][1] = fabs(pos_LiD(i) - LocalMap_Points.vertex_max[i]);\n        if (dist_to_map_edge[i][0] <= MOV_THRESHOLD * DET_RANGE ||\n            dist_to_map_edge[i][1] <= MOV_THRESHOLD * DET_RANGE)\n            need_move = true;\n    }\n    if (!need_move) return;\n    BoxPointType New_LocalMap_Points, tmp_boxpoints;\n    New_LocalMap_Points = LocalMap_Points;\n    float mov_dist = max((cube_len - 2.0 * MOV_THRESHOLD * DET_RANGE) * 0.5 * 0.9,\n                         double(DET_RANGE * (MOV_THRESHOLD - 1)));\n    for (int i = 0; i < 3; i++)\n    {\n        
tmp_boxpoints = LocalMap_Points;\n        if (dist_to_map_edge[i][0] <= MOV_THRESHOLD * DET_RANGE)\n        {\n            New_LocalMap_Points.vertex_max[i] -= mov_dist;\n            New_LocalMap_Points.vertex_min[i] -= mov_dist;\n            tmp_boxpoints.vertex_min[i] = LocalMap_Points.vertex_max[i] - mov_dist;\n            cub_needrm.push_back(tmp_boxpoints);\n        }\n        else if (dist_to_map_edge[i][1] <= MOV_THRESHOLD * DET_RANGE)\n        {\n            New_LocalMap_Points.vertex_max[i] += mov_dist;\n            New_LocalMap_Points.vertex_min[i] += mov_dist;\n            tmp_boxpoints.vertex_max[i] = LocalMap_Points.vertex_min[i] + mov_dist;\n            cub_needrm.push_back(tmp_boxpoints);\n        }\n    }\n    LocalMap_Points = New_LocalMap_Points;\n    points_cache_collect();\n\n    if(cub_needrm.size() > 0) kdtree_delete_counter = ikdtree.Delete_Point_Boxes(cub_needrm);\n\n}\n\ndouble timediff_imu_wrt_lidar = 0.0;\nbool timediff_set_flg = false;\n\nvoid livox_pcl_cbk(const livox_ros_driver::CustomMsg::ConstPtr &msg)\n{\n    mtx_buffer.lock();\n    scan_count++;\n    if (msg->header.stamp.toSec() < last_timestamp_lidar)\n    {\n        ROS_WARN(\"lidar loop back, clear buffer\");\n        lidar_buffer.clear();\n        time_buffer.clear();\n        lidar_points_buffer.clear();\n    }\n    last_timestamp_lidar = msg->header.stamp.toSec();\n\n    if (abs(last_timestamp_imu - last_timestamp_lidar) > 1.0 && !timediff_set_flg && !imu_buffer.empty())\n    {\n        timediff_set_flg = true;\n        timediff_imu_wrt_lidar = last_timestamp_imu - last_timestamp_lidar;\n        printf(\"Self sync IMU and LiDAR, HARD time lag is %.10lf \\n \\n\", timediff_imu_wrt_lidar);\n    }\n\n    if (cut_frame)\n    {\n        deque<PointCloudXYZI::Ptr> ptr;\n        deque<double> timestamp_lidar;\n        deque<std::vector<Point3D>> points_lidar;\n\n        p_pre->process_cut_frame_livox(msg, ptr, timestamp_lidar, points_lidar, cut_frame_num, scan_count);\n\n        
while (!ptr.empty() && !timestamp_lidar.empty() && !points_lidar.empty())\n        {\n            lidar_buffer.push_back(ptr.front());\n            ptr.pop_front();\n            time_buffer.push_back(timestamp_lidar.front() / double(1000));//unit:s\n            timestamp_lidar.pop_front();\n            lidar_points_buffer.push_back(points_lidar.front());\n            points_lidar.pop_front();\n        }\n    }\n    else\n    {\n        PointCloudXYZI::Ptr ptr(new PointCloudXYZI());\n        std::vector<Point3D> points;\n        p_pre->process(msg, ptr, points);\n        lidar_buffer.push_back(ptr);\n        time_buffer.push_back(last_timestamp_lidar);\n        lidar_points_buffer.push_back(points);\n    }\n    mtx_buffer.unlock();\n    sig_buffer.notify_all();\n}\n\nvoid standard_pcl_cbk(const sensor_msgs::PointCloud2::ConstPtr &msg)\n{\n    mtx_buffer.lock();\n    scan_count++;\n    if (msg->header.stamp.toSec() < last_timestamp_lidar) {\n        ROS_ERROR(\"lidar loop back, clear Lidar buffer.\");\n        lidar_buffer.clear();\n        time_buffer.clear();\n    }\n\n    last_timestamp_lidar = msg->header.stamp.toSec();\n    if (abs(last_timestamp_imu - last_timestamp_lidar) > 1.0 && !timediff_set_flg && !imu_buffer.empty()) {\n        timediff_set_flg = true;\n        timediff_imu_wrt_lidar = last_timestamp_imu - last_timestamp_lidar;\n        printf(\"Self sync IMU and LiDAR, HARD time lag is %.10lf \\n \\n\", timediff_imu_wrt_lidar);\n    }\n\n    if ((lidar_type == VELO || lidar_type == OUSTER || lidar_type == PANDAR || lidar_type == ROBOSENSE) && cut_frame)\n    {\n        deque<PointCloudXYZI::Ptr> ptr;\n        deque<double> timestamp_lidar;\n        deque<std::vector<Point3D>> pts_lidar;\n        p_pre->process_cut_frame_pcl2(msg, ptr, timestamp_lidar, pts_lidar, cut_frame_num, scan_count);\n        while (!ptr.empty() && !timestamp_lidar.empty() && !pts_lidar.empty())\n        {\n            lidar_buffer.push_back(ptr.front());\n            
ptr.pop_front();\n            time_buffer.push_back(timestamp_lidar.front() / double(1000));//unit:s\n            timestamp_lidar.pop_front();\n            lidar_points_buffer.push_back(pts_lidar.front());\n            pts_lidar.pop_front();\n        }\n    }\n    else\n    {\n        PointCloudXYZI::Ptr ptr(new PointCloudXYZI());\n        std::vector<Point3D> points;\n\n        p_pre->process(msg, ptr, points);\n        lidar_buffer.push_back(ptr);\n        time_buffer.push_back(msg->header.stamp.toSec());\n        lidar_points_buffer.push_back(points);\n\n    }\n    mtx_buffer.unlock();\n    sig_buffer.notify_all();\n}\n\nbool sync_packages(MeasureGroup &meas)\n{\n    if (lidar_buffer.empty() || lidar_points_buffer.empty())\n    {\n        return false;\n    }\n\n\n    /** push a lidar scan **/\n    if (!lidar_pushed)\n    {\n        meas.lidar = lidar_buffer.front();\n        meas.points = lidar_points_buffer.front();\n\n        if (meas.lidar->points.size() <= 1)\n        {\n            ROS_WARN(\"Too few input point cloud!\\n\");\n            lidar_buffer.pop_front();\n            time_buffer.pop_front();\n            lidar_points_buffer.pop_front();\n            return false;\n        }\n\n        meas.lidar_beg_time = time_buffer.front(); //unit:s\n\n        if (lidar_type == L515)\n            lidar_end_time = meas.lidar_beg_time;\n        else\n            lidar_end_time = meas.lidar_beg_time + meas.lidar->points.back().curvature / double(1000); //unit:s\n\n        lidar_pushed = true;\n    }\n\n    lidar_buffer.pop_front();\n    lidar_points_buffer.pop_front();\n    time_buffer.pop_front();\n    lidar_pushed = false;\n    return true;\n}\n\nbool sync_packages_only_lidar(MeasureGroup &meas) {\n    if (lidar_buffer.empty())\n        return false;\n\n    /** push a lidar scan **/\n    if (!lidar_pushed) {\n        meas.lidar = lidar_buffer.front();\n\n        if (meas.lidar->points.size() <= 1) {\n            ROS_WARN(\"Too few input point cloud!\\n\");\n    
        lidar_buffer.pop_front();\n            time_buffer.pop_front();\n            return false;\n        }\n\n        meas.lidar_beg_time = time_buffer.front(); //unit:s\n\n        if (lidar_type == L515)\n            lidar_end_time = meas.lidar_beg_time;\n        else\n            lidar_end_time = meas.lidar_beg_time + meas.lidar->points.back().curvature / double(1000); //unit:s\n\n        lidar_pushed = true;\n    }\n\n    lidar_buffer.pop_front();\n    time_buffer.pop_front();\n    lidar_pushed = false;\n    return true;\n}\n\nPointCloudXYZI point3DtoPCL(std::vector<Point3D> &v_point_temp, int type)\n{\n    PointCloudXYZI::Ptr p_cloud_temp(new PointCloudXYZI);\n    switch (type)\n    {\n        case RAW:\n            for(int i = 0; i < v_point_temp.size(); i++)\n            {\n                PointType cloud_temp;\n                cloud_temp.x = v_point_temp[i].raw_point.x();\n                cloud_temp.y = v_point_temp[i].raw_point.y();\n                cloud_temp.z = v_point_temp[i].raw_point.z();\n                cloud_temp.normal_x = 0;\n                cloud_temp.normal_y = 0;\n                cloud_temp.normal_z = 0;\n                cloud_temp.intensity = v_point_temp[i].intensity;\n                cloud_temp.curvature = v_point_temp[i].relative_time;\n                p_cloud_temp->points.push_back(cloud_temp);\n            }\n            break;\n        case UNDISTORT:\n            for(int i = 0; i < v_point_temp.size(); i++)\n            {\n                PointType cloud_temp;\n                cloud_temp.x = v_point_temp[i].undistort_lidar_point.x();\n                cloud_temp.y = v_point_temp[i].undistort_lidar_point.y();\n                cloud_temp.z = v_point_temp[i].undistort_lidar_point.z();\n                cloud_temp.normal_x = 0;\n                cloud_temp.normal_y = 0;\n                cloud_temp.normal_z = 0;\n                cloud_temp.intensity = v_point_temp[i].intensity;\n                cloud_temp.curvature = 
v_point_temp[i].relative_time;\n                p_cloud_temp->points.push_back(cloud_temp);\n            }\n            break;\n        case WORLD:\n            for(int i = 0; i < v_point_temp.size(); i++)\n            {\n                PointType cloud_temp;\n                cloud_temp.x = v_point_temp[i].world_point.x();\n                cloud_temp.y = v_point_temp[i].world_point.y();\n                cloud_temp.z = v_point_temp[i].world_point.z();\n                cloud_temp.normal_x = 0;\n                cloud_temp.normal_y = 0;\n                cloud_temp.normal_z = 0;\n                cloud_temp.intensity = v_point_temp[i].intensity;\n                cloud_temp.curvature = v_point_temp[i].relative_time;\n\n                p_cloud_temp->points.push_back(cloud_temp);\n            }\n            break;\n        default:\n            for(int i = 0; i < v_point_temp.size(); i++)\n            {\n                PointType cloud_temp;\n                cloud_temp.x = v_point_temp[i].raw_point.x();\n                cloud_temp.y = v_point_temp[i].raw_point.y();\n                cloud_temp.z = v_point_temp[i].raw_point.z();\n                cloud_temp.normal_x = 0;\n                cloud_temp.normal_y = 0;\n                cloud_temp.normal_z = 0;\n                cloud_temp.intensity = v_point_temp[i].intensity;\n                cloud_temp.curvature = v_point_temp[i].relative_time;\n\n                p_cloud_temp->points.push_back(cloud_temp);\n            }\n            break;\n    }\n    return *p_cloud_temp;\n}\n\nPointType point3DtoPCLPoint(Point3D& point_3d, int type)\n{\n    PointType point_temp;\n    switch (type)\n    {\n        case RAW:\n            point_temp.x = point_3d.raw_point.x();\n            point_temp.y = point_3d.raw_point.y();\n            point_temp.z = point_3d.raw_point.z();\n            break;\n        case UNDISTORT:\n            point_temp.x = point_3d.undistort_lidar_point.x();\n            point_temp.y = 
point_3d.undistort_lidar_point.y();\n            point_temp.z = point_3d.undistort_lidar_point.z();\n            break;\n        case WORLD:\n            point_temp.x = point_3d.world_point.x();\n            point_temp.y = point_3d.world_point.y();\n            point_temp.z = point_3d.world_point.z();\n            break;\n        default:\n            point_temp.x = point_3d.raw_point.x();\n            point_temp.y = point_3d.raw_point.y();\n            point_temp.z = point_3d.raw_point.z();\n            break;\n    }\n    point_temp.intensity = point_3d.intensity;\n    return point_temp;\n}\n\nbool log_time = false;\n\n\nvoid iterCurrentPoint3D(Point3D& current_point)\n{\n    Sophus::SE3d T_begin(last_state.rot_end, last_state.pos_end);\n    Sophus::SE3d T_end(state.rot_end, state.pos_end);\n    double offset_time = current_dt - (current_end_time - current_point.relative_time) / 1000.0;\n    double scale = offset_time / current_dt;\n    scale = scale >= 1 ? 1 : scale <= 0 ? 0 : scale;\n//    if (log_time)\n//        f << \"offset_time = \" << offset_time << \" current_dt = \" << current_dt << \" \" << \"current_end_time = \"  << current_end_time << \" relative_time = \" <<  current_point.relative_time << \" scale = \" << scale << endl;\n\n//     T_wj = inter(T_wb, T_we)   T_ej = T_ew * T_wj   P_e = T_ej * P_j  P_w = T_wj * P_j\n//    Sophus::SE3d T_j = Sophus::interpolate(T_begin, T_end, scale);\n//    Sophus::SE3d T_ej = T_end.inverse() * T_wj;\n//    current_point.undistort_lidar_point = T_ej * current_point.raw_point;\n//    current_point.world_point = T_j * current_point.raw_point;\n}\n\n\nvoid iterCurrentPoint3D(Point3D& current_point, int iter_num)\n{\n    Sophus::SE3d T_begin(last_state.rot_end, last_state.pos_end);\n    Sophus::SE3d T_end(state.rot_end, state.pos_end);\n\n    if (iter_num == 0  || iter_num == 18  )\n    {\n        double offset_time = current_dt - (current_end_time - current_point.relative_time) / 1000.0;\n        double scale = 
offset_time / current_dt;\n        scale = scale >= 1 ? 1 : scale <= 0 ? 0 : scale;\n        // T_wj = inter(T_wb, T_we)   T_ej = T_ew * T_wj   P_e = T_ej * P_j  P_w = T_wj * P_j\n        Sophus::SE3d T_j = Sophus::interpolate(T_begin, T_end, scale);\n        Sophus::SE3d T_ej = T_end.inverse() * T_j;\n        current_point.undistort_lidar_point = T_ej * current_point.raw_point;\n        current_point.world_point = T_j * current_point.raw_point;\n    }\n    else\n    {\n        current_point.world_point = T_end * current_point.undistort_lidar_point;\n    }\n}\n\n\n\nvoid iterCurrentPoint3D(Point3D& current_point, bool converge)\n{\n    Sophus::SE3d T_begin(last_state.rot_end, last_state.pos_end);\n    Sophus::SE3d T_end(state.rot_end, state.pos_end);\n\n    if (converge)\n    {\n        double offset_time = current_dt - (current_end_time - current_point.relative_time) / 1000.0;\n        double scale = offset_time / current_dt;\n        scale = scale >= 1 ? 1 : scale <= 0 ? 0 : scale;\n        // T_wj = inter(T_wb, T_we)   T_ej = T_ew * T_wj   P_e = T_ej * P_j  P_w = T_wj * P_j\n        Sophus::SE3d T_j = Sophus::interpolate(T_begin, T_end, scale);\n        Sophus::SE3d T_ej = T_end.inverse() * T_j;\n        current_point.undistort_lidar_point = T_ej * current_point.raw_point;\n        current_point.world_point = T_j * current_point.raw_point;\n    }\n    else\n    {\n        current_point.world_point = T_end * current_point.undistort_lidar_point;\n    }\n}\n\nint process_increments = 0;\n\n\nvoid map_incremental()\n{\n    PointVector PointToAdd;\n    PointVector PointNoNeedDownsample;\n    PointToAdd.reserve(feats_points_size);\n    PointNoNeedDownsample.reserve(feats_points_size);\n    for (int i = 0; i < feats_points_size; i++)\n    {\n        /* decide if need add to map */\n        if (!Nearest_Points[i].empty() && flg_EKF_inited)\n        {\n            const PointVector &points_near = Nearest_Points[i];\n            bool need_add = true;\n            
BoxPointType Box_of_Point;\n            PointType downsample_result, mid_point;\n            mid_point.x = floor(point3DtoPCLPoint(feats_points[i], WORLD).x / filter_size_map_min) * filter_size_map_min +\n                          0.5 * filter_size_map_min;\n            mid_point.y = floor(point3DtoPCLPoint(feats_points[i], WORLD).y / filter_size_map_min) * filter_size_map_min +\n                          0.5 * filter_size_map_min;\n            mid_point.z = floor(point3DtoPCLPoint(feats_points[i], WORLD).z / filter_size_map_min) * filter_size_map_min +\n                          0.5 * filter_size_map_min;\n            float dist = calc_dist(point3DtoPCLPoint(feats_points[i], WORLD), mid_point);\n            if (fabs(points_near[0].x - mid_point.x) > 0.5 * filter_size_map_min &&\n                fabs(points_near[0].y - mid_point.y) > 0.5 * filter_size_map_min &&\n                fabs(points_near[0].z - mid_point.z) > 0.5 * filter_size_map_min)\n            {\n                PointNoNeedDownsample.push_back(point3DtoPCLPoint(feats_points[i], WORLD));\n                continue;\n            }\n            for (int readd_i = 0; readd_i < NUM_MATCH_POINTS; readd_i++)\n            {\n                if (points_near.size() < NUM_MATCH_POINTS) break;\n                if (calc_dist(points_near[readd_i], mid_point) < dist)\n                {\n                    need_add = false;\n                    break;\n                }\n            }\n            if (need_add) PointToAdd.push_back(point3DtoPCLPoint(feats_points[i], WORLD));\n        }\n        else\n        {\n            PointToAdd.push_back(point3DtoPCLPoint(feats_points[i], WORLD));\n        }\n    }\n\n    add_point_size = ikdtree.Add_Points(PointToAdd, true);\n    ikdtree.Add_Points(PointNoNeedDownsample, false);\n    add_point_size = PointToAdd.size() + PointNoNeedDownsample.size();\n}\n\nvoid publish_frame_world(const ros::Publisher &pubLaserCloudFullRes) {\n    if (scan_pub_en)\n    {\n\n        
PointCloudXYZI::Ptr laserCloudFullRes(dense_pub_en ? feats_down_body : feats_down_body);\n        int size = laserCloudFullRes->points.size();\n\n        PointCloudXYZRGB::Ptr laserCloudWorldRGB(new PointCloudXYZRGB(size, 1));\n        PointCloudXYZI::Ptr laserCloudWorld(new PointCloudXYZI(size, 1));\n\n        for (int i = 0; i < size; i++) {\n            if (lidar_type == L515)\n                RGBpointBodyToWorld(&laserCloudFullRes->points[i], \\\n                                &laserCloudWorldRGB->points[i]);\n            else\n                pointBodyToWorld(&laserCloudFullRes->points[i], \\\n                                &laserCloudWorld->points[i]);\n        }\n\n        sensor_msgs::PointCloud2 laserCloudmsg;\n        if (lidar_type == L515)\n            pcl::toROSMsg(*laserCloudWorldRGB, laserCloudmsg);\n        else\n            pcl::toROSMsg(*laserCloudWorld, laserCloudmsg);\n\n        laserCloudmsg.header.stamp = ros::Time().fromSec(lidar_end_time);\n        laserCloudmsg.header.frame_id = \"camera_init\";\n        pubLaserCloudFullRes.publish(laserCloudmsg);\n        publish_count -= PUBFRAME_PERIOD;\n    }\n\n\n    /**************** save map ****************/\n    /* 1. make sure you have enough memories\n       2. 
noted that pcd save will influence the real-time performences **/\n    if (pcd_save_en) {\n        boost::filesystem::create_directories(root_dir + \"/PCD\");\n        int size = feats_undistort->points.size();\n        PointCloudXYZI::Ptr laserCloudWorld(new PointCloudXYZI(size, 1));\n        for (int i = 0; i < size; i++) {\n            pointBodyToWorld(&feats_undistort->points[i], &laserCloudWorld->points[i]);\n        }\n\n        *pcl_wait_save += *laserCloudWorld;\n        static int scan_wait_num = 0;\n        scan_wait_num++;\n        if (pcl_wait_save->size() > 0 && pcd_save_interval > 0 && scan_wait_num >= pcd_save_interval) {\n            pcd_index++;\n            all_points_dir = string(root_dir + \"/PCD/PCD\") + to_string(pcd_index) + string(\".pcd\");\n            cout << \"current scan saved to \" << all_points_dir << endl;\n            pcd_writer.writeBinary(all_points_dir, *pcl_wait_save);\n            pcl_wait_save->clear();\n            scan_wait_num = 0;\n        }\n    }\n}\n\nvoid publish_frame_body(const ros::Publisher &pubLaserCloudFullRes_body) {\n    PointCloudXYZI::Ptr laserCloudFullRes(feats_undistort);\n    sensor_msgs::PointCloud2 laserCloudmsg;\n    pcl::toROSMsg(*feats_undistort, laserCloudmsg);\n    laserCloudmsg.header.stamp = ros::Time().fromSec(lidar_end_time);\n    laserCloudmsg.header.frame_id = \"camera_init\";\n    pubLaserCloudFullRes_body.publish(laserCloudmsg);\n}\n\nvoid publish_effect_world(const ros::Publisher &pubLaserCloudEffect) {\n    PointCloudXYZI::Ptr laserCloudWorld(\\\n                    new PointCloudXYZI(effect_feat_num, 1));\n    for (int i = 0; i < effect_feat_num; i++) {\n        pointBodyToWorld(&laserCloudOri->points[i], &laserCloudWorld->points[i]);\n    }\n    sensor_msgs::PointCloud2 laserCloudFullRes3;\n    pcl::toROSMsg(*laserCloudWorld, laserCloudFullRes3);\n    laserCloudFullRes3.header.stamp = ros::Time().fromSec(lidar_end_time);\n    laserCloudFullRes3.header.frame_id = \"camera_init\";\n    
pubLaserCloudEffect.publish(laserCloudFullRes3);\n}\n\nvoid publish_map(const ros::Publisher &pubLaserCloudMap) {\n    sensor_msgs::PointCloud2 laserCloudMap;\n    pcl::toROSMsg(*featsFromMap, laserCloudMap);\n    laserCloudMap.header.stamp = ros::Time().fromSec(lidar_end_time);\n    laserCloudMap.header.frame_id = \"camera_init\";\n    pubLaserCloudMap.publish(laserCloudMap);\n}\n\ntemplate<typename T>\nvoid set_posestamp(T &out)\n{\n    if (!imu_en) {\n        out.position.x = state.pos_end(0);\n        out.position.y = state.pos_end(1);\n        out.position.z = state.pos_end(2);\n    } else {\n        //Pubulish LiDAR's pose and position\n        V3D pos_cur_lidar = state.rot_end * state.offset_T_L_I + state.pos_end;\n        out.position.x = pos_cur_lidar(0);\n        out.position.y = pos_cur_lidar(1);\n        out.position.z = pos_cur_lidar(2);\n    }\n    out.orientation.x = geoQuat.x;\n    out.orientation.y = geoQuat.y;\n    out.orientation.z = geoQuat.z;\n    out.orientation.w = geoQuat.w;\n}\n\nvoid publish_odometry(const ros::Publisher &pubOdomAftMapped) {\n    odomAftMapped.header.frame_id = \"camera_init\";\n    odomAftMapped.child_frame_id = \"aft_mapped\";\n    odomAftMapped.header.stamp = ros::Time().fromSec(lidar_end_time);\n    set_posestamp(odomAftMapped.pose.pose);\n\n    pubOdomAftMapped.publish(odomAftMapped);\n\n    static tf::TransformBroadcaster br;\n    tf::Transform transform;\n    tf::Quaternion q;\n    transform.setOrigin(tf::Vector3(odomAftMapped.pose.pose.position.x, \\\n                                    odomAftMapped.pose.pose.position.y, \\\n                                    odomAftMapped.pose.pose.position.z));\n    q.setW(odomAftMapped.pose.pose.orientation.w);\n    q.setX(odomAftMapped.pose.pose.orientation.x);\n    q.setY(odomAftMapped.pose.pose.orientation.y);\n    q.setZ(odomAftMapped.pose.pose.orientation.z);\n    transform.setRotation(q);\n    br.sendTransform(tf::StampedTransform(transform, odomAftMapped.header.stamp, 
\"camera_init\", \"aft_mapped\"));\n}\n\nvoid publish_mavros(const ros::Publisher &mavros_pose_publisher) {\n    msg_body_pose.header.stamp = ros::Time().fromSec(lidar_end_time);\n\n    msg_body_pose.header.frame_id = \"camera_odom_frame\";\n    set_posestamp(msg_body_pose.pose);\n    mavros_pose_publisher.publish(msg_body_pose);\n}\n\nvoid publish_path(const ros::Publisher pubPath) {\n    set_posestamp(msg_body_pose.pose);\n    msg_body_pose.header.stamp = ros::Time().fromSec(lidar_end_time);\n    msg_body_pose.header.frame_id = \"camera_init\";\n    path.poses.push_back(msg_body_pose);\n    pubPath.publish(path);\n}\n\nvoid fileout_calib_result() {\n    fout_result.setf(ios::fixed);\n    fout_result << setprecision(6)\n                << \"Rotation LiDAR to IMU (degree)     = \" << RotMtoEuler(state.offset_R_L_I).transpose() * 57.3\n                << endl;\n    fout_result << \"Translation LiDAR to IMU (meter)   = \" << state.offset_T_L_I.transpose() << endl;\n    fout_result << \"Time Lag IMU to LiDAR (second)     = \" << time_lag_IMU_wtr_lidar + timediff_imu_wrt_lidar << endl;\n    fout_result << \"Bias of Gyroscope  (rad/s)         = \" << state.bias_g.transpose() << endl;\n    fout_result << \"Bias of Accelerometer (meters/s^2) = \" << state.bias_a.transpose() << endl;\n    fout_result << \"Gravity in World Frame(meters/s^2) = \" << state.gravity.transpose() << endl << endl;\n\n    MD(4, 4) Transform;\n    Transform.setIdentity();\n    Transform.block<3, 3>(0, 0) = state.offset_R_L_I;\n    Transform.block<3, 1>(0, 3) = state.offset_T_L_I;\n    fout_result << \"Homogeneous Transformation Matrix from LiDAR to IMU: \" << endl;\n    fout_result << Transform << endl << endl << endl;\n}\n\nvoid print_refine_result() {\n    cout.setf(ios::fixed);\n    cout << endl;\n    printf(BOLDGREEN \"[Final Result] \" RESET);\n    cout << setprecision(6)\n         << \"Rotation LiDAR to IMU    = \" << RotMtoEuler(state.offset_R_L_I).transpose() * 57.3 << \" deg\" << endl;\n    
printf(BOLDGREEN \"[Final Result] \" RESET);\n    cout << \"Translation LiDAR to IMU = \" << state.offset_T_L_I.transpose() << \" m\" << endl;\n    printf(BOLDGREEN \"[Final Result] \" RESET);\n    printf(\"Time Lag IMU to LiDAR    = %.8lf s \\n\", time_lag_IMU_wtr_lidar + timediff_imu_wrt_lidar);\n    printf(BOLDGREEN \"[Final Result] \" RESET);\n    cout << \"Bias of Gyroscope        = \" << state.bias_g.transpose() << \" rad/s\" << endl;\n    printf(BOLDGREEN \"[Final Result] \" RESET);\n    cout << \"Bias of Accelerometer    = \" << state.bias_a.transpose() << \" m/s^2\" << endl;\n    printf(BOLDGREEN \"[Final Result] \" RESET);\n    cout << \"Gravity in World Frame   = \" << state.gravity.transpose() << \" m/s^2\" << endl;\n}\n\nvoid printProgress(double percentage) {\n    int val = (int) (percentage * 100);\n    int lpad = (int) (percentage * PBWIDTH);\n    int rpad = PBWIDTH - lpad;\n    printf(\"\\033[1A\\r\");\n    printf(BOLDMAGENTA \"[Refinement] \");\n    if (percentage < 1) {\n        printf(BOLDYELLOW \"Online Refinement: \");\n        printf(YELLOW \"%3d%% [%.*s%*s]\\n\", val, lpad, PBSTR, rpad, \"\");\n        cout << RESET;\n    } else {\n        printf(BOLDGREEN \" Online Refinement \");\n        printf(GREEN \"%3d%% [%.*s%*s]\\n\", val, lpad, PBSTR, rpad, \"\");\n        cout << RESET;\n    }\n    fflush(stdout);\n}\n\n\nvoid subSampleFrame(std::vector<Point3D>& frame, double size_voxel)\n{\n    std::tr1::unordered_map<voxel, std::vector<Point3D>, std::hash<voxel>> grid;\n    for (int i = 0; i < (int) frame.size(); i++)\n    {\n        auto kx = static_cast<short>(frame[i].raw_point[0] / size_voxel);\n        auto ky = static_cast<short>(frame[i].raw_point[1] / size_voxel);\n        auto kz = static_cast<short>(frame[i].raw_point[2] / size_voxel);\n        grid[voxel(kx, ky, kz)].push_back(frame[i]);\n    }\n    frame.resize(0);\n    int step = 0;\n    for(const auto &n: grid)\n    {\n        if(n.second.size() > 0)\n        {\n            
frame.push_back(n.second[0]);\n            step++;\n        }\n    }\n}\n\nvoid adjustCVCov()\n{\n    auto delta_state = state - state_propagat;\n\n    double delta_v = delta_state.block<3, 1>(12, 0).norm() ;\n    double delta_omega = delta_state.block<3, 1>(15, 0).norm() ;\n\n    double cov_v = K_cov / (1 + exp(-1 * a_cov * delta_v + b_cov)) + 0.01;\n    double cov_omega = K_cov / (1 + exp(-1 * a_cov * delta_omega + b_cov)) +  0.01;\n    V3D new_cov_v(cov_v, cov_v, cov_v);\n    V3D new_cov_omega(cov_omega, cov_omega, cov_omega);\n\n    p_imu->Reforward_propagation_without_imu(last_state, state, new_cov_v, new_cov_omega);\n\n//    cout << \" current delta_t = \" << delta_v << \" scale = \" << delta_v << \" acc_cov = \" << cov_v << endl;\n//    cout << \" current delta_R = \" << delta_omega << \" scale = \" << delta_omega << \" gyr_cov = \" << cov_omega << endl;\n//    cout << endl;\n\n}\n\nint main(int argc, char **argv)\n{\n    ros::init(argc, argv, \"laserMapping\");\n    ros::NodeHandle nh;\n\n    nh.param<int>(\"mapping/max_iteration\", NUM_MAX_ITERATIONS, 10);\n    nh.param<int>(\"mapping/max_undistort\", NUM_MAX_UNDISTORT, 3);\n    nh.param<bool>(\"adaptive_cov/use\", adaptive_cov, false);\n    nh.param<double>(\"adaptive_cov/K\", K_cov, 1);\n    nh.param<double>(\"adaptive_cov/a\", a_cov, 1.5);\n    nh.param<double>(\"adaptive_cov/b\", b_cov, 5.0);\n    nh.param<int>(\"initialization/cut_frame_init_num\", p_pre->cut_frame_init_num, 5);\n    nh.param<int>(\"preprocess/point_filter_num\", p_pre->point_filter_num, 2);\n    nh.param<string>(\"map_file_path\", map_file_path, \"\");\n    nh.param<string>(\"common/lid_topic\", lid_topic, \"/livox/lidar\");\n    nh.param<double>(\"mapping/filter_size_surf\", filter_size_surf_min, 0.5);\n    nh.param<double>(\"mapping/filter_size_map\", filter_size_map_min, 0.5);\n    nh.param<double>(\"mapping/cube_side_length\", cube_len, 200);\n    nh.param<float>(\"mapping/det_range\", DET_RANGE, 300.f);\n    
nh.param<double>(\"mapping/gyr_cov\", gyr_cov, 0.1);\n    nh.param<double>(\"mapping/acc_cov\", acc_cov, 0.1);\n    nh.param<double>(\"preprocess/blind\", p_pre->blind, 1.0);\n    nh.param<int>(\"preprocess/lidar_type\", lidar_type, AVIA);\n    nh.param<int>(\"preprocess/scan_line\", p_pre->N_SCANS, 16);\n    nh.param<bool>(\"preprocess/feature_extract_en\", p_pre->feature_enabled, 0);\n    nh.param<bool>(\"initialization/cut_frame\", cut_frame, true);\n    nh.param<int>(\"initialization/cut_frame_num\", cut_frame_num, 1);\n    nh.param<bool>(\"publish/path_en\", path_en, true);\n    nh.param<bool>(\"publish/scan_publish_en\", scan_pub_en, 1);\n    nh.param<bool>(\"publish/dense_publish_en\", dense_pub_en, 1);\n    nh.param<bool>(\"publish/scan_bodyframe_pub_en\", scan_body_pub_en, 1);\n    nh.param<bool>(\"runtime_pos_log_enable\", runtime_pos_log, 0);\n    nh.param<bool>(\"pcd_save/pcd_save_en\", pcd_save_en, false);\n    nh.param<int>(\"pcd_save/interval\", pcd_save_interval, -1);\n    nh.param<double>(\"mapping/cov_lidar\", cov_lidar, 0.001);\n\n    cout << \"lidar_type: \" << lidar_type << endl;\n\n    path.header.stamp = ros::Time().fromSec(lidar_end_time);\n    path.header.frame_id = \"camera_init\";\n\n    /*** variables definition ***/\n    VD(DIM_STATE) solution;\n    MD(DIM_STATE, DIM_STATE) G, H_T_H, I_STATE;\n    V3D rot_add, T_add, vel_add, gyr_add;\n\n    PointType pointOri, pointSel, coeff;\n\n    double deltaT, deltaR;\n    bool flg_EKF_converged = 0, EKF_stop_flg = 0;\n\n    _featsArray.reset(new PointCloudXYZI());\n\n    memset(point_selected_surf, true, sizeof(point_selected_surf));\n    memset(res_last, -1000.0f, sizeof(res_last));\n    downSizeFilterSurf.setLeafSize(filter_size_surf_min, filter_size_surf_min, filter_size_surf_min);\n    downSizeFilterMap.setLeafSize(filter_size_map_min, filter_size_map_min, filter_size_map_min);\n    memset(point_selected_surf, true, sizeof(point_selected_surf));\n    memset(res_last, -1000.0f, 
sizeof(res_last));\n\n    p_imu->lidar_type = p_pre->lidar_type = lidar_type;\n    p_imu->imu_en = imu_en;\n    p_imu->set_gyr_cov(V3D(gyr_cov, gyr_cov, gyr_cov));\n    p_imu->set_acc_cov(V3D(acc_cov, acc_cov, acc_cov));\n\n    G.setZero();\n    H_T_H.setZero();\n    I_STATE.setIdentity();\n\n    last_state = state;\n\n    /*** ROS subscribe initialization ***/\n    ros::Subscriber sub_pcl = p_pre->lidar_type == AVIA ? \\\n        nh.subscribe(lid_topic, 200000, livox_pcl_cbk) : \\\n         nh.subscribe(lid_topic, 200000, standard_pcl_cbk);\n\n    ros::Publisher pubLaserCloudFullRes = nh.advertise<sensor_msgs::PointCloud2>\n            (\"/cloud_registered\", 100000);\n    ros::Publisher pubLaserCloudFullRes_body = nh.advertise<sensor_msgs::PointCloud2>\n            (\"/cloud_registered_body\", 100000);\n    ros::Publisher pubLaserCloudEffect = nh.advertise<sensor_msgs::PointCloud2>\n            (\"/cloud_effected\", 100000);\n    ros::Publisher pubLaserCloudMap = nh.advertise<sensor_msgs::PointCloud2>\n            (\"/Laser_map\", 100000);\n    ros::Publisher pubOdomAftMapped = nh.advertise<nav_msgs::Odometry>\n            (\"/aft_mapped_to_init\", 100000);\n    ros::Publisher pubPath = nh.advertise<nav_msgs::Path>\n            (\"/path\", 100000);\n\n//------------------------------------------------------------------------------------------------------\n    signal(SIGINT, SigHandle);\n    ros::Rate rate(5000);\n    bool status = ros::ok();\n    int frame_id = 0;\n    while (status)\n    {\n        if (flg_exit) break;\n        ros::spinOnce();\n        if (sync_packages(Measures))\n        {\n            frame_id++;\n            if (flg_reset)\n            {\n                ROS_WARN(\"reset when rosbag play back.\");\n                p_imu->Reset();\n                flg_reset = false;\n                continue;\n            }\n\n\n            feats_points = Measures.points;\n            feats_points_full = Measures.points;\n\n            if 
(feats_undistort->empty() || (feats_undistort == NULL))\n            {\n                first_lidar_time = Measures.lidar_beg_time;\n                p_imu->first_lidar_time = first_lidar_time;\n                ROS_WARN(\"First frame, no points stored.\");\n            }\n\n            p_imu->Process(Measures, state, feats_undistort);\n\n            current_dt = p_imu->frame_dt;\n            current_end_time = p_imu->frame_end_time;\n\n            state_propagat = state;\n\n\n            /*** Segment the map in lidar FOV ***/\n            lasermap_fov_segment();\n\n\n            boost::mt19937_64 seed;\n            std::shuffle(feats_points.begin(), feats_points.end(), seed);\n            subSampleFrame(feats_points, filter_size_surf_min);\n            std::shuffle(feats_points.begin(), feats_points.end(), seed);\n\n\n            feats_points_size = feats_points.size();\n\n            feats_points_full_size = feats_points_full.size();\n\n            /*** initialize the map kdtree ***/\n            if (ikdtree.Root_Node == nullptr )\n            {\n                if (feats_points_size > 5)\n                {\n                    ikdtree.set_downsample_param(filter_size_map_min);\n                    feats_down_world->resize(feats_points_full_size);\n                    for (int i = 0; i < feats_points_full_size; i++)\n                    {\n                        feats_down_world->points[i] = point3DtoPCLPoint(feats_points_full[i], RAW);\n                    }\n                    ikdtree.Build(feats_down_world->points);\n                }\n                continue;\n            }\n\n            int featsFromMapNum = ikdtree.validnum();\n            kdtree_size_st = ikdtree.size();\n\n            /*** ICP and iterated Kalman filter update ***/\n            normvec->resize(feats_points_size);\n            feats_down_world->resize(feats_points_size);\n            euler_cur = RotMtoEuler(state.rot_end);\n\n            pointSearchInd_surf.resize(feats_points_size);\n   
         Nearest_Points.resize(feats_points_size);\n            int rematch_num = 0;\n            bool nearest_search_en = true;\n\n            /*** iterated state estimation ***/\n            std::vector<M3D> body_var;\n            std::vector<M3D> crossmat_list;\n            body_var.reserve(feats_points_size);\n            crossmat_list.reserve(feats_points_size);\n\n\n            double knn_time = 0;\n            double solve_time = 0;\n            int knn_count = 0;\n            int iter_count = 0;\n            StatesGroup last_converge_state = state;\n\n            bool current_converge = true;\n            int current_iter_count = 0;\n\n            distort_time = 0;\n\n            for (iterCount = 0; iterCount < NUM_MAX_ITERATIONS; iterCount++)\n            {\n                laserCloudOri->clear();\n                corr_normvect->clear();\n                total_residual = 0.0;\n\n                iter_count++;\n\n                if (nearest_search_en)\n                    knn_count++;\n\n                if (current_converge)\n                    distort_time++;\n\n                /** closest surface search and residual computation **/\n                #ifdef MP_EN\n                    omp_set_num_threads(MP_PROC_NUM);\n                    #pragma omp parallel for\n                #endif\n                for (int i = 0; i < feats_points_size; i++)\n                {\n                    iterCurrentPoint3D(feats_points[i], current_converge);\n                    PointType point_body = point3DtoPCLPoint(feats_points[i], UNDISTORT);\n                    PointType point_world = point3DtoPCLPoint(feats_points[i], WORLD);\n\n                    V3D p_body(point_body.x, point_body.y, point_body.z);\n\n                    vector<float> pointSearchSqDis(NUM_MATCH_POINTS);\n                    auto &points_near = Nearest_Points[i];\n                    uint8_t search_flag = 0;\n\n                    if (nearest_search_en)\n                    {\n                        
/** Find the closest surfaces in the map **/\n                        ikdtree.Nearest_Search(point_world, NUM_MATCH_POINTS, points_near, pointSearchSqDis, 5);\n                        if (points_near.size() < NUM_MATCH_POINTS)\n                            point_selected_surf[i] = false;\n                        else\n                            point_selected_surf[i] = !(pointSearchSqDis[NUM_MATCH_POINTS - 1] > 5);\n                    }\n\n                    res_last[i] = -1000.0f;\n\n                    if (!point_selected_surf[i] || points_near.size() < NUM_MATCH_POINTS)\n                    {\n                        point_selected_surf[i] = false;\n                        continue;\n                    }\n\n                    point_selected_surf[i] = false;\n                    VD(4) pabcd;\n                    pabcd.setZero();\n                    if (esti_plane(pabcd, points_near, 0.1)) //(planeValid)\n                    {\n                        float pd2 = pabcd(0) * point_world.x + pabcd(1) * point_world.y + pabcd(2) * point_world.z +\n                                    pabcd(3);\n                        float s = 1 - 0.9 * fabs(pd2) / sqrt(p_body.norm());\n\n                        if (s > 0.9) {\n                            point_selected_surf[i] = true;\n                            normvec->points[i].x = pabcd(0);\n                            normvec->points[i].y = pabcd(1);\n                            normvec->points[i].z = pabcd(2);\n                            normvec->points[i].intensity = pd2;\n                            res_last[i] = abs(pd2);\n                        }\n                    }\n                }\n\n                effect_feat_num = 0;\n                for (int i = 0; i < feats_points_size; i++)\n                {\n                    if (point_selected_surf[i])\n                    {\n                        laserCloudOri->points[effect_feat_num] = point3DtoPCLPoint(feats_points[i], UNDISTORT);\n                        
corr_normvect->points[effect_feat_num] = normvec->points[i];\n                        effect_feat_num++;\n                    }\n                }\n\n                res_mean_last = total_residual / effect_feat_num;\n\n                /*** Computation of Measurement Jacobian matrix H and measurents vector ***/\n\n                MatrixXd Hsub(effect_feat_num, 12);\n                MatrixXd Hsub_T_R_inv(12, effect_feat_num);\n                VectorXd R_inv(effect_feat_num);\n                VectorXd meas_vec(effect_feat_num);\n\n                Hsub.setZero();\n                Hsub_T_R_inv.setZero();\n                meas_vec.setZero();\n\n\n                for (int i = 0; i < effect_feat_num; i++)\n                {\n                    const PointType &laser_p = laserCloudOri->points[i];\n                    V3D point_this_L(laser_p.x, laser_p.y, laser_p.z);\n\n                    V3D point_this = state.offset_R_L_I * point_this_L + state.offset_T_L_I;\n                    M3D var;\n                    calcBodyVar(point_this, 0.02, 0.05, var);\n                    var = state.rot_end * var * state.rot_end.transpose();\n                    M3D point_crossmat;\n                    point_crossmat << SKEW_SYM_MATRX(point_this);\n\n                    /*** get the normal vector of closest surface/corner ***/\n                    const PointType &norm_p = corr_normvect->points[i];\n                    V3D norm_vec(norm_p.x, norm_p.y, norm_p.z);\n\n                    R_inv(i) = 1000;\n                    laserCloudOri->points[i].intensity = sqrt(R_inv(i));\n\n                    /*** calculate the Measurement Jacobian matrix H ***/\n                    V3D A(point_crossmat * state.rot_end.transpose() * norm_vec);\n                    Hsub.row(i) << VEC_FROM_ARRAY(A), norm_p.x, norm_p.y, norm_p.z, 0, 0, 0, 0, 0, 0;\n\n                    Hsub_T_R_inv.col(i) = Hsub.row(i).transpose() / cov_lidar;\n                    /*** Measurement: distance to the closest 
surface/corner ***/\n                    meas_vec(i) = -norm_p.intensity;\n                }\n\n                MatrixXd K(DIM_STATE, effect_feat_num);\n\n                EKF_stop_flg = false;\n                flg_EKF_converged = false;\n                current_converge = false;\n                /*** Iterative Kalman Filter Update ***/\n\n                H_T_H.block<12, 12>(0, 0) = Hsub_T_R_inv * Hsub;\n                MD(DIM_STATE, DIM_STATE) &&K_1 = (H_T_H + state.cov.inverse()).inverse();\n                K = K_1.block<DIM_STATE, 12>(0, 0) * Hsub_T_R_inv;\n                auto vec = state_propagat - state;\n                solution = K * meas_vec + vec - K * Hsub * vec.block<12, 1>(0, 0);\n\n                //state update\n                state += solution;\n\n                rot_add = solution.block<3, 1>(0, 0);\n                T_add = solution.block<3, 1>(3, 0);\n\n                if ((rot_add.norm() * 57.3 < 0.01) && (T_add.norm() * 100 < 0.015))\n                    flg_EKF_converged = true;\n\n                deltaR = rot_add.norm() * 57.3;\n                deltaT = T_add.norm() * 100;\n\n                euler_cur = RotMtoEuler(state.rot_end);\n\n                current_iter_count++;\n                /*** Rematch Judgement ***/\n                nearest_search_en = false;\n                if (flg_EKF_converged || ((rematch_num == 0) && (iterCount == (NUM_MAX_ITERATIONS - 2))))\n                {\n                    nearest_search_en = true;\n                    rematch_num++;\n\n                    auto delta_state = state - last_converge_state;\n\n                    if (((delta_state.block<3, 1>(0, 0).norm() * 57.3 < 1) && (delta_state.block<3, 1>(3, 0).norm() * 100 < 1.0) && iterCount >= 4)\n                            || current_iter_count >= 15)\n                    {\n                        current_converge = true;\n                        current_iter_count = 0;\n                        if (distort_time >= NUM_MAX_UNDISTORT)\n                       
 {\n                            EKF_stop_flg = true;\n                        }\n                    }\n\n                    last_converge_state = state;\n                }\n\n                /*** Adjust const velocity model cov ***/\n                if (iterCount == 1 && adaptive_cov) adjustCVCov();\n\n\n                /*** Convergence Judgements and Covariance Update ***/\n                if (EKF_stop_flg || ((iterCount == NUM_MAX_ITERATIONS - 1)) )\n                {\n                    if (flg_EKF_inited)\n                    {\n                        /*** Covariance Update ***/\n                        G.setZero();\n                        G.block<DIM_STATE, 12>(0, 0) = K * Hsub;\n                        state.cov = (I_STATE - G) * state.cov;\n                        total_distance += (state.pos_end - position_last).norm();\n                        position_last = state.pos_end;\n                        M3D rot_cur_lidar = state.rot_end * state.offset_R_L_I;\n                        V3D euler_cur_lidar = RotMtoEuler(rot_cur_lidar);\n                        geoQuat = tf::createQuaternionMsgFromRollPitchYaw\n                                (euler_cur_lidar(0), euler_cur_lidar(1), euler_cur_lidar(2));\n                        VD(DIM_STATE) K_sum = K.rowwise().sum();\n                        VD(DIM_STATE) P_diag = state.cov.diagonal();\n                    }\n                    EKF_stop_flg = true;\n                }\n\n                if (EKF_stop_flg) break;\n            }\n\n            *feats_down_body = point3DtoPCL(feats_points, UNDISTORT);\n\n            last_state = state;\n\n            /******* Publish odometry *******/\n            publish_odometry(pubOdomAftMapped);\n\n            /*** add the feature points to map kdtree ***/\n            map_incremental();\n            kdtree_size_end = ikdtree.size();\n\n            /******* Publish points *******/\n            if (scan_pub_en || pcd_save_en) publish_frame_world(pubLaserCloudFullRes);\n         
   last_odom = state.pos_end;\n            last_rot = state.rot_end;\n            publish_effect_world(pubLaserCloudEffect);\n            if (path_en) publish_path(pubPath);\n\n\n            frame_num++;\n\n\n        }\n        status = ros::ok();\n        rate.sleep();\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "src/preprocess.cpp",
    "content": "#include \"preprocess.h\"\n\n#define RETURN0     0x00\n#define RETURN0AND1 0x10\n\nconst bool time_list_cut_frame(PointType &x, PointType &y) {\n    return (x.curvature < y.curvature);\n}\n\nconst bool time_list_cut_frame3D(Point3D &x, Point3D &y) {\n    return (x.relative_time < y.relative_time);\n}\n\nPreprocess::Preprocess()\n        : feature_enabled(0), lidar_type(AVIA), blind(1.0), point_filter_num(1) {\n    inf_bound = 10;\n    N_SCANS = 6;\n    group_size = 8;\n    disA = 0.01;\n    disA = 0.1; // B?\n    p2l_ratio = 225;\n    limit_maxmid = 6.25;\n    limit_midmin = 6.25;\n    limit_maxmin = 3.24;\n    jump_up_limit = 170.0;\n    jump_down_limit = 8.0;\n    cos160 = 160.0;\n    edgea = 2;\n    edgeb = 0.1;\n    smallp_intersect = 172.5;\n    smallp_ratio = 1.2;\n    given_offset_time = false;\n\n    jump_up_limit = cos(jump_up_limit / 180 * M_PI);\n    jump_down_limit = cos(jump_down_limit / 180 * M_PI);\n    cos160 = cos(cos160 / 180 * M_PI);\n    smallp_intersect = cos(smallp_intersect / 180 * M_PI);\n}\n\nPreprocess::~Preprocess() {}\n\nvoid Preprocess::set(bool feat_en, int lid_type, double bld, int pfilt_num) {\n    feature_enabled = feat_en;\n    lidar_type = lid_type;\n    blind = bld;\n    point_filter_num = pfilt_num;\n}\n\nvoid Preprocess::process(const livox_ros_driver::CustomMsg::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out) {\n    avia_handler(msg);\n    *pcl_out = pl_surf;\n}\n\n\nvoid Preprocess::process(const livox_ros_driver::CustomMsg::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out, std::vector<Point3D>& points_out)\n{\n    avia_handler(msg);\n    points_out = pt_surf;\n    *pcl_out = pl_surf;\n}\n\n\n\nvoid Preprocess::process_cut_frame_livox(const livox_ros_driver::CustomMsg::ConstPtr &msg,\n                                         deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar,\n                                         const int required_frame_num, int scan_count)\n{\n    int plsize = msg->point_num;\n    
pl_surf.clear();\n    pl_surf.reserve(plsize);\n    pl_full.clear();\n    pl_full.resize(plsize);\n    int valid_point_num = 0;\n\n    for (uint i = 1; i < plsize; i++)\n    {\n        if ((msg->points[i].line < N_SCANS) &&\n        ((msg->points[i].tag & 0x30) == 0x10 || (msg->points[i].tag & 0x30) == 0x00))\n        {\n            valid_point_num++;\n            if (valid_point_num % point_filter_num == 0) {\n                pl_full[i].x = msg->points[i].x;\n                pl_full[i].y = msg->points[i].y;\n                pl_full[i].z = msg->points[i].z;\n                pl_full[i].intensity = msg->points[i].reflectivity;\n                //use curvature as time of each laser points，unit: ms\n                pl_full[i].curvature = msg->points[i].offset_time / float(1000000);\n\n                double dist = pl_full[i].x * pl_full[i].x + pl_full[i].y * pl_full[i].y + pl_full[i].z * pl_full[i].z;\n                if (dist < blind * blind) continue;\n\n                if ((abs(pl_full[i].x - pl_full[i - 1].x) > 1e-7)\n                    || (abs(pl_full[i].y - pl_full[i - 1].y) > 1e-7)\n                    || (abs(pl_full[i].z - pl_full[i - 1].z) > 1e-7)) {\n                    pl_surf.push_back(pl_full[i]);\n                }\n            }\n        }\n    }\n    sort(pl_surf.points.begin(), pl_surf.points.end(), time_list_cut_frame);\n    double last_frame_end_time = msg->header.stamp.toSec() * 1000;\n    uint valid_num = 0;\n    uint cut_num = 0;\n    uint valid_pcl_size = pl_surf.points.size();\n\n    int required_cut_num = required_frame_num;\n    if (scan_count < 5)\n        required_cut_num = 1;\n\n    PointCloudXYZI pcl_cut;\n    for (uint i = 1; i < valid_pcl_size; i++)\n    {\n        valid_num++;\n        //Compute new opffset time of each point：ms\n        pl_surf[i].curvature += msg->header.stamp.toSec() * 1000 - last_frame_end_time;\n        pcl_cut.push_back(pl_surf[i]);\n        if (valid_num == (int((cut_num + 1) * valid_pcl_size / 
required_cut_num) - 1))\n        {\n            cut_num++;\n            time_lidar.push_back(last_frame_end_time);\n            PointCloudXYZI::Ptr pcl_temp(new PointCloudXYZI()); //Initialize shared_ptr\n            *pcl_temp = pcl_cut;\n            pcl_out.push_back(pcl_temp);\n            //Update frame head\n            last_frame_end_time += pl_surf[i].curvature;\n            pcl_cut.clear();\n            pcl_cut.reserve(valid_pcl_size * 2 / required_frame_num);\n        }\n    }\n}\n\n\nvoid Preprocess::process_cut_frame_livox(const livox_ros_driver::CustomMsg::ConstPtr &msg,\n                                         deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar, deque<std::vector<Point3D>>& points_lidar,\n                                         const int required_frame_num, int scan_count)\n{\n    int plsize = msg->point_num;\n    pl_surf.clear();\n    pl_surf.reserve(plsize);\n    pl_full.clear();\n    pl_full.resize(plsize);\n\n    pt_surf.clear();\n    pt_surf.reserve(plsize);\n\n    int valid_point_num = 0;\n\n    int real_point_filter_num = point_filter_num;\n    if (scan_count <= cut_frame_init_num)\n        real_point_filter_num = 1;\n\n    for (uint i = 1; i < plsize; i++)\n    {\n        if ((msg->points[i].line < N_SCANS) &&\n            ((msg->points[i].tag & 0x30) == 0x10 || (msg->points[i].tag & 0x30) == 0x00))\n        {\n            valid_point_num++;\n            if (valid_point_num % real_point_filter_num == 0)\n            {\n                pl_full[i].x = msg->points[i].x;\n                pl_full[i].y = msg->points[i].y;\n                pl_full[i].z = msg->points[i].z;\n                pl_full[i].intensity = msg->points[i].reflectivity;\n                //use curvature as time of each laser points，unit: ms\n                pl_full[i].curvature = msg->points[i].offset_time / float(1000000);\n\n                double dist = pl_full[i].x * pl_full[i].x + pl_full[i].y * pl_full[i].y + pl_full[i].z * pl_full[i].z;\n          
      if (dist < blind * blind) continue;\n\n                if ((abs(pl_full[i].x - pl_full[i - 1].x) > 1e-7)\n                    || (abs(pl_full[i].y - pl_full[i - 1].y) > 1e-7)\n                    || (abs(pl_full[i].z - pl_full[i - 1].z) > 1e-7))\n                {\n                    pl_surf.push_back(pl_full[i]);\n\n                    Point3D point;\n                    point.raw_point = pl_full[i].getVector3fMap().cast<double>();\n                    point.relative_time = pl_full[i].curvature;\n                    point.intensity = pl_full[i].intensity;\n                    pt_surf.push_back(point);\n\n                }\n            }\n        }\n    }\n\n    sort(pl_surf.points.begin(), pl_surf.points.end(), time_list_cut_frame);\n    sort(pt_surf.begin(), pt_surf.end(), time_list_cut_frame3D);\n\n    //end time of last frame，单位ms\n    double last_frame_end_time = msg->header.stamp.toSec() * 1000;\n    uint valid_num = 0;\n    uint cut_num = 0;\n    uint valid_pcl_size = pl_surf.points.size();\n\n    int required_cut_num = required_frame_num;\n    if (scan_count <= cut_frame_init_num)\n        required_cut_num = 1;\n\n    PointCloudXYZI pcl_cut;\n    std::vector<Point3D> pt_cut;\n    for (uint i = 1; i < valid_pcl_size; i++)\n    {\n        valid_num++;\n        //Compute new opffset time of each point：ms\n        pl_surf[i].curvature += msg->header.stamp.toSec() * 1000 - last_frame_end_time;\n        pt_surf[i].relative_time += msg->header.stamp.toSec() * 1000 - last_frame_end_time;\n\n        pcl_cut.push_back(pl_surf[i]);\n        pt_cut.push_back(pt_surf[i]);\n\n        if (valid_num == (int((cut_num + 1) * valid_pcl_size / required_cut_num) - 1))\n        {\n            cut_num++;\n            time_lidar.push_back(last_frame_end_time);\n            points_lidar.push_back(pt_cut);\n            PointCloudXYZI::Ptr pcl_temp(new PointCloudXYZI()); //Initialize shared_ptr\n            *pcl_temp = pcl_cut;\n            pcl_out.push_back(pcl_temp);\n       
     //Update frame head\n            last_frame_end_time += pl_surf[i].curvature;\n            pcl_cut.clear();\n            pt_cut.clear();\n            pcl_cut.reserve(valid_pcl_size * 2 / required_frame_num);\n            pt_cut.reserve(valid_pcl_size * 2 / required_frame_num);\n        }\n    }\n}\n\n\n#define MAX_LINE_NUM 128\nvoid Preprocess::process_cut_frame_pcl2(const sensor_msgs::PointCloud2::ConstPtr &msg, deque<PointCloudXYZI::Ptr> &pcl_out,\n                                   deque<double> &time_lidar, const int required_frame_num, int scan_count)\n{\n    pl_surf.clear();\n    pl_corn.clear();\n    pl_full.clear();\n    if (lidar_type == VELO)\n    {\n        pcl::PointCloud<velodyne_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n\n        bool is_first[MAX_LINE_NUM];\n        double yaw_fp[MAX_LINE_NUM] = {0};     // yaw of first scan point\n        double omega_l = 3.61;       // scan angular velocity (deg/ms)\n        float yaw_last[MAX_LINE_NUM] = {0.0};  // yaw of last scan point\n        float time_last[MAX_LINE_NUM] = {0.0}; // last offset time\n\n        if (pl_orig.points[plsize - 1].time > 0) {\n            given_offset_time = true;\n        } else {\n            cout << \"Compute offset time using constant rotation model.\" << endl;\n            given_offset_time = false;\n            memset(is_first, true, sizeof(is_first));\n        }\n\n        for (int i = 0; i < plsize; i++) {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = pl_orig.points[i].time * 1000.0;  //ms\n\n            double dist = added_pt.x * added_pt.x + 
added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (!given_offset_time) {\n                int layer = pl_orig.points[i].ring;\n                double yaw_angle = atan2(added_pt.y, added_pt.x) * 57.2957;\n\n                if (is_first[layer]) {\n                    yaw_fp[layer] = yaw_angle;\n                    is_first[layer] = false;\n                    added_pt.curvature = 0.0;\n                    yaw_last[layer] = yaw_angle;\n                    time_last[layer] = added_pt.curvature;\n                    continue;\n                }\n                // compute offset time\n                if (yaw_angle <= yaw_fp[layer]) {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle) / omega_l;\n                } else {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle + 360.0) / omega_l;\n                }\n                if (added_pt.curvature < time_last[layer]) added_pt.curvature += 360.0 / omega_l;\n\n                yaw_last[layer] = yaw_angle;\n                time_last[layer] = added_pt.curvature;\n            }\n\n\n            if (i % point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS) {\n                pl_surf.points.push_back(added_pt);\n            }\n        }\n    }\n    else if (lidar_type == OUSTER)\n    {\n        pcl::PointCloud<ouster_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n        for (int i = 0; i < plsize; i++) {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n   
         added_pt.curvature = pl_orig.points[i].t / 1e6;  //ns to ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (i % point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS) {\n                pl_surf.points.push_back(added_pt);\n            }\n        }\n    }\n    else if(lidar_type == PANDAR)\n    {\n        pcl::PointCloud<pandar_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n        for (int i = 0; i < plsize; i++) {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = (pl_orig.points[i].timestamp - msg->header.stamp.toSec()) * 1000;  //s to ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (i % point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS) {\n                pl_surf.points.push_back(added_pt);\n            }\n        }\n    }\n    else if(lidar_type == ROBOSENSE)\n    {\n        pcl::PointCloud<robosense_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n\n        bool is_first[MAX_LINE_NUM];\n        double yaw_fp[MAX_LINE_NUM] = {0};     // yaw of first scan point\n        double omega_l = 3.61;       // scan angular velocity 
(deg/ms)\n        float yaw_last[MAX_LINE_NUM] = {0.0};  // yaw of last scan point\n        float time_last[MAX_LINE_NUM] = {0.0}; // last offset time\n\n        if (pl_orig.points[plsize - 1].timestamp > 0) {\n            given_offset_time = true;\n        } else {\n            cout << \"Compute offset time using constant rotation model.\" << endl;\n            given_offset_time = false;\n            memset(is_first, true, sizeof(is_first));\n        }\n\n        for (int i = 0; i < plsize; i++) {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = (pl_orig.points[i].timestamp - msg->header.stamp.toSec() + 0.1)* 1000.0;  //ms\n\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (!given_offset_time) {\n                int layer = pl_orig.points[i].ring;\n                double yaw_angle = atan2(added_pt.y, added_pt.x) * 57.2957;\n\n                if (is_first[layer]) {\n                    yaw_fp[layer] = yaw_angle;\n                    is_first[layer] = false;\n                    added_pt.curvature = 0.0;\n                    yaw_last[layer] = yaw_angle;\n                    time_last[layer] = added_pt.curvature;\n                    continue;\n                }\n                // compute offset time\n                if (yaw_angle <= yaw_fp[layer]) {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle) / omega_l;\n                } else {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle + 360.0) / 
omega_l;\n                }\n                if (added_pt.curvature < time_last[layer]) added_pt.curvature += 360.0 / omega_l;\n\n                yaw_last[layer] = yaw_angle;\n                time_last[layer] = added_pt.curvature;\n            }\n\n            if (i % point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS) {\n                pl_surf.points.push_back(added_pt);\n            }\n        }\n    }\n    else\n    {\n        cout << BOLDRED << \"Wrong LiDAR Type!!!\" << endl;\n        return;\n    }\n\n\n    sort(pl_surf.points.begin(), pl_surf.points.end(), time_list_cut_frame);\n\n    //ms\n    double last_frame_end_time = msg->header.stamp.toSec() * 1000;\n    uint valid_num = 0;\n    uint cut_num = 0;\n    uint valid_pcl_size = pl_surf.points.size();\n\n    int required_cut_num = required_frame_num;\n\n    if (scan_count < 20)\n        required_cut_num = 1;\n\n\n    PointCloudXYZI pcl_cut;\n    for (uint i = 1; i < valid_pcl_size; i++) {\n        valid_num++;\n        pl_surf[i].curvature += msg->header.stamp.toSec() * 1000 - last_frame_end_time;\n        pcl_cut.push_back(pl_surf[i]);\n\n        if (valid_num == (int((cut_num + 1) * valid_pcl_size / required_cut_num) - 1)) {\n            cut_num++;\n            time_lidar.push_back(last_frame_end_time);\n            PointCloudXYZI::Ptr pcl_temp(new PointCloudXYZI());\n            *pcl_temp = pcl_cut;\n            pcl_out.push_back(pcl_temp);\n            last_frame_end_time += pl_surf[i].curvature;\n            pcl_cut.clear();\n            pcl_cut.reserve(valid_pcl_size * 2 / required_frame_num);\n        }\n    }\n}\n\nvoid Preprocess::process_cut_frame_pcl2(const sensor_msgs::PointCloud2::ConstPtr &msg, deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar, deque<std::vector<Point3D>> &points_lidar, const int required_frame_num, int scan_count)\n{\n\n    pl_surf.clear();\n    pl_corn.clear();\n    pl_full.clear();\n\n    pt_surf.clear();\n\n    int real_point_filter_num = 
point_filter_num;\n    if (scan_count <= cut_frame_init_num)\n        real_point_filter_num = 1;\n\n    if (lidar_type == VELO)\n    {\n        pcl::PointCloud<velodyne_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n        pt_surf.reserve(plsize);\n\n        bool is_first[MAX_LINE_NUM];\n        double yaw_fp[MAX_LINE_NUM] = {0};     // yaw of first scan point\n        double omega_l = 3.61;       // scan angular velocity (deg/ms)\n        float yaw_last[MAX_LINE_NUM] = {0.0};  // yaw of last scan point\n        float time_last[MAX_LINE_NUM] = {0.0}; // last offset time\n\n        if (pl_orig.points[plsize - 1].time > 0)\n        {\n            given_offset_time = true;\n        }\n        else\n        {\n            cout << \"Compute offset time using constant rotation model.\" << endl;\n            given_offset_time = false;\n            memset(is_first, true, sizeof(is_first));\n        }\n\n        for (int i = 0; i < plsize; i++)\n        {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = pl_orig.points[i].time * 1000.0;  //ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (!given_offset_time)\n            {\n                int layer = pl_orig.points[i].ring;\n                double yaw_angle = atan2(added_pt.y, added_pt.x) * 57.2957;\n\n                if (is_first[layer])\n                {\n                    yaw_fp[layer] = 
yaw_angle;\n                    is_first[layer] = false;\n                    added_pt.curvature = 0.0;\n                    yaw_last[layer] = yaw_angle;\n                    time_last[layer] = added_pt.curvature;\n                    continue;\n                }\n                // compute offset time\n                if (yaw_angle <= yaw_fp[layer])\n                {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle) / omega_l;\n                }\n                else\n                {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle + 360.0) / omega_l;\n                }\n                if (added_pt.curvature < time_last[layer]) added_pt.curvature += 360.0 / omega_l;\n\n                yaw_last[layer] = yaw_angle;\n                time_last[layer] = added_pt.curvature;\n            }\n\n\n            if (i % real_point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS)\n            {\n                pl_surf.points.push_back(added_pt);\n\n                Point3D point;\n                point.raw_point = added_pt.getVector3fMap().cast<double>();\n                point.relative_time = added_pt.curvature;\n                point.intensity = added_pt.intensity;\n                pt_surf.push_back(point);\n            }\n        }\n    }\n    else if (lidar_type == OUSTER)\n    {\n        pcl::PointCloud<ouster_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n        pt_surf.reserve(plsize);\n\n        for (int i = 0; i < plsize; i++)\n        {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = 
pl_orig.points[i].t / 1e6;  //ns to ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (i % real_point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS)\n            {\n                pl_surf.points.push_back(added_pt);\n\n                Point3D point;\n                point.raw_point = added_pt.getVector3fMap().cast<double>();\n                point.relative_time = added_pt.curvature;\n                point.intensity = added_pt.intensity;\n                pt_surf.push_back(point);\n            }\n        }\n    }\n    else if(lidar_type == PANDAR)\n    {\n        pcl::PointCloud<pandar_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n        pt_surf.reserve(plsize);\n\n        for (int i = 0; i < plsize; i++)\n        {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = (pl_orig.points[i].timestamp - msg->header.stamp.toSec()) * 1000;  //s to ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (i % real_point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS)\n            {\n                pl_surf.points.push_back(added_pt);\n\n                Point3D point;\n                point.raw_point = added_pt.getVector3fMap().cast<double>();\n      
          point.relative_time = added_pt.curvature;\n                point.intensity = added_pt.intensity;\n                pt_surf.push_back(point);\n            }\n        }\n    }\n    else if(lidar_type == ROBOSENSE)\n    {\n        pcl::PointCloud<robosense_ros::Point> pl_orig;\n        pcl::fromROSMsg(*msg, pl_orig);\n        int plsize = pl_orig.points.size();\n        pl_surf.reserve(plsize);\n        pt_surf.reserve(plsize);\n\n        bool is_first[MAX_LINE_NUM];\n        double yaw_fp[MAX_LINE_NUM] = {0};     // yaw of first scan point\n        double omega_l = 3.61;       // scan angular velocity (deg/ms)\n        float yaw_last[MAX_LINE_NUM] = {0.0};  // yaw of last scan point\n        float time_last[MAX_LINE_NUM] = {0.0}; // last offset time\n\n        if (pl_orig.points[plsize - 1].timestamp > 0)\n        {\n            given_offset_time = true;\n        }\n        else\n        {\n            cout << \"Compute offset time using constant rotation model.\" << endl;\n            given_offset_time = false;\n            memset(is_first, true, sizeof(is_first));\n        }\n\n        for (int i = 0; i < plsize; i++)\n        {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = (pl_orig.points[i].timestamp - msg->header.stamp.toSec() + 0.1)* 1000.0;  //ms\n\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (!given_offset_time)\n            {\n                int layer = pl_orig.points[i].ring;\n                double yaw_angle = 
atan2(added_pt.y, added_pt.x) * 57.2957;\n\n                if (is_first[layer])\n                {\n                    yaw_fp[layer] = yaw_angle;\n                    is_first[layer] = false;\n                    added_pt.curvature = 0.0;\n                    yaw_last[layer] = yaw_angle;\n                    time_last[layer] = added_pt.curvature;\n                    continue;\n                }\n                // compute offset time\n                if (yaw_angle <= yaw_fp[layer])\n                {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle) / omega_l;\n                }\n                else\n                {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle + 360.0) / omega_l;\n                }\n                if (added_pt.curvature < time_last[layer]) added_pt.curvature += 360.0 / omega_l;\n\n                yaw_last[layer] = yaw_angle;\n                time_last[layer] = added_pt.curvature;\n            }\n\n            if (i % real_point_filter_num == 0 && pl_orig.points[i].ring < N_SCANS)\n            {\n                pl_surf.points.push_back(added_pt);\n\n                Point3D point;\n                point.raw_point = added_pt.getVector3fMap().cast<double>();\n                point.relative_time = added_pt.curvature;\n                point.intensity = added_pt.intensity;\n                pt_surf.push_back(point);\n            }\n        }\n    }\n    else\n    {\n        cout << BOLDRED << \"Wrong LiDAR Type!!!\" << endl;\n        return;\n    }\n\n    sort(pl_surf.points.begin(), pl_surf.points.end(), time_list_cut_frame);\n    sort(pt_surf.begin(), pt_surf.end(), time_list_cut_frame3D);\n\n    //ms\n    double last_frame_end_time = msg->header.stamp.toSec() * 1000;\n    uint valid_num = 0;\n    uint cut_num = 0;\n    uint valid_pcl_size = pl_surf.points.size();\n\n    int required_cut_num = required_frame_num;\n\n    if (scan_count <= cut_frame_init_num)\n        required_cut_num = 1;\n\n\n    
PointCloudXYZI pcl_cut;\n    std::vector<Point3D> pt_cut;\n    for (uint i = 1; i < valid_pcl_size; i++)\n    {\n        valid_num++;\n        pl_surf[i].curvature += msg->header.stamp.toSec() * 1000 - last_frame_end_time;\n        pt_surf[i].relative_time += msg->header.stamp.toSec() * 1000 - last_frame_end_time;\n\n        pcl_cut.push_back(pl_surf[i]);\n        pt_cut.push_back(pt_surf[i]);\n\n        if (valid_num == (int((cut_num + 1) * valid_pcl_size / required_cut_num) - 1))\n        {\n            cut_num++;\n            time_lidar.push_back(last_frame_end_time);\n            PointCloudXYZI::Ptr pcl_temp(new PointCloudXYZI());\n            *pcl_temp = pcl_cut;\n            pcl_out.push_back(pcl_temp);\n            points_lidar.push_back(pt_cut);\n            last_frame_end_time += pl_surf[i].curvature;\n\n            pcl_cut.clear();\n            pcl_cut.reserve(valid_pcl_size * 2 / required_frame_num);\n\n            pt_cut.clear();\n            pt_cut.reserve(valid_pcl_size * 2 / required_frame_num);\n        }\n    }\n\n}\n\nvoid Preprocess::process(const sensor_msgs::PointCloud2::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out)\n{\n    switch (lidar_type) {\n        case OUSTER:\n            oust_handler(msg);\n            break;\n        case VELO:\n            velodyne_handler(msg);\n            break;\n        case L515:\n            l515_handler(msg);\n            break;\n\n        default:\n            printf(\"Error LiDAR Type\");\n            break;\n    }\n    *pcl_out = pl_surf;\n}\n\nvoid Preprocess::process(const sensor_msgs::PointCloud2::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out, std::vector<Point3D>& points_out)\n{\n    switch (lidar_type)\n    {\n        case OUSTER:\n            oust_handler(msg);\n            break;\n        case VELO:\n            velodyne_handler(msg);\n            break;\n        case L515:\n            l515_handler(msg);\n            break;\n\n        default:\n            printf(\"Error LiDAR Type\");\n            
break;\n    }\n    *pcl_out = pl_surf;\n    points_out = pt_surf;\n}\n\nvoid Preprocess::avia_handler(const livox_ros_driver::CustomMsg::ConstPtr &msg)\n{\n    static int scan_count = 0;\n    scan_count++;\n\n    pl_surf.clear();\n    pl_corn.clear();\n    pl_full.clear();\n\n    pt_surf.clear();\n\n    double t1 = omp_get_wtime();\n    int plsize = msg->point_num;\n\n    pl_corn.reserve(plsize);\n    pl_surf.reserve(plsize);\n    pl_full.resize(plsize);\n\n\n    for (int i = 0; i < N_SCANS; i++)\n    {\n        pl_buff[i].clear();\n        pl_buff[i].reserve(plsize);\n    }\n    uint valid_num = 0;\n\n    int real_point_filter_num = point_filter_num;\n    if (scan_count <= cut_frame_init_num)\n        real_point_filter_num = 1;\n\n    if (feature_enabled)\n    {\n        for (uint i = 1; i < plsize; i++) {\n            if ((msg->points[i].line < N_SCANS) && ((msg->points[i].tag & 0x30) == 0x10 ||\n                (msg->points[i].tag & 0x30) == 0x00)) {\n                pl_full[i].x = msg->points[i].x;\n                pl_full[i].y = msg->points[i].y;\n                pl_full[i].z = msg->points[i].z;\n                pl_full[i].intensity = msg->points[i].reflectivity;\n                pl_full[i].curvature =\n                        msg->points[i].offset_time / float(1000000); //use curvature as time of each laser points\n\n                double dist = pl_full[i].x * pl_full[i].x + pl_full[i].y * pl_full[i].y + pl_full[i].z * pl_full[i].z;\n                if (dist < blind * blind) continue;\n\n                if ((abs(pl_full[i].x - pl_full[i - 1].x) > 1e-7)\n                    || (abs(pl_full[i].y - pl_full[i - 1].y) > 1e-7)\n                    || (abs(pl_full[i].z - pl_full[i - 1].z) > 1e-7)) {\n                    pl_buff[msg->points[i].line].push_back(pl_full[i]);\n                }\n            }\n        }\n        static int count = 0;\n        static double time = 0.0;\n        count++;\n        double t0 = omp_get_wtime();\n        for (int j = 0; j < 
N_SCANS; j++) {\n            if (pl_buff[j].size() <= 5) continue;\n            pcl::PointCloud<PointType> &pl = pl_buff[j];\n            plsize = pl.size();\n            vector<orgtype> &types = typess[j];\n            types.clear();\n            types.resize(plsize);\n            plsize--;\n            for (uint i = 0; i < plsize; i++) {\n                types[i].range = pl[i].x * pl[i].x + pl[i].y * pl[i].y;\n                vx = pl[i].x - pl[i + 1].x;\n                vy = pl[i].y - pl[i + 1].y;\n                vz = pl[i].z - pl[i + 1].z;\n                types[i].dista = vx * vx + vy * vy + vz * vz;\n            }\n            types[plsize].range = pl[plsize].x * pl[plsize].x + pl[plsize].y * pl[plsize].y;\n            give_feature(pl, types);\n            // pl_surf += pl;\n        }\n        time += omp_get_wtime() - t0;\n        printf(\"Feature extraction time: %lf \\n\", time / count);\n    }\n    else\n    {\n        for (uint i = 1; i < plsize; i++)\n        {\n            if ((msg->points[i].line < N_SCANS) && ((msg->points[i].tag & 0x30) == 0x10 ||\n                (msg->points[i].tag & 0x30) == 0x00))\n            {\n                valid_num++;\n                if (valid_num % real_point_filter_num == 0)\n                {\n                    pl_full[i].x = msg->points[i].x;\n                    pl_full[i].y = msg->points[i].y;\n                    pl_full[i].z = msg->points[i].z;\n                    pl_full[i].intensity = msg->points[i].reflectivity;\n                    pl_full[i].curvature =\n                            msg->points[i].offset_time / float(1000000); //use curvature as time of each laser points\n\n                    double dist = pl_full[i].x * pl_full[i].x + pl_full[i].y * pl_full[i].y + pl_full[i].z * pl_full[i].z;\n                    if (dist < blind * blind) continue;\n\n                    if ((abs(pl_full[i].x - pl_full[i - 1].x) > 1e-7)\n                        || (abs(pl_full[i].y - pl_full[i - 1].y) > 1e-7)\n           
             || (abs(pl_full[i].z - pl_full[i - 1].z) > 1e-7))\n                    {\n                        pl_surf.push_back(pl_full[i]);\n\n                        Point3D point;\n                        point.raw_point = pl_full[i].getVector3fMap().cast<double>();\n                        point.relative_time = pl_full[i].curvature;\n                        point.intensity = pl_full[i].intensity;\n                        pt_surf.push_back(point);\n                    }\n                }\n            }\n        }\n    }\n}\n\nvoid Preprocess::l515_handler(const sensor_msgs::PointCloud2::ConstPtr &msg)\n{\n    pl_surf.clear();\n    pl_corn.clear();\n    pl_full.clear();\n\n    pt_surf.clear();\n\n\n    pcl::PointCloud<pcl::PointXYZRGB> pl_orig;\n    pcl::fromROSMsg(*msg, pl_orig);\n    int plsize = pl_orig.size();\n    pl_corn.reserve(plsize);\n    pl_surf.reserve(plsize);\n\n    for (int i = 0; i < pl_orig.points.size(); i++)\n    {\n\n        if (i % point_filter_num != 0) continue;\n\n        double range = pl_orig.points[i].x * pl_orig.points[i].x + pl_orig.points[i].y * pl_orig.points[i].y +\n                       pl_orig.points[i].z * pl_orig.points[i].z;\n\n        if (range < blind * blind) continue;\n\n        Eigen::Vector3d pt_vec;\n        PointType added_pt;\n        added_pt.x = pl_orig.points[i].x;\n        added_pt.y = pl_orig.points[i].y;\n        added_pt.z = pl_orig.points[i].z;\n        added_pt.normal_x = pl_orig.points[i].r;\n        added_pt.normal_y = pl_orig.points[i].g;\n        added_pt.normal_z = pl_orig.points[i].b;\n        added_pt.curvature = 0.0;\n        pl_surf.points.push_back(added_pt);\n\n        Point3D point;\n        point.raw_point = added_pt.getVector3fMap().cast<double>();\n        point.relative_time = added_pt.curvature;\n        point.intensity = added_pt.intensity;\n        pt_surf.push_back(point);\n    }\n}\n\nvoid Preprocess::oust_handler(const sensor_msgs::PointCloud2::ConstPtr &msg)\n{\n    static int 
scan_count = 0;\n    scan_count++;\n    pl_surf.clear();\n    pl_corn.clear();\n    pl_full.clear();\n\n    pt_surf.clear();\n\n    int real_point_filter_num = point_filter_num;\n    if (scan_count <= cut_frame_init_num)\n        real_point_filter_num = 1;\n\n\n    pcl::PointCloud<ouster_ros::Point> pl_orig;\n    pcl::fromROSMsg(*msg, pl_orig);\n    int plsize = pl_orig.size();\n    pl_corn.reserve(plsize);\n    pl_surf.reserve(plsize);\n    if (feature_enabled)\n    {\n        for (int i = 0; i < N_SCANS; i++)\n        {\n            pl_buff[i].clear();\n            pl_buff[i].reserve(plsize);\n        }\n\n        for (uint i = 0; i < plsize; i++)\n        {\n            Eigen::Vector3d pt_vec;\n            PointType added_pt;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            double yaw_angle = atan2(added_pt.y, added_pt.x) * 57.3;\n            if (yaw_angle >= 180.0)\n                yaw_angle -= 360.0;\n            if (yaw_angle <= -180.0)\n                yaw_angle += 360.0;\n\n            added_pt.curvature = pl_orig.points[i].t / 1e6;\n            if (pl_orig.points[i].ring < N_SCANS) {\n                pl_buff[pl_orig.points[i].ring].push_back(added_pt);\n            }\n        }\n\n        for (int j = 0; j < N_SCANS; j++) {\n            PointCloudXYZI &pl = pl_buff[j];\n            int linesize = pl.size();\n            vector<orgtype> &types = typess[j];\n            types.clear();\n            types.resize(linesize);\n            linesize--;\n        
    for (uint i = 0; i < linesize; i++) {\n                types[i].range = sqrt(pl[i].x * pl[i].x + pl[i].y * pl[i].y);\n                vx = pl[i].x - pl[i + 1].x;\n                vy = pl[i].y - pl[i + 1].y;\n                vz = pl[i].z - pl[i + 1].z;\n                types[i].dista = vx * vx + vy * vy + vz * vz;\n            }\n            types[linesize].range = sqrt(pl[linesize].x * pl[linesize].x + pl[linesize].y * pl[linesize].y);\n            give_feature(pl, types);\n        }\n    }\n    else\n    {\n        for (int i = 0; i < pl_orig.points.size(); i++)\n        {\n            if (i % real_point_filter_num != 0) continue;\n            Eigen::Vector3d pt_vec;\n            PointType added_pt;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            added_pt.curvature = pl_orig.points[i].t / 1e6;  //ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (pl_orig.points[i].ring < N_SCANS)\n            {\n                pl_surf.points.push_back(added_pt);\n                Point3D point;\n                point.raw_point = added_pt.getVector3fMap().cast<double>();\n                point.relative_time = added_pt.curvature;\n                point.intensity = added_pt.intensity;\n                pt_surf.push_back(point);\n            }\n        }\n    }\n}\n\nvoid Preprocess::velodyne_handler(const sensor_msgs::PointCloud2::ConstPtr &msg)\n{\n    static int scan_count = 0;\n    scan_count++;\n\n    pl_surf.clear();\n    pl_corn.clear();\n    pl_full.clear();\n\n    pt_surf.clear();\n\n\n    
pcl::PointCloud<velodyne_ros::Point> pl_orig;\n    pcl::fromROSMsg(*msg, pl_orig);\n    int plsize = pl_orig.points.size();\n    pl_surf.reserve(plsize);\n\n    bool is_first[MAX_LINE_NUM];\n    double yaw_fp[MAX_LINE_NUM] = {0};     // yaw of first scan point\n    double omega_l = 3.61;       // scan angular velocity (deg/ms)\n    float yaw_last[MAX_LINE_NUM] = {0.0};  // yaw of last scan point\n    float time_last[MAX_LINE_NUM] = {0.0}; // last offset time\n\n    if (pl_orig.points[plsize - 1].time > 0) {\n        given_offset_time = true;\n    } else {\n        cout << \"Compute offset time using constant rotation model.\" << endl;\n        given_offset_time = false;\n        memset(is_first, true, sizeof(is_first));\n    }\n\n    int real_point_filter_num = point_filter_num;\n    if (scan_count <= cut_frame_init_num)\n        real_point_filter_num = 1;\n\n    if (feature_enabled)\n    {\n        for (int i = 0; i < N_SCANS; i++)\n        {\n            pl_buff[i].clear();\n            pl_buff[i].reserve(plsize);\n        }\n\n        for (int i = 0; i < plsize; i++)\n        {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z = 0;\n            int layer = pl_orig.points[i].ring;\n            if (layer >= N_SCANS) continue;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = pl_orig.points[i].time * 1000.0; // unit: ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (!given_offset_time) {\n                double yaw_angle = atan2(added_pt.y, added_pt.x) * 57.2957;\n                if 
(is_first[layer]) {\n                    // printf(\"layer: %d; is first: %d\", layer, is_first[layer]);\n                    yaw_fp[layer] = yaw_angle;\n                    is_first[layer] = false;\n                    added_pt.curvature = 0.0;\n                    yaw_last[layer] = yaw_angle;\n                    time_last[layer] = added_pt.curvature;\n                    continue;\n                }\n\n                if (yaw_angle <= yaw_fp[layer]) {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle) / omega_l;\n                } else {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle + 360.0) / omega_l;\n                }\n\n                if (added_pt.curvature < time_last[layer]) added_pt.curvature += 360.0 / omega_l;\n\n                yaw_last[layer] = yaw_angle;\n                time_last[layer] = added_pt.curvature;\n            }\n\n            pl_buff[layer].points.push_back(added_pt);\n        }\n\n        for (int j = 0; j < N_SCANS; j++)\n        {\n            PointCloudXYZI &pl = pl_buff[j];\n            int linesize = pl.size();\n            if (linesize < 2) continue;\n            vector<orgtype> &types = typess[j];\n            types.clear();\n            types.resize(linesize);\n            linesize--;\n            for (uint i = 0; i < linesize; i++) {\n                types[i].range = sqrt(pl[i].x * pl[i].x + pl[i].y * pl[i].y);\n                vx = pl[i].x - pl[i + 1].x;\n                vy = pl[i].y - pl[i + 1].y;\n                vz = pl[i].z - pl[i + 1].z;\n                types[i].dista = vx * vx + vy * vy + vz * vz;\n            }\n            types[linesize].range = sqrt(pl[linesize].x * pl[linesize].x + pl[linesize].y * pl[linesize].y);\n            give_feature(pl, types);\n        }\n    }\n    else\n    {\n        for (int i = 0; i < plsize; i++)\n        {\n            PointType added_pt;\n            added_pt.normal_x = 0;\n            added_pt.normal_y = 0;\n            added_pt.normal_z 
= 0;\n            added_pt.x = pl_orig.points[i].x;\n            added_pt.y = pl_orig.points[i].y;\n            added_pt.z = pl_orig.points[i].z;\n            added_pt.intensity = pl_orig.points[i].intensity;\n            added_pt.curvature = pl_orig.points[i].time * 1000.0; //ms\n\n            double dist = added_pt.x * added_pt.x + added_pt.y * added_pt.y + added_pt.z * added_pt.z;\n            if ( dist < blind * blind || isnan(added_pt.x) || isnan(added_pt.y) || isnan(added_pt.z))\n                continue;\n\n            if (!given_offset_time)\n            {\n                int layer = pl_orig.points[i].ring;\n                double yaw_angle = atan2(added_pt.y, added_pt.x) * 57.2957;\n\n                if (is_first[layer])\n                {\n                    yaw_fp[layer] = yaw_angle;\n                    is_first[layer] = false;\n                    added_pt.curvature = 0.0;\n                    yaw_last[layer] = yaw_angle;\n                    time_last[layer] = added_pt.curvature;\n                    continue;\n                }\n\n                // compute offset time\n                if (yaw_angle <= yaw_fp[layer])\n                {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle) / omega_l;\n                }\n                else\n                {\n                    added_pt.curvature = (yaw_fp[layer] - yaw_angle + 360.0) / omega_l;\n                }\n\n                if (added_pt.curvature < time_last[layer]) added_pt.curvature += 360.0 / omega_l;\n\n                yaw_last[layer] = yaw_angle;\n                time_last[layer] = added_pt.curvature;\n            }\n\n            if (i % real_point_filter_num == 0)\n            {\n                pl_surf.points.push_back(added_pt);\n                Point3D point;\n                point.raw_point = added_pt.getVector3fMap().cast<double>();\n                point.relative_time = added_pt.curvature;\n                point.intensity = added_pt.intensity;\n                
pt_surf.push_back(point);\n            }\n        }\n    }\n}\n\nvoid Preprocess::velodyne_handler_kitti(const sensor_msgs::PointCloud2::ConstPtr &msg) {\n    pl_surf.clear();\n    pl_full.clear();\n    pcl::fromROSMsg(*msg, pl_full);\n    for (int i = 0; i < pl_full.size(); i++) {\n        if (i % point_filter_num == 0) {\n            double dist = pl_full[i].x * pl_full[i].x + pl_full[i].y * pl_full[i].y + pl_full[i].z * pl_full[i].z;\n            if (dist > blind * blind)\n                pl_surf.points.push_back(pl_full[i]);\n        }\n    }\n}\n\nvoid Preprocess::give_feature(pcl::PointCloud<PointType> &pl, vector<orgtype> &types) {\n    int plsize = pl.size();\n    int plsize2;\n    if (plsize == 0) {\n        printf(\"something wrong\\n\");\n        return;\n    }\n    uint head = 0;\n\n    while (types[head].range < blind) {\n        head++;\n    }\n\n    // Surf\n    plsize2 = (plsize > group_size) ? (plsize - group_size) : 0;\n\n    Eigen::Vector3d curr_direct(Eigen::Vector3d::Zero());\n    Eigen::Vector3d last_direct(Eigen::Vector3d::Zero());\n\n    uint i_nex = 0, i2;\n    uint last_i = 0;\n    uint last_i_nex = 0;\n    int last_state = 0;\n    int plane_type;\n\n    for (uint i = head; i < plsize2; i++) {\n        if (types[i].range < blind) {\n            continue;\n        }\n\n        i2 = i;\n\n        plane_type = plane_judge(pl, types, i, i_nex, curr_direct);\n\n        if (plane_type == 1) {\n            for (uint j = i; j <= i_nex; j++) {\n                if (j != i && j != i_nex) {\n                    types[j].ftype = Real_Plane;\n                } else {\n                    types[j].ftype = Poss_Plane;\n                }\n            }\n\n            // if(last_state==1 && fabs(last_direct.sum())>0.5)\n            if (last_state == 1 && last_direct.norm() > 0.1) {\n                double mod = last_direct.transpose() * curr_direct;\n                if (mod > -0.707 && mod < 0.707) {\n                    types[i].ftype = Edge_Plane;\n       
         } else {\n                    types[i].ftype = Real_Plane;\n                }\n            }\n\n            i = i_nex - 1;\n            last_state = 1;\n        } else // if(plane_type == 2)\n        {\n            i = i_nex;\n            last_state = 0;\n        }\n        // else if(plane_type == 0)\n        // {\n        //   if(last_state == 1)\n        //   {\n        //     uint i_nex_tem;\n        //     uint j;\n        //     for(j=last_i+1; j<=last_i_nex; j++)\n        //     {\n        //       uint i_nex_tem2 = i_nex_tem;\n        //       Eigen::Vector3d curr_direct2;\n\n        //       uint ttem = plane_judge(pl, types, j, i_nex_tem, curr_direct2);\n\n        //       if(ttem != 1)\n        //       {\n        //         i_nex_tem = i_nex_tem2;\n        //         break;\n        //       }\n        //       curr_direct = curr_direct2;\n        //     }\n\n        //     if(j == last_i+1)\n        //     {\n        //       last_state = 0;\n        //     }\n        //     else\n        //     {\n        //       for(uint k=last_i_nex; k<=i_nex_tem; k++)\n        //       {\n        //         if(k != i_nex_tem)\n        //         {\n        //           types[k].ftype = Real_Plane;\n        //         }\n        //         else\n        //         {\n        //           types[k].ftype = Poss_Plane;\n        //         }\n        //       }\n        //       i = i_nex_tem-1;\n        //       i_nex = i_nex_tem;\n        //       i2 = j-1;\n        //       last_state = 1;\n        //     }\n\n        //   }\n        // }\n\n        last_i = i2;\n        last_i_nex = i_nex;\n        last_direct = curr_direct;\n    }\n\n    plsize2 = plsize > 3 ? 
plsize - 3 : 0;\n    for (uint i = head + 3; i < plsize2; i++) {\n        if (types[i].range < blind || types[i].ftype >= Real_Plane) {\n            continue;\n        }\n\n        if (types[i - 1].dista < 1e-16 || types[i].dista < 1e-16) {\n            continue;\n        }\n\n        Eigen::Vector3d vec_a(pl[i].x, pl[i].y, pl[i].z);\n        Eigen::Vector3d vecs[2];\n\n        for (int j = 0; j < 2; j++) {\n            int m = -1;\n            if (j == 1) {\n                m = 1;\n            }\n\n            if (types[i + m].range < blind) {\n                if (types[i].range > inf_bound) {\n                    types[i].edj[j] = Nr_inf;\n                } else {\n                    types[i].edj[j] = Nr_blind;\n                }\n                continue;\n            }\n\n            vecs[j] = Eigen::Vector3d(pl[i + m].x, pl[i + m].y, pl[i + m].z);\n            vecs[j] = vecs[j] - vec_a;\n\n            types[i].angle[j] = vec_a.dot(vecs[j]) / vec_a.norm() / vecs[j].norm();\n            if (types[i].angle[j] < jump_up_limit) {\n                types[i].edj[j] = Nr_180;\n            } else if (types[i].angle[j] > jump_down_limit) {\n                types[i].edj[j] = Nr_zero;\n            }\n        }\n\n        types[i].intersect = vecs[Prev].dot(vecs[Next]) / vecs[Prev].norm() / vecs[Next].norm();\n        if (types[i].edj[Prev] == Nr_nor && types[i].edj[Next] == Nr_zero && types[i].dista > 0.0225 &&\n            types[i].dista > 4 * types[i - 1].dista) {\n            if (types[i].intersect > cos160) {\n                if (edge_jump_judge(pl, types, i, Prev)) {\n                    types[i].ftype = Edge_Jump;\n                }\n            }\n        } else if (types[i].edj[Prev] == Nr_zero && types[i].edj[Next] == Nr_nor && types[i - 1].dista > 0.0225 &&\n                   types[i - 1].dista > 4 * types[i].dista) {\n            if (types[i].intersect > cos160) {\n                if (edge_jump_judge(pl, types, i, Next)) {\n                    types[i].ftype = 
Edge_Jump;\n                }\n            }\n        } else if (types[i].edj[Prev] == Nr_nor && types[i].edj[Next] == Nr_inf) {\n            if (edge_jump_judge(pl, types, i, Prev)) {\n                types[i].ftype = Edge_Jump;\n            }\n        } else if (types[i].edj[Prev] == Nr_inf && types[i].edj[Next] == Nr_nor) {\n            if (edge_jump_judge(pl, types, i, Next)) {\n                types[i].ftype = Edge_Jump;\n            }\n\n        } else if (types[i].edj[Prev] > Nr_nor && types[i].edj[Next] > Nr_nor) {\n            if (types[i].ftype == Nor) {\n                types[i].ftype = Wire;\n            }\n        }\n    }\n\n    plsize2 = plsize - 1;\n    double ratio;\n    for (uint i = head + 1; i < plsize2; i++) {\n        if (types[i].range < blind || types[i - 1].range < blind || types[i + 1].range < blind) {\n            continue;\n        }\n\n        if (types[i - 1].dista < 1e-8 || types[i].dista < 1e-8) {\n            continue;\n        }\n\n        if (types[i].ftype == Nor) {\n            if (types[i - 1].dista > types[i].dista) {\n                ratio = types[i - 1].dista / types[i].dista;\n            } else {\n                ratio = types[i].dista / types[i - 1].dista;\n            }\n\n            if (types[i].intersect < smallp_intersect && ratio < smallp_ratio) {\n                if (types[i - 1].ftype == Nor) {\n                    types[i - 1].ftype = Real_Plane;\n                }\n                if (types[i + 1].ftype == Nor) {\n                    types[i + 1].ftype = Real_Plane;\n                }\n                types[i].ftype = Real_Plane;\n            }\n        }\n    }\n\n    int last_surface = -1;\n    for (uint j = head; j < plsize; j++) {\n        if (types[j].ftype == Poss_Plane || types[j].ftype == Real_Plane) {\n            if (last_surface == -1) {\n                last_surface = j;\n            }\n\n            if (j == uint(last_surface + point_filter_num - 1)) {\n                PointType ap;\n                
ap.x = pl[j].x;\n                ap.y = pl[j].y;\n                ap.z = pl[j].z;\n                ap.curvature = pl[j].curvature;\n                pl_surf.push_back(ap);\n\n                last_surface = -1;\n            }\n        } else {\n            if (types[j].ftype == Edge_Jump || types[j].ftype == Edge_Plane) {\n                pl_corn.push_back(pl[j]);\n            }\n            if (last_surface != -1) {\n                PointType ap;\n                for (uint k = last_surface; k < j; k++) {\n                    ap.x += pl[k].x;\n                    ap.y += pl[k].y;\n                    ap.z += pl[k].z;\n                    ap.curvature += pl[k].curvature;\n                }\n                ap.x /= (j - last_surface);\n                ap.y /= (j - last_surface);\n                ap.z /= (j - last_surface);\n                ap.curvature /= (j - last_surface);\n                pl_surf.push_back(ap);\n            }\n            last_surface = -1;\n        }\n    }\n}\n\nvoid Preprocess::pub_func(PointCloudXYZI &pl, const ros::Time &ct) {\n    pl.height = 1;\n    pl.width = pl.size();\n    sensor_msgs::PointCloud2 output;\n    pcl::toROSMsg(pl, output);\n    output.header.frame_id = \"livox\";\n    output.header.stamp = ct;\n}\n\nint Preprocess::plane_judge(const PointCloudXYZI &pl, vector<orgtype> &types, uint i_cur, uint &i_nex,\n                            Eigen::Vector3d &curr_direct) {\n    double group_dis = disA * types[i_cur].range + disB;\n    group_dis = group_dis * group_dis;\n    // i_nex = i_cur;\n\n    double two_dis;\n    vector<double> disarr;\n    disarr.reserve(20);\n\n    for (i_nex = i_cur; i_nex < i_cur + group_size; i_nex++) {\n        if (types[i_nex].range < blind) {\n            curr_direct.setZero();\n            return 2;\n        }\n        disarr.push_back(types[i_nex].dista);\n    }\n\n    for (;;) {\n        if ((i_cur >= pl.size()) || (i_nex >= pl.size())) break;\n\n        if (types[i_nex].range < blind) {\n            
curr_direct.setZero();\n            return 2;\n        }\n        vx = pl[i_nex].x - pl[i_cur].x;\n        vy = pl[i_nex].y - pl[i_cur].y;\n        vz = pl[i_nex].z - pl[i_cur].z;\n        two_dis = vx * vx + vy * vy + vz * vz;\n        if (two_dis >= group_dis) {\n            break;\n        }\n        disarr.push_back(types[i_nex].dista);\n        i_nex++;\n    }\n\n    double leng_wid = 0;\n    double v1[3], v2[3];\n    for (uint j = i_cur + 1; j < i_nex; j++) {\n        if ((j >= pl.size()) || (i_cur >= pl.size())) break;\n        v1[0] = pl[j].x - pl[i_cur].x;\n        v1[1] = pl[j].y - pl[i_cur].y;\n        v1[2] = pl[j].z - pl[i_cur].z;\n\n        v2[0] = v1[1] * vz - vy * v1[2];\n        v2[1] = v1[2] * vx - v1[0] * vz;\n        v2[2] = v1[0] * vy - vx * v1[1];\n\n        double lw = v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2];\n        if (lw > leng_wid) {\n            leng_wid = lw;\n        }\n    }\n\n\n    if ((two_dis * two_dis / leng_wid) < p2l_ratio) {\n        curr_direct.setZero();\n        return 0;\n    }\n\n    uint disarrsize = disarr.size();\n    for (uint j = 0; j < disarrsize - 1; j++) {\n        for (uint k = j + 1; k < disarrsize; k++) {\n            if (disarr[j] < disarr[k]) {\n                leng_wid = disarr[j];\n                disarr[j] = disarr[k];\n                disarr[k] = leng_wid;\n            }\n        }\n    }\n\n    if (disarr[disarr.size() - 2] < 1e-16) {\n        curr_direct.setZero();\n        return 0;\n    }\n\n    if (lidar_type == AVIA) {\n        double dismax_mid = disarr[0] / disarr[disarrsize / 2];\n        double dismid_min = disarr[disarrsize / 2] / disarr[disarrsize - 2];\n\n        if (dismax_mid >= limit_maxmid || dismid_min >= limit_midmin) {\n            curr_direct.setZero();\n            return 0;\n        }\n    } else {\n        double dismax_min = disarr[0] / disarr[disarrsize - 2];\n        if (dismax_min >= limit_maxmin) {\n            curr_direct.setZero();\n            return 0;\n        }\n  
  }\n\n    curr_direct << vx, vy, vz;\n    curr_direct.normalize();\n    return 1;\n}\n\nbool Preprocess::edge_jump_judge(const PointCloudXYZI &pl, vector<orgtype> &types, uint i, Surround nor_dir) {\n    if (nor_dir == 0) {\n        if (types[i - 1].range < blind || types[i - 2].range < blind) {\n            return false;\n        }\n    } else if (nor_dir == 1) {\n        if (types[i + 1].range < blind || types[i + 2].range < blind) {\n            return false;\n        }\n    }\n    double d1 = types[i + nor_dir - 1].dista;\n    double d2 = types[i + 3 * nor_dir - 2].dista;\n    double d;\n\n    if (d1 < d2) {\n        d = d1;\n        d1 = d2;\n        d2 = d;\n    }\n\n    d1 = sqrt(d1);\n    d2 = sqrt(d2);\n\n\n    if (d1 > edgea * d2 || (d1 - d2) > edgeb) {\n        return false;\n    }\n\n    return true;\n}"
  },
  {
    "path": "src/preprocess.h",
    "content": "#ifndef PREPROCESS_H\n#define PREPROCESS_H\n\n#include \"common_lib.h\"\n#include <ros/ros.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <sensor_msgs/PointCloud2.h>\n#include <livox_ros_driver/CustomMsg.h>\n\nusing namespace std;\n\nenum Feature{Nor, Poss_Plane, Real_Plane, Edge_Jump, Edge_Plane, Wire, ZeroPoint};\nenum Surround{Prev, Next};\nenum E_jump{Nr_nor, Nr_zero, Nr_180, Nr_inf, Nr_blind};\n\nconst bool time_list_cut_frame(PointType &x, PointType &y);\n\nstruct orgtype\n{\n  double range;\n  double dista; \n  double angle[2];\n  double intersect;\n  E_jump edj[2];\n  Feature ftype;\n  orgtype()\n  {\n    range = 0;\n    edj[Prev] = Nr_nor;\n    edj[Next] = Nr_nor;\n    ftype = Nor;\n    intersect = 2;\n  }\n};\nnamespace velodyne_ros {\n    struct EIGEN_ALIGN16 Point {\n        PCL_ADD_POINT4D;\n        float intensity;\n        float time;\n        uint16_t ring;\n        EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n    };\n}  // namespace velodyne_ros\nPOINT_CLOUD_REGISTER_POINT_STRUCT(velodyne_ros::Point,\n        (float, x, x)\n        (float, y, y)\n        (float, z, z)\n        (float, intensity, intensity)\n        (float, time, time)\n        (uint16_t, ring, ring)\n)\n\nnamespace ouster_ros {\n  struct EIGEN_ALIGN16 Point {\n      PCL_ADD_POINT4D;\n      float intensity;\n      uint32_t t;\n      uint16_t reflectivity;\n      uint8_t  ring;\n      uint16_t ambient;\n      uint32_t range;\n      EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  };\n}  // namespace ouster_ros\nPOINT_CLOUD_REGISTER_POINT_STRUCT(ouster_ros::Point,\n    (float, x, x)\n    (float, y, y)\n    (float, z, z)\n    (float, intensity, intensity)\n    // use std::uint32_t to avoid conflicting with pcl::uint32_t\n    (std::uint32_t, t, t)\n    (std::uint16_t, reflectivity, reflectivity)\n    (std::uint8_t, ring, ring)\n    (std::uint16_t, ambient, ambient)\n    (std::uint32_t, range, range)\n)\n// namespace pandar_ros\nnamespace pandar_ros {\n    struct EIGEN_ALIGN16 Point 
{\n        PCL_ADD_POINT4D;\n        float intensity;\n        double timestamp;\n        uint16_t  ring;\n        EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n    };\n}\nPOINT_CLOUD_REGISTER_POINT_STRUCT(pandar_ros::Point,\n                                  (float, x, x)\n                                          (float, y, y)\n                                          (float, z, z)\n                                          (float, intensity, intensity)\n                                          (double, timestamp, timestamp)\n                                          (std::uint16_t, ring, ring)\n)\n\n//ANCHOR robosense modify\nnamespace robosense_ros {\n    struct EIGEN_ALIGN16 Point {\n        PCL_ADD_POINT4D;\n        std::uint8_t intensity;\n        std::uint16_t ring;\n        double timestamp;\n        EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n    };\n}\n\n// namespace robosense_ros\nPOINT_CLOUD_REGISTER_POINT_STRUCT(robosense_ros::Point,\n                                  (float, x, x)\n                                          (float, y, y)\n                                          (float, z, z)\n                                          // use std::uint32_t to avoid conflicting with pcl::uint32_t\n                                          (std::uint8_t, intensity, intensity)\n                                          (std::uint16_t, ring, ring)\n                                          (double, timestamp, timestamp)\n)\n\nclass Preprocess\n{\n  public:\n//   EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  Preprocess();\n  ~Preprocess();\n  \n  void process(const livox_ros_driver::CustomMsg::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out);\n  void process(const livox_ros_driver::CustomMsg::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out, std::vector<Point3D>& points_out);\n  void process_cut_frame_livox(const livox_ros_driver::CustomMsg::ConstPtr &msg, deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar, const int required_frame_num, int scan_count);\n  void 
process_cut_frame_livox(const livox_ros_driver::CustomMsg::ConstPtr &msg, deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar, deque<std::vector<Point3D>> &points_lidar, const int required_frame_num, int scan_count);\n  void process(const sensor_msgs::PointCloud2::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out);\n  void process(const sensor_msgs::PointCloud2::ConstPtr &msg, PointCloudXYZI::Ptr &pcl_out, std::vector<Point3D>& points_out);\n  void process_cut_frame_pcl2(const sensor_msgs::PointCloud2::ConstPtr &msg, deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar, const int required_frame_num, int scan_count);\n  void process_cut_frame_pcl2(const sensor_msgs::PointCloud2::ConstPtr &msg, deque<PointCloudXYZI::Ptr> &pcl_out, deque<double> &time_lidar, deque<std::vector<Point3D>> &points_lidar, const int required_frame_num, int scan_count);\n  void set(bool feat_en, int lid_type, double bld, int pfilt_num);\n\n  // sensor_msgs::PointCloud2::ConstPtr pointcloud;\n  PointCloudXYZI pl_full, pl_corn, pl_surf;\n  std::vector<Point3D> pt_surf;\n  PointCloudXYZI pl_buff[128]; //maximum 128 line lidar\n  vector<orgtype> typess[128]; //maximum 128 line lidar\n  int lidar_type, point_filter_num, N_SCANS;;\n  double blind;\n  bool feature_enabled, given_offset_time;\n  ros::Publisher pub_full, pub_surf, pub_corn;\n  int cut_frame_init_num;\n\n  private:\n  void avia_handler(const livox_ros_driver::CustomMsg::ConstPtr &msg);\n  void oust_handler(const sensor_msgs::PointCloud2::ConstPtr &msg);\n  void velodyne_handler(const sensor_msgs::PointCloud2::ConstPtr &msg);\n  void velodyne_handler_kitti(const sensor_msgs::PointCloud2::ConstPtr &msg);\n  void l515_handler(const sensor_msgs::PointCloud2::ConstPtr &msg);\n  void give_feature(PointCloudXYZI &pl, vector<orgtype> &types);\n  void pub_func(PointCloudXYZI &pl, const ros::Time &ct);\n  int  plane_judge(const PointCloudXYZI &pl, vector<orgtype> &types, uint i, uint &i_nex, Eigen::Vector3d &curr_direct);\n  bool 
small_plane(const PointCloudXYZI &pl, vector<orgtype> &types, uint i_cur, uint &i_nex, Eigen::Vector3d &curr_direct);\n  bool edge_jump_judge(const PointCloudXYZI &pl, vector<orgtype> &types, uint i, Surround nor_dir);\n  \n  int group_size;\n  double disA, disB, inf_bound;\n  double limit_maxmid, limit_midmin, limit_maxmin;\n  double p2l_ratio;\n  double jump_up_limit, jump_down_limit;\n  double cos160;\n  double edgea, edgeb;\n  double smallp_intersect, smallp_ratio;\n  double vx, vy, vz;\n};\n#endif"
  }
]