Full Code of karnikram/rp-vio for AI

main 42a0ac1c6541 cached
106 files
60.9 MB
367.6k tokens
205 symbols
1 requests
Download .txt
Showing preview only (1,239K chars total). Download the full file or copy to clipboard to get everything.
Repository: karnikram/rp-vio
Branch: main
Commit: 42a0ac1c6541
Files: 106
Total size: 60.9 MB

Directory structure:
gitextract_7sq47s8w/

├── LICENCE
├── README.md
├── camera_model/
│   ├── CMakeLists.txt
│   ├── cmake/
│   │   └── FindEigen.cmake
│   ├── include/
│   │   └── camodocal/
│   │       ├── calib/
│   │       │   └── CameraCalibration.h
│   │       ├── camera_models/
│   │       │   ├── Camera.h
│   │       │   ├── CameraFactory.h
│   │       │   ├── CataCamera.h
│   │       │   ├── CostFunctionFactory.h
│   │       │   ├── EquidistantCamera.h
│   │       │   ├── PinholeCamera.h
│   │       │   └── ScaramuzzaCamera.h
│   │       ├── chessboard/
│   │       │   ├── Chessboard.h
│   │       │   ├── ChessboardCorner.h
│   │       │   ├── ChessboardQuad.h
│   │       │   └── Spline.h
│   │       ├── gpl/
│   │       │   ├── EigenQuaternionParameterization.h
│   │       │   ├── EigenUtils.h
│   │       │   └── gpl.h
│   │       └── sparse_graph/
│   │           └── Transform.h
│   ├── instruction
│   ├── package.xml
│   ├── readme.md
│   └── src/
│       ├── calib/
│       │   └── CameraCalibration.cc
│       ├── camera_models/
│       │   ├── Camera.cc
│       │   ├── CameraFactory.cc
│       │   ├── CataCamera.cc
│       │   ├── CostFunctionFactory.cc
│       │   ├── EquidistantCamera.cc
│       │   ├── PinholeCamera.cc
│       │   └── ScaramuzzaCamera.cc
│       ├── chessboard/
│       │   └── Chessboard.cc
│       ├── gpl/
│       │   ├── EigenQuaternionParameterization.cc
│       │   └── gpl.cc
│       ├── intrinsic_calib.cc
│       └── sparse_graph/
│           └── Transform.cc
├── config/
│   ├── advio_12_config.yaml
│   ├── ol_market1_config.yaml
│   ├── rpvio_rviz_config.rviz
│   └── rpvio_sim_config.yaml
├── plane_segmentation/
│   ├── RecoverPlane_perpendicular.py
│   ├── crf_inference.py
│   ├── data_loader_new.py
│   ├── inference.py
│   ├── net.py
│   ├── openloris.txt
│   ├── pretrained_model/
│   │   ├── model.data-00000-of-00001
│   │   ├── model.index
│   │   └── model.meta
│   ├── requirements.txt
│   ├── train.py
│   └── utils.py
├── rpvio.patch
├── rpvio_estimator/
│   ├── CMakeLists.txt
│   ├── cmake/
│   │   └── FindEigen.cmake
│   ├── launch/
│   │   ├── advio_12.launch
│   │   ├── ol_market1.launch
│   │   ├── rpvio_rviz.launch
│   │   └── rpvio_sim.launch
│   ├── package.xml
│   └── src/
│       ├── estimator.cpp
│       ├── estimator.h
│       ├── estimator_node.cpp
│       ├── factor/
│       │   ├── homography_factor.h
│       │   ├── imu_factor.h
│       │   ├── integration_base.h
│       │   ├── marginalization_factor.cpp
│       │   ├── marginalization_factor.h
│       │   ├── pose_local_parameterization.cpp
│       │   ├── pose_local_parameterization.h
│       │   ├── projection_factor.cpp
│       │   ├── projection_factor.h
│       │   ├── projection_td_factor.cpp
│       │   └── projection_td_factor.h
│       ├── feature_manager.cpp
│       ├── feature_manager.h
│       ├── initial/
│       │   ├── initial_aligment.cpp
│       │   ├── initial_alignment.h
│       │   ├── initial_ex_rotation.cpp
│       │   ├── initial_ex_rotation.h
│       │   ├── initial_sfm.cpp
│       │   ├── initial_sfm.h
│       │   ├── solve_5pts.cpp
│       │   └── solve_5pts.h
│       ├── parameters.cpp
│       ├── parameters.h
│       └── utility/
│           ├── CameraPoseVisualization.cpp
│           ├── CameraPoseVisualization.h
│           ├── tic_toc.h
│           ├── utility.cpp
│           ├── utility.h
│           ├── visualization.cpp
│           └── visualization.h
├── rpvio_feature_tracker/
│   ├── CMakeLists.txt
│   ├── cmake/
│   │   └── FindEigen.cmake
│   ├── package.xml
│   └── src/
│       ├── feature_tracker.cpp
│       ├── feature_tracker.h
│       ├── feature_tracker_node.cpp
│       ├── parameters.cpp
│       ├── parameters.h
│       └── tic_toc.h
└── scripts/
    ├── convert_vins_to_tum.py
    ├── run_advio_12.sh
    ├── run_ol_market1.sh
    └── run_rpvio_sim.sh

================================================
FILE CONTENTS
================================================

================================================
FILE: LICENCE
================================================
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    {one line to give the program's name and a brief idea of what it does.}
    Copyright (C) {year}  {name of author}

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    {project}  Copyright (C) {year}  {fullname}
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.


================================================
FILE: README.md
================================================
## RP-VIO: Robust Plane-based Visual-Inertial Odometry for Dynamic Environments
Karnik Ram, Chaitanya Kharyal, Sudarshan S. Harithas, K. Madhava Krishna.

[[`arXiv`](https://arxiv.org/pdf/2103.10400.pdf)]
[[`Project Page`](https://karnikram.info/rp-vio/)]

In IROS 2021

<p align="center">
<a href="https://user-images.githubusercontent.com/12653355/111314569-88dfb000-8687-11eb-87c8-212f7ad13489.png"><img src="https://user-images.githubusercontent.com/12653355/111314569-88dfb000-8687-11eb-87c8-212f7ad13489.png" width="700"/></a>
</p>
RP-VIO is a monocular visual-inertial odometry (VIO) system that uses only planar features and their induced homographies, during both initialization and sliding-window estimation, for increased robustness and accuracy in dynamic environments.

## Setup
Our evaluation setup is a 6-core Intel Core i5-8400 CPU with 8GB RAM and a 1 TB HDD, running Ubuntu 18.04.1. We recommend using a more powerful setup, especially for heavy datasets like ADVIO or OpenLORIS.

### Pre-requisites
[ROS Melodic](http://wiki.ros.org/melodic) (OpenCV 3.2.0, Eigen 3.3.4-4)<br>
[Ceres Solver 1.14.0](https://github.com/ceres-solver/ceres-solver/releases)<br>
[EVO](https://github.com/MichaelGrupp/evo)

The versions indicated are the versions used in our evaluation setup, and we do not guarantee our code to run on newer versions like ROS Noetic (OpenCV 4.2).

### Build
Run the following commands in your terminal to clone our project and build,

```
    cd ~/catkin_ws/src
    git clone https://github.com/karnikram/rp-vio.git
    cd ../
    catkin_make -j4
    source ~/catkin_ws/devel/setup.bash
```


## Evaluation
We provide evaluation scripts to run RP-VIO on the [RPVIO-Sim](https://github.com/karnikram/rp-vio#rpvio-sim-dataset-1) dataset, and select sequences from the [OpenLORIS-Scene](https://lifelong-robotic-vision.github.io/dataset/scene.html), [ADVIO](https://github.com/AaltoVision/ADVIO), and [VIODE](https://github.com/kminoda/VIODE) datasets. The output errors from your evaluation might not be exactly the same as reported in our paper, but should be similar.

### RPVIO-Sim Dataset
Download the [dataset](https://github.com/karnikram/rp-vio#rpvio-sim-dataset-1) files to a parent folder, and then run the following commands to launch our evaluation script. The script runs rp-vio on each of the six sequences once and computes the ATE error statistics.

```
    cd ~/catkin_ws/src/rp-vio/scripts/
    ./run_rpvio_sim.sh <PATH-TO-DATASET-FOLDER>
```

To run the multiple planes version (RPVIO-Multi), checkout the corresponding branch by running `git checkout rpvio-multi`, and re-run the above script.

### Real-world sequences
We evaluate on two real-world sequences: the market1-1 sequence from the OpenLORIS-Scene dataset and the metro station sequence (12) from the ADVIO dataset. Both of these sequences along with their segmented plane masks are available as bagfiles for download [here](https://iiitaphyd-my.sharepoint.com/:f:/g/personal/robotics_iiit_ac_in/EozZ6vJP5UZFmZhA-9w0bBcBvTXpszD42fPx3x3ZlKvD6A?e=FtzFRz). After downloading and extracting the files run the following commands for evaluation,

```
    cd ~/catkin_ws/src/rp-vio/scripts/
    ./run_ol_market1.sh <PATH-TO-EXTRACTED-DATASET-FOLDER>
    ./run_advio_12.sh <PATH-TO-EXTRACTED-DATASET-FOLDER>
```

### Own data
To run RP-VIO on your own data, you need to provide synchronized monocular images, IMU readings, and plane masks on three separate ROS topics. The camera and IMU need to be properly calibrated and synchronized as there is no online calibration. A plane segmentation model to segment plane masks from images is provided [below](https://github.com/karnikram/rp-vio#plane-segmentation).

A semantic segmentation model can also be used as long as the RGB labels of the (static) planar semantic classes are provided. As an example, we evaluate on a sequence from the VIODE dataset (provided [here](https://iiitaphyd-my.sharepoint.com/:f:/g/personal/robotics_iiit_ac_in/EoxFVvuAxUdFsnXu0XJY0egBMFxB9D8XbNqe0jkUkRdjVg?e=G18fDo)) using semantic segmentation labels which are specified in the [config file](https://github.com/karnikram/rp-vio/blob/semantic-viode/config/viode_config.yaml). To run, 

```
    cd ~/catkin_ws/src/rp-vio/scripts
    git checkout semantic-viode
    ./run_viode_night.sh <PATH-TO-EXTRACTED-DATASET-FOLDER>
```

## Plane segmentation
We provide a pre-trained plane instance segmentation model, based on the [Plane-Recover](https://github.com/fuy34/planerecover) model. We retrained their model, with an added inter-plane constraint, on their SYNTHIA training data and two additional sequences (00,01) from the ScanNet dataset. The model was trained on a single Titan X (maxwell) GPU for about 700K iterations. We also provide the training script.

We run the model offline, after extracting and [processing](https://github.com/fuy34/planerecover#preparing-training-data) the input RGB images from their ROS bagfiles. Follow the steps given below to run the pre-trained model on your custom dataset (requires CUDA 9.0),

#### Create Environment 

Run the following commands to create a suitable conda environment,
```
cd plane_segmentation/
conda create --name plane_seg --file requirements.txt
conda activate plane_seg
```

#### Run inference

Now extract images from your dataset to a test folder, resize them to (192,320) (height, width), and run the following, 

```
python inference.py --dataset=<PATH_TO_DATASET> --output_dir=<PATH_TO_OUTPUT_DIRECTORY> --test_list=<TEST_DATA_LIST.txt FILE> --ckpt_file=<MODEL> --use_preprocessed=true 
```
*TEST_DATA_LIST.txt* is a file that points to every image within the test dataset, an example can be found [here](./plane_segmentation/openloris.txt).  *PATH_TO_DATASET* is the path to the parent directory of the test folder.


The results of the inference are stored in three folders, named *plane_sgmts* (predicted masks in grayscale), *plane_sgmts_vis* (predicted masks in color), and *plane_sgmts_modified* (grayscale masks suitable for visualization; feed this output to the CRF inference).

#### Run CRF inference

We also use a dense CRF model (from [PyDenseCRF](https://github.com/lucasb-eyer/pydensecrf)) to further refine the output masks. To run,

```
python crf_inference.py <rgb_image_dir> <labels_dir> <output_dir>
```
where the *labels_dir* is the path to the *plane_sgmts_modified* folder. 
 
We then write these outputs mask images back into the original bagfile on a separate topic for running with RP-VIO.

## RPVIO-Sim Dataset
<figure>
<a href="https://user-images.githubusercontent.com/12653355/111727645-48538280-8891-11eb-90db-027f82087586.png"><img src="https://user-images.githubusercontent.com/12653355/111727645-48538280-8891-11eb-90db-027f82087586.png" width="400"/></a>
</figure>
<br>

For an effective evaluation of the capabilities of modern VINS systems, we generate a highly-dynamic visual-inertial dataset using [AirSim](https://github.com/microsoft/AirSim/) which contains dynamic characters present throughout the sequences (including initialization), and with sufficient IMU excitation. Dynamic characters are progressively added, keeping everything else fixed, starting from no characters in the `static` sequence to eight characters in the `C8` sequence. All the generated sequences (six) in rosbag format, along with their groundtruth files, have been made available via [Zenodo](https://zenodo.org/record/4603494#.YE4BzlMzZH4).

Each rosbag contains RGB images published on the `/image` topic at 20 Hz, imu measurements published on the `/imu` topic at ~1000 Hz (which we sub-sample to 200Hz for our evaluations), and plane-instance mask images published on the `/mask` topic at 20 Hz. The groundtruth trajectory is saved as a txt file in TUM format. The parameters for the camera and IMU used in our dataset are as follows,
<br>
<figure>
<a href="https://user-images.githubusercontent.com/12653355/111068192-c3ade080-84ed-11eb-82ba-486ee0cfa2a4.png"><img src="https://user-images.githubusercontent.com/12653355/111068192-c3ade080-84ed-11eb-82ba-486ee0cfa2a4.png" width="300"/></a>
</figure>

To quantify the dynamic nature of our generated sequences, we compute the percentage of dynamic pixels out of all the pixels present in every image. We report these values in the following table,
<figure>
<a href="https://user-images.githubusercontent.com/12653355/111068119-6f0a6580-84ed-11eb-86ee-12571e7c7476.png"><img src="https://user-images.githubusercontent.com/12653355/111068119-6f0a6580-84ed-11eb-86ee-12571e7c7476.png" width="400"/></a>
</figure>

### TO-DO
- [ ] Provide Unreal Engine environment
- [ ] Provide AirSim recording scripts

## Acknowledgement
Our code is built upon [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono). Its implementations of feature tracking, IMU preintegration, IMU state initialization, the reprojection factor, and marginalization are used as such. Our contributions include planar features tracking, planar homography based initialization, and the planar homography factor. All these changes (corresponding to a slightly older version) are available as a [git patch file](./rpvio.patch).

For our simulated dataset, we imported several high-quality assets from the [FlightGoggles](https://flightgoggles.mit.edu/) project into [Unreal Engine](https://www.unrealengine.com) before integrating it with AirSim. The dynamic characters were downloaded from [Mixamo](https://mixamo.com).


================================================
FILE: camera_model/CMakeLists.txt
================================================
cmake_minimum_required(VERSION 2.8.3)
project(camera_model)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fPIC")

find_package(catkin REQUIRED COMPONENTS
    roscpp
    std_msgs
    )

find_package(Boost REQUIRED COMPONENTS filesystem program_options system)
include_directories(${Boost_INCLUDE_DIRS})

find_package(OpenCV REQUIRED)

# set(EIGEN_INCLUDE_DIR "/usr/local/include/eigen3")
find_package(Ceres REQUIRED)
include_directories(${CERES_INCLUDE_DIRS})


catkin_package(
    INCLUDE_DIRS include
    LIBRARIES camera_model
    CATKIN_DEPENDS roscpp std_msgs
#    DEPENDS system_lib
    )

include_directories(
    ${catkin_INCLUDE_DIRS}
    )

include_directories("include")


# Sources shared by the camera_model library and the Calibration tool.
# Listed once so the two targets cannot drift out of sync.
set(CAMERA_MODEL_SOURCES
    src/chessboard/Chessboard.cc
    src/calib/CameraCalibration.cc
    src/camera_models/Camera.cc
    src/camera_models/CameraFactory.cc
    src/camera_models/CostFunctionFactory.cc
    src/camera_models/PinholeCamera.cc
    src/camera_models/CataCamera.cc
    src/camera_models/EquidistantCamera.cc
    src/camera_models/ScaramuzzaCamera.cc
    src/sparse_graph/Transform.cc
    src/gpl/gpl.cc
    src/gpl/EigenQuaternionParameterization.cc)

# Standalone intrinsic-calibration executable (adds its own main).
add_executable(Calibration
    src/intrinsic_calib.cc
    ${CAMERA_MODEL_SOURCES})

# Library consumed by the other packages in this workspace.
add_library(camera_model ${CAMERA_MODEL_SOURCES})

target_link_libraries(Calibration ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
target_link_libraries(camera_model ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})


================================================
FILE: camera_model/cmake/FindEigen.cmake
================================================
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2015 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
#   used to endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: alexs.mac@gmail.com (Alex Stewart)
#

# FindEigen.cmake - Find Eigen library, version >= 3.
#
# This module defines the following variables:
#
# EIGEN_FOUND: TRUE iff Eigen is found.
# EIGEN_INCLUDE_DIRS: Include directories for Eigen.
#
# EIGEN_VERSION: Extracted from Eigen/src/Core/util/Macros.h
# EIGEN_WORLD_VERSION: Equal to 3 if EIGEN_VERSION = 3.2.0
# EIGEN_MAJOR_VERSION: Equal to 2 if EIGEN_VERSION = 3.2.0
# EIGEN_MINOR_VERSION: Equal to 0 if EIGEN_VERSION = 3.2.0
#
# The following variables control the behaviour of this module:
#
# EIGEN_INCLUDE_DIR_HINTS: List of additional directories in which to
#                          search for eigen includes, e.g: /timbuktu/eigen3.
#
# The following variables are also defined by this module, but in line with
# CMake recommended FindPackage() module style should NOT be referenced directly
# by callers (use the plural variables detailed above instead).  These variables
# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
# are NOT re-called (i.e. search for library is not repeated) if these variables
# are set with valid values _in the CMake cache_. This means that if these
# variables are set directly in the cache, either by the user in the CMake GUI,
# or by the user passing -DVAR=VALUE directives to CMake when called (which
# explicitly defines a cache variable), then they will be used verbatim,
# bypassing the HINTS variables and other hard-coded search locations.
#
# EIGEN_INCLUDE_DIR: Include directory for CXSparse, not including the
#                    include directory of any dependencies.

# Called if we failed to find Eigen or any of it's required dependencies,
# unsets all public (designed to be used externally) variables and reports
# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
macro(EIGEN_REPORT_NOT_FOUND REASON_MSG)
  unset(EIGEN_FOUND)
  unset(EIGEN_INCLUDE_DIRS)
  # Make results of search visible in the CMake GUI if Eigen has not
  # been found so that user does not have to toggle to advanced view.
  mark_as_advanced(CLEAR EIGEN_INCLUDE_DIR)
  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
  # use the camelcase library name, not uppercase.
  if (Eigen_FIND_QUIETLY)
    message(STATUS "Failed to find Eigen - " ${REASON_MSG} ${ARGN})
  elseif (Eigen_FIND_REQUIRED)
    message(FATAL_ERROR "Failed to find Eigen - " ${REASON_MSG} ${ARGN})
  else()
    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
    # but continues configuration and allows generation.
    message("-- Failed to find Eigen - " ${REASON_MSG} ${ARGN})
  endif ()
  # Because macros are expanded inline into the caller's scope, this
  # return() aborts the rest of the find module, not just the macro body.
  return()
endmacro(EIGEN_REPORT_NOT_FOUND)

# Protect against any alternative find_package scripts for this library having
# been called previously (in a client project) which set EIGEN_FOUND, but not
# the other variables we require / set here which could cause the search logic
# here to fail.
unset(EIGEN_FOUND)

# Search user-installed locations first, so that we prefer user installs
# to system installs where both exist.
list(APPEND EIGEN_CHECK_INCLUDE_DIRS
  /usr/local/include
  /usr/local/homebrew/include # Mac OS X
  /opt/local/var/macports/software # Mac OS X.
  /opt/local/include
  /usr/include)
# Additional suffixes to try appending to each search path.
list(APPEND EIGEN_CHECK_PATH_SUFFIXES
  eigen3 # Default root directory for Eigen.
  Eigen/include/eigen3 # Windows (for C:/Program Files prefix) < 3.3
  Eigen3/include/eigen3 ) # Windows (for C:/Program Files prefix) >= 3.3

# Search supplied hint directories first if supplied.
# NOTE: find_path() caches EIGEN_INCLUDE_DIR, so a value already in the
# CMake cache (e.g. set via -DEIGEN_INCLUDE_DIR=...) is used verbatim.
find_path(EIGEN_INCLUDE_DIR
  NAMES Eigen/Core
  PATHS ${EIGEN_INCLUDE_DIR_HINTS}
  ${EIGEN_CHECK_INCLUDE_DIRS}
  PATH_SUFFIXES ${EIGEN_CHECK_PATH_SUFFIXES})

if (NOT EIGEN_INCLUDE_DIR OR
    NOT EXISTS ${EIGEN_INCLUDE_DIR})
  eigen_report_not_found(
    "Could not find eigen3 include directory, set EIGEN_INCLUDE_DIR to "
    "path to eigen3 include directory, e.g. /usr/local/include/eigen3.")
endif (NOT EIGEN_INCLUDE_DIR OR
       NOT EXISTS ${EIGEN_INCLUDE_DIR})

# Mark internally as found, then verify. EIGEN_REPORT_NOT_FOUND() unsets
# if called.
set(EIGEN_FOUND TRUE)

# Extract Eigen version from Eigen/src/Core/util/Macros.h
if (EIGEN_INCLUDE_DIR)
  set(EIGEN_VERSION_FILE ${EIGEN_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h)
  if (NOT EXISTS ${EIGEN_VERSION_FILE})
    eigen_report_not_found(
      "Could not find file: ${EIGEN_VERSION_FILE} "
      "containing version information in Eigen install located at: "
      "${EIGEN_INCLUDE_DIR}.")
  else (NOT EXISTS ${EIGEN_VERSION_FILE})
    file(READ ${EIGEN_VERSION_FILE} EIGEN_VERSION_FILE_CONTENTS)

    # Each version component is pulled out in two steps: first grab the
    # whole "#define ... <n>" line, then strip it down to the number.
    string(REGEX MATCH "#define EIGEN_WORLD_VERSION [0-9]+"
      EIGEN_WORLD_VERSION "${EIGEN_VERSION_FILE_CONTENTS}")
    string(REGEX REPLACE "#define EIGEN_WORLD_VERSION ([0-9]+)" "\\1"
      EIGEN_WORLD_VERSION "${EIGEN_WORLD_VERSION}")

    string(REGEX MATCH "#define EIGEN_MAJOR_VERSION [0-9]+"
      EIGEN_MAJOR_VERSION "${EIGEN_VERSION_FILE_CONTENTS}")
    string(REGEX REPLACE "#define EIGEN_MAJOR_VERSION ([0-9]+)" "\\1"
      EIGEN_MAJOR_VERSION "${EIGEN_MAJOR_VERSION}")

    string(REGEX MATCH "#define EIGEN_MINOR_VERSION [0-9]+"
      EIGEN_MINOR_VERSION "${EIGEN_VERSION_FILE_CONTENTS}")
    string(REGEX REPLACE "#define EIGEN_MINOR_VERSION ([0-9]+)" "\\1"
      EIGEN_MINOR_VERSION "${EIGEN_MINOR_VERSION}")

    # This is on a single line s/t CMake does not interpret it as a list of
    # elements and insert ';' separators which would result in 3.;2.;0 nonsense.
    set(EIGEN_VERSION "${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION}")
  endif (NOT EXISTS ${EIGEN_VERSION_FILE})
endif (EIGEN_INCLUDE_DIR)

# Set standard CMake FindPackage variables if found.
if (EIGEN_FOUND)
  set(EIGEN_INCLUDE_DIRS ${EIGEN_INCLUDE_DIR})
endif (EIGEN_FOUND)

# Handle REQUIRED / QUIET optional arguments and version.
# FPHSA (re)sets EIGEN_FOUND based on REQUIRED_VARS and any version
# requirement the caller passed to find_package().
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Eigen
  REQUIRED_VARS EIGEN_INCLUDE_DIRS
  VERSION_VAR EIGEN_VERSION)

# Only mark internal variables as advanced if we found Eigen, otherwise
# leave it visible in the standard GUI for the user to set manually.
if (EIGEN_FOUND)
  mark_as_advanced(FORCE EIGEN_INCLUDE_DIR)
endif (EIGEN_FOUND)


================================================
FILE: camera_model/include/camodocal/calib/CameraCalibration.h
================================================
#ifndef CAMERACALIBRATION_H
#define CAMERACALIBRATION_H

#include <opencv2/core/core.hpp>

#include "camodocal/camera_models/Camera.h"

namespace camodocal
{

// Chessboard-based intrinsic camera calibration.
// Accumulates per-image chessboard corner detections via addChessboardData()
// and estimates the parameters of m_camera in calibrate(); the heavy lifting
// (initial estimate + refinement) lives in src/calib/CameraCalibration.cc.
class CameraCalibration
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    CameraCalibration();

    // modelType selects the projection model the factory should build;
    // boardSize/squareSize describe the calibration chessboard
    // (presumably inner-corner count and physical square edge length --
    // confirm against the .cc implementation).
    CameraCalibration(Camera::ModelType modelType,
                      const std::string& cameraName,
                      const cv::Size& imageSize,
                      const cv::Size& boardSize,
                      float squareSize);

    // Discards all accumulated chessboard observations.
    void clear(void);

    // Registers one image's detected chessboard corners.
    void addChessboardData(const std::vector<cv::Point2f>& corners);

    // Runs the calibration over all added data; returns success.
    bool calibrate(void);

    // Number of chessboard views added so far.
    int sampleCount(void) const;
    // Accessors for the raw observations: 2D detections per view and the
    // corresponding 3D board points per view.
    std::vector<std::vector<cv::Point2f> >& imagePoints(void);
    const std::vector<std::vector<cv::Point2f> >& imagePoints(void) const;
    std::vector<std::vector<cv::Point3f> >& scenePoints(void);
    const std::vector<std::vector<cv::Point3f> >& scenePoints(void) const;
    CameraPtr& camera(void);
    const CameraConstPtr camera(void) const;

    // 2x2 covariance of the corner measurements, estimated by calibration.
    Eigen::Matrix2d& measurementCovariance(void);
    const Eigen::Matrix2d& measurementCovariance(void) const;

    // Per-view camera poses (board-to-camera extrinsics) stored row-wise.
    cv::Mat& cameraPoses(void);
    const cv::Mat& cameraPoses(void) const;

    // Overlays calibration results (e.g. reprojections) onto the images.
    void drawResults(std::vector<cv::Mat>& images) const;

    // Serializes the calibrated camera parameters to a YAML file.
    void writeParams(const std::string& filename) const;

    // Persist / restore the raw chessboard observations, allowing
    // calibration to be re-run without re-detecting corners.
    bool writeChessboardData(const std::string& filename) const;
    bool readChessboardData(const std::string& filename);

    void setVerbose(bool verbose);

private:
    // Estimates intrinsics and per-view extrinsics (rvecs/tvecs) for the
    // given camera from the stored observations.
    bool calibrateHelper(CameraPtr& camera,
                         std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs) const;

    // Non-linear refinement of the parameters produced by calibrateHelper().
    void optimize(CameraPtr& camera,
                  std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs) const;

    // Binary (de)serialization helpers used by read/writeChessboardData().
    template<typename T>
    void readData(std::ifstream& ifs, T& data) const;

    template<typename T>
    void writeData(std::ofstream& ofs, T data) const;

    cv::Size m_boardSize;       // chessboard dimensions
    float m_squareSize;         // chessboard square size

    CameraPtr m_camera;         // camera being calibrated
    cv::Mat m_cameraPoses;      // per-view extrinsics

    // One inner vector per chessboard view.
    std::vector<std::vector<cv::Point2f> > m_imagePoints;
    std::vector<std::vector<cv::Point3f> > m_scenePoints;

    Eigen::Matrix2d m_measurementCovariance;

    bool m_verbose;             // enables progress/diagnostic output
};

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/Camera.h
================================================
#ifndef CAMERA_H
#define CAMERA_H

#include <boost/shared_ptr.hpp>
#include <eigen3/Eigen/Dense>
#include <opencv2/core/core.hpp>
#include <vector>

namespace camodocal
{

// Abstract base class for all camera projection models (pinhole, Mei
// catadioptric, Kannala-Brandt fisheye, Scaramuzza omnidirectional).
// Concrete models implement the lift/project primitives; the reprojection
// helpers defined here are shared across models.
class Camera
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // Supported projection models; used by CameraFactory to pick a subclass.
    enum ModelType
    {
        KANNALA_BRANDT,
        MEI,
        PINHOLE,
        SCARAMUZZA
    };

    // Parameters common to every model: the model type, a camera name, and
    // the image dimensions. Concrete models derive from this and add their
    // intrinsics, plus YAML (de)serialization.
    class Parameters
    {
    public:
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW
        Parameters(ModelType modelType);

        Parameters(ModelType modelType, const std::string& cameraName,
                   int w, int h);

        // Mutable accessors.
        ModelType& modelType(void);
        std::string& cameraName(void);
        int& imageWidth(void);
        int& imageHeight(void);

        // Read-only accessors.
        ModelType modelType(void) const;
        const std::string& cameraName(void) const;
        int imageWidth(void) const;
        int imageHeight(void) const;

        // Number of intrinsic parameters for the concrete model.
        int nIntrinsics(void) const;

        virtual bool readFromYamlFile(const std::string& filename) = 0;
        virtual void writeToYamlFile(const std::string& filename) const = 0;

    protected:
        ModelType m_modelType;
        int m_nIntrinsics;
        std::string m_cameraName;
        int m_imageWidth;
        int m_imageHeight;
    };

    virtual ModelType modelType(void) const = 0;
    virtual const std::string& cameraName(void) const = 0;
    virtual int imageWidth(void) const = 0;
    virtual int imageHeight(void) const = 0;

    // Optional pixel mask stored in m_mask (see protected member below).
    virtual cv::Mat& mask(void);
    virtual const cv::Mat& mask(void) const;

    // Estimates the intrinsic parameters from chessboard observations.
    virtual void estimateIntrinsics(const cv::Size& boardSize,
                                    const std::vector< std::vector<cv::Point3f> >& objectPoints,
                                    const std::vector< std::vector<cv::Point2f> >& imagePoints) = 0;
    // Estimates the camera pose (rvec/tvec) for one view given 3D-2D
    // correspondences; has a default implementation shared by all models.
    virtual void estimateExtrinsics(const std::vector<cv::Point3f>& objectPoints,
                                    const std::vector<cv::Point2f>& imagePoints,
                                    cv::Mat& rvec, cv::Mat& tvec) const;

    // Lift points from the image plane to the sphere
    virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const = 0;
    //%output P

    // Lift points from the image plane to the projective space
    virtual void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const = 0;
    //%output P

    // Projects 3D points to the image plane (Pi function)
    virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const = 0;
    //%output p

    // Projects 3D points to the image plane (Pi function)
    // and calculates jacobian
    //virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
    //                          Eigen::Matrix<double,2,3>& J) const = 0;
    //%output p
    //%output J

    // Projects an (already undistorted) normalized point to pixel coordinates.
    virtual void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const = 0;
    //%output p

    //virtual void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const = 0;
    // Builds cv::remap-compatible undistortion/rectification maps; negative
    // fx/fy/cx/cy select model defaults. Returns the new camera matrix.
    virtual cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
                                            float fx = -1.0f, float fy = -1.0f,
                                            cv::Size imageSize = cv::Size(0, 0),
                                            float cx = -1.0f, float cy = -1.0f,
                                            cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const = 0;

    // Total number of parameters (see readParameters/writeParameters below).
    virtual int parameterCount(void) const = 0;

    // Flat-vector (de)serialization of the model parameters, e.g. for
    // optimization backends.
    virtual void readParameters(const std::vector<double>& parameters) = 0;
    virtual void writeParameters(std::vector<double>& parameters) const = 0;

    virtual void writeParametersToYamlFile(const std::string& filename) const = 0;

    virtual std::string parametersToString(void) const = 0;

    /**
     * \brief Calculates the reprojection distance between points
     *
     * \param P1 first 3D point coordinates
     * \param P2 second 3D point coordinates
     * \return euclidean distance in the plane
     */
    double reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const;

    // RMS reprojection error over all views; optionally reports the
    // per-view errors through perViewErrors.
    double reprojectionError(const std::vector< std::vector<cv::Point3f> >& objectPoints,
                             const std::vector< std::vector<cv::Point2f> >& imagePoints,
                             const std::vector<cv::Mat>& rvecs,
                             const std::vector<cv::Mat>& tvecs,
                             cv::OutputArray perViewErrors = cv::noArray()) const;

    // Reprojection error of a single 3D point P observed at observed_p,
    // given the camera pose (camera_q, camera_t).
    double reprojectionError(const Eigen::Vector3d& P,
                             const Eigen::Quaterniond& camera_q,
                             const Eigen::Vector3d& camera_t,
                             const Eigen::Vector2d& observed_p) const;

    // Projects objectPoints through this model at pose (rvec, tvec).
    void projectPoints(const std::vector<cv::Point3f>& objectPoints,
                       const cv::Mat& rvec,
                       const cv::Mat& tvec,
                       std::vector<cv::Point2f>& imagePoints) const;
protected:
    cv::Mat m_mask;  // optional mask exposed via mask()
};

// Shared-pointer aliases used throughout camodocal.
typedef boost::shared_ptr<Camera> CameraPtr;
typedef boost::shared_ptr<const Camera> CameraConstPtr;

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/CameraFactory.h
================================================
#ifndef CAMERAFACTORY_H
#define CAMERAFACTORY_H

#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>

#include "camodocal/camera_models/Camera.h"

namespace camodocal
{

// Singleton factory that constructs Camera instances for the supported
// projection models (see Camera::ModelType), either from an explicit
// model type or from a YAML calibration file.
class CameraFactory
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    CameraFactory();

    // Returns the shared, process-wide factory instance.
    static boost::shared_ptr<CameraFactory> instance(void);

    // Creates a camera of the given model type for the given camera
    // name and image size.
    CameraPtr generateCamera(Camera::ModelType modelType,
                             const std::string& cameraName,
                             cv::Size imageSize) const;

    // Creates a camera whose model type and intrinsics are read from
    // the given YAML calibration file.
    CameraPtr generateCameraFromYamlFile(const std::string& filename);

private:
    // Backing storage for instance().
    static boost::shared_ptr<CameraFactory> m_instance;
};

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/CataCamera.h
================================================
#ifndef CATACAMERA_H
#define CATACAMERA_H

#include <opencv2/core/core.hpp>
#include <string>

#include "ceres/rotation.h"
#include "Camera.h"

namespace camodocal
{

/**
 * C. Mei, and P. Rives, Single View Point Omnidirectional Camera Calibration
 * from Planar Grids, ICRA 2007
 */

// Catadioptric (omnidirectional) camera using the unified projection
// model of Mei and Rives (cited above): projection onto the unit
// sphere controlled by the mirror parameter xi, radial/tangential
// distortion (k1, k2, p1, p2), and a generalized projection
// (gamma1, gamma2, u0, v0).
class CataCamera: public Camera
{
public:
    // Intrinsic parameter set for the unified projection model,
    // serializable to and from YAML.
    class Parameters: public Camera::Parameters
    {
    public:
        Parameters();
        Parameters(const std::string& cameraName,
                   int w, int h,
                   double xi,
                   double k1, double k2, double p1, double p2,
                   double gamma1, double gamma2, double u0, double v0);

        // Mutable accessors.
        double& xi(void);
        double& k1(void);
        double& k2(void);
        double& p1(void);
        double& p2(void);
        double& gamma1(void);
        double& gamma2(void);
        double& u0(void);
        double& v0(void);

        // Read-only accessors.
        double xi(void) const;
        double k1(void) const;
        double k2(void) const;
        double p1(void) const;
        double p2(void) const;
        double gamma1(void) const;
        double gamma2(void) const;
        double u0(void) const;
        double v0(void) const;

        bool readFromYamlFile(const std::string& filename);
        void writeToYamlFile(const std::string& filename) const;

        Parameters& operator=(const Parameters& other);
        friend std::ostream& operator<< (std::ostream& out, const Parameters& params);

    private:
        double m_xi;
        double m_k1;
        double m_k2;
        double m_p1;
        double m_p2;
        double m_gamma1;
        double m_gamma2;
        double m_u0;
        double m_v0;
    };

    CataCamera();

    /**
    * \brief Constructor from the projection model parameters
    */
    CataCamera(const std::string& cameraName,
               int imageWidth, int imageHeight,
               double xi, double k1, double k2, double p1, double p2,
               double gamma1, double gamma2, double u0, double v0);
    /**
    * \brief Constructor from the projection model parameters
    */
    CataCamera(const Parameters& params);

    Camera::ModelType modelType(void) const;
    const std::string& cameraName(void) const;
    int imageWidth(void) const;
    int imageHeight(void) const;

    // Estimates initial intrinsics from chessboard observations.
    void estimateIntrinsics(const cv::Size& boardSize,
                            const std::vector< std::vector<cv::Point3f> >& objectPoints,
                            const std::vector< std::vector<cv::Point2f> >& imagePoints);

    // Lift points from the image plane to the sphere
    void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Lift points from the image plane to the projective space
    void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Projects 3D points to the image plane (Pi function)
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
    //%output p

    // Projects 3D points to the image plane (Pi function)
    // and calculates jacobian
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
                      Eigen::Matrix<double,2,3>& J) const;
    //%output p
    //%output J

    // Applies the projection (no distortion) to an already-undistorted
    // normalized point p_u.
    void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
    //%output p

    // Templated projection for use inside Ceres autodiff cost
    // functions; defined at the bottom of this header.
    template <typename T>
    static void spaceToPlane(const T* const params,
                             const T* const q, const T* const t,
                             const Eigen::Matrix<T, 3, 1>& P,
                             Eigen::Matrix<T, 2, 1>& p);

    // Computes the distortion offset d_u for a normalized point p_u
    // (and optionally its 2x2 Jacobian).
    void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const;
    void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
                    Eigen::Matrix2d& J) const;

    void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
    cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
                                    float fx = -1.0f, float fy = -1.0f,
                                    cv::Size imageSize = cv::Size(0, 0),
                                    float cx = -1.0f, float cy = -1.0f,
                                    cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;

    int parameterCount(void) const;

    const Parameters& getParameters(void) const;
    void setParameters(const Parameters& parameters);

    // Bulk parameter (de)serialization as a flat vector.
    void readParameters(const std::vector<double>& parameterVec);
    void writeParameters(std::vector<double>& parameterVec) const;

    void writeParametersToYamlFile(const std::string& filename) const;

    std::string parametersToString(void) const;

private:
    Parameters mParameters;

    // Cached inverse projection-matrix entries -- presumably
    // precomputed from gamma1/gamma2/u0/v0 when parameters are set.
    double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
    // True when all distortion coefficients are zero, allowing
    // distortion to be skipped.
    bool m_noDistortion;
};

typedef boost::shared_ptr<CataCamera> CataCameraPtr;
typedef boost::shared_ptr<const CataCamera> CataCameraConstPtr;

// Projects a world-frame point P into pixel coordinates p under the
// unified (Mei) model, given the camera pose as a quaternion q
// (Eigen layout x, y, z, w) and translation t, and the intrinsics in
// params (xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0).
// Templated so it can be used with Ceres autodiff Jet types.
template <typename T>
void
CataCamera::spaceToPlane(const T* const params,
                         const T* const q, const T* const t,
                         const Eigen::Matrix<T, 3, 1>& P,
                         Eigen::Matrix<T, 2, 1>& p)
{
    // Copy the world point into a raw array for the Ceres helper.
    T pt_world[3] = {T(P(0)), T(P(1)), T(P(2))};

    // Eigen stores quaternions as (x, y, z, w); Ceres expects
    // (w, x, y, z).
    T q_wxyz[4] = {q[3], q[0], q[1], q[2]};

    // Rotate into the camera frame, then translate.
    T pt_cam[3];
    ceres::QuaternionRotatePoint(q_wxyz, pt_world, pt_cam);
    for (int i = 0; i < 3; ++i)
        pt_cam[i] += t[i];

    // Unpack intrinsics.
    const T xi     = params[0];
    const T k1     = params[1];
    const T k2     = params[2];
    const T p1     = params[3];
    const T p2     = params[4];
    const T gamma1 = params[5];
    const T gamma2 = params[6];
    const T u0     = params[7];
    const T v0     = params[8];
    const T alpha  = T(0); // skew is not modeled here

    // Normalize the point onto the unit sphere.
    T len = sqrt(pt_cam[0] * pt_cam[0] + pt_cam[1] * pt_cam[1] + pt_cam[2] * pt_cam[2]);
    pt_cam[0] /= len;
    pt_cam[1] /= len;
    pt_cam[2] /= len;

    // Project onto the normalized image plane, shifted by xi.
    T mx = pt_cam[0] / (pt_cam[2] + xi);
    T my = pt_cam[1] / (pt_cam[2] + xi);

    // Radial (radial_l) and tangential (dx, dy) distortion terms.
    T r2 = mx * mx + my * my;
    T radial_l = T(1.0) + k1 * r2 + k2 * r2 * r2;
    T dx = T(2.0) * p1 * mx * my + p2 * (r2 + T(2.0) * mx * mx);
    T dy = p1 * (r2 + T(2.0) * my * my) + T(2.0) * p2 * mx * my;

    mx = radial_l * mx + dx;
    my = radial_l * my + dy;

    // Generalized projection to pixel coordinates.
    p(0) = gamma1 * (mx + alpha * my) + u0;
    p(1) = gamma2 * my + v0;
}

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/CostFunctionFactory.h
================================================
#ifndef COSTFUNCTIONFACTORY_H
#define COSTFUNCTIONFACTORY_H

#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>

#include "camodocal/camera_models/Camera.h"

namespace ceres
{
    class CostFunction;
}

namespace camodocal
{

// Bit flags selecting which parameter groups a generated cost
// function exposes for optimization; combine with bitwise OR.
enum
{
    CAMERA_INTRINSICS =         1 << 0,
    CAMERA_POSE =               1 << 1,
    POINT_3D =                  1 << 2,
    ODOMETRY_INTRINSICS =       1 << 3,
    ODOMETRY_3D_POSE =          1 << 4,
    ODOMETRY_6D_POSE =          1 << 5,
    CAMERA_ODOMETRY_TRANSFORM = 1 << 6
};

// Singleton factory producing Ceres reprojection cost functions for a
// given camera model; the overload chosen determines which quantities
// are fixed observations and which are optimized (per the flags).
class CostFunctionFactory
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    CostFunctionFactory();

    // Returns the shared, process-wide factory instance.
    static boost::shared_ptr<CostFunctionFactory> instance(void);

    // Fixed 3D point and 2D observation.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
                                              const Eigen::Vector3d& observed_P,
                                              const Eigen::Vector2d& observed_p,
                                              int flags) const;

    // As above, with a residual weighting (square-root precision) matrix.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
                                              const Eigen::Vector3d& observed_P,
                                              const Eigen::Vector2d& observed_p,
                                              const Eigen::Matrix2d& sqrtPrecisionMat,
                                              int flags) const;

    // Fixed 2D observation only.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
                                              const Eigen::Vector2d& observed_p,
                                              int flags, bool optimize_cam_odo_z = true) const;

    // Fixed 2D observation with a residual weighting matrix.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
                                              const Eigen::Vector2d& observed_p,
                                              const Eigen::Matrix2d& sqrtPrecisionMat,
                                              int flags, bool optimize_cam_odo_z = true) const;

    // Fixed odometry pose (position + attitude) and 2D observation.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
                                              const Eigen::Vector3d& odo_pos,
                                              const Eigen::Vector3d& odo_att,
                                              const Eigen::Vector2d& observed_p,
                                              int flags, bool optimize_cam_odo_z = true) const;

    // Fixed camera-odometry extrinsics, odometry pose, and 2D observation.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
                                              const Eigen::Quaterniond& cam_odo_q,
                                              const Eigen::Vector3d& cam_odo_t,
                                              const Eigen::Vector3d& odo_pos,
                                              const Eigen::Vector3d& odo_att,
                                              const Eigen::Vector2d& observed_p,
                                              int flags) const;

    // Stereo variant: one 3D point observed in a left and a right camera.
    ceres::CostFunction* generateCostFunction(const CameraConstPtr& cameraLeft,
                                              const CameraConstPtr& cameraRight,
                                              const Eigen::Vector3d& observed_P,
                                              const Eigen::Vector2d& observed_p_left,
                                              const Eigen::Vector2d& observed_p_right) const;

private:
    // Backing storage for instance().
    static boost::shared_ptr<CostFunctionFactory> m_instance;
};

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/EquidistantCamera.h
================================================
#ifndef EQUIDISTANTCAMERA_H
#define EQUIDISTANTCAMERA_H

#include <opencv2/core/core.hpp>
#include <string>

#include "ceres/rotation.h"
#include "Camera.h"

namespace camodocal
{

/**
 * J. Kannala, and S. Brandt, A Generic Camera Model and Calibration Method
 * for Conventional, Wide-Angle, and Fish-Eye Lenses, PAMI 2006
 */

// Fisheye camera using the equidistant projection model of Kannala
// and Brandt (cited above): image radius is an odd polynomial
// r(theta) in the incidence angle with coefficients k2..k5 (the
// leading coefficient fixed to 1), followed by an axis-aligned
// affine mapping (mu, mv, u0, v0) to pixels.
class EquidistantCamera: public Camera
{
public:
    // Intrinsic parameter set for the equidistant model,
    // serializable to and from YAML.
    class Parameters: public Camera::Parameters
    {
    public:
        Parameters();
        Parameters(const std::string& cameraName,
                   int w, int h,
                   double k2, double k3, double k4, double k5,
                   double mu, double mv,
                   double u0, double v0);

        // Mutable accessors.
        double& k2(void);
        double& k3(void);
        double& k4(void);
        double& k5(void);
        double& mu(void);
        double& mv(void);
        double& u0(void);
        double& v0(void);

        // Read-only accessors.
        double k2(void) const;
        double k3(void) const;
        double k4(void) const;
        double k5(void) const;
        double mu(void) const;
        double mv(void) const;
        double u0(void) const;
        double v0(void) const;

        bool readFromYamlFile(const std::string& filename);
        void writeToYamlFile(const std::string& filename) const;

        Parameters& operator=(const Parameters& other);
        friend std::ostream& operator<< (std::ostream& out, const Parameters& params);

    private:
        // projection
        double m_k2;
        double m_k3;
        double m_k4;
        double m_k5;

        double m_mu;
        double m_mv;
        double m_u0;
        double m_v0;
    };

    EquidistantCamera();

    /**
    * \brief Constructor from the projection model parameters
    */
    EquidistantCamera(const std::string& cameraName,
                      int imageWidth, int imageHeight,
                      double k2, double k3, double k4, double k5,
                      double mu, double mv,
                      double u0, double v0);
    /**
    * \brief Constructor from the projection model parameters
    */
    EquidistantCamera(const Parameters& params);

    Camera::ModelType modelType(void) const;
    const std::string& cameraName(void) const;
    int imageWidth(void) const;
    int imageHeight(void) const;

    // Estimates initial intrinsics from chessboard observations.
    void estimateIntrinsics(const cv::Size& boardSize,
                            const std::vector< std::vector<cv::Point3f> >& objectPoints,
                            const std::vector< std::vector<cv::Point2f> >& imagePoints);

    // Lift points from the image plane to the sphere
    virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Lift points from the image plane to the projective space
    void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Projects 3D points to the image plane (Pi function)
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
    //%output p

    // Projects 3D points to the image plane (Pi function)
    // and calculates jacobian
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
                      Eigen::Matrix<double,2,3>& J) const;
    //%output p
    //%output J

    // Applies the projection to an already-undistorted normalized
    // point p_u.
    void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
    //%output p

    // Templated projection for use inside Ceres autodiff cost
    // functions; defined at the bottom of this header.
    template <typename T>
    static void spaceToPlane(const T* const params,
                             const T* const q, const T* const t,
                             const Eigen::Matrix<T, 3, 1>& P,
                             Eigen::Matrix<T, 2, 1>& p);

    void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
    cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
                                    float fx = -1.0f, float fy = -1.0f,
                                    cv::Size imageSize = cv::Size(0, 0),
                                    float cx = -1.0f, float cy = -1.0f,
                                    cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;

    int parameterCount(void) const;

    const Parameters& getParameters(void) const;
    void setParameters(const Parameters& parameters);

    // Bulk parameter (de)serialization as a flat vector.
    void readParameters(const std::vector<double>& parameterVec);
    void writeParameters(std::vector<double>& parameterVec) const;

    void writeParametersToYamlFile(const std::string& filename) const;

    std::string parametersToString(void) const;

private:
    // Evaluates the odd distortion polynomial r(theta); defined at
    // the bottom of this header.
    template<typename T>
    static T r(T k2, T k3, T k4, T k5, T theta);


    // Fits an odd polynomial of degree n to (x, y) samples; used when
    // inverting the projection.
    void fitOddPoly(const std::vector<double>& x, const std::vector<double>& y,
                    int n, std::vector<double>& coeffs) const;

    // Recovers the incidence angle theta and azimuth phi for an
    // undistorted normalized point p_u.
    void backprojectSymmetric(const Eigen::Vector2d& p_u,
                              double& theta, double& phi) const;

    Parameters mParameters;

    // Cached inverse projection-matrix entries -- presumably
    // precomputed from mu/mv/u0/v0 when parameters are set.
    double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
};

typedef boost::shared_ptr<EquidistantCamera> EquidistantCameraPtr;
typedef boost::shared_ptr<const EquidistantCamera> EquidistantCameraConstPtr;

// Evaluates the equidistant distortion polynomial
//   r(theta) = theta + k2*theta^3 + k3*theta^5 + k4*theta^7 + k5*theta^9,
// i.e. an odd polynomial in the incidence angle theta whose leading
// (k1) coefficient is fixed to 1. Written as explicit products so the
// floating-point evaluation order is deterministic; templated so it
// works with Ceres autodiff Jet types.
template<typename T>
T
EquidistantCamera::r(T k2, T k3, T k4, T k5, T theta)
{
    // k1 = 1
    return theta +
           k2 * theta * theta * theta +
           k3 * theta * theta * theta * theta * theta +
           k4 * theta * theta * theta * theta * theta * theta * theta +
           k5 * theta * theta * theta * theta * theta * theta * theta * theta * theta;
}

// Projects a world-frame point P into pixel coordinates p under the
// equidistant (Kannala-Brandt) model, given the camera pose as a
// quaternion q (Eigen layout x, y, z, w) and translation t, and the
// intrinsics in params (k2, k3, k4, k5, mu, mv, u0, v0).
// Templated so it can be used with Ceres autodiff Jet types.
template <typename T>
void
EquidistantCamera::spaceToPlane(const T* const params,
                                const T* const q, const T* const t,
                                const Eigen::Matrix<T, 3, 1>& P,
                                Eigen::Matrix<T, 2, 1>& p)
{
    // Copy the world point into a raw array for the Ceres helper.
    T pt_world[3] = {T(P(0)), T(P(1)), T(P(2))};

    // Eigen stores quaternions as (x, y, z, w); Ceres expects
    // (w, x, y, z).
    T q_wxyz[4] = {q[3], q[0], q[1], q[2]};

    // Rotate into the camera frame, then translate.
    T pt_cam[3];
    ceres::QuaternionRotatePoint(q_wxyz, pt_world, pt_cam);
    for (int i = 0; i < 3; ++i)
        pt_cam[i] += t[i];

    // Unpack intrinsics.
    const T k2 = params[0];
    const T k3 = params[1];
    const T k4 = params[2];
    const T k5 = params[3];
    const T mu = params[4];
    const T mv = params[5];
    const T u0 = params[6];
    const T v0 = params[7];

    // Incidence angle theta (from the optical axis) and azimuth phi
    // of the viewing ray.
    T len = sqrt(pt_cam[0] * pt_cam[0] + pt_cam[1] * pt_cam[1] + pt_cam[2] * pt_cam[2]);
    T theta = acos(pt_cam[2] / len);
    T phi = atan2(pt_cam[1], pt_cam[0]);

    // Radial position r(theta) on the image plane, back to Cartesian.
    Eigen::Matrix<T,2,1> p_u = r(k2, k3, k4, k5, theta) * Eigen::Matrix<T,2,1>(cos(phi), sin(phi));

    // Affine mapping to pixel coordinates.
    p(0) = mu * p_u(0) + u0;
    p(1) = mv * p_u(1) + v0;
}

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/PinholeCamera.h
================================================
#ifndef PINHOLECAMERA_H
#define PINHOLECAMERA_H

#include <opencv2/core/core.hpp>
#include <string>

#include "ceres/rotation.h"
#include "Camera.h"

namespace camodocal
{

// Standard pinhole camera with radial (k1, k2) and tangential
// (p1, p2) distortion and intrinsics (fx, fy, cx, cy).
class PinholeCamera: public Camera
{
public:
    // Intrinsic parameter set for the pinhole model, serializable to
    // and from YAML.
    class Parameters: public Camera::Parameters
    {
    public:
        Parameters();
        Parameters(const std::string& cameraName,
                   int w, int h,
                   double k1, double k2, double p1, double p2,
                   double fx, double fy, double cx, double cy);

        // Mutable accessors.
        double& k1(void);
        double& k2(void);
        double& p1(void);
        double& p2(void);
        double& fx(void);
        double& fy(void);
        double& cx(void);
        double& cy(void);

        // Read-only accessors.
        // NOTE(review): xi() looks like a copy-paste leftover from
        // CataCamera::Parameters -- the pinhole model has no mirror
        // parameter and no m_xi member; confirm before removing.
        double xi(void) const;
        double k1(void) const;
        double k2(void) const;
        double p1(void) const;
        double p2(void) const;
        double fx(void) const;
        double fy(void) const;
        double cx(void) const;
        double cy(void) const;

        bool readFromYamlFile(const std::string& filename);
        void writeToYamlFile(const std::string& filename) const;

        Parameters& operator=(const Parameters& other);
        friend std::ostream& operator<< (std::ostream& out, const Parameters& params);

    private:
        double m_k1;
        double m_k2;
        double m_p1;
        double m_p2;
        double m_fx;
        double m_fy;
        double m_cx;
        double m_cy;
    };

    PinholeCamera();

    /**
    * \brief Constructor from the projection model parameters
    */
    PinholeCamera(const std::string& cameraName,
                  int imageWidth, int imageHeight,
                  double k1, double k2, double p1, double p2,
                  double fx, double fy, double cx, double cy);
    /**
    * \brief Constructor from the projection model parameters
    */
    PinholeCamera(const Parameters& params);

    Camera::ModelType modelType(void) const;
    const std::string& cameraName(void) const;
    int imageWidth(void) const;
    int imageHeight(void) const;

    // Estimates initial intrinsics from chessboard observations.
    void estimateIntrinsics(const cv::Size& boardSize,
                            const std::vector< std::vector<cv::Point3f> >& objectPoints,
                            const std::vector< std::vector<cv::Point2f> >& imagePoints);

    // Lift points from the image plane to the sphere
    virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Lift points from the image plane to the projective space
    void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Projects 3D points to the image plane (Pi function)
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
    //%output p

    // Projects 3D points to the image plane (Pi function)
    // and calculates jacobian
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
                      Eigen::Matrix<double,2,3>& J) const;
    //%output p
    //%output J

    // Applies the projection (no distortion) to an already-undistorted
    // normalized point p_u.
    void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
    //%output p

    // Templated projection for use inside Ceres autodiff cost
    // functions; defined at the bottom of this header.
    template <typename T>
    static void spaceToPlane(const T* const params,
                             const T* const q, const T* const t,
                             const Eigen::Matrix<T, 3, 1>& P,
                             Eigen::Matrix<T, 2, 1>& p);

    // Computes the distortion offset d_u for a normalized point p_u
    // (and optionally its 2x2 Jacobian).
    void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const;
    void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
                    Eigen::Matrix2d& J) const;

    void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
    cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
                                    float fx = -1.0f, float fy = -1.0f,
                                    cv::Size imageSize = cv::Size(0, 0),
                                    float cx = -1.0f, float cy = -1.0f,
                                    cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;

    int parameterCount(void) const;

    const Parameters& getParameters(void) const;
    void setParameters(const Parameters& parameters);

    // Bulk parameter (de)serialization as a flat vector.
    void readParameters(const std::vector<double>& parameterVec);
    void writeParameters(std::vector<double>& parameterVec) const;

    void writeParametersToYamlFile(const std::string& filename) const;

    std::string parametersToString(void) const;

private:
    Parameters mParameters;

    // Cached inverse camera-matrix entries -- presumably precomputed
    // from fx/fy/cx/cy when parameters are set.
    double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
    // True when all distortion coefficients are zero, allowing
    // distortion to be skipped.
    bool m_noDistortion;
};

typedef boost::shared_ptr<PinholeCamera> PinholeCameraPtr;
typedef boost::shared_ptr<const PinholeCamera> PinholeCameraConstPtr;

template <typename T>
void
PinholeCamera::spaceToPlane(const T* const params,
                            const T* const q, const T* const t,
                            const Eigen::Matrix<T, 3, 1>& P,
                            Eigen::Matrix<T, 2, 1>& p)
{
    T P_w[3];
    P_w[0] = T(P(0));
    P_w[1] = T(P(1));
    P_w[2] = T(P(2));

    // Convert quaternion from Eigen convention (x, y, z, w)
    // to Ceres convention (w, x, y, z)
    T q_ceres[4] = {q[3], q[0], q[1], q[2]};

    T P_c[3];
    ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);

    P_c[0] += t[0];
    P_c[1] += t[1];
    P_c[2] += t[2];

    // project 3D object point to the image plane
    T k1 = params[0];
    T k2 = params[1];
    T p1 = params[2];
    T p2 = params[3];
    T fx = params[4];
    T fy = params[5];
    T alpha = T(0); //cameraParams.alpha();
    T cx = params[6];
    T cy = params[7];

    // Transform to model plane
    T u = P_c[0] / P_c[2];
    T v = P_c[1] / P_c[2];

    T rho_sqr = u * u + v * v;
    T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr;
    T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u);
    T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v;

    u = L * u + du;
    v = L * v + dv;
    p(0) = fx * (u + alpha * v) + cx;
    p(1) = fy * v + cy;
}

}

#endif


================================================
FILE: camera_model/include/camodocal/camera_models/ScaramuzzaCamera.h
================================================
#ifndef SCARAMUZZACAMERA_H
#define SCARAMUZZACAMERA_H

#include <opencv2/core/core.hpp>
#include <string>

#include "ceres/rotation.h"
#include "Camera.h"

namespace camodocal
{

#define SCARAMUZZA_POLY_SIZE 5
#define SCARAMUZZA_INV_POLY_SIZE 20

#define SCARAMUZZA_CAMERA_NUM_PARAMS (SCARAMUZZA_POLY_SIZE + SCARAMUZZA_INV_POLY_SIZE + 2 /*center*/ + 3 /*affine*/)

/**
 * Scaramuzza Camera (Omnidirectional)
 * https://sites.google.com/site/scarabotix/ocamcalib-toolbox
 */

// Omnidirectional camera using the Scaramuzza (OCamCalib) model: a
// forward polynomial (poly) maps image radius to ray direction, an
// inverse polynomial (inv_poly) maps ray angle to image radius, and
// an affine transform (C, D, E) plus center (center_x, center_y)
// maps to pixels.
class OCAMCamera: public Camera
{
public:
    // Intrinsic parameter set for the Scaramuzza model, serializable
    // to and from YAML.
    class Parameters: public Camera::Parameters
    {
    public:
        Parameters();

        // Mutable accessors for the affine parameters.
        double& C(void) { return m_C; }
        double& D(void) { return m_D; }
        double& E(void) { return m_E; }

        // Mutable accessors for the distortion center.
        double& center_x(void) { return m_center_x; }
        double& center_y(void) { return m_center_y; }

        // Mutable access to polynomial coefficients (idx is unchecked).
        double& poly(int idx) { return m_poly[idx]; }
        double& inv_poly(int idx) { return m_inv_poly[idx]; }

        // Read-only accessors.
        double C(void) const { return m_C; }
        double D(void) const { return m_D; }
        double E(void) const { return m_E; }

        double center_x(void) const { return m_center_x; }
        double center_y(void) const { return m_center_y; }

        double poly(int idx) const { return m_poly[idx]; }
        double inv_poly(int idx) const { return m_inv_poly[idx]; }

        bool readFromYamlFile(const std::string& filename);
        void writeToYamlFile(const std::string& filename) const;

        Parameters& operator=(const Parameters& other);
        friend std::ostream& operator<< (std::ostream& out, const Parameters& params);

    private:
        double m_poly[SCARAMUZZA_POLY_SIZE];
        double m_inv_poly[SCARAMUZZA_INV_POLY_SIZE];
        double m_C;
        double m_D;
        double m_E;
        double m_center_x;
        double m_center_y;
    };

    OCAMCamera();

    /**
    * \brief Constructor from the projection model parameters
    */
    OCAMCamera(const Parameters& params);

    Camera::ModelType modelType(void) const;
    const std::string& cameraName(void) const;
    int imageWidth(void) const;
    int imageHeight(void) const;

    // Estimates initial intrinsics from chessboard observations.
    void estimateIntrinsics(const cv::Size& boardSize,
                            const std::vector< std::vector<cv::Point3f> >& objectPoints,
                            const std::vector< std::vector<cv::Point2f> >& imagePoints);

    // Lift points from the image plane to the sphere
    void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Lift points from the image plane to the projective space
    void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
    //%output P

    // Projects 3D points to the image plane (Pi function)
    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
    //%output p

    // Projects 3D points to the image plane (Pi function)
    // and calculates jacobian
    //void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
    //                  Eigen::Matrix<double,2,3>& J) const;
    //%output p
    //%output J

    // Applies the projection to an already-undistorted normalized
    // point p_u.
    void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
    //%output p

    // Templated projection/lifting helpers for use inside Ceres
    // autodiff cost functions; defined at the bottom of this header.
    template <typename T>
    static void spaceToPlane(const T* const params,
                             const T* const q, const T* const t,
                             const Eigen::Matrix<T, 3, 1>& P,
                             Eigen::Matrix<T, 2, 1>& p);
    template <typename T>
    static void spaceToSphere(const T* const params,
                              const T* const q, const T* const t,
                              const Eigen::Matrix<T, 3, 1>& P,
                              Eigen::Matrix<T, 3, 1>& P_s);
    template <typename T>
    static void LiftToSphere(const T* const params,
                              const Eigen::Matrix<T, 2, 1>& p,
                              Eigen::Matrix<T, 3, 1>& P);

    template <typename T>
    static void SphereToPlane(const T* const params, const Eigen::Matrix<T, 3, 1>& P,
                               Eigen::Matrix<T, 2, 1>& p);


    void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
    cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
                                    float fx = -1.0f, float fy = -1.0f,
                                    cv::Size imageSize = cv::Size(0, 0),
                                    float cx = -1.0f, float cy = -1.0f,
                                    cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;

    int parameterCount(void) const;

    const Parameters& getParameters(void) const;
    void setParameters(const Parameters& parameters);

    // Bulk parameter (de)serialization as a flat vector.
    void readParameters(const std::vector<double>& parameterVec);
    void writeParameters(std::vector<double>& parameterVec) const;

    void writeParametersToYamlFile(const std::string& filename) const;

    std::string parametersToString(void) const;

private:
    Parameters mParameters;

    // Cached reciprocal of the affine determinant-like factor --
    // presumably 1 / (C - D*E), cf. LiftToSphere; precomputed when
    // parameters are set.
    double m_inv_scale;
};

typedef boost::shared_ptr<OCAMCamera> OCAMCameraPtr;
typedef boost::shared_ptr<const OCAMCamera> OCAMCameraConstPtr;

// Projects a world-frame point P into pixel coordinates p under the
// Scaramuzza model, given the camera pose as a quaternion q (Eigen
// layout x, y, z, w) and translation t. params holds, in order, the
// affine parameters (c, d, e), the image center, the forward
// polynomial and the inverse polynomial (see SCARAMUZZA_* sizes).
// Templated so it can be used with Ceres autodiff Jet types.
//
// Fix: the original divided by the radial norm unconditionally, so a
// point exactly on the optical axis (P_c[0] == P_c[1] == 0) produced
// inf/NaN pixel coordinates. Such points now project to the image
// center, which is the limit of the general formula.
template <typename T>
void
OCAMCamera::spaceToPlane(const T* const params,
                         const T* const q, const T* const t,
                         const Eigen::Matrix<T, 3, 1>& P,
                         Eigen::Matrix<T, 2, 1>& p)
{
    // Transform P from the world frame into the camera frame.
    T P_c[3];
    {
        T P_w[3];
        P_w[0] = T(P(0));
        P_w[1] = T(P(1));
        P_w[2] = T(P(2));

        // Convert quaternion from Eigen convention (x, y, z, w)
        // to Ceres convention (w, x, y, z)
        T q_ceres[4] = {q[3], q[0], q[1], q[2]};

        ceres::QuaternionRotatePoint(q_ceres, P_w, P_c);

        P_c[0] += t[0];
        P_c[1] += t[1];
        P_c[2] += t[2];
    }

    // Affine parameters and image center.
    T c = params[0];
    T d = params[1];
    T e = params[2];
    T xc[2] = { params[3], params[4] };

    // Inverse polynomial coefficients (angle -> image radius).
    T inv_poly[SCARAMUZZA_INV_POLY_SIZE];
    for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
        inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i];

    // Distance of the point from the optical axis.
    T norm_sqr = P_c[0] * P_c[0] + P_c[1] * P_c[1];
    T norm = T(0.0);
    if (norm_sqr > T(0.0))
        norm = sqrt(norm_sqr);

    // Degenerate case: on-axis points map to the image center
    // (the general formula below would divide by zero).
    if (!(norm > T(0.0)))
    {
        p(0) = xc[0];
        p(1) = xc[1];
        return;
    }

    // Evaluate the inverse polynomial rho(theta) by accumulating
    // powers of theta.
    T theta = atan2(-P_c[2], norm);
    T rho = T(0.0);
    T theta_i = T(1.0);

    for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
    {
        rho += theta_i * inv_poly[i];
        theta_i *= theta;
    }

    // Scale the in-plane direction to radius rho.
    T invNorm = T(1.0) / norm;
    T xn[2] = {
        P_c[0] * invNorm * rho,
        P_c[1] * invNorm * rho
    };

    // Affine transform to pixel coordinates.
    p(0) = xn[0] * c + xn[1] * d + xc[0];
    p(1) = xn[0] * e + xn[1]     + xc[1];
}

// Maps a world-frame point P onto the unit sphere in the camera
// frame (P_s), given the camera pose as a quaternion q (Eigen layout
// x, y, z, w) and translation t. The intrinsics in params are not
// used: only the pose matters here. Templated for Ceres autodiff.
template <typename T>
void
OCAMCamera::spaceToSphere(const T* const params,
                          const T* const q, const T* const t,
                          const Eigen::Matrix<T, 3, 1>& P,
                          Eigen::Matrix<T, 3, 1>& P_s)
{
    // Transform the point into the camera frame.
    T cam_pt[3];
    {
        T world_pt[3] = {T(P(0)), T(P(1)), T(P(2))};

        // Eigen quaternion layout (x, y, z, w) -> Ceres layout
        // (w, x, y, z).
        T q_wxyz[4] = {q[3], q[0], q[1], q[2]};

        ceres::QuaternionRotatePoint(q_wxyz, world_pt, cam_pt);

        cam_pt[0] += t[0];
        cam_pt[1] += t[1];
        cam_pt[2] += t[2];
    }

    // Normalize onto the unit sphere (norm stays 0 only for the
    // degenerate origin point).
    T norm_sqr = cam_pt[0] * cam_pt[0] + cam_pt[1] * cam_pt[1] + cam_pt[2] * cam_pt[2];
    T norm = T(0.0);
    if (norm_sqr > T(0.0))
        norm = sqrt(norm_sqr);

    P_s(0) = cam_pt[0] / norm;
    P_s(1) = cam_pt[1] / norm;
    P_s(2) = cam_pt[2] / norm;
}

template <typename T>
void
OCAMCamera::LiftToSphere(const T* const params,
                          const Eigen::Matrix<T, 2, 1>& p,
                          Eigen::Matrix<T, 3, 1>& P)
{
    // Unpack the affine distortion parameters and the principal point.
    const T c = params[0];
    const T d = params[1];
    const T e = params[2];
    const T center[2] = { params[3], params[4] };

    // Forward (projection) polynomial coefficients.
    T poly[SCARAMUZZA_POLY_SIZE];
    for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
        poly[i] = params[5 + i];

    // Pixel coordinates relative to the principal point.
    const T u = T(p(0)) - center[0];
    const T v = T(p(1)) - center[1];

    // Undo the affine transform [c d; e 1].
    const T inv_scale = T(1.0) / (c - d * e);
    const T ua = inv_scale * (u - d * v);
    const T va = inv_scale * (-e * u + c * v);

    // Evaluate z = sum_i poly[i] * phi^i over the corrected radius phi,
    // skipping the first-order term (the Scaramuzza model fixes it).
    const T phi = sqrt(ua * ua + va * va);
    T z = T(0.0);
    T phi_pow = T(1.0);
    for (int i = 0; i < SCARAMUZZA_POLY_SIZE; i++)
    {
        if (i != 1)
            z += phi_pow * poly[i];
        phi_pow *= phi;
    }

    // Normalize the back-projected ray onto the unit sphere.
    // NOTE(review): the ray uses the raw centered coordinates (u, v),
    // not the affine-corrected (ua, va) -- kept as in the original.
    const T ray_norm = sqrt(u * u + v * v + z * z);
    P << u / ray_norm, v / ray_norm, -z / ray_norm;
}

template <typename T>
void OCAMCamera::SphereToPlane(const T* const params, const Eigen::Matrix<T, 3, 1>& P,
                               Eigen::Matrix<T, 2, 1>& p) {
    const T x = T(P(0));
    const T y = T(P(1));
    const T z = T(P(2));

    // Affine distortion parameters and principal point.
    const T c = params[0];
    const T d = params[1];
    const T e = params[2];
    const T cx = params[3];
    const T cy = params[4];

    // Inverse polynomial coefficients (stored after the forward polynomial).
    T inv_poly[SCARAMUZZA_INV_POLY_SIZE];
    for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
        inv_poly[i] = params[5 + SCARAMUZZA_POLY_SIZE + i];

    // Radial distance of the ray from the optical axis.
    T norm = T(0.0);
    const T norm_sqr = x * x + y * y;
    if (norm_sqr > T(0.0)) norm = sqrt(norm_sqr);

    // Image radius rho(theta) via power accumulation of the inverse poly.
    const T theta = atan2(-z, norm);
    T rho = T(0.0);
    T theta_pow = T(1.0);
    for (int i = 0; i < SCARAMUZZA_INV_POLY_SIZE; i++) {
        rho += theta_pow * inv_poly[i];
        theta_pow *= theta;
    }

    // NOTE(review): a point on the optical axis (norm == 0) divides by
    // zero here, as in the original.
    const T invNorm = T(1.0) / norm;
    const T xn = x * invNorm * rho;
    const T yn = y * invNorm * rho;

    // Apply the affine transform and shift by the principal point.
    p(0) = xn * c + yn * d + cx;
    p(1) = xn * e + yn + cy;
}
}

#endif


================================================
FILE: camera_model/include/camodocal/chessboard/Chessboard.h
================================================
#ifndef CHESSBOARD_H
#define CHESSBOARD_H

#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>

namespace camodocal
{

// forward declarations
class ChessboardCorner;
typedef boost::shared_ptr<ChessboardCorner> ChessboardCornerPtr;
class ChessboardQuad;
typedef boost::shared_ptr<ChessboardQuad> ChessboardQuadPtr;

// Detects the inner corners of a planar chessboard calibration pattern
// in an image.  findCorners() runs the detection; results are exposed
// through getCorners()/cornersFound().  Implementations live in
// Chessboard.cc (not visible here) -- member comments below describe
// intent inferred from the signatures; confirm against the .cc file.
class Chessboard
{
public:
    // boardSize: inner-corner grid of the pattern; image: picture to search.
    // NOTE(review): image is taken by non-const reference -- confirm in
    // Chessboard.cc whether it is modified.
    Chessboard(cv::Size boardSize, cv::Mat& image);

    // Run corner detection; useOpenCV presumably selects OpenCV's
    // built-in detector over the improved one below -- confirm.
    void findCorners(bool useOpenCV = false);
    // Detected corner locations (meaningful when cornersFound() is true).
    const std::vector<cv::Point2f>& getCorners(void) const;
    // Whether the last findCorners() call succeeded.
    bool cornersFound(void) const;

    const cv::Mat& getImage(void) const;
    const cv::Mat& getSketch(void) const;

private:
    // Detection driver; dispatches on useOpenCV.
    bool findChessboardCorners(const cv::Mat& image,
                               const cv::Size& patternSize,
                               std::vector<cv::Point2f>& corners,
                               int flags, bool useOpenCV);

    bool findChessboardCornersImproved(const cv::Mat& image,
                                       const cv::Size& patternSize,
                                       std::vector<cv::Point2f>& corners,
                                       int flags);

    void cleanFoundConnectedQuads(std::vector<ChessboardQuadPtr>& quadGroup, cv::Size patternSize);

    void findConnectedQuads(std::vector<ChessboardQuadPtr>& quads,
                            std::vector<ChessboardQuadPtr>& group,
                            int group_idx, int dilation);

//    int checkQuadGroup(std::vector<ChessboardQuadPtr>& quadGroup,
//                       std::vector<ChessboardCornerPtr>& outCorners,
//                       cv::Size patternSize);

    void labelQuadGroup(std::vector<ChessboardQuadPtr>& quad_group,
                        cv::Size patternSize, bool firstRun);

    void findQuadNeighbors(std::vector<ChessboardQuadPtr>& quads, int dilation);

    int augmentBestRun(std::vector<ChessboardQuadPtr>& candidateQuads, int candidateDilation,
                       std::vector<ChessboardQuadPtr>& existingQuads, int existingDilation);

    void generateQuads(std::vector<ChessboardQuadPtr>& quads,
                       cv::Mat& image, int flags,
                       int dilation, bool firstRun);

    bool checkQuadGroup(std::vector<ChessboardQuadPtr>& quads,
                        std::vector<ChessboardCornerPtr>& corners,
                        cv::Size patternSize);

    void getQuadrangleHypotheses(const std::vector< std::vector<cv::Point> >& contours,
                                 std::vector< std::pair<float, int> >& quads,
                                 int classId) const;

    bool checkChessboard(const cv::Mat& image, cv::Size patternSize) const;

    bool checkBoardMonotony(std::vector<ChessboardCornerPtr>& corners,
                            cv::Size patternSize);

    bool matchCorners(ChessboardQuadPtr& quad1, int corner1,
                      ChessboardQuadPtr& quad2, int corner2) const;

    cv::Mat mImage;                    // input image
    cv::Mat mSketch;                   // presumably a detection visualisation -- confirm
    std::vector<cv::Point2f> mCorners; // detected corners
    cv::Size mBoardSize;               // expected inner-corner grid size
    bool mCornersFound;                // result flag of the last detection
};

}

#endif


================================================
FILE: camera_model/include/camodocal/chessboard/ChessboardCorner.h
================================================
#ifndef CHESSBOARDCORNER_H
#define CHESSBOARDCORNER_H

#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>

namespace camodocal
{

class ChessboardCorner;
typedef boost::shared_ptr<ChessboardCorner> ChessboardCornerPtr;

class ChessboardCorner
{
public:
    ChessboardCorner() : row(0), column(0), needsNeighbor(true), count(0) {}

    float meanDist(int &n) const
    {
        float sum = 0;
        n = 0;
        for (int i = 0; i < 4; ++i)
        {
            if (neighbors[i].get())
            {
                float dx = neighbors[i]->pt.x - pt.x;
                float dy = neighbors[i]->pt.y - pt.y;
                sum += sqrt(dx*dx + dy*dy);
                n++;
            }
        }
        return sum / std::max(n, 1);
    }

    cv::Point2f pt;                     // X and y coordinates
    int row;                            // Row and column of the corner
    int column;                         // in the found pattern
    bool needsNeighbor;                 // Does the corner require a neighbor?
    int count;                          // number of corner neighbors
    ChessboardCornerPtr neighbors[4];   // pointer to all corner neighbors
};

}

#endif


================================================
FILE: camera_model/include/camodocal/chessboard/ChessboardQuad.h
================================================
#ifndef CHESSBOARDQUAD_H
#define CHESSBOARDQUAD_H

#include <boost/shared_ptr.hpp>

#include "camodocal/chessboard/ChessboardCorner.h"

namespace camodocal
{

class ChessboardQuad;
typedef boost::shared_ptr<ChessboardQuad> ChessboardQuadPtr;

// A candidate chessboard square found during detection, linked to its
// corner points and to the adjacent quads.
class ChessboardQuad
{
public:
    ChessboardQuad() : count(0), group_idx(-1), edge_len(FLT_MAX), labeled(false) {}

    int count;                         // Number of quad neighbors
    int group_idx;                     // Quad group ID (-1 = unassigned)
    float edge_len;                    // Smallest side length^2
    ChessboardCornerPtr corners[4];    // Coordinates of quad corners
    ChessboardQuadPtr neighbors[4];    // Pointers of quad neighbors
    bool labeled;                      // Has this quad been labeled?
};

}

#endif


================================================
FILE: camera_model/include/camodocal/chessboard/Spline.h
================================================
/*  dynamo:- Event driven molecular dynamics simulator
    http://www.marcusbannerman.co.uk/dynamo
    Copyright (C) 2011  Marcus N Campbell Bannerman <m.bannerman@gmail.com>

    This program is free software: you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    version 3 as published by the Free Software Foundation.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#pragma once

#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include <exception>

namespace ublas = boost::numeric::ublas;

// Interpolating 1-D spline (linear or cubic) over a set of (x, y)
// sample points.  The points live in the privately inherited vector;
// the per-interval coefficients are rebuilt lazily by generate()
// whenever points, type or boundary conditions change (_valid false).
class Spline : private std::vector<std::pair<double, double> >
{
public:
  //The boundary conditions available
  enum BC_type {
	FIXED_1ST_DERIV_BC,
	FIXED_2ND_DERIV_BC,
	PARABOLIC_RUNOUT_BC
  };

  enum Spline_type {
	LINEAR,
	CUBIC
  };

  //Constructor takes the boundary conditions as arguments, this
  //sets the first derivative (gradient) at the lower and upper
  //end points
  Spline():
	_valid(false),
	_BCLow(FIXED_2ND_DERIV_BC), _BCHigh(FIXED_2ND_DERIV_BC),
	_BCLowVal(0), _BCHighVal(0),
	_type(CUBIC)
  {}

  typedef std::vector<std::pair<double, double> > base;
  typedef base::const_iterator const_iterator;

  //Standard STL read-only container stuff
  const_iterator begin() const { return base::begin(); }
  const_iterator end() const { return base::end(); }
  void clear() { _valid = false; base::clear(); _data.clear(); }
  size_t size() const { return base::size(); }
  size_t max_size() const { return base::max_size(); }
  size_t capacity() const { return base::capacity(); }
  bool empty() const { return base::empty(); }

  //Add a point to the spline, and invalidate it so its
  //recalculated on the next access
  inline void addPoint(double x, double y)
  {
	_valid = false;
	base::push_back(std::pair<double, double>(x,y));
  }

  //Reset the boundary conditions
  inline void setLowBC(BC_type BC, double val = 0)
  { _BCLow = BC; _BCLowVal = val; _valid = false; }

  inline void setHighBC(BC_type BC, double val = 0)
  { _BCHigh = BC; _BCHighVal = val; _valid = false; }

  void setType(Spline_type type) { _type = type; _valid = false; }

  //Check if the spline has been calculated, then generate the
  //spline interpolated value
  double operator()(double xval)
  {
	if (!_valid) generate();

	//Special cases when we're outside the range of the spline points
	if (xval <= x(0)) return lowCalc(xval);
	if (xval >= x(size()-1)) return highCalc(xval);

	//Check all intervals except the last one
	for (std::vector<SplineData>::const_iterator iPtr = _data.begin();
		 iPtr != _data.end()-1; ++iPtr)
		if ((xval >= iPtr->x) && (xval <= (iPtr+1)->x))
		  return splineCalc(iPtr, xval);

	return splineCalc(_data.end() - 1, xval);
  }

private:

  ///////PRIVATE DATA MEMBERS
  //Per-interval cubic coefficients; evaluated as
  //((a*lx + b)*lx + c)*lx + d with lx = xval - x.
  struct SplineData { double x,a,b,c,d; };
  //vector of calculated spline data
  std::vector<SplineData> _data;
  //Second derivative at each point
  ublas::vector<double> _ddy;
  //Tracks whether the spline parameters have been calculated for
  //the current set of points
  bool _valid;
  //The boundary conditions
  BC_type _BCLow, _BCHigh;
  //The values of the boundary conditions
  double _BCLowVal, _BCHighVal;

  Spline_type _type;

  ///////PRIVATE FUNCTIONS
  //Function to calculate the value of a given spline at a point xval
  inline double splineCalc(std::vector<SplineData>::const_iterator i, double xval)
  {
	const double lx = xval - i->x;
	return ((i->a * lx + i->b) * lx + i->c) * lx + i->d;
  }

  //Extrapolation below the first sample point.
  inline double lowCalc(double xval)
  {
	const double lx = xval - x(0);

	//NOTE(review): the LINEAR branch extrapolates with _BCHighVal here;
	//_BCLowVal may have been intended -- confirm against upstream dynamo.
	if (_type == LINEAR)
	  return lx * _BCHighVal + y(0);

	const double firstDeriv = (y(1) - y(0)) / h(0) - 2 * h(0) * (_data[0].b + 2 * _data[1].b) / 6;

	switch(_BCLow)
	  {
	  case FIXED_1ST_DERIV_BC:
		return lx * _BCLowVal + y(0);
	  case FIXED_2ND_DERIV_BC:
		  return lx * lx * _BCLowVal + firstDeriv * lx + y(0);
	  case PARABOLIC_RUNOUT_BC:
		return lx * lx * _ddy[0] + lx * firstDeriv  + y(0);
	  }
	throw std::runtime_error("Unknown BC");
  }

  //Extrapolation above the last sample point.
  inline double highCalc(double xval)
  {
	const double lx = xval - x(size() - 1);

	if (_type == LINEAR)
	  return lx * _BCHighVal + y(size() - 1);

	const double firstDeriv = 2 * h(size() - 2) * (_ddy[size() - 2] + 2 * _ddy[size() - 1]) / 6 + (y(size() - 1) - y(size() - 2)) / h(size() - 2);

	switch(_BCHigh)
	  {
	  case FIXED_1ST_DERIV_BC:
		return lx * _BCHighVal + y(size() - 1);
	  case FIXED_2ND_DERIV_BC:
		return lx * lx * _BCHighVal + firstDeriv * lx + y(size() - 1);
	  case PARABOLIC_RUNOUT_BC:
		return lx * lx * _ddy[size()-1] + lx * firstDeriv  + y(size() - 1);
	  }
	throw std::runtime_error("Unknown BC");
  }

  //These just provide access to the point data in a clean way
  inline double x(size_t i) const { return operator[](i).first; }
  inline double y(size_t i) const { return operator[](i).second; }
  inline double h(size_t i) const { return x(i+1) - x(i); }

  //Invert a arbitrary matrix using the boost ublas library
  template<class T>
  bool InvertMatrix(ublas::matrix<T> A,
		ublas::matrix<T>& inverse)
  {
	using namespace ublas;

	// create a permutation matrix for the LU-factorization
	permutation_matrix<std::size_t> pm(A.size1());

	// perform LU-factorization
	int res = lu_factorize(A,pm);
		if( res != 0 ) return false;

	// create identity matrix of "inverse"
	inverse.assign(ublas::identity_matrix<T>(A.size1()));

	// backsubstitute to get the inverse
	lu_substitute(A, pm, inverse);

	return true;
  }

  //This function will recalculate the spline parameters and store
  //them in _data, ready for spline interpolation
  void generate()
  {
	if (size() < 2)
	  throw std::runtime_error("Spline requires at least 2 points");

	//If any spline points are at the same x location, we have to
	//just slightly seperate them
	{
	  bool testPassed(false);
	  while (!testPassed)
		{
		  testPassed = true;
		  std::sort(base::begin(), base::end());

		  for (base::iterator iPtr = base::begin(); iPtr != base::end() - 1; ++iPtr)
		if (iPtr->first == (iPtr+1)->first)
		  {
			if ((iPtr+1)->first != 0)
			  (iPtr+1)->first += (iPtr+1)->first
			* std::numeric_limits<double>::epsilon() * 10;
			else
			  (iPtr+1)->first = std::numeric_limits<double>::epsilon() * 10;
			testPassed = false;
			break;
		  }
		}
	}

	const size_t e = size() - 1;

	switch (_type)
	  {
	  case LINEAR:
		{
		  _data.resize(e);
		  for (size_t i(0); i < e; ++i)
		{
		  _data[i].x = x(i);
		  _data[i].a = 0;
		  _data[i].b = 0;
		  _data[i].c = (y(i+1) - y(i)) / (x(i+1) - x(i));
		  _data[i].d = y(i);
		}
		  break;
		}
	  case CUBIC:
		{
		  //Tridiagonal system for the second derivatives.
		  //NOTE(review): A is consistently indexed as A(column, row), i.e.
		  //built transposed; this pairs with the row-vector product
		  //ublas::prod(C, AInv) below, so the two transpositions cancel.
		  ublas::matrix<double> A(size(), size());
		  for (size_t yv(0); yv <= e; ++yv)
		for (size_t xv(0); xv <= e; ++xv)
		  A(xv,yv) = 0;

		  for (size_t i(1); i < e; ++i)
		{
		  A(i-1,i) = h(i-1);
		  A(i,i) = 2 * (h(i-1) + h(i));
		  A(i+1,i) = h(i);
		}

		  ublas::vector<double> C(size());
		  for (size_t xv(0); xv <= e; ++xv)
		C(xv) = 0;

		  for (size_t i(1); i < e; ++i)
		C(i) = 6 *
		  ((y(i+1) - y(i)) / h(i)
		   - (y(i) - y(i-1)) / h(i-1));

		  //Boundary conditions
		  switch(_BCLow)
		{
		case FIXED_1ST_DERIV_BC:
		  C(0) = 6 * ((y(1) - y(0)) / h(0) - _BCLowVal);
		  A(0,0) = 2 * h(0);
		  A(1,0) = h(0);
		  break;
		case FIXED_2ND_DERIV_BC:
		  C(0) = _BCLowVal;
		  A(0,0) = 1;
		  break;
		case PARABOLIC_RUNOUT_BC:
		  C(0) = 0; A(0,0) = 1; A(1,0) = -1;
		  break;
		}

		  switch(_BCHigh)
		{
		case FIXED_1ST_DERIV_BC:
		  C(e) = 6 * (_BCHighVal - (y(e) - y(e-1)) / h(e-1));
		  A(e,e) = 2 * h(e - 1);
		  A(e-1,e) = h(e - 1);
		  break;
		case FIXED_2ND_DERIV_BC:
		  C(e) = _BCHighVal;
		  A(e,e) = 1;
		  break;
		case PARABOLIC_RUNOUT_BC:
		  C(e) = 0; A(e,e) = 1; A(e-1,e) = -1;
		  break;
		}

		  //Solve for the second derivatives by explicit inversion.
		  //NOTE(review): InvertMatrix's return value is ignored; a
		  //singular system silently yields garbage coefficients.
		  ublas::matrix<double> AInv(size(), size());
		  InvertMatrix(A,AInv);

		  _ddy = ublas::prod(C, AInv);

		  _data.resize(size()-1);
		  for (size_t i(0); i < e; ++i)
		{
		  _data[i].x = x(i);
		  _data[i].a = (_ddy(i+1) - _ddy(i)) / (6 * h(i));
		  _data[i].b = _ddy(i) / 2;
		  _data[i].c = (y(i+1) - y(i)) / h(i) - _ddy(i+1) * h(i) / 6 - _ddy(i) * h(i) / 3;
		  _data[i].d = y(i);
		}
		}
	  }
	_valid = true;
  }
};


================================================
FILE: camera_model/include/camodocal/gpl/EigenQuaternionParameterization.h
================================================
#ifndef EIGENQUATERNIONPARAMETERIZATION_H
#define EIGENQUATERNIONPARAMETERIZATION_H

#include "ceres/local_parameterization.h"

namespace camodocal
{

// Ceres LocalParameterization for quaternions stored in the Eigen
// coefficient ordering (x, y, z, w): 4 global parameters mapped to a
// 3-dimensional local (tangent) space.  Plus/ComputeJacobian are
// implemented in the .cc file (not visible here).
class EigenQuaternionParameterization : public ceres::LocalParameterization
{
public:
    virtual ~EigenQuaternionParameterization() {}
    virtual bool Plus(const double* x,
                      const double* delta,
                      double* x_plus_delta) const;
    virtual bool ComputeJacobian(const double* x,
                                 double* jacobian) const;
    virtual int GlobalSize() const { return 4; }
    virtual int LocalSize() const { return 3; }

private:
    // Quaternion product zw = z * w with (x, y, z, w) storage.
    template<typename T>
    void EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const;
};


// Hamilton product zw = z * w with quaternions stored in the Eigen
// coefficient order (x, y, z, w); index 3 holds the scalar part.
// Component i of the vector part is z_s*w_v + w_s*z_v + (z_v x w_v).
template<typename T>
void
EigenQuaternionParameterization::EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const
{
    zw[0] = z[3] * w[0] + z[0] * w[3] + z[1] * w[2] - z[2] * w[1];
    zw[1] = z[3] * w[1] - z[0] * w[2] + z[1] * w[3] + z[2] * w[0];
    zw[2] = z[3] * w[2] + z[0] * w[1] - z[1] * w[0] + z[2] * w[3];
    zw[3] = z[3] * w[3] - z[0] * w[0] - z[1] * w[1] - z[2] * w[2];
}

}

#endif



================================================
FILE: camera_model/include/camodocal/gpl/EigenUtils.h
================================================
#ifndef EIGENUTILS_H
#define EIGENUTILS_H

#include <eigen3/Eigen/Dense>

#include "ceres/rotation.h"
#include "camodocal/gpl/gpl.h"

namespace camodocal
{

// Returns the 3D cross product skew symmetric matrix of a given 3D vector
template<typename T>
Eigen::Matrix<T, 3, 3> skew(const Eigen::Matrix<T, 3, 1>& vec)
{
    // Build [v]_x so that skew(v) * u == v.cross(u).
    Eigen::Matrix<T, 3, 3> S;
    S <<    T(0), -vec(2),  vec(1),
          vec(2),    T(0), -vec(0),
         -vec(1),  vec(0),    T(0);
    return S;
}

// Matrix square root via eigendecomposition.
// NOTE(review): SelfAdjointEigenSolver assumes A is self-adjoint
// (symmetric/Hermitian); the result is meaningless for other inputs.
template<typename Derived>
typename Eigen::MatrixBase<Derived>::PlainObject sqrtm(const Eigen::MatrixBase<Derived>& A)
{
    Eigen::SelfAdjointEigenSolver<typename Derived::PlainObject> es(A);

    return es.operatorSqrt();
}

template<typename T>
Eigen::Matrix<T, 3, 3> AngleAxisToRotationMatrix(const Eigen::Matrix<T, 3, 1>& rvec)
{
    // The rotation angle is the magnitude of the rotation vector.
    const T angle = rvec.norm();
    if (angle == T(0))
    {
        // Zero rotation: avoid normalizing a zero vector.
        return Eigen::Matrix<T, 3, 3>::Identity();
    }

    return Eigen::AngleAxis<T>(angle, rvec.normalized()).toRotationMatrix();
}

template<typename T>
Eigen::Quaternion<T> AngleAxisToQuaternion(const Eigen::Matrix<T, 3, 1>& rvec)
{
    // Reuse the rotation-matrix conversion and wrap it in a quaternion.
    return Eigen::Quaternion<T>(AngleAxisToRotationMatrix<T>(rvec));
}

template<typename T>
void AngleAxisToQuaternion(const Eigen::Matrix<T, 3, 1>& rvec, T* q)
{
    // Write the coefficients in the Eigen storage order (x, y, z, w).
    const Eigen::Quaternion<T> quat = AngleAxisToQuaternion<T>(rvec);
    q[0] = quat.x();
    q[1] = quat.y();
    q[2] = quat.z();
    q[3] = quat.w();
}

template<typename T>
Eigen::Matrix<T, 3, 1> RotationToAngleAxis(const Eigen::Matrix<T, 3, 3> & rmat)
{
    // Encode the rotation as a rotation vector: axis scaled by angle.
    Eigen::AngleAxis<T> aa;
    aa.fromRotationMatrix(rmat);
    return aa.axis() * aa.angle();
}

template<typename T>
void QuaternionToAngleAxis(const T* const q, Eigen::Matrix<T, 3, 1>& rvec)
{
    Eigen::Quaternion<T> quat(q[3], q[0], q[1], q[2]);

    Eigen::Matrix<T, 3, 3> rmat = quat.toRotationMatrix();

    Eigen::AngleAxis<T> angleaxis;
    angleaxis.fromRotationMatrix(rmat);

    rvec = angleaxis.angle() * angleaxis.axis();
}

template<typename T>
Eigen::Matrix<T, 3, 3> QuaternionToRotation(const T* const q)
{
    // Ceres fills R in row-major order from a (w, x, y, z) quaternion.
    T R[9];
    ceres::QuaternionToRotation(q, R);

    Eigen::Matrix<T, 3, 3> rmat;
    for (int r = 0; r < 3; ++r)
    {
        for (int c = 0; c < 3; ++c)
        {
            rmat(r, c) = R[3 * r + c];
        }
    }

    return rmat;
}

// Thin forwarding wrapper: converts quaternion q into a row-major 3x3
// rotation matrix written to rot, via Ceres.
template<typename T>
void QuaternionToRotation(const T* const q, T* rot)
{
    ceres::QuaternionToRotation(q, rot);
}

template<typename T>
Eigen::Matrix<T,4,4> QuaternionMultMatLeft(const Eigen::Quaternion<T>& q)
{
    // 4x4 left-multiplication matrix of q for quaternion coefficient
    // vectors stacked in the Eigen order (x, y, z, w).
    Eigen::Matrix<T,4,4> L;
    L <<  q.w(), -q.z(),  q.y(), q.x(),
          q.z(),  q.w(), -q.x(), q.y(),
         -q.y(),  q.x(),  q.w(), q.z(),
         -q.x(), -q.y(), -q.z(), q.w();
    return L;
}

template<typename T>
Eigen::Matrix<T,4,4> QuaternionMultMatRight(const Eigen::Quaternion<T>& q)
{
    // 4x4 right-multiplication matrix of q for quaternion coefficient
    // vectors stacked in the Eigen order (x, y, z, w).
    Eigen::Matrix<T,4,4> R;
    R <<  q.w(),  q.z(), -q.y(), q.x(),
         -q.z(),  q.w(),  q.x(), q.y(),
          q.y(), -q.x(),  q.w(), q.z(),
         -q.x(), -q.y(), -q.z(), q.w();
    return R;
}

/// @param theta - rotation about screw axis
/// @param d - projection of tvec on the rotation axis
/// @param l - screw axis direction
/// @param m - screw axis moment
template<typename T>
void AngleAxisAndTranslationToScrew(const Eigen::Matrix<T, 3, 1>& rvec,
                                    const Eigen::Matrix<T, 3, 1>& tvec,
                                    T& theta, T& d,
                                    Eigen::Matrix<T, 3, 1>& l,
                                    Eigen::Matrix<T, 3, 1>& m)
{
    // Decompose (rvec, tvec) into screw parameters: rotation angle theta,
    // pitch d (translation along the axis), axis direction l and moment m.

    theta = rvec.norm();
    if (theta == 0)
    {
        // Degenerate case: with no rotation the screw axis is undefined.
        // Zero all outputs and bail out.  BUGFIX: previously execution
        // fell through, normalized a zero vector and divided by tan(0),
        // producing NaN outputs despite the warning claiming "Returned 0".
        l.setZero();
        m.setZero();
        d = T(0);
        std::cout << "Warning: Undefined screw! Returned 0. " << std::endl;
        return;
    }

    l = rvec.normalized();

    Eigen::Matrix<T, 3, 1> t = tvec;

    // Pitch: projection of the translation onto the rotation axis.
    d = t.transpose() * l;

    // point on screw axis - projection of origin on screw axis
    Eigen::Matrix<T, 3, 1> c;
    c = 0.5 * (t - d * l + (1.0 / tan(theta / 2.0) * l).cross(t));

    // c and hence the screw axis is not defined if theta is either 0 or M_PI
    m = c.cross(l);
}

template<typename T>
Eigen::Matrix<T, 3, 3> RPY2mat(T roll, T pitch, T yaw)
{
    // Rotation matrix for ZYX Euler angles: R = Rz(yaw) Ry(pitch) Rx(roll).
    const T cr = cos(roll),  sr = sin(roll);
    const T cp = cos(pitch), sp = sin(pitch);
    const T cy = cos(yaw),   sy = sin(yaw);

    Eigen::Matrix<T, 3, 3> m;
    m << cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr,
         sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr,
         -sp,     cp * sr,                cp * cr;
    return m;
}

template<typename T>
void mat2RPY(const Eigen::Matrix<T, 3, 3>& m, T& roll, T& pitch, T& yaw)
{
    // Recover ZYX Euler angles from a rotation matrix (inverse of RPY2mat).
    roll  = atan2(m(2,1), m(2,2));
    pitch = atan2(-m(2,0), sqrt(m(2,1) * m(2,1) + m(2,2) * m(2,2)));
    yaw   = atan2(m(1,0), m(0,0));
}

template<typename T>
Eigen::Matrix<T, 4, 4> homogeneousTransform(const Eigen::Matrix<T, 3, 3>& R, const Eigen::Matrix<T, 3, 1>& t)
{
    // Assemble the 4x4 homogeneous matrix [R t; 0 1].
    Eigen::Matrix<T, 4, 4> H = Eigen::Matrix<T, 4, 4>::Identity();
    H.block(0,0,3,3) = R;
    H.block(0,3,3,1) = t;
    return H;
}

template<typename T>
Eigen::Matrix<T, 4, 4> poseWithCartesianTranslation(const T* const q, const T* const p)
{
    // 4x4 pose from a Ceres-convention quaternion q (w, x, y, z) and a
    // Cartesian translation p.
    Eigen::Matrix<T, 4, 4> pose = Eigen::Matrix<T, 4, 4>::Identity();

    // Ceres writes the rotation matrix in row-major order.
    T rotation[9];
    ceres::QuaternionToRotation(q, rotation);
    for (int r = 0; r < 3; ++r)
    {
        for (int c = 0; c < 3; ++c)
        {
            pose(r, c) = rotation[3 * r + c];
        }
    }

    pose(0,3) = p[0];
    pose(1,3) = p[1];
    pose(2,3) = p[2];

    return pose;
}

template<typename T>
Eigen::Matrix<T, 4, 4> poseWithSphericalTranslation(const T* const q, const T* const p, const T scale = T(1.0))
{
    // 4x4 pose whose translation is given in spherical coordinates:
    // p[0] = polar angle theta, p[1] = azimuth phi, radius = scale.
    Eigen::Matrix<T, 4, 4> pose = Eigen::Matrix<T, 4, 4>::Identity();

    // Ceres writes the rotation matrix in row-major order.
    T rotation[9];
    ceres::QuaternionToRotation(q, rotation);
    for (int r = 0; r < 3; ++r)
    {
        for (int c = 0; c < 3; ++c)
        {
            pose(r, c) = rotation[3 * r + c];
        }
    }

    const T theta = p[0];
    const T phi = p[1];
    pose(0,3) = sin(theta) * cos(phi) * scale;
    pose(1,3) = sin(theta) * sin(phi) * scale;
    pose(2,3) = cos(theta) * scale;

    return pose;
}

// Returns the Sampson error of a given essential matrix and 2 image points
template<typename T>
T sampsonError(const Eigen::Matrix<T, 3, 3>& E,
               const Eigen::Matrix<T, 3, 1>& p1,
               const Eigen::Matrix<T, 3, 1>& p2)
{
    const Eigen::Matrix<T, 3, 1> Ex1 = E * p1;
    const Eigen::Matrix<T, 3, 1> Etx2 = E.transpose() * p2;

    // Epipolar residual x2^T E x1.
    const T x2tEx1 = p2.dot(Ex1);

    // First-order (Sampson) approximation of the geometric error.
    return square(x2tEx1) / (square(Ex1(0,0)) + square(Ex1(1,0)) + square(Etx2(0,0)) + square(Etx2(1,0)));
}

// Returns the Sampson error of a given rotation/translation and 2 image points
template<typename T>
T sampsonError(const Eigen::Matrix<T, 3, 3>& R,
               const Eigen::Matrix<T, 3, 1>& t,
               const Eigen::Matrix<T, 3, 1>& p1,
               const Eigen::Matrix<T, 3, 1>& p2)
{
    // Build the essential matrix E = [t]_x R and evaluate the Sampson
    // error exactly as the essential-matrix overload does.
    const Eigen::Matrix<T, 3, 3> E = skew(t) * R;

    const Eigen::Matrix<T, 3, 1> Ex1 = E * p1;
    const Eigen::Matrix<T, 3, 1> Etx2 = E.transpose() * p2;

    const T x2tEx1 = p2.dot(Ex1);

    return square(x2tEx1) / (square(Ex1(0,0)) + square(Ex1(1,0)) + square(Etx2(0,0)) + square(Etx2(1,0)));
}

// Returns the Sampson error of a given rotation/translation and 2 image points
template<typename T>
T sampsonError(const Eigen::Matrix<T, 4, 4>& H,
               const Eigen::Matrix<T, 3, 1>& p1,
               const Eigen::Matrix<T, 3, 1>& p2)
{
    // Split the homogeneous transform into R and t, then delegate.
    const Eigen::Matrix<T, 3, 3> rot = H.block(0, 0, 3, 3);
    const Eigen::Matrix<T, 3, 1> trans = H.block(0, 3, 3, 1);

    return sampsonError(rot, trans, p1, p2);
}

template<typename T>
Eigen::Matrix<T, 3, 1>
transformPoint(const Eigen::Matrix<T, 4, 4>& H, const Eigen::Matrix<T, 3, 1>& P)
{
    // Apply H = [R t; 0 1] to P: returns R * P + t.
    return H.block(0, 0, 3, 3) * P + H.block(0, 3, 3, 1);
}

template<typename T>
Eigen::Matrix<T, 4, 4>
estimate3DRigidTransform(const std::vector<Eigen::Matrix<T, 3, 1>, Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >& points1,
                         const std::vector<Eigen::Matrix<T, 3, 1>, Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >& points2)
{
    // Least-squares rigid transform (Kabsch / SVD) mapping points1 onto
    // points2.  NOTE(review): assumes both lists are non-empty and of
    // equal length, as the original did.
    const size_t n = points1.size();

    // Centroids of both point sets.
    Eigen::Matrix<T, 3, 1> c1 = Eigen::Matrix<T, 3, 1>::Zero();
    Eigen::Matrix<T, 3, 1> c2 = Eigen::Matrix<T, 3, 1>::Zero();
    for (size_t i = 0; i < n; ++i)
    {
        c1 += points1.at(i);
        c2 += points2.at(i);
    }
    c1 /= n;
    c2 /= n;

    // Centered coordinate matrices.
    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> X(3, n);
    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Y(3, n);
    for (size_t i = 0; i < n; ++i)
    {
        X.col(i) = points1.at(i) - c1;
        Y.col(i) = points2.at(i) - c2;
    }

    // Rotation from the SVD of the cross-covariance.
    Eigen::Matrix<T, 3, 3> H = X * Y.transpose();
    Eigen::JacobiSVD< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV);

    Eigen::Matrix<T, 3, 3> U = svd.matrixU();
    Eigen::Matrix<T, 3, 3> V = svd.matrixV();
    if (U.determinant() * V.determinant() < 0.0)
    {
        // Reflection case: flip one axis so R is a proper rotation.
        V.col(2) *= -1.0;
    }

    Eigen::Matrix<T, 3, 3> R = V * U.transpose();
    Eigen::Matrix<T, 3, 1> t = c2 - R * c1;

    return homogeneousTransform(R, t);
}

template<typename T>
Eigen::Matrix<T, 4, 4>
estimate3DRigidSimilarityTransform(const std::vector<Eigen::Matrix<T, 3, 1>, Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >& points1,
                                   const std::vector<Eigen::Matrix<T, 3, 1>, Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > >& points2)
{
    // Least-squares similarity transform (rotation, translation and one
    // global scale) mapping points1 onto points2.  NOTE(review): assumes
    // non-empty lists of equal length, as the original did.
    const size_t n = points1.size();

    // Centroids of both point sets.
    Eigen::Matrix<T, 3, 1> c1 = Eigen::Matrix<T, 3, 1>::Zero();
    Eigen::Matrix<T, 3, 1> c2 = Eigen::Matrix<T, 3, 1>::Zero();
    for (size_t i = 0; i < n; ++i)
    {
        c1 += points1.at(i);
        c2 += points2.at(i);
    }
    c1 /= n;
    c2 /= n;

    // Centered coordinate matrices.
    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> X(3, n);
    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> Y(3, n);
    for (size_t i = 0; i < n; ++i)
    {
        X.col(i) = points1.at(i) - c1;
        Y.col(i) = points2.at(i) - c2;
    }

    // Rotation from the SVD of the cross-covariance.
    Eigen::Matrix<T, 3, 3> H = X * Y.transpose();
    Eigen::JacobiSVD< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> > svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV);

    Eigen::Matrix<T, 3, 3> U = svd.matrixU();
    Eigen::Matrix<T, 3, 3> V = svd.matrixV();
    if (U.determinant() * V.determinant() < 0.0)
    {
        // Reflection case: flip one axis so R is a proper rotation.
        V.col(2) *= -1.0;
    }

    Eigen::Matrix<T, 3, 3> R = V * U.transpose();

    // Scale = <centered2, R * centered1> / ||centered1||^2.
    std::vector<Eigen::Matrix<T, 3, 1>, Eigen::aligned_allocator<Eigen::Matrix<T, 3, 1> > > rotatedPoints1(n);
    for (size_t i = 0; i < n; ++i)
    {
        rotatedPoints1.at(i) = R * (points1.at(i) - c1);
    }

    double sum_ss = 0.0, sum_tt = 0.0;
    for (size_t i = 0; i < n; ++i)
    {
        sum_ss += (points1.at(i) - c1).squaredNorm();
        sum_tt += (points2.at(i) - c2).dot(rotatedPoints1.at(i));
    }

    double scale = sum_tt / sum_ss;

    Eigen::Matrix<T, 3, 3> sR = scale * R;
    Eigen::Matrix<T, 3, 1> t = c2 - sR * c1;

    return homogeneousTransform(sR, t);
}

}

#endif


================================================
FILE: camera_model/include/camodocal/gpl/gpl.h
================================================
#ifndef GPL_H
#define GPL_H

#include <algorithm>
#include <cmath>
#include <opencv2/core/core.hpp>

namespace camodocal
{

template<class T>
const T clamp(const T& v, const T& a, const T& b)
{
	// Clamp v into [a, b]: raise to at least a, then cap at b.
	const T lowerBounded = std::max(a, v);
	return std::min(b, lowerBounded);
}

double hypot3(double x, double y, double z);
float hypot3f(float x, float y, float z);

template<class T>
const T normalizeTheta(const T& theta)
{
	// Wrap the angle into [-pi, pi] by shifting whole turns.
	T wrapped = theta;

	while (wrapped < -M_PI)
		wrapped += 2.0 * M_PI;
	while (wrapped > M_PI)
		wrapped -= 2.0 * M_PI;

	return wrapped;
}

double d2r(double deg);
float d2r(float deg);
double r2d(double rad);
float r2d(float rad);

double sinc(double theta);

// x * x; avoids repeating a (possibly expensive) expression at call sites.
template<class T>
const T square(const T& x)
{
	return x * x;
}

// x * x * x; companion of square() above.
template<class T>
const T cube(const T& x)
{
	return x * x * x;
}

// Uniform pseudo-random sample in [a, b], driven by rand().
// NOTE(review): relies on the caller to seed rand(); not thread-safe,
// and the intermediate math is done in double regardless of T.
template<class T>
const T random(const T& a, const T& b)
{
	return static_cast<double>(rand()) / RAND_MAX * (b - a) + a;
}

// Zero-mean normally distributed sample with standard deviation sigma,
// generated with the Marsaglia polar method (rejection sampling on the
// unit disk, then the log/sqrt transform).  Built on random()/rand(),
// so the same seeding and thread-safety caveats apply.
template<class T>
const T randomNormal(const T& sigma)
{
    T x1, x2, w;

    // Draw (x1, x2) uniformly inside the unit disk, excluding the origin.
    do
    {
        x1 = 2.0 * random(0.0, 1.0) - 1.0;
        x2 = 2.0 * random(0.0, 1.0) - 1.0;
        w = x1 * x1 + x2 * x2;
    }
    while (w >= 1.0 || w == 0.0);

    w = sqrt((-2.0 * log(w)) / w);

    // The second variate (x2 * w * sigma) is discarded.
    return x1 * w * sigma;
}

// Current time helpers (implementations in gpl.cc).
unsigned long long timeInMicroseconds(void);

double timeInSeconds(void);

// Render a depth image as a false-colour image over [minRange, maxRange]
// -- see gpl.cc for the exact mapping.
void colorDepthImage(cv::Mat& imgDepth,
                     cv::Mat& imgColoredDepth,
                     float minRange, float maxRange);

// Look up colour entry idx of the named colormap; the bool presumably
// signals whether the colormap name was recognised -- see gpl.cc.
bool colormap(const std::string& name, unsigned char idx,
              float& r, float& g, float& b);

// Bresenham rasterization: the set of pixels on a line / circle.
std::vector<cv::Point2i> bresLine(int x0, int y0, int x1, int y1);
std::vector<cv::Point2i> bresCircle(int x0, int y0, int r);

// Fit a circle (center, radius) to a set of 2-D points -- see gpl.cc.
void fitCircle(const std::vector<cv::Point2d>& points,
               double& centerX, double& centerY, double& radius);

// Intersection points of two circles (empty when they do not intersect).
std::vector<cv::Point2d> intersectCircles(double x1, double y1, double r1,
                                          double x2, double y2, double r2);

// Geodetic (lat/lon) <-> UTM coordinate conversions.
void LLtoUTM(double latitude, double longitude,
             double& utmNorthing, double& utmEasting,
             std::string& utmZone);
void UTMtoLL(double utmNorthing, double utmEasting,
             const std::string& utmZone,
             double& latitude, double& longitude);

// Signed timestamp difference -- see gpl.cc for units and saturation rules.
long int timestampDiff(uint64_t t1, uint64_t t2);

}

#endif


================================================
FILE: camera_model/include/camodocal/sparse_graph/Transform.h
================================================
#ifndef TRANSFORM_H
#define TRANSFORM_H

#include <boost/shared_ptr.hpp>
#include <eigen3/Eigen/Dense>
#include <stdint.h>

namespace camodocal
{

// Rigid-body transform stored as a unit quaternion (m_q) plus a translation
// vector (m_t), with raw-pointer accessors so the components can be handed
// directly to an optimizer as parameter blocks.
class Transform
{
public:
    // Required because the class holds fixed-size vectorizable Eigen members.
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    Transform();
    // Construct from a 4x4 homogeneous transformation matrix.
    Transform(const Eigen::Matrix4d& H);

    // Rotation accessors. rotationData() exposes the quaternion coefficients
    // in Eigen's internal (x, y, z, w) storage order.
    Eigen::Quaterniond& rotation(void);
    const Eigen::Quaterniond& rotation(void) const;
    double* rotationData(void);
    const double* const rotationData(void) const;

    // Translation accessors; translationData() points at 3 doubles.
    Eigen::Vector3d& translation(void);
    const Eigen::Vector3d& translation(void) const;
    double* translationData(void);
    const double* const translationData(void) const;

    // Homogeneous 4x4 matrix form of this transform (see Transform.cc).
    Eigen::Matrix4d toMatrix(void) const;

private:
    Eigen::Quaterniond m_q; // rotation
    Eigen::Vector3d m_t;    // translation
};

}

#endif


================================================
FILE: camera_model/instruction
================================================
rosrun camera_model Calibration -w 8 -h 11 -s 70 -i ~/bag/PX/calib/


================================================
FILE: camera_model/package.xml
================================================
<?xml version="1.0"?>
<package>
  <name>camera_model</name>
  <version>0.0.0</version>
  <description>The camera_model package</description>

  <!-- One maintainer tag required, multiple allowed, one person per tag --> 
  <!-- Example:  -->
  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
  <maintainer email="ionel.heng@ieee.org">lionel</maintainer>


  <!-- One license tag required, multiple allowed, one license per tag -->
  <!-- Commonly used license strings: -->
  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
  <license>GPLv3</license>


  <!-- Url tags are optional, but multiple are allowed, one per tag -->
  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/camera_model</url> -->


  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->


  <!-- The *_depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use build_depend for packages you need at compile time: -->
  <!--   <build_depend>message_generation</build_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!--   <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use run_depend for packages you need at runtime: -->
  <!--   <run_depend>message_runtime</run_depend> -->
  <!-- Use test_depend for packages you need only for testing: -->
  <!--   <test_depend>gtest</test_depend> -->
  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>std_msgs</build_depend>
  <run_depend>roscpp</run_depend>
  <run_depend>std_msgs</run_depend>


  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->

  </export>
</package>


================================================
FILE: camera_model/readme.md
================================================
part of [camodocal](https://github.com/hengli/camodocal)

[Google Ceres](http://ceres-solver.org) is needed.

# Calibration:

Use [intrinsic_calib.cc](https://github.com/dvorak0/camera_model/blob/master/src/intrinsic_calib.cc) to calibrate your camera.

# Undistortion:

See [Camera.h](https://github.com/dvorak0/camera_model/blob/master/include/camodocal/camera_models/Camera.h) for the general interface:

 - liftProjective: Lift points from the image plane to the projective space.
 - spaceToPlane: Projects 3D points to the image plane (Pi function)



================================================
FILE: camera_model/src/calib/CameraCalibration.cc
================================================
#include "camodocal/calib/CameraCalibration.h"

#include <cstdio>
#include <eigen3/Eigen/Dense>
#include <iomanip>
#include <iostream>
#include <algorithm>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include "camodocal/camera_models/CameraFactory.h"
#include "camodocal/sparse_graph/Transform.h"
#include "camodocal/gpl/EigenQuaternionParameterization.h"
#include "camodocal/gpl/EigenUtils.h"
#include "camodocal/camera_models/CostFunctionFactory.h"

#include "ceres/ceres.h"
namespace camodocal
{

// Default-construct with an empty board, zero square size and no camera
// model attached; use the parameterized constructor for real calibration.
CameraCalibration::CameraCalibration()
 : m_boardSize(cv::Size(0,0))
 , m_squareSize(0.0f)
 , m_verbose(false)
{

}

// Construct for a given camera model and chessboard geometry; the camera
// object itself is created through the CameraFactory singleton.
// squareSize is the physical edge length of one chessboard square.
CameraCalibration::CameraCalibration(const Camera::ModelType modelType,
                                     const std::string& cameraName,
                                     const cv::Size& imageSize,
                                     const cv::Size& boardSize,
                                     float squareSize)
 : m_boardSize(boardSize)
 , m_squareSize(squareSize)
 , m_verbose(false)
{
    m_camera = CameraFactory::instance()->generateCamera(modelType, cameraName, imageSize);
}

// Discard all chessboard observations collected so far; the camera model
// and board geometry are left untouched.
void
CameraCalibration::clear(void)
{
    m_imagePoints.clear();
    m_scenePoints.clear();
}

// Record one chessboard detection: the observed image corners plus the
// matching board-frame 3-D points, generated row-major on the z = 0 plane.
// corners must contain boardSize.height * boardSize.width points in the
// same ordering.
void
CameraCalibration::addChessboardData(const std::vector<cv::Point2f>& corners)
{
    m_imagePoints.push_back(corners);

    std::vector<cv::Point3f> scenePointsInView;
    // The number of board points is known up front; reserve to avoid
    // repeated reallocation while filling.
    scenePointsInView.reserve(m_boardSize.height * m_boardSize.width);
    for (int i = 0; i < m_boardSize.height; ++i)
    {
        for (int j = 0; j < m_boardSize.width; ++j)
        {
            // Note: x runs along board rows (i), y along columns (j).
            scenePointsInView.push_back(cv::Point3f(i * m_squareSize, j * m_squareSize, 0.0));
        }
    }
    m_scenePoints.push_back(scenePointsInView);
}

// Run the full calibration pipeline: estimate intrinsics and per-view
// extrinsics via calibrateHelper(), cache the per-view poses in
// m_cameraPoses (one row per view: [rvec | tvec]), then estimate the 2x2
// covariance of the corner reprojection error into m_measurementCovariance.
// Returns the status reported by calibrateHelper().
bool
CameraCalibration::calibrate(void)
{
    int imageCount = m_imagePoints.size();

    // compute intrinsic camera parameters and extrinsic parameters for each of the views
    std::vector<cv::Mat> rvecs;
    std::vector<cv::Mat> tvecs;
    bool ret = calibrateHelper(m_camera, rvecs, tvecs);

    m_cameraPoses = cv::Mat(imageCount, 6, CV_64F);
    for (int i = 0; i < imageCount; ++i)
    {
        m_cameraPoses.at<double>(i,0) = rvecs.at(i).at<double>(0);
        m_cameraPoses.at<double>(i,1) = rvecs.at(i).at<double>(1);
        m_cameraPoses.at<double>(i,2) = rvecs.at(i).at<double>(2);
        m_cameraPoses.at<double>(i,3) = tvecs.at(i).at<double>(0);
        m_cameraPoses.at<double>(i,4) = tvecs.at(i).at<double>(1);
        m_cameraPoses.at<double>(i,5) = tvecs.at(i).at<double>(2);
    }

    // Compute measurement covariance.
    // First pass: per-corner reprojection residuals and their running sum,
    // so the mean can be removed in the second pass.
    std::vector<std::vector<cv::Point2f> > errVec(m_imagePoints.size());
    Eigen::Vector2d errSum = Eigen::Vector2d::Zero();
    size_t errCount = 0;
    for (size_t i = 0; i < m_imagePoints.size(); ++i)
    {
        std::vector<cv::Point2f> estImagePoints;
        m_camera->projectPoints(m_scenePoints.at(i), rvecs.at(i), tvecs.at(i),
                                estImagePoints);

        for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j)
        {
            cv::Point2f pObs = m_imagePoints.at(i).at(j);
            cv::Point2f pEst = estImagePoints.at(j);

            cv::Point2f err = pObs - pEst;

            errVec.at(i).push_back(err);

            errSum += Eigen::Vector2d(err.x, err.y);
        }

        errCount += m_imagePoints.at(i).size();
    }

    Eigen::Vector2d errMean = errSum / static_cast<double>(errCount);

    // Second pass: accumulate the scatter about the mean. Only the upper
    // triangle is accumulated; the (1,0) entry is mirrored afterwards.
    Eigen::Matrix2d measurementCovariance = Eigen::Matrix2d::Zero();
    for (size_t i = 0; i < errVec.size(); ++i)
    {
        for (size_t j = 0; j < errVec.at(i).size(); ++j)
        {
            cv::Point2f err = errVec.at(i).at(j);
            double d0 = err.x - errMean(0);
            double d1 = err.y - errMean(1);

            measurementCovariance(0,0) += d0 * d0;
            measurementCovariance(0,1) += d0 * d1;
            measurementCovariance(1,1) += d1 * d1;
        }
    }
    measurementCovariance /= static_cast<double>(errCount);
    // Covariance is symmetric by construction.
    measurementCovariance(1,0) = measurementCovariance(0,1);

    m_measurementCovariance = measurementCovariance;

    return ret;
}

// Number of chessboard views added so far.
// NOTE(review): size_t -> int narrowing; fine for realistic view counts.
int
CameraCalibration::sampleCount(void) const
{
    return m_imagePoints.size();
}

// Mutable / const access to the per-view 2-D corner observations.
std::vector<std::vector<cv::Point2f> >&
CameraCalibration::imagePoints(void)
{
    return m_imagePoints;
}

const std::vector<std::vector<cv::Point2f> >&
CameraCalibration::imagePoints(void) const
{
    return m_imagePoints;
}

// Mutable / const access to the per-view board-frame 3-D points.
std::vector<std::vector<cv::Point3f> >&
CameraCalibration::scenePoints(void)
{
    return m_scenePoints;
}

const std::vector<std::vector<cv::Point3f> >&
CameraCalibration::scenePoints(void) const
{
    return m_scenePoints;
}

// Access to the camera model being calibrated.
CameraPtr&
CameraCalibration::camera(void)
{
    return m_camera;
}

const CameraConstPtr
CameraCalibration::camera(void) const
{
    return m_camera;
}

// 2x2 reprojection-error covariance estimated by calibrate().
Eigen::Matrix2d&
CameraCalibration::measurementCovariance(void)
{
    return m_measurementCovariance;
}

const Eigen::Matrix2d&
CameraCalibration::measurementCovariance(void) const
{
    return m_measurementCovariance;
}

// Per-view poses cached by calibrate(); one CV_64F row per view,
// laid out as [rvec | tvec].
cv::Mat&
CameraCalibration::cameraPoses(void)
{
    return m_cameraPoses;
}

const cv::Mat&
CameraCalibration::cameraPoses(void) const
{
    return m_cameraPoses;
}

// Overlay observed (green) and reprojected (red) chessboard corners on each
// input image and annotate it with per-image reprojection statistics.
// images must correspond one-to-one, in order, with the stored views;
// grayscale images are converted to RGB in place.
void
CameraCalibration::drawResults(std::vector<cv::Mat>& images) const
{
    std::vector<cv::Mat> rvecs, tvecs;

    for (size_t i = 0; i < images.size(); ++i)
    {
        // Unpack the cached pose row for view i: [rvec | tvec].
        cv::Mat rvec(3, 1, CV_64F);
        rvec.at<double>(0) = m_cameraPoses.at<double>(i,0);
        rvec.at<double>(1) = m_cameraPoses.at<double>(i,1);
        rvec.at<double>(2) = m_cameraPoses.at<double>(i,2);

        cv::Mat tvec(3, 1, CV_64F);
        tvec.at<double>(0) = m_cameraPoses.at<double>(i,3);
        tvec.at<double>(1) = m_cameraPoses.at<double>(i,4);
        tvec.at<double>(2) = m_cameraPoses.at<double>(i,5);

        rvecs.push_back(rvec);
        tvecs.push_back(tvec);
    }

    // Sub-pixel drawing: coordinates are scaled by 2^drawShiftBits.
    int drawShiftBits = 4;
    int drawMultiplier = 1 << drawShiftBits;

    cv::Scalar green(0, 255, 0);
    cv::Scalar red(0, 0, 255);

    for (size_t i = 0; i < images.size(); ++i)
    {
        cv::Mat& image = images.at(i);
        if (image.channels() == 1)
        {
            cv::cvtColor(image, image, CV_GRAY2RGB);
        }

        std::vector<cv::Point2f> estImagePoints;
        m_camera->projectPoints(m_scenePoints.at(i), rvecs.at(i), tvecs.at(i),
                                estImagePoints);

        float errorSum = 0.0f;
        // BUGFIX: previously seeded with std::numeric_limits<float>::min(),
        // which is the smallest POSITIVE float, not the most negative value.
        // Reprojection errors are non-negative, so 0 is the correct identity
        // for the max reduction (and reports 0 when all errors are 0).
        float errorMax = 0.0f;

        for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j)
        {
            cv::Point2f pObs = m_imagePoints.at(i).at(j);
            cv::Point2f pEst = estImagePoints.at(j);

            cv::circle(image,
                       cv::Point(cvRound(pObs.x * drawMultiplier),
                                 cvRound(pObs.y * drawMultiplier)),
                       5, green, 2, CV_AA, drawShiftBits);

            cv::circle(image,
                       cv::Point(cvRound(pEst.x * drawMultiplier),
                                 cvRound(pEst.y * drawMultiplier)),
                       5, red, 2, CV_AA, drawShiftBits);

            float error = cv::norm(pObs - pEst);

            errorSum += error;
            if (error > errorMax)
            {
                errorMax = error;
            }
        }

        std::ostringstream oss;
        oss << "Reprojection error: avg = " << errorSum / m_imagePoints.at(i).size()
            << "   max = " << errorMax;

        cv::putText(image, oss.str(), cv::Point(10, image.rows - 10),
                    cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 255, 255),
                    1, CV_AA);
    }
}

// Persist the calibrated camera intrinsics to a YAML file.
void
CameraCalibration::writeParams(const std::string& filename) const
{
    m_camera->writeParametersToYamlFile(filename);
}

// Serialize the board geometry, measurement covariance, camera poses and
// all corner observations to a raw binary file. Returns false only when
// the file cannot be opened.
// NOTE(review): the format is native-endian and writes size_t values
// directly, so files are only portable between identical platforms. Must
// stay byte-compatible with readChessboardData().
bool
CameraCalibration::writeChessboardData(const std::string& filename) const
{
    std::ofstream ofs(filename.c_str(), std::ios::out | std::ios::binary);
    if (!ofs.is_open())
    {
        return false;
    }

    // Header: board geometry.
    writeData(ofs, m_boardSize.width);
    writeData(ofs, m_boardSize.height);
    writeData(ofs, m_squareSize);

    // 2x2 measurement covariance, row-major.
    writeData(ofs, m_measurementCovariance(0,0));
    writeData(ofs, m_measurementCovariance(0,1));
    writeData(ofs, m_measurementCovariance(1,0));
    writeData(ofs, m_measurementCovariance(1,1));

    // Camera poses: dimensions, cv type, then elements row by row.
    writeData(ofs, m_cameraPoses.rows);
    writeData(ofs, m_cameraPoses.cols);
    writeData(ofs, m_cameraPoses.type());
    for (int i = 0; i < m_cameraPoses.rows; ++i)
    {
        for (int j = 0; j < m_cameraPoses.cols; ++j)
        {
            writeData(ofs, m_cameraPoses.at<double>(i,j));
        }
    }

    // Image-point sets: outer count, then per-view count + (x, y) pairs.
    writeData(ofs, m_imagePoints.size());
    for (size_t i = 0; i < m_imagePoints.size(); ++i)
    {
        writeData(ofs, m_imagePoints.at(i).size());
        for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j)
        {
            const cv::Point2f& ipt = m_imagePoints.at(i).at(j);

            writeData(ofs, ipt.x);
            writeData(ofs, ipt.y);
        }
    }

    // Scene-point sets: same layout with (x, y, z) triples.
    writeData(ofs, m_scenePoints.size());
    for (size_t i = 0; i < m_scenePoints.size(); ++i)
    {
        writeData(ofs, m_scenePoints.at(i).size());
        for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j)
        {
            const cv::Point3f& spt = m_scenePoints.at(i).at(j);

            writeData(ofs, spt.x);
            writeData(ofs, spt.y);
            writeData(ofs, spt.z);
        }
    }

    return true;
}

// Inverse of writeChessboardData(): restore board geometry, covariance,
// camera poses and observations from the raw binary file. Returns false
// only when the file cannot be opened.
// NOTE(review): no validation of the header fields or read success; a
// truncated or corrupt file yields garbage state rather than an error.
bool
CameraCalibration::readChessboardData(const std::string& filename)
{
    std::ifstream ifs(filename.c_str(), std::ios::in | std::ios::binary);
    if (!ifs.is_open())
    {
        return false;
    }

    // Header: board geometry.
    readData(ifs, m_boardSize.width);
    readData(ifs, m_boardSize.height);
    readData(ifs, m_squareSize);

    // 2x2 measurement covariance, row-major.
    readData(ifs, m_measurementCovariance(0,0));
    readData(ifs, m_measurementCovariance(0,1));
    readData(ifs, m_measurementCovariance(1,0));
    readData(ifs, m_measurementCovariance(1,1));

    // Camera poses: dimensions and cv type precede the elements.
    int rows, cols, type;
    readData(ifs, rows);
    readData(ifs, cols);
    readData(ifs, type);
    m_cameraPoses = cv::Mat(rows, cols, type);

    for (int i = 0; i < m_cameraPoses.rows; ++i)
    {
        for (int j = 0; j < m_cameraPoses.cols; ++j)
        {
            readData(ifs, m_cameraPoses.at<double>(i,j));
        }
    }

    // Image-point sets: outer count, then per-view count + (x, y) pairs.
    size_t nImagePointSets;
    readData(ifs, nImagePointSets);

    m_imagePoints.clear();
    m_imagePoints.resize(nImagePointSets);
    for (size_t i = 0; i < m_imagePoints.size(); ++i)
    {
        size_t nImagePoints;
        readData(ifs, nImagePoints);
        m_imagePoints.at(i).resize(nImagePoints);

        for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j)
        {
            cv::Point2f& ipt = m_imagePoints.at(i).at(j);
            readData(ifs, ipt.x);
            readData(ifs, ipt.y);
        }
    }

    // Scene-point sets: same layout with (x, y, z) triples.
    size_t nScenePointSets;
    readData(ifs, nScenePointSets);

    m_scenePoints.clear();
    m_scenePoints.resize(nScenePointSets);
    for (size_t i = 0; i < m_scenePoints.size(); ++i)
    {
        size_t nScenePoints;
        readData(ifs, nScenePoints);
        m_scenePoints.at(i).resize(nScenePoints);

        for (size_t j = 0; j < m_scenePoints.at(i).size(); ++j)
        {
            cv::Point3f& spt = m_scenePoints.at(i).at(j);
            readData(ifs, spt.x);
            readData(ifs, spt.y);
            readData(ifs, spt.z);
        }
    }

    return true;
}

// Enable/disable diagnostic output during calibration and optimization.
void
CameraCalibration::setVerbose(bool verbose)
{
    m_verbose = verbose;
}

// Three-stage calibration of the given camera model:
//   1. closed-form intrinsic initialization from all views,
//   2. per-view extrinsic (rvec/tvec) estimation,
//   3. joint nonlinear refinement with ceres (optimize()).
// rvecs/tvecs are resized to one pose per stored view.
// Always returns true; failures are not currently detected here.
bool
CameraCalibration::calibrateHelper(CameraPtr& camera,
                                   std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs) const
{
    rvecs.assign(m_scenePoints.size(), cv::Mat());
    tvecs.assign(m_scenePoints.size(), cv::Mat());

    // STEP 1: Estimate intrinsics
    camera->estimateIntrinsics(m_boardSize, m_scenePoints, m_imagePoints);

    // STEP 2: Estimate extrinsics
    for (size_t i = 0; i < m_scenePoints.size(); ++i)
    {
        camera->estimateExtrinsics(m_scenePoints.at(i), m_imagePoints.at(i), rvecs.at(i), tvecs.at(i));
    }

    if (m_verbose)
    {
        std::cout << "[" << camera->cameraName() << "] "
                  << "# INFO: " << "Initial reprojection error: "
                  << std::fixed << std::setprecision(3)
                  << camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs)
                  << " pixels" << std::endl;
    }

    // STEP 3: optimization using ceres
    optimize(camera, rvecs, tvecs);

    if (m_verbose)
    {
        double err = camera->reprojectionError(m_scenePoints, m_imagePoints, rvecs, tvecs);
        std::cout << "[" << camera->cameraName() << "] " << "# INFO: Final reprojection error: "
                  << err << " pixels" << std::endl;
        std::cout << "[" << camera->cameraName() << "] " << "# INFO: "
                  << camera->parametersToString() << std::endl;
    }

    return true;
}

// Jointly refine camera intrinsics and all per-view poses with ceres.
// Poses are converted rvec/tvec -> quaternion/translation Transforms for
// optimization, then converted back into rvecs/tvecs on completion.
// Cost functions and loss ownership are transferred to the ceres Problem.
void
CameraCalibration::optimize(CameraPtr& camera,
                            std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs) const
{
    // Use ceres to do optimization
    ceres::Problem problem;

    // Aligned allocator required: Transform holds fixed-size Eigen members.
    std::vector<Transform, Eigen::aligned_allocator<Transform> > transformVec(rvecs.size());
    for (size_t i = 0; i < rvecs.size(); ++i)
    {
        Eigen::Vector3d rvec;
        cv::cv2eigen(rvecs.at(i), rvec);

        // Rodrigues vector -> quaternion (angle = norm, axis = direction).
        transformVec.at(i).rotation() = Eigen::AngleAxisd(rvec.norm(), rvec.normalized());
        transformVec.at(i).translation() << tvecs[i].at<double>(0),
                                            tvecs[i].at<double>(1),
                                            tvecs[i].at<double>(2);
    }

    // Intrinsics are optimized in-place in this flat parameter vector.
    std::vector<double> intrinsicCameraParams;
    m_camera->writeParameters(intrinsicCameraParams);

    // create residuals for each observation
    for (size_t i = 0; i < m_imagePoints.size(); ++i)
    {
        for (size_t j = 0; j < m_imagePoints.at(i).size(); ++j)
        {
            const cv::Point3f& spt = m_scenePoints.at(i).at(j);
            const cv::Point2f& ipt = m_imagePoints.at(i).at(j);

            ceres::CostFunction* costFunction =
                CostFunctionFactory::instance()->generateCostFunction(camera,
                                                                      Eigen::Vector3d(spt.x, spt.y, spt.z),
                                                                      Eigen::Vector2d(ipt.x, ipt.y),
                                                                      CAMERA_INTRINSICS | CAMERA_POSE);

            // Cauchy loss down-weights outlier corners.
            ceres::LossFunction* lossFunction = new ceres::CauchyLoss(1.0);
            problem.AddResidualBlock(costFunction, lossFunction,
                                     intrinsicCameraParams.data(),
                                     transformVec.at(i).rotationData(),
                                     transformVec.at(i).translationData());
        }

        // Keep each view's quaternion on the unit sphere during updates;
        // ownership passes to the problem.
        ceres::LocalParameterization* quaternionParameterization =
            new EigenQuaternionParameterization;

        problem.SetParameterization(transformVec.at(i).rotationData(),
                                    quaternionParameterization);
    }

    std::cout << "begin ceres" << std::endl;
    ceres::Solver::Options options;
    options.max_num_iterations = 1000;
    options.num_threads = 1;

    if (m_verbose)
    {
        options.minimizer_progress_to_stdout = true;
    }

    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << "end ceres" << std::endl;

    if (m_verbose)
    {
        std::cout << summary.FullReport() << std::endl;
    }

    // Push the refined intrinsics back into the camera model.
    camera->readParameters(intrinsicCameraParams);

    // Convert the refined quaternion/translation poses back to rvec/tvec.
    for (size_t i = 0; i < rvecs.size(); ++i)
    {
        Eigen::AngleAxisd aa(transformVec.at(i).rotation());

        Eigen::Vector3d rvec = aa.angle() * aa.axis();
        cv::eigen2cv(rvec, rvecs.at(i));

        cv::Mat& tvec = tvecs.at(i);
        tvec.at<double>(0) = transformVec.at(i).translation()(0);
        tvec.at<double>(1) = transformVec.at(i).translation()(1);
        tvec.at<double>(2) = transformVec.at(i).translation()(2);
    }
}

// Read sizeof(T) raw bytes from the stream directly into data.
// Only meaningful for trivially-copyable T (the format written by
// writeData()). On a short read, data may be partially filled; callers
// should check the stream state if that matters.
template<typename T>
void
CameraCalibration::readData(std::ifstream& ifs, T& data) const
{
    // Read straight into the destination object; the original heap-allocated
    // a temporary buffer per call (needless new/delete, not exception-safe).
    ifs.read(reinterpret_cast<char*>(&data), sizeof(T));
}

// Append the raw bytes of data (native endianness) to the stream.
// Only meaningful for trivially-copyable T; paired with readData().
template<typename T>
void
CameraCalibration::writeData(std::ofstream& ofs, T data) const
{
    ofs.write(reinterpret_cast<const char*>(&data), sizeof(T));
}

}


================================================
FILE: camera_model/src/camera_models/Camera.cc
================================================
#include "camodocal/camera_models/Camera.h"
#include "camodocal/camera_models/ScaramuzzaCamera.h"

#include <opencv2/calib3d/calib3d.hpp>

namespace camodocal
{

// Construct with zero image dimensions; the intrinsic parameter count is
// fixed by the model type: 8 for KANNALA_BRANDT and PINHOLE, 9 for MEI
// (the default), and SCARAMUZZA_CAMERA_NUM_PARAMS for SCARAMUZZA.
Camera::Parameters::Parameters(ModelType modelType)
 : m_modelType(modelType)
 , m_imageWidth(0)
 , m_imageHeight(0)
{
    switch (modelType)
    {
    case KANNALA_BRANDT:
        m_nIntrinsics = 8;
        break;
    case PINHOLE:
        m_nIntrinsics = 8;
        break;
    case SCARAMUZZA:
        m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
        break;
    case MEI:
    default:
        m_nIntrinsics = 9;
    }
}

// Same as above, additionally fixing the camera name and image size.
Camera::Parameters::Parameters(ModelType modelType,
                               const std::string& cameraName,
                               int w, int h)
 : m_modelType(modelType)
 , m_cameraName(cameraName)
 , m_imageWidth(w)
 , m_imageHeight(h)
{
    switch (modelType)
    {
    case KANNALA_BRANDT:
        m_nIntrinsics = 8;
        break;
    case PINHOLE:
        m_nIntrinsics = 8;
        break;
    case SCARAMUZZA:
        m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
        break;
    case MEI:
    default:
        m_nIntrinsics = 9;
    }
}

// Mutable accessors for the shared parameter fields.
Camera::ModelType&
Camera::Parameters::modelType(void)
{
    return m_modelType;
}

std::string&
Camera::Parameters::cameraName(void)
{
    return m_cameraName;
}

int&
Camera::Parameters::imageWidth(void)
{
    return m_imageWidth;
}

int&
Camera::Parameters::imageHeight(void)
{
    return m_imageHeight;
}

// Read-only counterparts.
Camera::ModelType
Camera::Parameters::modelType(void) const
{
    return m_modelType;
}

const std::string&
Camera::Parameters::cameraName(void) const
{
    return m_cameraName;
}

int
Camera::Parameters::imageWidth(void) const
{
    return m_imageWidth;
}

int
Camera::Parameters::imageHeight(void) const
{
    return m_imageHeight;
}

// Number of intrinsic parameters, fixed by the model type at construction.
int
Camera::Parameters::nIntrinsics(void) const
{
    return m_nIntrinsics;
}

// Optional pixel validity mask for the camera.
cv::Mat&
Camera::mask(void)
{
    return m_mask;
}

const cv::Mat&
Camera::mask(void) const
{
    return m_mask;
}

// Estimate the camera pose (rvec, tvec) for one view from 3-D/2-D
// correspondences: image points are first lifted through the (possibly
// nonlinear) camera model onto the z = 1 normalized plane, so solvePnP
// can be run with an identity intrinsic matrix and no distortion.
void
Camera::estimateExtrinsics(const std::vector<cv::Point3f>& objectPoints,
                           const std::vector<cv::Point2f>& imagePoints,
                           cv::Mat& rvec, cv::Mat& tvec) const
{
    std::vector<cv::Point2f> Ms(imagePoints.size());
    for (size_t i = 0; i < Ms.size(); ++i)
    {
        Eigen::Vector3d P;
        liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P);

        // Normalize onto the z = 1 plane.
        P /= P(2);

        Ms.at(i).x = P(0);
        Ms.at(i).y = P(1);
    }

    // assume unit focal length, zero principal point, and zero distortion
    cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec);
}

// Pixel-space distance between the projections of two 3-D points
// (both given in the camera frame).
double
Camera::reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const
{
    Eigen::Vector2d proj1;
    Eigen::Vector2d proj2;

    spaceToPlane(P1, proj1);
    spaceToPlane(P2, proj2);

    return (proj1 - proj2).norm();
}

// Average per-point reprojection error (pixels) over all views, given a
// pose (rvec, tvec) per view. When _perViewErrors is provided, it is
// filled with an imageCount x 1 CV_64F matrix of per-view averages.
double
Camera::reprojectionError(const std::vector< std::vector<cv::Point3f> >& objectPoints,
                          const std::vector< std::vector<cv::Point2f> >& imagePoints,
                          const std::vector<cv::Mat>& rvecs,
                          const std::vector<cv::Mat>& tvecs,
                          cv::OutputArray _perViewErrors) const
{
    int imageCount = static_cast<int>(objectPoints.size());
    size_t pointsSoFar = 0;
    double totalErr = 0.0;

    bool computePerViewErrors = _perViewErrors.needed();
    cv::Mat perViewErrors;
    if (computePerViewErrors)
    {
        _perViewErrors.create(imageCount, 1, CV_64F);
        perViewErrors = _perViewErrors.getMat();
    }

    for (int i = 0; i < imageCount; ++i)
    {
        size_t pointCount = imagePoints.at(i).size();

        pointsSoFar += pointCount;

        std::vector<cv::Point2f> estImagePoints;
        projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i),
                      estImagePoints);

        double err = 0.0;
        for (size_t j = 0; j < imagePoints.at(i).size(); ++j)
        {
            err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j));
        }

        if (computePerViewErrors)
        {
            // Guard an empty view: avoid 0/0 producing NaN.
            perViewErrors.at<double>(i) = (pointCount > 0) ? err / pointCount : 0.0;
        }

        totalErr += err;
    }

    // No observations at all: report zero error instead of dividing by zero.
    if (pointsSoFar == 0)
    {
        return 0.0;
    }

    return totalErr / pointsSoFar;
}

// Reprojection error (pixels) of a single world point: transform it into
// the camera frame with (camera_q, camera_t), project through the model,
// and return the distance to the observed pixel.
double
Camera::reprojectionError(const Eigen::Vector3d& P,
                          const Eigen::Quaterniond& camera_q,
                          const Eigen::Vector3d& camera_t,
                          const Eigen::Vector2d& observed_p) const
{
    const Eigen::Vector3d P_cam = camera_q.toRotationMatrix() * P + camera_t;

    Eigen::Vector2d projected;
    spaceToPlane(P_cam, projected);

    return (projected - observed_p).norm();
}

// Project 3-D object points into the image plane through this camera
// model. rvec is a Rodrigues rotation vector and tvec a translation,
// both CV_64F. Projections are APPENDED to imagePoints (the vector is
// not cleared, preserving the original semantics).
void
Camera::projectPoints(const std::vector<cv::Point3f>& objectPoints,
                      const cv::Mat& rvec,
                      const cv::Mat& tvec,
                      std::vector<cv::Point2f>& imagePoints) const
{
    // Reserve for the final size; the original reserved only
    // objectPoints.size(), which under-reserves when appending.
    imagePoints.reserve(imagePoints.size() + objectPoints.size());

    cv::Mat R0;
    cv::Rodrigues(rvec, R0);

    // Fixed-size 3x3 avoids the heap allocation of Eigen::MatrixXd.
    Eigen::Matrix3d R;
    R << R0.at<double>(0,0), R0.at<double>(0,1), R0.at<double>(0,2),
         R0.at<double>(1,0), R0.at<double>(1,1), R0.at<double>(1,2),
         R0.at<double>(2,0), R0.at<double>(2,1), R0.at<double>(2,2);

    Eigen::Vector3d t;
    t << tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);

    for (size_t i = 0; i < objectPoints.size(); ++i)
    {
        const cv::Point3f& objectPoint = objectPoints.at(i);

        // Rotate and translate into the camera frame.
        Eigen::Vector3d P;
        P << objectPoint.x, objectPoint.y, objectPoint.z;

        P = R * P + t;

        // Apply the camera's projection model.
        Eigen::Vector2d p;
        spaceToPlane(P, p);

        imagePoints.push_back(cv::Point2f(p(0), p(1)));
    }
}

}


================================================
FILE: camera_model/src/camera_models/CameraFactory.cc
================================================
#include "camodocal/camera_models/CameraFactory.h"

#include <boost/algorithm/string.hpp>


#include "camodocal/camera_models/CataCamera.h"
#include "camodocal/camera_models/EquidistantCamera.h"
#include "camodocal/camera_models/PinholeCamera.h"
#include "camodocal/camera_models/ScaramuzzaCamera.h"

#include "ceres/ceres.h"

namespace camodocal
{

// Lazily-created singleton instance; see instance().
boost::shared_ptr<CameraFactory> CameraFactory::m_instance;

// Private-style default constructor; use instance() to obtain the factory.
CameraFactory::CameraFactory()
{

}

// Return the process-wide factory, constructing it on first use.
// NOTE(review): lazy initialization is not thread-safe; ensure the first
// call happens before concurrent use.
boost::shared_ptr<CameraFactory>
CameraFactory::instance(void)
{
    if (!m_instance)
    {
        m_instance.reset(new CameraFactory);
    }

    return m_instance;
}

// Create a camera of the requested model type with default intrinsics,
// setting only its name and image size; unknown model types fall back to
// MEI (CataCamera).
CameraPtr
CameraFactory::generateCamera(Camera::ModelType modelType,
                              const std::string& cameraName,
                              cv::Size imageSize) const
{
    switch (modelType)
    {
    case Camera::KANNALA_BRANDT:
    {
        EquidistantCameraPtr camera(new EquidistantCamera);

        EquidistantCamera::Parameters params = camera->getParameters();
        params.cameraName() = cameraName;
        params.imageWidth() = imageSize.width;
        params.imageHeight() = imageSize.height;
        camera->setParameters(params);
        return camera;
    }
    case Camera::PINHOLE:
    {
        PinholeCameraPtr camera(new PinholeCamera);

        PinholeCamera::Parameters params = camera->getParameters();
        params.cameraName() = cameraName;
        params.imageWidth() = imageSize.width;
        params.imageHeight() = imageSize.height;
        camera->setParameters(params);
        return camera;
    }
    case Camera::SCARAMUZZA:
    {
        OCAMCameraPtr camera(new OCAMCamera);

        OCAMCamera::Parameters params = camera->getParameters();
        params.cameraName() = cameraName;
        params.imageWidth() = imageSize.width;
        params.imageHeight() = imageSize.height;
        camera->setParameters(params);
        return camera;
    }
    case Camera::MEI:
    default:
    {
        CataCameraPtr camera(new CataCamera);

        CataCamera::Parameters params = camera->getParameters();
        params.cameraName() = cameraName;
        params.imageWidth() = imageSize.width;
        params.imageHeight() = imageSize.height;
        camera->setParameters(params);
        return camera;
    }
    }
}

// Create and configure a camera from a YAML calibration file. The
// "model_type" key selects the model (case-insensitive); when the key is
// absent, MEI is assumed. Returns a null CameraPtr when the file cannot
// be opened or names an unknown model.
CameraPtr
CameraFactory::generateCameraFromYamlFile(const std::string& filename)
{
    cv::FileStorage fs(filename, cv::FileStorage::READ);

    if (!fs.isOpened())
    {
        return CameraPtr();
    }

    // Default to MEI when the file does not specify a model type.
    Camera::ModelType modelType = Camera::MEI;
    if (!fs["model_type"].isNone())
    {
        std::string sModelType;
        fs["model_type"] >> sModelType;

        if (boost::iequals(sModelType, "kannala_brandt"))
        {
            modelType = Camera::KANNALA_BRANDT;
        }
        else if (boost::iequals(sModelType, "mei"))
        {
            modelType = Camera::MEI;
        }
        else if (boost::iequals(sModelType, "scaramuzza"))
        {
            modelType = Camera::SCARAMUZZA;
        }
        else if (boost::iequals(sModelType, "pinhole"))
        {
            modelType = Camera::PINHOLE;
        }
        else
        {
            std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl;
            return CameraPtr();
        }
    }

    // Each branch reads the model-specific parameters from the same file.
    switch (modelType)
    {
    case Camera::KANNALA_BRANDT:
    {
        EquidistantCameraPtr camera(new EquidistantCamera);

        EquidistantCamera::Parameters params = camera->getParameters();
        params.readFromYamlFile(filename);
        camera->setParameters(params);
        return camera;
    }
    case Camera::PINHOLE:
    {
        PinholeCameraPtr camera(new PinholeCamera);

        PinholeCamera::Parameters params = camera->getParameters();
        params.readFromYamlFile(filename);
        camera->setParameters(params);
        return camera;
    }
    case Camera::SCARAMUZZA:
    {
        OCAMCameraPtr camera(new OCAMCamera);

        OCAMCamera::Parameters params = camera->getParameters();
        params.readFromYamlFile(filename);
        camera->setParameters(params);
        return camera;
    }
    case Camera::MEI:
    default:
    {
        CataCameraPtr camera(new CataCamera);

        CataCamera::Parameters params = camera->getParameters();
        params.readFromYamlFile(filename);
        camera->setParameters(params);
        return camera;
    }
    }

    // Unreachable: every switch branch returns above.
    return CameraPtr();
}

}



================================================
FILE: camera_model/src/camera_models/CataCamera.cc
================================================
#include "camodocal/camera_models/CataCamera.h"

#include <cmath>
#include <cstdio>
#include <eigen3/Eigen/Dense>
#include <iomanip>
#include <iostream>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "camodocal/gpl/gpl.h"

namespace camodocal
{

// Default-constructs MEI-model parameters with every coefficient zeroed.
CataCamera::Parameters::Parameters()
 : Camera::Parameters(MEI)
 , m_xi(0.0)
 , m_k1(0.0)
 , m_k2(0.0)
 , m_p1(0.0)
 , m_p2(0.0)
 , m_gamma1(0.0)
 , m_gamma2(0.0)
 , m_u0(0.0)
 , m_v0(0.0)
{

}

// Constructs a fully-specified MEI-model parameter set.
//
// \param xi              mirror parameter of the MEI catadioptric model
// \param k1, k2          radial distortion coefficients
// \param p1, p2          tangential distortion coefficients
// \param gamma1, gamma2  generalised focal lengths
// \param u0, v0          principal point
CataCamera::Parameters::Parameters(const std::string& cameraName,
                                   int w, int h,
                                   double xi,
                                   double k1, double k2,
                                   double p1, double p2,
                                   double gamma1, double gamma2,
                                   double u0, double v0)
 : Camera::Parameters(MEI, cameraName, w, h)
 , m_xi(xi)
 , m_k1(k1)
 , m_k2(k2)
 , m_p1(p1)
 , m_p2(p2)
 , m_gamma1(gamma1)
 , m_gamma2(gamma2)
 , m_u0(u0)
 , m_v0(v0)
{
}

// Mutable accessors: each returns a reference to the underlying field so
// calibration code can assign through it (e.g. params.xi() = 1.0).
double&
CataCamera::Parameters::xi(void)
{
    return m_xi;
}

double&
CataCamera::Parameters::k1(void)
{
    return m_k1;
}

double&
CataCamera::Parameters::k2(void)
{
    return m_k2;
}

double&
CataCamera::Parameters::p1(void)
{
    return m_p1;
}

double&
CataCamera::Parameters::p2(void)
{
    return m_p2;
}

double&
CataCamera::Parameters::gamma1(void)
{
    return m_gamma1;
}

double&
CataCamera::Parameters::gamma2(void)
{
    return m_gamma2;
}

double&
CataCamera::Parameters::u0(void)
{
    return m_u0;
}

double&
CataCamera::Parameters::v0(void)
{
    return m_v0;
}

// Read-only accessors: return the field by value for const parameter sets.
double
CataCamera::Parameters::xi(void) const
{
    return m_xi;
}

double
CataCamera::Parameters::k1(void) const
{
    return m_k1;
}

double
CataCamera::Parameters::k2(void) const
{
    return m_k2;
}

double
CataCamera::Parameters::p1(void) const
{
    return m_p1;
}

double
CataCamera::Parameters::p2(void) const
{
    return m_p2;
}

double
CataCamera::Parameters::gamma1(void) const
{
    return m_gamma1;
}

double
CataCamera::Parameters::gamma2(void) const
{
    return m_gamma2;
}

double
CataCamera::Parameters::u0(void) const
{
    return m_u0;
}

double
CataCamera::Parameters::v0(void) const
{
    return m_v0;
}

bool
CataCamera::Parameters::readFromYamlFile(const std::string& filename)
{
    cv::FileStorage fs(filename, cv::FileStorage::READ);

    if (!fs.isOpened())
    {
        return false;
    }

    if (!fs["model_type"].isNone())
    {
        std::string sModelType;
        fs["model_type"] >> sModelType;

        if (sModelType.compare("MEI") != 0)
        {
            return false;
        }
    }

    m_modelType = MEI;
    fs["camera_name"] >> m_cameraName;
    m_imageWidth = static_cast<int>(fs["image_width"]);
    m_imageHeight = static_cast<int>(fs["image_height"]);

    cv::FileNode n = fs["mirror_parameters"];
    m_xi = static_cast<double>(n["xi"]);

    n = fs["distortion_parameters"];
    m_k1 = static_cast<double>(n["k1"]);
    m_k2 = static_cast<double>(n["k2"]);
    m_p1 = static_cast<double>(n["p1"]);
    m_p2 = static_cast<double>(n["p2"]);

    n = fs["projection_parameters"];
    m_gamma1 = static_cast<double>(n["gamma1"]);
    m_gamma2 = static_cast<double>(n["gamma2"]);
    m_u0 = static_cast<double>(n["u0"]);
    m_v0 = static_cast<double>(n["v0"]);

    return true;
}

// Persists the parameter set to a YAML file in the layout that
// readFromYamlFile() expects (mirror / distortion / projection sections).
void
CataCamera::Parameters::writeToYamlFile(const std::string& filename) const
{
    cv::FileStorage fs(filename, cv::FileStorage::WRITE);

    fs << "model_type" << "MEI"
       << "camera_name" << m_cameraName
       << "image_width" << m_imageWidth
       << "image_height" << m_imageHeight;

    // mirror: xi
    fs << "mirror_parameters" << "{"
       << "xi" << m_xi << "}";

    // radial (k1, k2) and tangential (p1, p2) distortion
    fs << "distortion_parameters" << "{"
       << "k1" << m_k1
       << "k2" << m_k2
       << "p1" << m_p1
       << "p2" << m_p2 << "}";

    // projection: gamma1, gamma2, u0, v0
    fs << "projection_parameters" << "{"
       << "gamma1" << m_gamma1
       << "gamma2" << m_gamma2
       << "u0" << m_u0
       << "v0" << m_v0 << "}";

    fs.release();
}

// Field-wise copy assignment with an early-out self-assignment guard.
CataCamera::Parameters&
CataCamera::Parameters::operator=(const CataCamera::Parameters& other)
{
    if (this == &other)
    {
        return *this;
    }

    m_modelType   = other.m_modelType;
    m_cameraName  = other.m_cameraName;
    m_imageWidth  = other.m_imageWidth;
    m_imageHeight = other.m_imageHeight;
    m_xi     = other.m_xi;
    m_k1     = other.m_k1;
    m_k2     = other.m_k2;
    m_p1     = other.m_p1;
    m_p2     = other.m_p2;
    m_gamma1 = other.m_gamma1;
    m_gamma2 = other.m_gamma2;
    m_u0     = other.m_u0;
    m_v0     = other.m_v0;

    return *this;
}

// Pretty-prints all parameters for logging; numeric fields are printed in
// fixed notation with 10 decimal places from the mirror section onward.
std::ostream&
operator<< (std::ostream& out, const CataCamera::Parameters& params)
{
    out << "Camera Parameters:" << std::endl;
    out << "    model_type " << "MEI" << std::endl;
    out << "   camera_name " << params.m_cameraName << std::endl;
    out << "   image_width " << params.m_imageWidth << std::endl;
    out << "  image_height " << params.m_imageHeight << std::endl;

    out << "Mirror Parameters" << std::endl;
    out << std::fixed << std::setprecision(10);
    out << "            xi " << params.m_xi << std::endl;

    // radial distortion: k1, k2
    // tangential distortion: p1, p2
    out << "Distortion Parameters" << std::endl;
    out << "            k1 " << params.m_k1 << std::endl
        << "            k2 " << params.m_k2 << std::endl
        << "            p1 " << params.m_p1 << std::endl
        << "            p2 " << params.m_p2 << std::endl;

    // projection: gamma1, gamma2, u0, v0
    out << "Projection Parameters" << std::endl;
    out << "        gamma1 " << params.m_gamma1 << std::endl
        << "        gamma2 " << params.m_gamma2 << std::endl
        << "            u0 " << params.m_u0 << std::endl
        << "            v0 " << params.m_v0 << std::endl;

    return out;
}

// Default camera: identity inverse projection (unit focal, zero principal
// point offset) and no distortion.
CataCamera::CataCamera()
 : m_inv_K11(1.0)
 , m_inv_K13(0.0)
 , m_inv_K22(1.0)
 , m_inv_K23(0.0)
 , m_noDistortion(true)
{

}

// Constructs a camera from individual intrinsic values and caches the
// derived quantities (inverse projection entries, distortion flag).
CataCamera::CataCamera(const std::string& cameraName,
                       int imageWidth, int imageHeight,
                       double xi, double k1, double k2, double p1, double p2,
                       double gamma1, double gamma2, double u0, double v0)
 : mParameters(cameraName, imageWidth, imageHeight,
               xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0)
{
    // Distortion handling can be skipped entirely when every coefficient
    // is exactly zero.
    m_noDistortion = (mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
                     (mParameters.p1() == 0.0) && (mParameters.p2() == 0.0);

    // Cache the non-zero entries of K^-1 for fast pixel back-projection.
    m_inv_K11 = 1.0 / mParameters.gamma1();
    m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
    m_inv_K22 = 1.0 / mParameters.gamma2();
    m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
}

// Constructs a camera from a ready-made parameter set; caches the same
// derived quantities as the value-wise constructor above.
CataCamera::CataCamera(const CataCamera::Parameters& params)
 : mParameters(params)
{
    m_noDistortion = (mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
                     (mParameters.p1() == 0.0) && (mParameters.p2() == 0.0);

    m_inv_K11 = 1.0 / mParameters.gamma1();
    m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
    m_inv_K22 = 1.0 / mParameters.gamma2();
    m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
}

// Simple forwarding accessors onto the stored parameter set.
Camera::ModelType
CataCamera::modelType(void) const
{
    return mParameters.modelType();
}

const std::string&
CataCamera::cameraName(void) const
{
    return mParameters.cameraName();
}

int
CataCamera::imageWidth(void) const
{
    return mParameters.imageWidth();
}

int
CataCamera::imageHeight(void) const
{
    return mParameters.imageHeight();
}

// Estimates an initial guess for the intrinsics from planar chessboard
// observations, assuming xi = 1 and zero distortion: the principal point is
// set to the image centre, then each non-radial row of corners yields a
// candidate focal length gamma from a line-image fit, and the candidate with
// the smallest reprojection error over all views is kept.
//
// \param boardSize    chessboard corner grid dimensions
// \param objectPoints per-view 3D corner positions on the board
// \param imagePoints  per-view detected 2D corner positions
void
CataCamera::estimateIntrinsics(const cv::Size& boardSize,
                               const std::vector< std::vector<cv::Point3f> >& objectPoints,
                               const std::vector< std::vector<cv::Point2f> >& imagePoints)
{
    Parameters params = getParameters();

    // Principal point initialised at the image centre.
    double u0 = params.imageWidth() / 2.0;
    double v0 = params.imageHeight() / 2.0;

    double gamma0 = 0.0;
    double minReprojErr = std::numeric_limits<double>::max();

    std::vector<cv::Mat> rvecs, tvecs;
    rvecs.assign(objectPoints.size(), cv::Mat());
    tvecs.assign(objectPoints.size(), cv::Mat());

    params.xi() = 1.0;
    params.k1() = 0.0;
    params.k2() = 0.0;
    params.p1() = 0.0;
    params.p2() = 0.0;
    params.u0() = u0;
    params.v0() = v0;

    // Initialize gamma (focal length)
    // Use non-radial line image and xi = 1
    for (size_t i = 0; i < imagePoints.size(); ++i)
    {
        for (int r = 0; r < boardSize.height; ++r)
        {
            // Build the constraint matrix for the line image through this
            // row of corners, in image coordinates centred on (u0, v0).
            cv::Mat P(boardSize.width, 4, CV_64F);
            for (int c = 0; c < boardSize.width; ++c)
            {
                const cv::Point2f& imagePoint = imagePoints.at(i).at(r * boardSize.width + c);

                double u = imagePoint.x - u0;
                double v = imagePoint.y - v0;

                P.at<double>(c, 0) = u;
                P.at<double>(c, 1) = v;
                P.at<double>(c, 2) = 0.5;
                P.at<double>(c, 3) = -0.5 * (square(u) + square(v));
            }

            // Null-space solution of P * C = 0 (least-squares line-image fit).
            cv::Mat C;
            cv::SVD::solveZ(P, C);

            double t = square(C.at<double>(0)) + square(C.at<double>(1)) + C.at<double>(2) * C.at<double>(3);
            if (t < 0.0)
            {
                // Degenerate fit; skip this row.
                continue;
            }

            // check that line image is not radial
            double d = sqrt(1.0 / t);
            double nx = C.at<double>(0) * d;
            double ny = C.at<double>(1) * d;
            if (hypot(nx, ny) > 0.95)
            {
                continue;
            }

            double gamma = sqrt(C.at<double>(2) / C.at<double>(3));

            params.gamma1() = gamma;
            params.gamma2() = gamma;
            setParameters(params);

            // Score this gamma candidate: re-estimate every view's
            // extrinsics, then measure the total reprojection error.
            for (size_t j = 0; j < objectPoints.size(); ++j)
            {
                estimateExtrinsics(objectPoints.at(j), imagePoints.at(j), rvecs.at(j), tvecs.at(j));
            }

            double reprojErr = reprojectionError(objectPoints, imagePoints, rvecs, tvecs, cv::noArray());

            if (reprojErr < minReprojErr)
            {
                minReprojErr = reprojErr;
                gamma0 = gamma;
            }
        }
    }

    // gamma0 and minReprojErr are only updated together, so this detects
    // the case where no row produced a usable candidate.
    if (gamma0 <= 0.0 && minReprojErr >= std::numeric_limits<double>::max())
    {
        std::cout << "[" << params.cameraName() << "] "
                  << "# INFO: CataCamera model fails with given data. " << std::endl;

        return;
    }

    params.gamma1() = gamma0;
    params.gamma2() = gamma0;
    setParameters(params);
}

/**
 * \brief Lifts a point from the image plane to the unit sphere
 *
 * Back-projects the pixel through K^-1, undistorts it with a fixed-point
 * iteration of the distortion model, then lifts the normalised point onto
 * the unit sphere of the MEI model.
 *
 * \param p image coordinates
 * \param P coordinates of the point on the sphere
 */
void
CataCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
{
    // Lift points to the normalised plane via the cached K^-1 entries.
    double mx_d = m_inv_K11 * p(0) + m_inv_K13;
    double my_d = m_inv_K22 * p(1) + m_inv_K23;

    double mx_u, my_u;

    if (m_noDistortion)
    {
        mx_u = mx_d;
        my_u = my_d;
    }
    else
    {
        // Recursive (fixed-point) inverse distortion: iterate
        // p_u = p_d - D(p_u), starting from p_u = p_d - D(p_d).
        int n = 6;
        Eigen::Vector2d d_u;
        distortion(Eigen::Vector2d(mx_d, my_d), d_u);
        // Approximate value
        mx_u = mx_d - d_u(0);
        my_u = my_d - d_u(1);

        for (int i = 1; i < n; ++i)
        {
            distortion(Eigen::Vector2d(mx_u, my_u), d_u);
            mx_u = mx_d - d_u(0);
            my_u = my_d - d_u(1);
        }
    }

    // Lift normalised points to the sphere (inv_hslash); the xi == 1 case
    // uses the simplified closed form.
    double lambda;
    double xi = mParameters.xi();
    if (xi == 1.0)
    {
        lambda = 2.0 / (mx_u * mx_u + my_u * my_u + 1.0);
        P << lambda * mx_u, lambda * my_u, lambda - 1.0;
    }
    else
    {
        lambda = (xi + sqrt(1.0 + (1.0 - xi * xi) * (mx_u * mx_u + my_u * my_u))) / (1.0 + mx_u * mx_u + my_u * my_u);
        P << lambda * mx_u, lambda * my_u, lambda - xi;
    }
}

/**
 * \brief Lifts a point from the image plane to its projective ray
 *
 * Back-projects the pixel through K^-1, undistorts it with a fixed-point
 * iteration of the distortion model, then forms a projective ray for the
 * MEI model.
 *
 * \param p image coordinates
 * \param P coordinates of the projective ray
 */
void
CataCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
{
    // Lift points to the normalised plane via the cached K^-1 entries.
    double mx_d = m_inv_K11 * p(0) + m_inv_K13;
    double my_d = m_inv_K22 * p(1) + m_inv_K23;

    double mx_u, my_u;

    if (m_noDistortion)
    {
        mx_u = mx_d;
        my_u = my_d;
    }
    else
    {
        // Recursive (fixed-point) inverse distortion: iterate
        // p_u = p_d - D(p_u), starting from p_u = p_d - D(p_d).
        // More iterations than liftSphere() for extra accuracy.
        int n = 8;
        Eigen::Vector2d d_u;
        distortion(Eigen::Vector2d(mx_d, my_d), d_u);
        // Approximate value
        mx_u = mx_d - d_u(0);
        my_u = my_d - d_u(1);

        for (int i = 1; i < n; ++i)
        {
            distortion(Eigen::Vector2d(mx_u, my_u), d_u);
            mx_u = mx_d - d_u(0);
            my_u = my_d - d_u(1);
        }
    }

    // Obtain a projective ray; the xi == 1 case uses the simplified form.
    double xi = mParameters.xi();
    if (xi == 1.0)
    {
        P << mx_u, my_u, (1.0 - mx_u * mx_u - my_u * my_u) / 2.0;
    }
    else
    {
        double rho2_u = mx_u * mx_u + my_u * my_u;
        P << mx_u, my_u, 1.0 - xi * (rho2_u + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * rho2_u));
    }
}


/** 
 * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
 *
 * \param P 3D point coordinates
 * \param p return value, contains the image point coordinates
 */
void
CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const
{
    // MEI model: divide by z + xi * |P| to project onto the normalised plane.
    double denom = P(2) + mParameters.xi() * P.norm();

    Eigen::Vector2d m_u;
    m_u << P(0) / denom, P(1) / denom;

    Eigen::Vector2d m_d = m_u;
    if (!m_noDistortion)
    {
        // Apply distortion displacement.
        Eigen::Vector2d d_u;
        distortion(m_u, d_u);
        m_d = m_u + d_u;
    }

    // Apply the generalised projection matrix K.
    p << mParameters.gamma1() * m_d(0) + mParameters.u0(),
         mParameters.gamma2() * m_d(1) + mParameters.v0();
}

#if 0
// NOTE(review): this disabled variant composes the projection Jacobian with
// distortion partials dxdmx/dydmx/dxdmy/dydmy, but those four locals are
// declared and never assigned here (the distortion() overload that computes
// them is not called) — they would be read uninitialized in the products
// below. Fix that before re-enabling this code.
/** 
 * \brief Project a 3D point to the image plane and calculate Jacobian
 *
 * \param P 3D point coordinates
 * \param p return value, contains the image point coordinates
 */
void
CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
                        Eigen::Matrix<double,2,3>& J) const
{
    double xi = mParameters.xi();

    Eigen::Vector2d p_u, p_d;
    double norm, inv_denom;
    double dxdmx, dydmx, dxdmy, dydmy;

    norm = P.norm();
    // Project points to the normalised plane
    inv_denom = 1.0 / (P(2) + xi * norm);
    p_u << inv_denom * P(0), inv_denom * P(1);

    // Calculate jacobian of the normalised-plane point w.r.t. P
    inv_denom = inv_denom * inv_denom / norm;
    double dudx = inv_denom * (norm * P(2) + xi * (P(1) * P(1) + P(2) * P(2)));
    double dvdx = -inv_denom * xi * P(0) * P(1);
    double dudy = dvdx;
    double dvdy = inv_denom * (norm * P(2) + xi * (P(0) * P(0) + P(2) * P(2)));
    inv_denom = inv_denom * (-xi * P(2) - norm); // reuse variable
    double dudz = P(0) * inv_denom;
    double dvdz = P(1) * inv_denom;

    if (m_noDistortion)
    {
        p_d = p_u;
    }
    else
    {
        // Apply distortion
        Eigen::Vector2d d_u;
        distortion(p_u, d_u);
        p_d = p_u + d_u;
    }

    double gamma1 = mParameters.gamma1();
    double gamma2 = mParameters.gamma2();

    // Make the product of the jacobians
    // and add projection matrix jacobian
    inv_denom = gamma1 * (dudx * dxdmx + dvdx * dxdmy); // reuse
    dvdx = gamma2 * (dudx * dydmx + dvdx * dydmy);
    dudx = inv_denom;

    inv_denom = gamma1 * (dudy * dxdmx + dvdy * dxdmy); // reuse
    dvdy = gamma2 * (dudy * dydmx + dvdy * dydmy);
    dudy = inv_denom;

    inv_denom = gamma1 * (dudz * dxdmx + dvdz * dxdmy); // reuse
    dvdz = gamma2 * (dudz * dydmx + dvdz * dydmy);
    dudz = inv_denom;
    
    // Apply generalised projection matrix
    p << gamma1 * p_d(0) + mParameters.u0(),
         gamma2 * p_d(1) + mParameters.v0();

    J << dudx, dudy, dudz,
         dvdx, dvdy, dvdz;
}
#endif

/** 
 * \brief Projects an undistorted 2D point p_u to the image plane
 *
 * \param p_u 2D point coordinates
 * \param p   return value, contains the image point coordinates
 */
void
CataCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const
{
    Eigen::Vector2d p_d = p_u;

    if (!m_noDistortion)
    {
        // Apply distortion displacement.
        Eigen::Vector2d d_u;
        distortion(p_u, d_u);
        p_d = p_u + d_u;
    }

    // Apply the generalised projection matrix K.
    p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
         mParameters.gamma2() * p_d(1) + mParameters.v0();
}

/** 
 * \brief Apply distortion to input point (from the normalised plane)
 *  
 * \param p_u undistorted coordinates of point on the normalised plane
 * \param d_u distortion displacement; the distorted point is p_d = p_u + d_u
 */
void
CataCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const
{
    const double k1 = mParameters.k1();
    const double k2 = mParameters.k2();
    const double p1 = mParameters.p1();
    const double p2 = mParameters.p2();

    const double x = p_u(0);
    const double y = p_u(1);
    const double x2 = x * x;
    const double y2 = y * y;
    const double xy = x * y;
    const double r2 = x2 + y2;

    // Radial factor k1*r^2 + k2*r^4, shared by both axes.
    const double radial = k1 * r2 + k2 * r2 * r2;

    // Radial plus tangential (p1, p2) displacement.
    d_u << x * radial + 2.0 * p1 * xy + p2 * (r2 + 2.0 * x2),
           y * radial + 2.0 * p2 * xy + p1 * (r2 + 2.0 * y2);
}

/** 
 * \brief Apply distortion to input point (from the normalised plane)
 *        and calculate Jacobian
 *
 * \param p_u undistorted coordinates of point on the normalised plane
 * \param d_u distortion displacement; the distorted point is p_d = p_u + d_u
 * \param J   Jacobian of the distorted point w.r.t. p_u
 */
void
CataCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
                       Eigen::Matrix2d& J) const
{
    const double k1 = mParameters.k1();
    const double k2 = mParameters.k2();
    const double p1 = mParameters.p1();
    const double p2 = mParameters.p2();

    const double x = p_u(0);
    const double y = p_u(1);
    const double x2 = x * x;
    const double y2 = y * y;
    const double xy = x * y;
    const double r2 = x2 + y2;

    // Radial factor k1*r^2 + k2*r^4, shared by both axes.
    const double radial = k1 * r2 + k2 * r2 * r2;

    d_u << x * radial + 2.0 * p1 * xy + p2 * (r2 + 2.0 * x2),
           y * radial + 2.0 * p2 * xy + p1 * (r2 + 2.0 * y2);

    // Partial derivatives of p_d = p_u + d_u; the mixed terms are symmetric.
    const double dxdmx = 1.0 + radial + k1 * 2.0 * x2 + k2 * r2 * 4.0 * x2 + 2.0 * p1 * y + 6.0 * p2 * x;
    const double dydmx = k1 * 2.0 * x * y + k2 * 4.0 * r2 * x * y + p1 * 2.0 * x + 2.0 * p2 * y;
    const double dxdmy = dydmx;
    const double dydmy = 1.0 + radial + k1 * 2.0 * y2 + k2 * r2 * 4.0 * y2 + 6.0 * p1 * y + 2.0 * p2 * x;

    J << dxdmx, dxdmy,
         dydmx, dydmy;
}

// Builds undistortion remap tables covering the full image: every output
// pixel is back-projected through a 1/fScale-scaled K^-1, lifted onto a
// projective ray of the MEI model, and reprojected with spaceToPlane().
//
// \param map1, map2 output maps for cv::remap (CV_32FC1)
// \param fScale     scale factor applied to the inverse projection
void
CataCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const
{
    cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());

    cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
    cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);

    const double xi = mParameters.xi();

    for (int v = 0; v < imageSize.height; ++v)
    {
        for (int u = 0; u < imageSize.width; ++u)
        {
            // Back-project pixel (u, v) to the normalised plane.
            double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
            double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;

            double d2 = mx_u * mx_u + my_u * my_u;

            // Form the projective ray (same expression as liftProjective).
            Eigen::Vector3d P;
            P << mx_u, my_u, 1.0 - xi * (d2 + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * d2));

            Eigen::Vector2d p;
            spaceToPlane(P, p);

            mapX.at<float>(v,u) = p(0);
            mapY.at<float>(v,u) = p(1);
        }
    }

    cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
}

// Builds remap tables that rectify the image onto a pinhole camera with
// intrinsics K_rect, optionally rotated by rmat: every rectified pixel is
// back-projected through K_rect^-1 and R^-1, then reprojected with the full
// MEI model via spaceToPlane().
//
// \param map1, map2 output maps for cv::remap (CV_32FC1)
// \param fx, fy     rectified focal lengths; -1 falls back to gamma1/gamma2
// \param imageSize  rectified image size; (0,0) uses the camera's own size
// \param cx, cy     rectified principal point; -1 centres it in the image
// \param rmat       rotation applied to the rectified viewing rays
// \return           the rectified camera matrix K_rect
cv::Mat
CataCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
                                    float fx, float fy,
                                    cv::Size imageSize,
                                    float cx, float cy,
                                    cv::Mat rmat) const
{
    if (imageSize == cv::Size(0, 0))
    {
        imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
    }

    cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
    cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);

    Eigen::Matrix3f K_rect;

    // Default the principal point to the image centre when not supplied.
    if (cx == -1.0f && cy == -1.0f)
    {
        K_rect << fx, 0, imageSize.width / 2,
                  0, fy, imageSize.height / 2,
                  0, 0, 1;
    }
    else
    {
        K_rect << fx, 0, cx,
                  0, fy, cy,
                  0, 0, 1;
    }

    // Default the focal lengths to the camera's own gammas when not supplied.
    if (fx == -1.0f || fy == -1.0f)
    {
        K_rect(0,0) = mParameters.gamma1();
        K_rect(1,1) = mParameters.gamma2();
    }

    Eigen::Matrix3f K_rect_inv = K_rect.inverse();

    Eigen::Matrix3f R, R_inv;
    cv::cv2eigen(rmat, R);
    R_inv = R.inverse();

    for (int v = 0; v < imageSize.height; ++v)
    {
        for (int u = 0; u < imageSize.width; ++u)
        {
            // Rectified pixel -> viewing ray in the original camera frame.
            Eigen::Vector3f xo;
            xo << u, v, 1;

            Eigen::Vector3f uo = R_inv * K_rect_inv * xo;

            Eigen::Vector2d p;
            spaceToPlane(uo.cast<double>(), p);

            mapX.at<float>(v,u) = p(0);
            mapY.at<float>(v,u) = p(1);
        }
    }

    cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);

    cv::Mat K_rect_cv;
    cv::eigen2cv(K_rect, K_rect_cv);
    return K_rect_cv;
}

// Number of intrinsic parameters:
// xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0.
int
CataCamera::parameterCount(void) const
{
    return 9;
}

// Read-only access to the current parameter set.
const CataCamera::Parameters&
CataCamera::getParameters(void) const
{
    return mParameters;
}

// Installs a new parameter set and refreshes the cached derived values
// (distortion flag and the non-zero entries of K^-1).
void
CataCamera::setParameters(const CataCamera::Parameters& parameters)
{
    mParameters = parameters;

    // Distortion handling can be skipped when every coefficient is zero.
    m_noDistortion = (mParameters.k1() == 0.0) && (mParameters.k2() == 0.0) &&
                     (mParameters.p1() == 0.0) && (mParameters.p2() == 0.0);

    m_inv_K11 = 1.0 / mParameters.gamma1();
    m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
    m_inv_K22 = 1.0 / mParameters.gamma2();
    m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
}

// Loads the 9 intrinsics from a flat vector in the canonical order
// (xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0).
// A wrong-sized vector is silently ignored, matching the other models.
void
CataCamera::readParameters(const std::vector<double>& parameterVec)
{
    if (static_cast<int>(parameterVec.size()) != parameterCount())
    {
        return;
    }

    Parameters params = getParameters();

    params.xi()     = parameterVec.at(0);
    params.k1()     = parameterVec.at(1);
    params.k2()     = parameterVec.at(2);
    params.p1()     = parameterVec.at(3);
    params.p2()     = parameterVec.at(4);
    params.gamma1() = parameterVec.at(5);
    params.gamma2() = parameterVec.at(6);
    params.u0()     = parameterVec.at(7);
    params.v0()     = parameterVec.at(8);

    setParameters(params);
}

void
CataCamera::writeParameters(std::vector<double>& parameterVec) const
{
    parameterVec.resize(parameterCount());
    parameterVec.at(0) = mParameters.xi();
    parameterVec.at(1) = mParameters.k1();
    parameterVec.at(2) = mParameters.k2();
    parameterVec.at(3) = mParameters.p1();
    parameterVec.at(4) = mParameters.p2();
    parameterVec.at(5) = mParameters.gamma1();
    parameterVec.at(6) = mParameters.gamma2();
    parameterVec.at(7) = mParameters.u0();
    parameterVec.at(8) = mParameters.v0();
}

// Convenience wrapper: persists the current parameters to a YAML file.
void
CataCamera::writeParametersToYamlFile(const std::string& filename) const
{
    mParameters.writeToYamlFile(filename);
}

// Renders the parameters to a string using the stream operator above.
std::string
CataCamera::parametersToString(void) const
{
    std::ostringstream oss;
    oss << mParameters;

    return oss.str();
}

}


================================================
FILE: camera_model/src/camera_models/CostFunctionFactory.cc
================================================
#include "camodocal/camera_models/CostFunctionFactory.h"

#include "ceres/ceres.h"
#include "camodocal/camera_models/CataCamera.h"
#include "camodocal/camera_models/EquidistantCamera.h"
#include "camodocal/camera_models/PinholeCamera.h"
#include "camodocal/camera_models/ScaramuzzaCamera.h"

namespace camodocal
{

// Composes the world-to-camera transform (q, t) from a camera-to-odometry
// extrinsic (q_cam_odo, t_cam_odo) and an odometry pose (p_odo, att_odo).
//
// att_odo[0..2] are rotation angles about z, y and x respectively, applied
// in z-y-x order; presumably yaw/pitch/roll — confirm against the caller.
// q_cam_odo is indexed with [3] as the scalar part, i.e. Eigen coefficient
// order (x, y, z, w); the output q is written back in the same convention.
// When optimize_cam_odo_z is false, the z component of t_cam_odo is held
// out of the translation composition.
template<typename T>
void
worldToCameraTransform(const T* const q_cam_odo, const T* const t_cam_odo,
                       const T* const p_odo, const T* const att_odo,
                       T* q, T* t, bool optimize_cam_odo_z = true)
{
    // Inverse elemental rotations (negated half-angle) about z, y and x.
    Eigen::Quaternion<T> q_z_inv(cos(att_odo[0] / T(2)), T(0), T(0), -sin(att_odo[0] / T(2)));
    Eigen::Quaternion<T> q_y_inv(cos(att_odo[1] / T(2)), T(0), -sin(att_odo[1] / T(2)), T(0));
    Eigen::Quaternion<T> q_x_inv(cos(att_odo[2] / T(2)), -sin(att_odo[2] / T(2)), T(0), T(0));

    Eigen::Quaternion<T> q_zyx_inv = q_x_inv * q_y_inv * q_z_inv;

    // Ceres quaternion convention: (w, x, y, z).
    T q_odo[4] = {q_zyx_inv.w(), q_zyx_inv.x(), q_zyx_inv.y(), q_zyx_inv.z()};

    // Conjugate of q_cam_odo, converted from Eigen (x, y, z, w) storage to
    // Ceres (w, x, y, z) order.
    T q_odo_cam[4] = {q_cam_odo[3], -q_cam_odo[0], -q_cam_odo[1], -q_cam_odo[2]};

    T q0[4];
    ceres::QuaternionProduct(q_odo_cam, q_odo, q0);

    T t0[3];
    T t_odo[3] = {p_odo[0], p_odo[1], p_odo[2]};

    ceres::QuaternionRotatePoint(q_odo, t_odo, t0);

    t0[0] += t_cam_odo[0];
    t0[1] += t_cam_odo[1];

    // z offset of the extrinsic is optionally excluded from optimisation.
    if (optimize_cam_odo_z)
    {
        t0[2] += t_cam_odo[2];
    }

    ceres::QuaternionRotatePoint(q_odo_cam, t0, t);
    t[0] = -t[0];
    t[1] = -t[1];
    t[2] = -t[2];

    // Convert quaternion from Ceres convention (w, x, y, z)
    // to Eigen convention (x, y, z, w)
    q[0] = q0[1];
    q[1] = q0[2];
    q[2] = q0[3];
    q[3] = q0[0];
}

// Reprojection residual for a fixed 3D point observed at a fixed pixel.
// Two parameterisations are supported via the two operator() overloads:
//   (1) camera intrinsics and camera pose (q, t) as variables;
//   (2) camera-odometry extrinsics and odometry pose as variables, with the
//       intrinsics fixed at construction time.
template<class CameraT>
class ReprojectionError1
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    ReprojectionError1(const Eigen::Vector3d& observed_P,
                       const Eigen::Vector2d& observed_p)
        : m_observed_P(observed_P), m_observed_p(observed_p)
        , m_sqrtPrecisionMat(Eigen::Matrix2d::Identity()) {}

    ReprojectionError1(const Eigen::Vector3d& observed_P,
                       const Eigen::Vector2d& observed_p,
                       const Eigen::Matrix2d& sqrtPrecisionMat)
        : m_observed_P(observed_P), m_observed_p(observed_p)
        , m_sqrtPrecisionMat(sqrtPrecisionMat) {}

    // NOTE(review): this constructor leaves m_sqrtPrecisionMat default-
    // constructed (Eigen does not value-initialise it); it is only safe in
    // combination with the odometry operator() below, which never reads the
    // precision matrix — confirm callers pair them accordingly.
    ReprojectionError1(const std::vector<double>& intrinsic_params,
                       const Eigen::Vector3d& observed_P,
                       const Eigen::Vector2d& observed_p)
        : m_intrinsic_params(intrinsic_params)
        , m_observed_P(observed_P), m_observed_p(observed_p) {}

    // variables: camera intrinsics and camera extrinsics
    template <typename T>
    bool operator()(const T* const intrinsic_params,
                    const T* const q,
                    const T* const t,
                    T* residuals) const
    {
        Eigen::Matrix<T, 3, 1> P = m_observed_P.cast<T>();

        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params, q, t, P, predicted_p);

        Eigen::Matrix<T, 2, 1> e = predicted_p - m_observed_p.cast<T>();

        // Whiten the residual with the square root of the precision matrix.
        Eigen::Matrix<T, 2, 1> e_weighted = m_sqrtPrecisionMat.cast<T>() * e;

        residuals[0] = e_weighted(0);
        residuals[1] = e_weighted(1);

        return true;
    }

    // variables: camera-odometry transforms and odometry poses
    template <typename T>
    bool operator()(const T* const q_cam_odo, const T* const t_cam_odo,
                    const T* const p_odo, const T* const att_odo,
                    T* residuals) const
    {
        T q[4], t[3];
        worldToCameraTransform(q_cam_odo, t_cam_odo, p_odo, att_odo, q, t);

        Eigen::Matrix<T, 3, 1> P = m_observed_P.cast<T>();

        // Promote the stored double intrinsics to T (jets during autodiff).
        std::vector<T> intrinsic_params(m_intrinsic_params.begin(), m_intrinsic_params.end());

        // project 3D object point to the image plane
        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params.data(), q, t, P, predicted_p);

        residuals[0] = predicted_p(0) - T(m_observed_p(0));
        residuals[1] = predicted_p(1) - T(m_observed_p(1));

        return true;
    }

//private:
    // camera intrinsics
    std::vector<double> m_intrinsic_params;

    // observed 3D point
    Eigen::Vector3d m_observed_P;

    // observed 2D point
    Eigen::Vector2d m_observed_p;

    // square root of precision matrix
    Eigen::Matrix2d m_sqrtPrecisionMat;
};

// variables: camera extrinsics, 3D point
// Reprojection residual with the camera pose (q, t) and the 3D point as
// optimisation variables; the intrinsics are fixed at construction time.
template<class CameraT>
class ReprojectionError2
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    ReprojectionError2(const std::vector<double>& intrinsic_params,
                       const Eigen::Vector2d& observed_p)
        : m_intrinsic_params(intrinsic_params), m_observed_p(observed_p) {}

    template <typename T>
    bool operator()(const T* const q, const T* const t,
                    const T* const point, T* residuals) const
    {
        Eigen::Matrix<T, 3, 1> P;
        P(0) = T(point[0]);
        P(1) = T(point[1]);
        P(2) = T(point[2]);

        // Promote the stored double intrinsics to T (jets during autodiff).
        std::vector<T> intrinsic_params(m_intrinsic_params.begin(), m_intrinsic_params.end());

        // project 3D object point to the image plane
        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params.data(), q, t, P, predicted_p);

        residuals[0] = predicted_p(0) - T(m_observed_p(0));
        residuals[1] = predicted_p(1) - T(m_observed_p(1));

        return true;
    }

private:
    // camera intrinsics
    std::vector<double> m_intrinsic_params;

    // observed 2D point
    Eigen::Vector2d m_observed_p;
};

// Reprojection error for camera-odometry ("hand-eye") calibration.
// Depending on which constructor / operator() overload the factory pairs,
// different subsets of {intrinsics, camera-odometry transform, odometry
// pose, 3D point} are optimized while the rest are held fixed.
// worldToCameraTransform() is the helper defined earlier in this file.
template<class CameraT>
class ReprojectionError3
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    // Only the 2D observation is fixed; everything else is optimized.
    ReprojectionError3(const Eigen::Vector2d& observed_p)
        : m_observed_p(observed_p)
        , m_sqrtPrecisionMat(Eigen::Matrix2d::Identity())
        , m_optimize_cam_odo_z(true) {}

    // As above, with a square-root precision (information) matrix that
    // whitens the 2D residual.
    ReprojectionError3(const Eigen::Vector2d& observed_p,
                       const Eigen::Matrix2d& sqrtPrecisionMat)
        : m_observed_p(observed_p)
        , m_sqrtPrecisionMat(sqrtPrecisionMat)
        , m_optimize_cam_odo_z(true) {}

    // Intrinsics fixed; camera-odometry transform, odometry pose and
    // 3D point optimized.
    ReprojectionError3(const std::vector<double>& intrinsic_params,
                       const Eigen::Vector2d& observed_p)
        : m_intrinsic_params(intrinsic_params)
        , m_observed_p(observed_p)
        , m_sqrtPrecisionMat(Eigen::Matrix2d::Identity())
        , m_optimize_cam_odo_z(true) {}

    // As above, with a residual-whitening matrix.
    ReprojectionError3(const std::vector<double>& intrinsic_params,
                       const Eigen::Vector2d& observed_p,
                       const Eigen::Matrix2d& sqrtPrecisionMat)
        : m_intrinsic_params(intrinsic_params)
        , m_observed_p(observed_p)
        , m_sqrtPrecisionMat(sqrtPrecisionMat)
        , m_optimize_cam_odo_z(true) {}


    // Intrinsics and odometry pose fixed; camera-odometry transform and
    // 3D point optimized. optimize_cam_odo_z selects whether the z
    // component of the camera-odometry translation is a free parameter.
    ReprojectionError3(const std::vector<double>& intrinsic_params,
                       const Eigen::Vector3d& odo_pos,
                       const Eigen::Vector3d& odo_att,
                       const Eigen::Vector2d& observed_p,
                       bool optimize_cam_odo_z)
        : m_intrinsic_params(intrinsic_params)
        , m_odo_pos(odo_pos), m_odo_att(odo_att)
        , m_observed_p(observed_p)
        // FIX: previously left uninitialized. Eigen fixed-size matrices are
        // not value-initialized by default, so the weighted operator()
        // overloads would read indeterminate values if paired with this
        // constructor. Identity makes the weighting a no-op.
        , m_sqrtPrecisionMat(Eigen::Matrix2d::Identity())
        , m_optimize_cam_odo_z(optimize_cam_odo_z) {}

    // Everything fixed except the 3D point.
    ReprojectionError3(const std::vector<double>& intrinsic_params,
                       const Eigen::Quaterniond& cam_odo_q,
                       const Eigen::Vector3d& cam_odo_t,
                       const Eigen::Vector3d& odo_pos,
                       const Eigen::Vector3d& odo_att,
                       const Eigen::Vector2d& observed_p)
        : m_intrinsic_params(intrinsic_params)
        , m_cam_odo_q(cam_odo_q), m_cam_odo_t(cam_odo_t)
        , m_odo_pos(odo_pos), m_odo_att(odo_att)
        , m_observed_p(observed_p)
        // FIX: same uninitialized-member issue as above.
        , m_sqrtPrecisionMat(Eigen::Matrix2d::Identity())
        , m_optimize_cam_odo_z(true) {}

    // variables: camera intrinsics, camera-to-odometry transform,
    //            odometry extrinsics, 3D point
    template <typename T>
    bool operator()(const T* const intrinsic_params,
                    const T* const q_cam_odo, const T* const t_cam_odo,
                    const T* const p_odo, const T* const att_odo,
                    const T* const point, T* residuals) const
    {
        // Compose the world-to-camera pose from the odometry pose and the
        // camera-odometry transform.
        T q[4], t[3];
        worldToCameraTransform(q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z);

        Eigen::Matrix<T, 3, 1> P(point[0], point[1], point[2]);

        // project 3D object point to the image plane
        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params, q, t, P, predicted_p);

        // Whiten the reprojection error with the sqrt precision matrix.
        Eigen::Matrix<T, 2, 1> err = predicted_p - m_observed_p.cast<T>();
        Eigen::Matrix<T, 2, 1> err_weighted = m_sqrtPrecisionMat.cast<T>() * err;

        residuals[0] = err_weighted(0);
        residuals[1] = err_weighted(1);

        return true;
    }

    // variables: camera-to-odometry transform, 3D point
    template <typename T>
    bool operator()(const T* const q_cam_odo, const T* const t_cam_odo,
                    const T* const point, T* residuals) const
    {
        // Fixed odometry pose, promoted to the scalar type T.
        T p_odo[3] = {T(m_odo_pos(0)), T(m_odo_pos(1)), T(m_odo_pos(2))};
        T att_odo[3] = {T(m_odo_att(0)), T(m_odo_att(1)), T(m_odo_att(2))};
        T q[4], t[3];

        worldToCameraTransform(q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z);

        // Fixed intrinsics, promoted to T.
        std::vector<T> intrinsic_params(m_intrinsic_params.begin(), m_intrinsic_params.end());
        Eigen::Matrix<T, 3, 1> P(point[0], point[1], point[2]);

        // project 3D object point to the image plane
        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params.data(), q, t, P, predicted_p);

        // Unweighted reprojection error.
        residuals[0] = predicted_p(0) - T(m_observed_p(0));
        residuals[1] = predicted_p(1) - T(m_observed_p(1));

        return true;
    }

    // variables: camera-to-odometry transform, odometry extrinsics, 3D point
    template <typename T>
    bool operator()(const T* const q_cam_odo, const T* const t_cam_odo,
                    const T* const p_odo, const T* const att_odo,
                    const T* const point, T* residuals) const
    {
        T q[4], t[3];
        worldToCameraTransform(q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z);

        std::vector<T> intrinsic_params(m_intrinsic_params.begin(), m_intrinsic_params.end());
        Eigen::Matrix<T, 3, 1> P(point[0], point[1], point[2]);

        // project 3D object point to the image plane
        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params.data(), q, t, P, predicted_p);

        // Whitened reprojection error.
        Eigen::Matrix<T, 2, 1> err = predicted_p - m_observed_p.cast<T>();
        Eigen::Matrix<T, 2, 1> err_weighted = m_sqrtPrecisionMat.cast<T>() * err;

        residuals[0] = err_weighted(0);
        residuals[1] = err_weighted(1);

        return true;
    }

    // variables: 3D point
    template <typename T>
    bool operator()(const T* const point, T* residuals) const
    {
        // Eigen stores quaternion coefficients internally as (x, y, z, w);
        // they are forwarded here in that storage order.
        T q_cam_odo[4] = {T(m_cam_odo_q.coeffs()(0)), T(m_cam_odo_q.coeffs()(1)), T(m_cam_odo_q.coeffs()(2)), T(m_cam_odo_q.coeffs()(3))};
        T t_cam_odo[3] = {T(m_cam_odo_t(0)), T(m_cam_odo_t(1)), T(m_cam_odo_t(2))};
        T p_odo[3] = {T(m_odo_pos(0)), T(m_odo_pos(1)), T(m_odo_pos(2))};
        T att_odo[3] = {T(m_odo_att(0)), T(m_odo_att(1)), T(m_odo_att(2))};
        T q[4], t[3];

        worldToCameraTransform(q_cam_odo, t_cam_odo, p_odo, att_odo, q, t, m_optimize_cam_odo_z);

        std::vector<T> intrinsic_params(m_intrinsic_params.begin(), m_intrinsic_params.end());
        Eigen::Matrix<T, 3, 1> P(point[0], point[1], point[2]);

        // project 3D object point to the image plane
        Eigen::Matrix<T, 2, 1> predicted_p;
        CameraT::spaceToPlane(intrinsic_params.data(), q, t, P, predicted_p);

        // Unweighted reprojection error.
        residuals[0] = predicted_p(0) - T(m_observed_p(0));
        residuals[1] = predicted_p(1) - T(m_observed_p(1));

        return true;
    }

private:
    // camera intrinsics
    std::vector<double> m_intrinsic_params;

    // observed camera-odometry transform
    Eigen::Quaterniond m_cam_odo_q;
    Eigen::Vector3d m_cam_odo_t;

    // observed odometry
    Eigen::Vector3d m_odo_pos;
    Eigen::Vector3d m_odo_att;

    // observed 2D point
    Eigen::Vector2d m_observed_p;

    // square root of the 2x2 precision matrix used to whiten residuals
    Eigen::Matrix2d m_sqrtPrecisionMat;

    // whether the z component of the camera-odometry translation is optimized
    bool m_optimize_cam_odo_z;
};

// variables: camera intrinsics and camera extrinsics
template<class CameraT>
class StereoReprojectionError
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    StereoReprojectionError(const Eigen::Vector3d& observed_P,
                            const Eigen::Vector2d& observed_p_l,
                            const Eigen::Vector2d& observed_p_r)
        : m_observed_P(observed_P)
        , m_observed_p_l(observed_p_l)
        , m_observed_p_r(observed_p_r)
    {

    }

    template <typename T>
    bool operator()(const T* const intrinsic_params_l,
                    const T* const intrinsic_params_r,
                    const T* const q_l,
                    const T* const t_l,
                    const T* const q_l_r,
                    const T* const t_l_r,
                    T* residuals) const
    {
        Eigen::Matrix<T, 3, 1> P;
        P(0) = T(m_observed_P(0));
        P(1) = T(m_observed_P(1));
        P(2) = T(m_observed_P(2));

        Eigen::Matrix<T, 2, 1> predicted_p_l;
        CameraT::spaceToPlane(intrinsic_params_l, q_l, t_l, P, predicted_p_l);

        Eigen::Quaternion<T> q_r = Eigen::Quaternion<T>(q_l_r) * Eigen::Quaternion<T>(q_l);

        Eigen::Matrix<T, 3, 1> t_r;
        t_r(0) = t_l[0];
        t_r(1) = t_l[1];
        t_r(2) = t_l[2];

        t_r = Eigen::Quaternion<T>(q_l_r) * t_r;
        t_r(0) += t_l_r[0];
        t_r(1) += t_l_r[1];
        t_r(2) += t_l_r[2];

        Eigen::Matrix<T, 2, 1> predicted_p_r;
        CameraT::spaceToPlane(intrinsic_params_r, q_r.coeffs().data(), t_r.data(), P, predicted_p_r);

        residuals[0] = predicted_p_l(0) - T(m_observed_p_l(0));
        residuals[1] = predicted_p_l(1) - T(m_observed_p_l(1));
        residuals[2] = predicted_p_r(0) - T(m_observed_p_r(0));
        residuals[3] = predicted_p_r(1) - T(m_observed_p_r(1));

        return true;
    }

private:
    // observed 3D point
    Eigen::Vector3d m_observed_P;

    // observed 2D point
    Eigen::Vector2d m_observed_p_l;
    Eigen::Vector2d m_observed_p_r;
};

template <class CameraT>
class ComprehensionError {
  public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    ComprehensionError(const Eigen::Vector3d& observed_P, const Eigen::Vector2d& observed_p)
        : m_observed_P(observed_P),
          m_observed_p(observed_p),
          m_sqrtPrecisionMat(Eigen::Matrix2d::Identity()) {
    }

    template <typename T>
    bool operator()(const T* const intrinsic_params, const T* const q, const T* const t,
                    T* residuals) const {
        {
            Eigen::Matrix<T, 2, 1> p = m_observed_p.cast<T>();
        
            Eigen::Matrix<T, 3, 1> predicted_img_P;
            CameraT::LiftToSphere(intrinsic_params, p, predicted_img_P);
                
            Eigen::Matrix<T, 2, 1> predicted_p;
            CameraT::SphereToPlane(intrin
Download .txt
gitextract_7sq47s8w/

├── LICENCE
├── README.md
├── camera_model/
│   ├── CMakeLists.txt
│   ├── cmake/
│   │   └── FindEigen.cmake
│   ├── include/
│   │   └── camodocal/
│   │       ├── calib/
│   │       │   └── CameraCalibration.h
│   │       ├── camera_models/
│   │       │   ├── Camera.h
│   │       │   ├── CameraFactory.h
│   │       │   ├── CataCamera.h
│   │       │   ├── CostFunctionFactory.h
│   │       │   ├── EquidistantCamera.h
│   │       │   ├── PinholeCamera.h
│   │       │   └── ScaramuzzaCamera.h
│   │       ├── chessboard/
│   │       │   ├── Chessboard.h
│   │       │   ├── ChessboardCorner.h
│   │       │   ├── ChessboardQuad.h
│   │       │   └── Spline.h
│   │       ├── gpl/
│   │       │   ├── EigenQuaternionParameterization.h
│   │       │   ├── EigenUtils.h
│   │       │   └── gpl.h
│   │       └── sparse_graph/
│   │           └── Transform.h
│   ├── instruction
│   ├── package.xml
│   ├── readme.md
│   └── src/
│       ├── calib/
│       │   └── CameraCalibration.cc
│       ├── camera_models/
│       │   ├── Camera.cc
│       │   ├── CameraFactory.cc
│       │   ├── CataCamera.cc
│       │   ├── CostFunctionFactory.cc
│       │   ├── EquidistantCamera.cc
│       │   ├── PinholeCamera.cc
│       │   └── ScaramuzzaCamera.cc
│       ├── chessboard/
│       │   └── Chessboard.cc
│       ├── gpl/
│       │   ├── EigenQuaternionParameterization.cc
│       │   └── gpl.cc
│       ├── intrinsic_calib.cc
│       └── sparse_graph/
│           └── Transform.cc
├── config/
│   ├── advio_12_config.yaml
│   ├── ol_market1_config.yaml
│   ├── rpvio_rviz_config.rviz
│   └── rpvio_sim_config.yaml
├── plane_segmentation/
│   ├── RecoverPlane_perpendicular.py
│   ├── crf_inference.py
│   ├── data_loader_new.py
│   ├── inference.py
│   ├── net.py
│   ├── openloris.txt
│   ├── pretrained_model/
│   │   ├── model.data-00000-of-00001
│   │   ├── model.index
│   │   └── model.meta
│   ├── requirements.txt
│   ├── train.py
│   └── utils.py
├── rpvio.patch
├── rpvio_estimator/
│   ├── CMakeLists.txt
│   ├── cmake/
│   │   └── FindEigen.cmake
│   ├── launch/
│   │   ├── advio_12.launch
│   │   ├── ol_market1.launch
│   │   ├── rpvio_rviz.launch
│   │   └── rpvio_sim.launch
│   ├── package.xml
│   └── src/
│       ├── estimator.cpp
│       ├── estimator.h
│       ├── estimator_node.cpp
│       ├── factor/
│       │   ├── homography_factor.h
│       │   ├── imu_factor.h
│       │   ├── integration_base.h
│       │   ├── marginalization_factor.cpp
│       │   ├── marginalization_factor.h
│       │   ├── pose_local_parameterization.cpp
│       │   ├── pose_local_parameterization.h
│       │   ├── projection_factor.cpp
│       │   ├── projection_factor.h
│       │   ├── projection_td_factor.cpp
│       │   └── projection_td_factor.h
│       ├── feature_manager.cpp
│       ├── feature_manager.h
│       ├── initial/
│       │   ├── initial_aligment.cpp
│       │   ├── initial_alignment.h
│       │   ├── initial_ex_rotation.cpp
│       │   ├── initial_ex_rotation.h
│       │   ├── initial_sfm.cpp
│       │   ├── initial_sfm.h
│       │   ├── solve_5pts.cpp
│       │   └── solve_5pts.h
│       ├── parameters.cpp
│       ├── parameters.h
│       └── utility/
│           ├── CameraPoseVisualization.cpp
│           ├── CameraPoseVisualization.h
│           ├── tic_toc.h
│           ├── utility.cpp
│           ├── utility.h
│           ├── visualization.cpp
│           └── visualization.h
├── rpvio_feature_tracker/
│   ├── CMakeLists.txt
│   ├── cmake/
│   │   └── FindEigen.cmake
│   ├── package.xml
│   └── src/
│       ├── feature_tracker.cpp
│       ├── feature_tracker.h
│       ├── feature_tracker_node.cpp
│       ├── parameters.cpp
│       ├── parameters.h
│       └── tic_toc.h
└── scripts/
    ├── convert_vins_to_tum.py
    ├── run_advio_12.sh
    ├── run_ol_market1.sh
    └── run_rpvio_sim.sh
Download .txt
SYMBOL INDEX (205 symbols across 65 files)

FILE: camera_model/include/camodocal/calib/CameraCalibration.h
  function namespace (line 8) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/Camera.h
  function namespace (line 9) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/CameraFactory.h
  function namespace (line 9) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/CataCamera.h
  function namespace (line 10) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/CostFunctionFactory.h
  function namespace (line 9) | namespace ceres
  function namespace (line 14) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/EquidistantCamera.h
  function namespace (line 10) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/PinholeCamera.h
  function namespace (line 10) | namespace camodocal

FILE: camera_model/include/camodocal/camera_models/ScaramuzzaCamera.h
  function namespace (line 10) | namespace camodocal

FILE: camera_model/include/camodocal/chessboard/Chessboard.h
  function namespace (line 7) | namespace camodocal

FILE: camera_model/include/camodocal/chessboard/ChessboardCorner.h
  function namespace (line 7) | namespace camodocal

FILE: camera_model/include/camodocal/chessboard/ChessboardQuad.h
  function namespace (line 8) | namespace camodocal

FILE: camera_model/include/camodocal/chessboard/Spline.h
  type BC_type (line 33) | enum BC_type {
  type Spline_type (line 39) | enum Spline_type {
  type std (line 54) | typedef std::vector<std::pair<double, double> > base;
  type base (line 55) | typedef base::const_iterator const_iterator;
  function const_iterator (line 58) | const_iterator begin() const { return base::begin(); }
  function clear (line 60) | void clear() { _valid = false; base::clear(); _data.clear(); }
  function size (line 61) | size_t size() const { return base::size(); }
  function capacity (line 63) | size_t capacity() const { return base::capacity(); }
  function addPoint (line 68) | inline void addPoint(double x, double y)
  function setType (line 81) | void setType(Spline_type type) { _type = type; _valid = false; }
  type SplineData (line 105) | struct SplineData { double x,a,b,c,d; }
  function splineCalc (line 122) | inline double splineCalc(std::vector<SplineData>::const_iterator i, doub...
  function lowCalc (line 128) | inline double lowCalc(double xval)
  function highCalc (line 149) | inline double highCalc(double xval)
  function x (line 171) | inline double x(size_t i) const { return operator[](i).first; }
  function y (line 172) | inline double y(size_t i) const { return operator[](i).second; }
  function h (line 173) | inline double h(size_t i) const { return x(i+1) - x(i); }
  function generate (line 200) | void generate()

FILE: camera_model/include/camodocal/gpl/EigenQuaternionParameterization.h
  function namespace (line 6) | namespace camodocal

FILE: camera_model/include/camodocal/gpl/EigenUtils.h
  function namespace (line 9) | namespace camodocal

FILE: camera_model/include/camodocal/gpl/gpl.h
  function namespace (line 8) | namespace camodocal

FILE: camera_model/include/camodocal/sparse_graph/Transform.h
  function namespace (line 8) | namespace camodocal

FILE: camera_model/src/calib/CameraCalibration.cc
  type camodocal (line 21) | namespace camodocal
    function CameraPtr (line 167) | CameraPtr&
    function CameraConstPtr (line 173) | const CameraConstPtr

FILE: camera_model/src/camera_models/Camera.cc
  type camodocal (line 6) | namespace camodocal

FILE: camera_model/src/camera_models/CameraFactory.cc
  type camodocal (line 13) | namespace camodocal
    function CameraPtr (line 34) | CameraPtr
    function CameraPtr (line 89) | CameraPtr

FILE: camera_model/src/camera_models/CataCamera.cc
  type camodocal (line 14) | namespace camodocal

FILE: camera_model/src/camera_models/CostFunctionFactory.cc
  type camodocal (line 9) | namespace camodocal
    function worldToCameraTransform (line 13) | void
    class ReprojectionError1 (line 58) | class ReprojectionError1
      method ReprojectionError1 (line 68) | ReprojectionError1(const Eigen::Vector3d& observed_P,
      method ReprojectionError1 (line 74) | ReprojectionError1(const std::vector<double>& intrinsic_params,
    class ReprojectionError2 (line 141) | class ReprojectionError2
    class ReprojectionError3 (line 180) | class ReprojectionError3
      method ReprojectionError3 (line 190) | ReprojectionError3(const Eigen::Vector2d& observed_p,
      method ReprojectionError3 (line 196) | ReprojectionError3(const std::vector<double>& intrinsic_params,
      method ReprojectionError3 (line 203) | ReprojectionError3(const std::vector<double>& intrinsic_params,
      method ReprojectionError3 (line 212) | ReprojectionError3(const std::vector<double>& intrinsic_params,
      method ReprojectionError3 (line 222) | ReprojectionError3(const std::vector<double>& intrinsic_params,
    class StereoReprojectionError (line 356) | class StereoReprojectionError
    class ComprehensionError (line 421) | class ComprehensionError {

FILE: camera_model/src/camera_models/EquidistantCamera.cc
  type camodocal (line 14) | namespace camodocal

FILE: camera_model/src/camera_models/PinholeCamera.cc
  type camodocal (line 13) | namespace camodocal

FILE: camera_model/src/camera_models/ScaramuzzaCamera.cc
  function polyfit (line 18) | Eigen::VectorXd polyfit(Eigen::VectorXd& xVec, Eigen::VectorXd& yVec, in...
  type camodocal (line 46) | namespace camodocal

FILE: camera_model/src/chessboard/Chessboard.cc
  type camodocal (line 11) | namespace camodocal
    function less_pred (line 1552) | bool less_pred(const std::pair<float, int>& p1, const std::pair<float,...
    function countClasses (line 1557) | void countClasses(const std::vector<std::pair<float, int> >& pairs, si...

FILE: camera_model/src/gpl/EigenQuaternionParameterization.cc
  type camodocal (line 5) | namespace camodocal

FILE: camera_model/src/gpl/gpl.cc
  function orwl_gettime (line 20) | struct timespec orwl_gettime(void) {
  type camodocal (line 46) | namespace camodocal
    function hypot3 (line 49) | double hypot3(double x, double y, double z)
    function hypot3f (line 54) | float hypot3f(float x, float y, float z)
    function d2r (line 59) | double d2r(double deg)
    function d2r (line 64) | float d2r(float deg)
    function r2d (line 69) | double r2d(double rad)
    function r2d (line 74) | float r2d(float rad)
    function sinc (line 79) | double sinc(double theta)
    function LARGE_INTEGER (line 88) | LARGE_INTEGER
    function clock_gettime (line 109) | int
    function timeInMicroseconds (line 149) | unsigned long long timeInMicroseconds(void)
    function timeInSeconds (line 161) | double timeInSeconds(void)
    function colorDepthImage (line 439) | void colorDepthImage(cv::Mat& imgDepth, cv::Mat& imgColoredDepth,
    function colormap (line 465) | bool colormap(const std::string& name, unsigned char idx,
    function bresLine (line 492) | std::vector<cv::Point2i> bresLine(int x0, int y0, int x1, int y1)
    function bresCircle (line 532) | std::vector<cv::Point2i> bresCircle(int x0, int y0, int r)
    function fitCircle (line 621) | void
    function intersectCircles (line 676) | std::vector<cv::Point2d>
    function UTMLetterDesignator (line 714) | char
    function LLtoUTM (line 747) | void
    function UTMtoLL (line 827) | void
    function timestampDiff (line 897) | long int

FILE: camera_model/src/intrinsic_calib.cc
  function main (line 15) | int main(int argc, char** argv)

FILE: camera_model/src/sparse_graph/Transform.cc
  type camodocal (line 3) | namespace camodocal

FILE: plane_segmentation/RecoverPlane_perpendicular.py
  class RecoverPlane (line 15) | class RecoverPlane(object):
    method __init__ (line 16) | def __init__(self):
    method build_train_graph (line 19) | def build_train_graph(self):
    method compute_plane_reg_loss (line 162) | def compute_plane_reg_loss(self, pred_in, ref):
    method compute_depth_error (line 182) | def compute_depth_error(self,proj_homo,proj_depth):
    method compute_perpendicular_error (line 195) | def compute_perpendicular_error(self,  normal_vectors, num):
    method collect_summaries (line 221) | def collect_summaries(self):   #tf.summary can export model param
    method train (line 253) | def train(self, opt):
    method build_plane_test_graph (line 341) | def build_plane_test_graph(self):
    method preprocess_image (line 357) | def preprocess_image(self, image):
    method deprocess_image (line 362) | def deprocess_image(self, image):
    method setup_inference (line 368) | def setup_inference(self,
    method inference (line 381) | def inference(self, inputs, sess): #, mode='depth'
    method save (line 388) | def save(self, sess, checkpoint_dir, step):

FILE: plane_segmentation/crf_inference.py
  function CRF_act (line 25) | def CRF_act(  fn_im ,fn_anno , fn_output , fn_output2 ):

FILE: plane_segmentation/data_loader_new.py
  class DataLoader (line 6) | class DataLoader(object):
    method __init__ (line 7) | def __init__(self,
    method load_train_batch (line 19) | def load_train_batch(self):
    method make_intrinsics_matrix (line 103) | def make_intrinsics_matrix(self, fx, fy, cx, cy):
    method data_augmentation (line 114) | def data_augmentation(self, im, depth, label, intrinsics, out_h, out_w):
    method format_file_list (line 153) | def format_file_list(self, data_root, split):
    method get_multi_scale_intrinsics (line 179) | def get_multi_scale_intrinsics(self, intrinsics, num_scales):

FILE: plane_segmentation/inference.py
  function random_colors (line 74) | def random_colors(N, bright=True):
  function apply_mask (line 88) | def apply_mask(image, mask, max_mask, color, alpha=0.5):
  function color_mask (line 98) | def color_mask(image, pred_masks, colors, alpha=0.5 ):
  function thres_mask (line 117) | def thres_mask(pred_masks, num_plane):
  function meshgrid (line 131) | def meshgrid(height, width, is_homogeneous=True):
  function compute_depth (line 154) | def compute_depth(img, pred_param, num_plane, intrinsics):
  function compute_errors (line 183) | def compute_errors(gt, pred):
  function main (line 208) | def main(_):

FILE: plane_segmentation/net.py
  function resize_like (line 9) | def resize_like(inputs, ref):
  function plane_pred_net (line 17) | def plane_pred_net(tgt_image, num_plane, is_training=True):

FILE: plane_segmentation/train.py
  function main (line 43) | def main(_):

FILE: plane_segmentation/utils.py
  function random_colors (line 14) | def random_colors(N, bright=True):
  function apply_mask (line 26) | def apply_mask(image, mask, max_mask, color, alpha=0.4):
  function color_mask (line 47) | def color_mask(image, pred_mask_s, colors, alpha=0.4 ):
  function meshgrid (line 66) | def meshgrid(batch, height, width, is_homogeneous=True):
  function compute_depth (line 92) | def compute_depth(img, pred_param, num_plane, intrinsics):
  function compute_unscaled_ray (line 117) | def compute_unscaled_ray(img, intrinsics):
  function compute_plane_equation (line 128) | def compute_plane_equation(img, pred_param, ray, depth):
  function val2uint8 (line 139) | def val2uint8(mat,maxVal):

FILE: rpvio_estimator/src/estimator.h
  function class (line 27) | class Estimator

FILE: rpvio_estimator/src/estimator_node.cpp
  function predict (line 42) | void predict(const sensor_msgs::ImuConstPtr &imu_msg)
  function update (line 80) | void update()
  function getMeasurements (line 98) | std::vector<std::pair<std::vector<sensor_msgs::ImuConstPtr>, sensor_msgs...
  function imu_callback (line 138) | void imu_callback(const sensor_msgs::ImuConstPtr &imu_msg)
  function feature_callback (line 165) | void feature_callback(const sensor_msgs::PointCloudConstPtr &feature_msg)
  function restart_callback (line 179) | void restart_callback(const std_msgs::BoolConstPtr &restart_msg)
  function relocalization_callback (line 200) | void relocalization_callback(const sensor_msgs::PointCloudConstPtr &poin...
  function process (line 209) | void process()
  function main (line 342) | int main(int argc, char **argv)

FILE: rpvio_estimator/src/factor/homography_factor.h
  type HomographyFactor (line 5) | struct HomographyFactor

FILE: rpvio_estimator/src/factor/imu_factor.h
  function pre_integration (line 16) | pre_integration(_pre_integration)
  function virtual (line 19) | virtual bool Evaluate(double const *const *parameters, double *residuals...

FILE: rpvio_estimator/src/factor/integration_base.h
  function class (line 9) | class IntegrationBase
  function push_back (line 30) | void push_back(double dt, const Eigen::Vector3d &acc, const Eigen::Vecto...
  function repropagate (line 38) | void repropagate(const Eigen::Vector3d &_linearized_ba, const Eigen::Vec...
  function midPointIntegration (line 54) | void midPointIntegration(double _dt,
  function propagate (line 130) | void propagate(double _dt, const Eigen::Vector3d &_acc_1, const Eigen::V...

FILE: rpvio_estimator/src/factor/marginalization_factor.h
  type ResidualBlockInfo (line 15) | struct ResidualBlockInfo
  type ThreadsStruct (line 37) | struct ThreadsStruct
  function class (line 46) | class MarginalizationInfo
  function class (line 74) | class MarginalizationFactor : public ceres::CostFunction

FILE: rpvio_estimator/src/factor/pose_local_parameterization.h
  function class (line 7) | class PoseLocalParameterization : public ceres::LocalParameterization

FILE: rpvio_estimator/src/feature_manager.cpp
  function VectorXd (line 235) | VectorXd FeatureManager::getDepthVector()

FILE: rpvio_estimator/src/feature_manager.h
  function class (line 18) | class FeaturePerFrame
  function class (line 44) | class FeaturePerId
  function class (line 68) | class FeatureManager

FILE: rpvio_estimator/src/initial/initial_aligment.cpp
  function solveGyroscopeBias (line 3) | void solveGyroscopeBias(map<double, ImageFrame> &all_image_frame, Vector...
  function MatrixXd (line 40) | MatrixXd TangentBasis(Vector3d &g0)
  function RefineGravity (line 55) | void RefineGravity(map<double, ImageFrame> &all_image_frame, Vector3d &g...
  function LinearAlignment (line 125) | bool LinearAlignment(map<double, ImageFrame> &all_image_frame, Vector3d ...
  function VisualIMUAlignment (line 199) | bool VisualIMUAlignment(map<double, ImageFrame> &all_image_frame, Vector...

FILE: rpvio_estimator/src/initial/initial_alignment.h
  function class (line 13) | class ImageFrame

FILE: rpvio_estimator/src/initial/initial_ex_rotation.cpp
  function Matrix3d (line 69) | Matrix3d InitialEXRotation::solveRelativeR(const vector<pair<Vector3d, V...

FILE: rpvio_estimator/src/initial/initial_ex_rotation.h
  function class (line 14) | class InitialEXRotation

FILE: rpvio_estimator/src/initial/initial_sfm.h
  type SFMFeature (line 14) | struct SFMFeature
  type ReprojectionError3D (line 25) | struct ReprojectionError3D
  type ReprojectionErrorH (line 57) | struct ReprojectionErrorH
  function ceres (line 78) | static ceres::CostFunction* Create(const double observed_x,
  function class (line 91) | class GlobalSFM

FILE: rpvio_estimator/src/initial/solve_5pts.cpp
  type cv (line 5) | namespace cv {
    function decomposeEssentialMat (line 6) | void decomposeEssentialMat( InputArray _E, OutputArray _R1, OutputArra...
    function recoverPose (line 31) | int recoverPose( InputArray E, InputArray _points1, InputArray _points...
    function recoverPose (line 185) | int recoverPose( InputArray E, InputArray _points1, InputArray _points...

FILE: rpvio_estimator/src/initial/solve_5pts.h
  function class (line 13) | class MotionEstimator

FILE: rpvio_estimator/src/parameters.cpp
  function T (line 27) | T readParam(ros::NodeHandle &n, std::string name)
  function readParameters (line 42) | void readParameters(ros::NodeHandle &n)

FILE: rpvio_estimator/src/parameters.h
  type SIZE_PARAMETERIZATION (line 44) | enum SIZE_PARAMETERIZATION
  type StateOrder (line 51) | enum StateOrder
  type NoiseOrder (line 60) | enum NoiseOrder

FILE: rpvio_estimator/src/utility/CameraPoseVisualization.cpp
  function Eigen2Point (line 12) | void Eigen2Point(const Eigen::Vector3d& v, geometry_msgs::Point& p) {

FILE: rpvio_estimator/src/utility/CameraPoseVisualization.h
  function class (line 10) | class CameraPoseVisualization {

FILE: rpvio_estimator/src/utility/tic_toc.h
  function class (line 7) | class TicToc

FILE: rpvio_estimator/src/utility/utility.h
  function class (line 12) | class Utility
  type stat (line 156) | struct stat

FILE: rpvio_estimator/src/utility/visualization.cpp
  function registerPub (line 23) | void registerPub(ros::NodeHandle &n)
  function pubLatestOdometry (line 45) | void pubLatestOdometry(const Eigen::Vector3d &P, const Eigen::Quaternion...
  function printStatistics (line 65) | void printStatistics(const Estimator &estimator, double t)
  function pubOdometry (line 106) | void pubOdometry(const Estimator &estimator, const std_msgs::Header &hea...
  function pubKeyPoses (line 176) | void pubKeyPoses(const Estimator &estimator, const std_msgs::Header &hea...
  function pubCameraPose (line 210) | void pubCameraPose(const Estimator &estimator, const std_msgs::Header &h...
  function pubPointCloud (line 240) | void pubPointCloud(const Estimator &estimator, const std_msgs::Header &h...
  function pubTF (line 300) | void pubTF(const Estimator &estimator, const std_msgs::Header &header)
  function pubKeyframe (line 349) | void pubKeyframe(const Estimator &estimator)
  function pubRelocalization (line 407) | void pubRelocalization(const Estimator &estimator)

FILE: rpvio_feature_tracker/src/feature_tracker.cpp
  function inBorder (line 5) | bool inBorder(const cv::Point2f &pt)
  function reduceVector (line 13) | void reduceVector(vector<cv::Point2f> &v, vector<uchar> status)
  function reduceVector (line 22) | void reduceVector(vector<int> &v, vector<uchar> status)

FILE: rpvio_feature_tracker/src/feature_tracker.h
  function class (line 28) | class FeatureTracker

FILE: rpvio_feature_tracker/src/feature_tracker_node.cpp
  function callback (line 29) | void callback(const sensor_msgs::ImageConstPtr &img_msg, const sensor_ms...
  function main (line 239) | int main(int argc, char **argv)

FILE: rpvio_feature_tracker/src/parameters.cpp
  function T (line 23) | T readParam(ros::NodeHandle &n, std::string name)
  function readParameters (line 38) | void readParameters(ros::NodeHandle &n)

FILE: rpvio_feature_tracker/src/tic_toc.h
  function class (line 7) | class TicToc

FILE: scripts/convert_vins_to_tum.py
  function main (line 4) | def main():
Condensed preview — 106 files, each showing its path, character count, and a content snippet. Download the .json file or copy it to your clipboard to get the full structured content (1,269K chars).
[
  {
    "path": "LICENCE",
    "chars": 35141,
    "preview": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free "
  },
  {
    "path": "README.md",
    "chars": 9469,
    "preview": "## RP-VIO: Robust Plane-based Visual-Inertial Odometry for Dynamic Environments\nKarnik Ram, Chaitanya Kharyal, Sudarshan"
  },
  {
    "path": "camera_model/CMakeLists.txt",
    "chars": 1887,
    "preview": "cmake_minimum_required(VERSION 2.8.3)\nproject(camera_model)\n\nset(CMAKE_BUILD_TYPE \"Release\")\nset(CMAKE_CXX_FLAGS \"-std=c"
  },
  {
    "path": "camera_model/cmake/FindEigen.cmake",
    "chars": 7887,
    "preview": "# Ceres Solver - A fast non-linear least squares minimizer\n# Copyright 2015 Google Inc. All rights reserved.\n# http://ce"
  },
  {
    "path": "camera_model/include/camodocal/calib/CameraCalibration.h",
    "chars": 2195,
    "preview": "#ifndef CAMERACALIBRATION_H\n#define CAMERACALIBRATION_H\n\n#include <opencv2/core/core.hpp>\n\n#include \"camodocal/camera_mo"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/Camera.h",
    "chars": 5063,
    "preview": "#ifndef CAMERA_H\n#define CAMERA_H\n\n#include <boost/shared_ptr.hpp>\n#include <eigen3/Eigen/Dense>\n#include <opencv2/core/"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/CameraFactory.h",
    "chars": 658,
    "preview": "#ifndef CAMERAFACTORY_H\n#define CAMERAFACTORY_H\n\n#include <boost/shared_ptr.hpp>\n#include <opencv2/core/core.hpp>\n\n#incl"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/CataCamera.h",
    "chars": 6561,
    "preview": "#ifndef CATACAMERA_H\r\n#define CATACAMERA_H\r\n\r\n#include <opencv2/core/core.hpp>\r\n#include <string>\r\n\r\n#include \"ceres/rot"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/CostFunctionFactory.h",
    "chars": 3444,
    "preview": "#ifndef COSTFUNCTIONFACTORY_H\n#define COSTFUNCTIONFACTORY_H\n\n#include <boost/shared_ptr.hpp>\n#include <opencv2/core/core"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/EquidistantCamera.h",
    "chars": 6787,
    "preview": "#ifndef EQUIDISTANTCAMERA_H\r\n#define EQUIDISTANTCAMERA_H\r\n\r\n#include <opencv2/core/core.hpp>\r\n#include <string>\r\n\r\n#incl"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/PinholeCamera.h",
    "chars": 5982,
    "preview": "#ifndef PINHOLECAMERA_H\n#define PINHOLECAMERA_H\n\n#include <opencv2/core/core.hpp>\n#include <string>\n\n#include \"ceres/rot"
  },
  {
    "path": "camera_model/include/camodocal/camera_models/ScaramuzzaCamera.h",
    "chars": 10407,
    "preview": "#ifndef SCARAMUZZACAMERA_H\r\n#define SCARAMUZZACAMERA_H\r\n\r\n#include <opencv2/core/core.hpp>\r\n#include <string>\r\n\r\n#includ"
  },
  {
    "path": "camera_model/include/camodocal/chessboard/Chessboard.h",
    "chars": 3058,
    "preview": "#ifndef CHESSBOARD_H\n#define CHESSBOARD_H\n\n#include <boost/shared_ptr.hpp>\n#include <opencv2/core/core.hpp>\n\nnamespace c"
  },
  {
    "path": "camera_model/include/camodocal/chessboard/ChessboardCorner.h",
    "chars": 1196,
    "preview": "#ifndef CHESSBOARDCORNER_H\n#define CHESSBOARDCORNER_H\n\n#include <boost/shared_ptr.hpp>\n#include <opencv2/core/core.hpp>\n"
  },
  {
    "path": "camera_model/include/camodocal/chessboard/ChessboardQuad.h",
    "chars": 773,
    "preview": "#ifndef CHESSBOARDQUAD_H\n#define CHESSBOARDQUAD_H\n\n#include <boost/shared_ptr.hpp>\n\n#include \"camodocal/chessboard/Chess"
  },
  {
    "path": "camera_model/include/camodocal/chessboard/Spline.h",
    "chars": 8626,
    "preview": "/*  dynamo:- Event driven molecular dynamics simulator\n    http://www.marcusbannerman.co.uk/dynamo\n    Copyright (C) 201"
  },
  {
    "path": "camera_model/include/camodocal/gpl/EigenQuaternionParameterization.h",
    "chars": 1150,
    "preview": "#ifndef EIGENQUATERNIONPARAMETERIZATION_H\n#define EIGENQUATERNIONPARAMETERIZATION_H\n\n#include \"ceres/local_parameterizat"
  },
  {
    "path": "camera_model/include/camodocal/gpl/EigenUtils.h",
    "chars": 11621,
    "preview": "#ifndef EIGENUTILS_H\n#define EIGENUTILS_H\n\n#include <eigen3/Eigen/Dense>\n\n#include \"ceres/rotation.h\"\n#include \"camodoca"
  },
  {
    "path": "camera_model/include/camodocal/gpl/gpl.h",
    "chars": 2414,
    "preview": "#ifndef GPL_H\r\n#define GPL_H\r\n\r\n#include <algorithm>\r\n#include <cmath>\r\n#include <opencv2/core/core.hpp>\r\n\r\nnamespace ca"
  },
  {
    "path": "camera_model/include/camodocal/sparse_graph/Transform.h",
    "chars": 744,
    "preview": "#ifndef TRANSFORM_H\n#define TRANSFORM_H\n\n#include <boost/shared_ptr.hpp>\n#include <eigen3/Eigen/Dense>\n#include <stdint."
  },
  {
    "path": "camera_model/instruction",
    "chars": 68,
    "preview": "rosrun camera_model Calibration -w 8 -h 11 -s 70 -i ~/bag/PX/calib/\n"
  },
  {
    "path": "camera_model/package.xml",
    "chars": 2053,
    "preview": "<?xml version=\"1.0\"?>\n<package>\n  <name>camera_model</name>\n  <version>0.0.0</version>\n  <description>The camera_model p"
  },
  {
    "path": "camera_model/readme.md",
    "chars": 552,
    "preview": "part of [camodocal](https://github.com/hengli/camodocal)\n\n[Google Ceres](http://ceres-solver.org) is needed.\n\n# Calibrat"
  },
  {
    "path": "camera_model/src/calib/CameraCalibration.cc",
    "chars": 16543,
    "preview": "#include \"camodocal/calib/CameraCalibration.h\"\n\n#include <cstdio>\n#include <eigen3/Eigen/Dense>\n#include <iomanip>\n#incl"
  },
  {
    "path": "camera_model/src/camera_models/Camera.cc",
    "chars": 5759,
    "preview": "#include \"camodocal/camera_models/Camera.h\"\n#include \"camodocal/camera_models/ScaramuzzaCamera.h\"\n\n#include <opencv2/cal"
  },
  {
    "path": "camera_model/src/camera_models/CameraFactory.cc",
    "chars": 4440,
    "preview": "#include \"camodocal/camera_models/CameraFactory.h\"\n\n#include <boost/algorithm/string.hpp>\n\n\n#include \"camodocal/camera_m"
  },
  {
    "path": "camera_model/src/camera_models/CataCamera.cc",
    "chars": 26012,
    "preview": "#include \"camodocal/camera_models/CataCamera.h\"\n\n#include <cmath>\n#include <cstdio>\n#include <eigen3/Eigen/Dense>\n#inclu"
  },
  {
    "path": "camera_model/src/camera_models/CostFunctionFactory.cc",
    "chars": 45514,
    "preview": "#include \"camodocal/camera_models/CostFunctionFactory.h\"\n\n#include \"ceres/ceres.h\"\n#include \"camodocal/camera_models/Cat"
  },
  {
    "path": "camera_model/src/camera_models/EquidistantCamera.cc",
    "chars": 20449,
    "preview": "#include \"camodocal/camera_models/EquidistantCamera.h\"\n\n#include <cmath>\n#include <cstdio>\n#include <eigen3/Eigen/Dense>"
  },
  {
    "path": "camera_model/src/camera_models/PinholeCamera.cc",
    "chars": 22110,
    "preview": "#include \"camodocal/camera_models/PinholeCamera.h\"\n\n#include <cmath>\n#include <cstdio>\n#include <eigen3/Eigen/Dense>\n#in"
  },
  {
    "path": "camera_model/src/camera_models/ScaramuzzaCamera.cc",
    "chars": 25873,
    "preview": "#include \"camodocal/camera_models/ScaramuzzaCamera.h\"\n\n#include <cmath>\n#include <cstdio>\n#include <eigen3/Eigen/Dense>\n"
  },
  {
    "path": "camera_model/src/chessboard/Chessboard.cc",
    "chars": 70141,
    "preview": "#include \"camodocal/chessboard/Chessboard.h\"\n\n#include <opencv2/calib3d/calib3d.hpp>\n#include <opencv2/imgproc/imgproc.h"
  },
  {
    "path": "camera_model/src/gpl/EigenQuaternionParameterization.cc",
    "chars": 1409,
    "preview": "#include \"camodocal/gpl/EigenQuaternionParameterization.h\"\n\n#include <cmath>\n\nnamespace camodocal\n{\n\nbool\nEigenQuaternio"
  },
  {
    "path": "camera_model/src/gpl/gpl.cc",
    "chars": 25657,
    "preview": "#include \"camodocal/gpl/gpl.h\"\r\n\r\n#include <set>\r\n#ifdef _WIN32\r\n#include <winsock.h>\r\n#else\r\n#include <time.h>\r\n#endif\r"
  },
  {
    "path": "camera_model/src/intrinsic_calib.cc",
    "chars": 8444,
    "preview": "#include <boost/algorithm/string.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/program_options.hpp>\n#include <iom"
  },
  {
    "path": "camera_model/src/sparse_graph/Transform.cc",
    "chars": 1075,
    "preview": "#include <camodocal/sparse_graph/Transform.h>\n\nnamespace camodocal\n{\n\nTransform::Transform()\n{\n    m_q.setIdentity();\n  "
  },
  {
    "path": "config/advio_12_config.yaml",
    "chars": 2944,
    "preview": "%YAML:1.0\n\n#common parameters\nimu_topic: \"/imu0\"\nimage_topic: \"/cam0/image_raw\"\nmask_topic: \"/cam0/mask\"\noutput_path: \"~"
  },
  {
    "path": "config/ol_market1_config.yaml",
    "chars": 2946,
    "preview": "%YAML:1.0\n\n#common parameters\nimu_topic: \"/d400/imu0\"\nimage_topic: \"/d400/color/image_raw\"\nmask_topic: \"/planes/segments"
  },
  {
    "path": "config/rpvio_rviz_config.rviz",
    "chars": 17334,
    "preview": "Panels:\n  - Class: rviz/Displays\n    Help Height: 0\n    Name: Displays\n    Property Tree Widget:\n      Expanded:\n       "
  },
  {
    "path": "config/rpvio_sim_config.yaml",
    "chars": 2683,
    "preview": "%YAML:1.0\n\n#common parameters\nimu_topic: \"/imu_throttled\"\nimage_topic: \"/image\"\nmask_topic: \"/mask\"\noutput_path: \"~/outp"
  },
  {
    "path": "plane_segmentation/RecoverPlane_perpendicular.py",
    "chars": 16855,
    "preview": "from __future__ import division\nimport os\nimport time\nimport math\nfrom data_loader_new import DataLoader\nfrom net import"
  },
  {
    "path": "plane_segmentation/crf_inference.py",
    "chars": 3491,
    "preview": "# Author: Sudarshan\n\nimport sys\nimport numpy as np\nimport pydensecrf.densecrf as dcrf\nimport matplotlib.pyplot as plt\nim"
  },
  {
    "path": "plane_segmentation/data_loader_new.py",
    "chars": 8456,
    "preview": "from __future__ import division\nimport os\nimport random\nimport tensorflow as tf\n\nclass DataLoader(object):\n    def __ini"
  },
  {
    "path": "plane_segmentation/inference.py",
    "chars": 16177,
    "preview": "from __future__ import division\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport random\nimport colorsys\nimpor"
  },
  {
    "path": "plane_segmentation/net.py",
    "chars": 5462,
    "preview": "from __future__ import division\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.l"
  },
  {
    "path": "plane_segmentation/openloris.txt",
    "chars": 177066,
    "preview": "OpenLORIS_images img5314.jpg\nOpenLORIS_images img96.jpg\nOpenLORIS_images img268.jpg\nOpenLORIS_images img4489.jpg\nOpenLOR"
  },
  {
    "path": "plane_segmentation/requirements.txt",
    "chars": 5330,
    "preview": "# This file may be used to create an environment using:\n# $ conda create --name <env> --file <this file>\n# platform: lin"
  },
  {
    "path": "plane_segmentation/train.py",
    "chars": 2427,
    "preview": "from __future__ import division\nimport tensorflow as tf\nimport pprint\nimport random\nimport numpy as np\n\nfrom RecoverPlan"
  },
  {
    "path": "plane_segmentation/utils.py",
    "chars": 5246,
    "preview": "from __future__ import division\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport random"
  },
  {
    "path": "rpvio.patch",
    "chars": 250962,
    "preview": "From 3d295e1ff4c292ce203e636f363520f9b633908c Mon Sep 17 00:00:00 2001\nFrom: Karnik Ram <karnikram@gmail.com>\nDate: Thu,"
  },
  {
    "path": "rpvio_estimator/CMakeLists.txt",
    "chars": 1315,
    "preview": "cmake_minimum_required(VERSION 2.8.3)\nproject(rpvio_estimator)\n\nset(CMAKE_BUILD_TYPE \"Release\")\nset(CMAKE_CXX_FLAGS \"-st"
  },
  {
    "path": "rpvio_estimator/cmake/FindEigen.cmake",
    "chars": 7887,
    "preview": "# Ceres Solver - A fast non-linear least squares minimizer\n# Copyright 2015 Google Inc. All rights reserved.\n# http://ce"
  },
  {
    "path": "rpvio_estimator/launch/advio_12.launch",
    "chars": 1247,
    "preview": "<launch>\n\n  <arg name=\"playback_rate\" default=\"1.0\" />\n  <arg name=\"start_from\" default=\"0\" />\n  <arg name=\"bagfile_path"
  },
  {
    "path": "rpvio_estimator/launch/ol_market1.launch",
    "chars": 1255,
    "preview": "<launch>\n\n  <arg name=\"playback_rate\" default=\"1.0\" />\n  <arg name=\"start_from\" default=\"0\" />\n  <arg name=\"bagfile_path"
  },
  {
    "path": "rpvio_estimator/launch/rpvio_rviz.launch",
    "chars": 160,
    "preview": "<launch>\n    <node name=\"rvizvisualisation\" pkg=\"rviz\" type=\"rviz\" output=\"log\" args=\"-d $(find rpvio_estimator)/../conf"
  },
  {
    "path": "rpvio_estimator/launch/rpvio_sim.launch",
    "chars": 1358,
    "preview": "<launch>\n\n  <arg name=\"playback_rate\" default=\"1.0\" />\n  <arg name=\"start_from\" default=\"0\" />\n  <arg name=\"bagfile_path"
  },
  {
    "path": "rpvio_estimator/package.xml",
    "chars": 370,
    "preview": "<?xml version=\"1.0\"?>\n<package>\n  <name>rpvio_estimator</name>\n  <version>0.0.0</version>\n  <description>The vins_estima"
  },
  {
    "path": "rpvio_estimator/src/estimator.cpp",
    "chars": 47786,
    "preview": "#include \"estimator.h\"\n\nEstimator::Estimator(): f_manager{Rs}\n{\n    ROS_INFO(\"init begins\");\n    clearState();\n}\n\nvoid E"
  },
  {
    "path": "rpvio_estimator/src/estimator.h",
    "chars": 4233,
    "preview": "#pragma once\n\n#include \"parameters.h\"\n#include \"feature_manager.h\"\n#include \"utility/utility.h\"\n#include \"utility/tic_to"
  },
  {
    "path": "rpvio_estimator/src/estimator_node.cpp",
    "chars": 12694,
    "preview": "#include <stdio.h>\n#include <queue>\n#include <map>\n#include <thread>\n#include <mutex>\n#include <condition_variable>\n#inc"
  },
  {
    "path": "rpvio_estimator/src/factor/homography_factor.h",
    "chars": 2546,
    "preview": "#pragma once\n#include <ceres/ceres.h>\n#include <eigen3/Eigen/Dense>\n\nstruct HomographyFactor\n{\n    HomographyFactor(cons"
  },
  {
    "path": "rpvio_estimator/src/factor/imu_factor.h",
    "chars": 8487,
    "preview": "#pragma once\n#include <ros/assert.h>\n#include <iostream>\n#include <eigen3/Eigen/Dense>\n\n#include \"../utility/utility.h\"\n"
  },
  {
    "path": "rpvio_estimator/src/factor/integration_base.h",
    "chars": 26193,
    "preview": "#pragma once\n\n#include \"../utility/utility.h\"\n#include \"../parameters.h\"\n\n#include <ceres/ceres.h>\nusing namespace Eigen"
  },
  {
    "path": "rpvio_estimator/src/factor/marginalization_factor.cpp",
    "chars": 14991,
    "preview": "#include \"marginalization_factor.h\"\n\nvoid ResidualBlockInfo::Evaluate()\n{\n    residuals.resize(cost_function->num_residu"
  },
  {
    "path": "rpvio_estimator/src/factor/marginalization_factor.h",
    "chars": 2447,
    "preview": "#pragma once\n\n#include <ros/ros.h>\n#include <ros/console.h>\n#include <cstdlib>\n#include <pthread.h>\n#include <ceres/cere"
  },
  {
    "path": "rpvio_estimator/src/factor/pose_local_parameterization.cpp",
    "chars": 814,
    "preview": "#include \"pose_local_parameterization.h\"\n\nbool PoseLocalParameterization::Plus(const double *x, const double *delta, dou"
  },
  {
    "path": "rpvio_estimator/src/factor/pose_local_parameterization.h",
    "chars": 440,
    "preview": "#pragma once\n\n#include <eigen3/Eigen/Dense>\n#include <ceres/ceres.h>\n#include \"../utility/utility.h\"\n\nclass PoseLocalPar"
  },
  {
    "path": "rpvio_estimator/src/factor/projection_factor.cpp",
    "chars": 9282,
    "preview": "#include \"projection_factor.h\"\n\nEigen::Matrix2d ProjectionFactor::sqrt_info;\ndouble ProjectionFactor::sum_t;\n\nProjection"
  },
  {
    "path": "rpvio_estimator/src/factor/projection_factor.h",
    "chars": 635,
    "preview": "#pragma once\n\n#include <ros/assert.h>\n#include <ceres/ceres.h>\n#include <Eigen/Dense>\n#include \"../utility/utility.h\"\n#i"
  },
  {
    "path": "rpvio_estimator/src/factor/projection_td_factor.cpp",
    "chars": 10949,
    "preview": "#include \"projection_td_factor.h\"\n\nEigen::Matrix2d ProjectionTdFactor::sqrt_info;\ndouble ProjectionTdFactor::sum_t;\n\nPro"
  },
  {
    "path": "rpvio_estimator/src/factor/projection_td_factor.h",
    "chars": 910,
    "preview": "#pragma once\n\n#include <ros/assert.h>\n#include <ceres/ceres.h>\n#include <Eigen/Dense>\n#include \"../utility/utility.h\"\n#i"
  },
  {
    "path": "rpvio_estimator/src/feature_manager.cpp",
    "chars": 12958,
    "preview": "#include \"feature_manager.h\"\n\nint FeaturePerId::endFrame()\n{\n    return start_frame + feature_per_frame.size() - 1;\n}\n\nF"
  },
  {
    "path": "rpvio_estimator/src/feature_manager.h",
    "chars": 2678,
    "preview": "#ifndef FEATURE_MANAGER_H\n#define FEATURE_MANAGER_H\n\n#include <list>\n#include <algorithm>\n#include <vector>\n#include <nu"
  },
  {
    "path": "rpvio_estimator/src/initial/initial_aligment.cpp",
    "chars": 7658,
    "preview": "#include \"initial_alignment.h\"\n\nvoid solveGyroscopeBias(map<double, ImageFrame> &all_image_frame, Vector3d* Bgs)\n{\n    M"
  },
  {
    "path": "rpvio_estimator/src/initial/initial_alignment.h",
    "chars": 791,
    "preview": "#pragma once\n#include <eigen3/Eigen/Dense>\n#include <iostream>\n#include \"../factor/imu_factor.h\"\n#include \"../utility/ut"
  },
  {
    "path": "rpvio_estimator/src/initial/initial_ex_rotation.cpp",
    "chars": 4953,
    "preview": "#include \"initial_ex_rotation.h\"\n\nInitialEXRotation::InitialEXRotation(){\n    frame_count = 0;\n    Rc.push_back(Matrix3d"
  },
  {
    "path": "rpvio_estimator/src/initial/initial_ex_rotation.h",
    "chars": 1070,
    "preview": "#pragma once \n\n#include <vector>\n#include \"../parameters.h\"\nusing namespace std;\n\n#include <opencv2/opencv.hpp>\n\n#includ"
  },
  {
    "path": "rpvio_estimator/src/initial/initial_sfm.cpp",
    "chars": 17527,
    "preview": "#include \"initial_sfm.h\"\n\nGlobalSFM::GlobalSFM(){}\n\nvoid GlobalSFM::triangulatePoint(Eigen::Matrix<double, 3, 4> &Pose0,"
  },
  {
    "path": "rpvio_estimator/src/initial/initial_sfm.h",
    "chars": 3298,
    "preview": "#pragma once\n#include <ceres/ceres.h>\n#include <ceres/rotation.h>\n#include <eigen3/Eigen/Dense>\n#include <iostream>\n#inc"
  },
  {
    "path": "rpvio_estimator/src/initial/solve_5pts.cpp",
    "chars": 11346,
    "preview": "#include \"solve_5pts.h\"\n#include <algorithm>\n\n\nnamespace cv {\n    void decomposeEssentialMat( InputArray _E, OutputArray"
  },
  {
    "path": "rpvio_estimator/src/initial/solve_5pts.h",
    "chars": 1019,
    "preview": "#pragma once\n\n#include <vector>\nusing namespace std;\n\n#include <opencv2/opencv.hpp>\n#include <eigen3/Eigen/Dense>\n#inclu"
  },
  {
    "path": "rpvio_estimator/src/parameters.cpp",
    "chars": 3917,
    "preview": "#include \"parameters.h\"\n\ndouble INIT_DEPTH;\ndouble MIN_PARALLAX;\ndouble ACC_N, ACC_W;\ndouble GYR_N, GYR_W;\n\nstd::vector<"
  },
  {
    "path": "rpvio_estimator/src/parameters.h",
    "chars": 1242,
    "preview": "#pragma once\n\n#include <ros/ros.h>\n#include <vector>\n#include <eigen3/Eigen/Dense>\n#include \"utility/utility.h\"\n#include"
  },
  {
    "path": "rpvio_estimator/src/utility/CameraPoseVisualization.cpp",
    "chars": 6705,
    "preview": "#include \"CameraPoseVisualization.h\"\n\nconst Eigen::Vector3d CameraPoseVisualization::imlt = Eigen::Vector3d(-1.0, -0.5, "
  },
  {
    "path": "rpvio_estimator/src/utility/CameraPoseVisualization.h",
    "chars": 1333,
    "preview": "#pragma once\n\n#include <ros/ros.h>\n#include <std_msgs/ColorRGBA.h>\n#include <visualization_msgs/Marker.h>\n#include <visu"
  },
  {
    "path": "rpvio_estimator/src/utility/tic_toc.h",
    "chars": 488,
    "preview": "#pragma once\n\n#include <ctime>\n#include <cstdlib>\n#include <chrono>\n\nclass TicToc\n{\n  public:\n    TicToc()\n    {\n       "
  },
  {
    "path": "rpvio_estimator/src/utility/utility.cpp",
    "chars": 433,
    "preview": "#include \"utility.h\"\n\nEigen::Matrix3d Utility::g2R(const Eigen::Vector3d &g)\n{\n    Eigen::Matrix3d R0;\n    Eigen::Vector"
  },
  {
    "path": "rpvio_estimator/src/utility/utility.h",
    "chars": 6235,
    "preview": "#pragma once\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <libgen.h>\n#include <stdio.h>\n#include <stdlib.h>\n#in"
  },
  {
    "path": "rpvio_estimator/src/utility/visualization.cpp",
    "chars": 17024,
    "preview": "#include \"visualization.h\"\n\nusing namespace ros;\nusing namespace Eigen;\nros::Publisher pub_odometry, pub_latest_odometry"
  },
  {
    "path": "rpvio_estimator/src/utility/visualization.h",
    "chars": 1723,
    "preview": "#pragma once\n\n#include <ros/ros.h>\n#include <std_msgs/Header.h>\n#include <std_msgs/Float32.h>\n#include <std_msgs/Bool.h>"
  },
  {
    "path": "rpvio_feature_tracker/CMakeLists.txt",
    "chars": 776,
    "preview": "cmake_minimum_required(VERSION 2.8.3)\nproject(rpvio_feature_tracker)\n\nset(CMAKE_BUILD_TYPE \"Release\")\nset(CMAKE_CXX_FLAG"
  },
  {
    "path": "rpvio_feature_tracker/cmake/FindEigen.cmake",
    "chars": 7887,
    "preview": "# Ceres Solver - A fast non-linear least squares minimizer\n# Copyright 2015 Google Inc. All rights reserved.\n# http://ce"
  },
  {
    "path": "rpvio_feature_tracker/package.xml",
    "chars": 560,
    "preview": "<?xml version=\"1.0\"?>\n<package>\n  <name>rpvio_feature_tracker</name>\n  <version>0.0.0</version>\n  <description>The rpvio"
  },
  {
    "path": "rpvio_feature_tracker/src/feature_tracker.cpp",
    "chars": 12057,
    "preview": "#include \"feature_tracker.h\"\n\nint FeatureTracker::n_id = 0;\n\nbool inBorder(const cv::Point2f &pt)\n{\n    const int BORDER"
  },
  {
    "path": "rpvio_feature_tracker/src/feature_tracker.h",
    "chars": 1544,
    "preview": "#pragma once\n\n#include <cstdio>\n#include <iostream>\n#include <queue>\n#include <execinfo.h>\n#include <csignal>\n\n#include "
  },
  {
    "path": "rpvio_feature_tracker/src/feature_tracker_node.cpp",
    "chars": 10556,
    "preview": "#include <ros/ros.h>\n#include <sensor_msgs/Image.h>\n#include <sensor_msgs/image_encodings.h>\n#include <sensor_msgs/Point"
  },
  {
    "path": "rpvio_feature_tracker/src/parameters.cpp",
    "chars": 1823,
    "preview": "#include \"parameters.h\"\n\nstd::string IMAGE_TOPIC;\nstd::string MASK_TOPIC;\nstd::string IMU_TOPIC;\nstd::vector<std::string"
  },
  {
    "path": "rpvio_feature_tracker/src/parameters.h",
    "chars": 596,
    "preview": "#pragma once\n#include <ros/ros.h>\n#include <opencv2/highgui/highgui.hpp>\n\nextern int ROW;\nextern int COL;\nextern int FOC"
  },
  {
    "path": "rpvio_feature_tracker/src/tic_toc.h",
    "chars": 488,
    "preview": "#pragma once\n\n#include <ctime>\n#include <cstdlib>\n#include <chrono>\n\nclass TicToc\n{\n  public:\n    TicToc()\n    {\n       "
  },
  {
    "path": "scripts/convert_vins_to_tum.py",
    "chars": 711,
    "preview": "import pandas as pd\nimport argparse\n\ndef main():\n    parser = argparse.ArgumentParser(description='Converts the csv outp"
  },
  {
    "path": "scripts/run_advio_12.sh",
    "chars": 656,
    "preview": "#! /bin/bash\n\ndataset=$1\n\nrm -r $dataset/results/advio\nmkdir -p $dataset/results/advio\ncd $dataset/results/advio\nsed -i "
  },
  {
    "path": "scripts/run_ol_market1.sh",
    "chars": 636,
    "preview": "#! /bin/bash\n\ndataset=$1\n\nrm -r $dataset/results/ol\nmkdir -p $dataset/results/ol\ncd $dataset/results/ol\nsed -i \"s@~@$HOM"
  },
  {
    "path": "scripts/run_rpvio_sim.sh",
    "chars": 3205,
    "preview": "dataset=$1\n\nrm -r $dataset/results/rpvio-sim/\nmkdir -p $dataset/results/rpvio-sim/\ncd $dataset/results/rpvio-sim/\nsed -i"
  }
]

// ... and 3 more files (download for full content)

About this extraction

This page contains the full source code of the karnikram/rp-vio GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 106 files (60.9 MB), approximately 367.6k tokens, and a symbol index with 205 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!