Repository: alexbaucom17/DominoRobot
Branch: master
Commit: 4f7273faa068
Files: 159
Total size: 596.1 KB
Directory structure:
gitextract_eu8r6t07/
├── .gitignore
├── LICENSE
├── README.md
├── doc/
│ ├── DominoRobotControllerFlowchart.drawio
│ ├── DominoRobotSoftwareArchitecture.drawio
│ ├── KukaPsuedoCode.txt
│ ├── MassAndPowerCalcs.xlsx
│ ├── NetworkPrototcols.txt
│ ├── SpeedConversions.ods
│ ├── SubsystemDiagram.drawio
│ ├── SwClassHeirarchy.txt
│ └── clearcore_api.txt
├── experimental_testing/
│ ├── ImageParser.py
│ ├── RobotArduinoTesting/
│ │ ├── StepperDriverMotion_4Motor_Accel/
│ │ │ └── StepperDriverMotion_4Motor_Accel.ino
│ │ ├── TestMotor/
│ │ │ └── TestMotor.ino
│ │ ├── WifiServerWithESP/
│ │ │ ├── RobotServer.cpp
│ │ │ ├── RobotServer.h
│ │ │ └── WifiServerWithESP.ino
│ │ ├── WifiTesting/
│ │ │ └── WifiTesting.ino
│ │ ├── all_motors/
│ │ │ └── all_motors.ino
│ │ ├── all_motors_speed_control/
│ │ │ ├── Motor.cpp
│ │ │ ├── Motor.h
│ │ │ └── all_motors_speed_control.ino
│ │ ├── kalman_test/
│ │ │ ├── KalmanFilter.cpp
│ │ │ ├── KalmanFilter.h
│ │ │ └── kalman_test.ino
│ │ ├── lifter_nano_test/
│ │ │ └── lifter_nano_test.ino
│ │ ├── motor_driver_test_script/
│ │ │ └── motor_driver_test_script.ino
│ │ ├── new_dc_motor_tuning/
│ │ │ ├── Motor.cpp
│ │ │ ├── Motor.h
│ │ │ ├── constants.h
│ │ │ └── new_dc_motor_tuning.ino
│ │ ├── single_motor_speed_control/
│ │ │ ├── Motor.cpp
│ │ │ ├── Motor.h
│ │ │ └── single_motor_speed_control.ino
│ │ ├── stepper_nano_test/
│ │ │ └── stepper_nano_test.ino
│ │ └── test_encoders/
│ │ └── test_encoders.ino
│ ├── TrajGen.py
│ └── TrajGenv2.py
└── src/
├── master/
│ ├── DominoDesign-Questions.psd
│ ├── DominoDesign.psd
│ ├── FieldPlanner.py
│ ├── MarvelMindHandler.py
│ ├── MasterMain.py
│ ├── README.md
│ ├── RobotClient.py
│ ├── Runtime.py
│ ├── Utils.py
│ ├── __init__.py
│ ├── camera_utils.py
│ ├── config.py
│ ├── plans/
│ │ ├── AccuracyTesting_1x3_withdistance.p
│ │ ├── AccuracyTesting_1x3_withdistance086.p
│ │ ├── AccuracyTesting_2x2.p
│ │ ├── AccuracyTesting_3x3_withdistance_2axis.p
│ │ ├── DockingTest1.p
│ │ ├── DockingTest2.p
│ │ ├── FullPlan_DominoBros.p
│ │ ├── FullTest1.p
│ │ ├── HackyTestPlan.p
│ │ ├── LargeScale1_5x5.p
│ │ ├── LargeScale2_5x5.p
│ │ ├── VisionAccuracyTesting_2x2_SmallTestArea.p
│ │ ├── autosaved.p
│ │ ├── plan.p
│ │ ├── plan2.p
│ │ ├── vision_offsets small_testing_area.csv
│ │ └── vision_offsets_full_plan.csv
│ ├── plot_logs2.py
│ └── requirements.txt
├── robot/
│ ├── Makefile
│ ├── README.md
│ ├── src/
│ │ ├── KalmanFilter.cpp
│ │ ├── KalmanFilter.h
│ │ ├── Localization.cpp
│ │ ├── Localization.h
│ │ ├── MarvelmindWrapper.cpp
│ │ ├── MarvelmindWrapper.h
│ │ ├── RobotController.cpp
│ │ ├── RobotController.h
│ │ ├── RobotServer.cpp
│ │ ├── RobotServer.h
│ │ ├── SmoothTrajectoryGenerator.cpp
│ │ ├── SmoothTrajectoryGenerator.h
│ │ ├── StatusUpdater.cpp
│ │ ├── StatusUpdater.h
│ │ ├── TrayController.cpp
│ │ ├── TrayController.h
│ │ ├── camera_tracker/
│ │ │ ├── CameraPipeline.cpp
│ │ │ ├── CameraPipeline.h
│ │ │ ├── CameraTracker.cpp
│ │ │ ├── CameraTracker.h
│ │ │ ├── CameraTrackerBase.h
│ │ │ ├── CameraTrackerFactory.cpp
│ │ │ ├── CameraTrackerFactory.h
│ │ │ └── CameraTrackerMock.h
│ │ ├── constants.cfg
│ │ ├── constants.h
│ │ ├── main.cpp
│ │ ├── robot.cpp
│ │ ├── robot.h
│ │ ├── robot_controller_modes/
│ │ │ ├── RobotControllerModeBase.cpp
│ │ │ ├── RobotControllerModeBase.h
│ │ │ ├── RobotControllerModePosition.cpp
│ │ │ ├── RobotControllerModePosition.h
│ │ │ ├── RobotControllerModeStopFast.cpp
│ │ │ ├── RobotControllerModeStopFast.h
│ │ │ ├── RobotControllerModeVision.cpp
│ │ │ └── RobotControllerModeVision.h
│ │ ├── serial/
│ │ │ ├── MockSerialComms.cpp
│ │ │ ├── MockSerialComms.h
│ │ │ ├── SerialComms.cpp
│ │ │ ├── SerialComms.h
│ │ │ ├── SerialCommsBase.cpp
│ │ │ ├── SerialCommsBase.h
│ │ │ ├── SerialCommsFactory.cpp
│ │ │ └── SerialCommsFactory.h
│ │ ├── sockets/
│ │ │ ├── ClientSocket.cpp
│ │ │ ├── ClientSocket.h
│ │ │ ├── MockSocketMultiThreadWrapper.cpp
│ │ │ ├── MockSocketMultiThreadWrapper.h
│ │ │ ├── README.md
│ │ │ ├── ServerSocket.cpp
│ │ │ ├── ServerSocket.h
│ │ │ ├── Socket.cpp
│ │ │ ├── Socket.h
│ │ │ ├── SocketException.h
│ │ │ ├── SocketMultiThreadWrapper.cpp
│ │ │ ├── SocketMultiThreadWrapper.h
│ │ │ ├── SocketMultiThreadWrapperBase.h
│ │ │ ├── SocketMultiThreadWrapperFactory.cpp
│ │ │ ├── SocketMultiThreadWrapperFactory.h
│ │ │ └── SocketTimeoutException.h
│ │ ├── utils.cpp
│ │ └── utils.h
│ └── test/
│ ├── CameraPipeline_Test.cpp
│ ├── CameraTracker_Test.cpp
│ ├── Localization_Test.cpp
│ ├── RobotController_Test.cpp
│ ├── RobotServer_Test.cpp
│ ├── Robot_Test.cpp
│ ├── SanityCheckTest.cpp
│ ├── SmoothTrajectoryGenerator_Test.cpp
│ ├── SocketWrapper_Test.cpp
│ ├── StatusUpdater_Test.cpp
│ ├── TrayController_Test.cpp
│ ├── test-main.cpp
│ ├── test-utils.h
│ ├── test_constants.cfg
│ └── utils_Test.cpp
├── robot_motor_driver/
│ ├── SerialComms.cpp
│ ├── SerialComms.h
│ └── robot_motor_driver.ino
└── tools/
├── 80-domino-bot.rules
├── IR_calibration_1.yml
├── IR_calibration_2.yml
├── camera_calibration.py
├── motor_test_script.py
└── plot_logs.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
.VSCodeCounter/
beacon_state.txt
build/
lib/
.vscode/
scratch
log/
__pycache__
*.so
marvelmind_SW*
*previous_plan_state.json
venv/
================================================
FILE: LICENSE
================================================
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
================================================
FILE: README.md
================================================
# Domino Robot
This is the code for Mark Rober's Domino Robot project. You can find a bunch more info about the project, including details about the software architecture [on my website](https://www.baucomrobotics.com/domino-robot).
Here is the video that Mark made about the project:
[](https://www.youtube.com/watch?v=8HEfIJlcFbs)
## Folder structure
If you are interested in browsing the files in this repo, here is the general structure to get you oriented.
- `doc`: Some high level documentation stuff, much of it is probably out of date
- `experimental_testing`: Various experimental files
- `src`: The real stuff
- `master`: Code for running the master controller and GUI
- `robot`: Code that runs on the Raspberry Pi of the robot
- `robot_motor_driver`: Code that runs on the ClearCore of the robot
- `tools`: Some various utility scripts
## Usage (Not Recommended)
This repository exists mostly for those who are interested in browsing the code to see how it works. It is almost certainly a bad idea to try and download this code and use it unless you have identical hardware to the robot in the video and/or really know what you are doing. You should consider this code completely unsupported as I do not plan to make future updates or fix any bugs you may find.
If for some reason you really want to try and run this code, there are some (out of date) instructions on how to run the [the master code](src/master/README.md) and also some (possibly also out of date) info on which libraries are used for compiling [the robot code](src/robot/README.md). Good luck.
================================================
FILE: doc/DominoRobotControllerFlowchart.drawio
================================================
7VxZc6M4EP41rpp5SIob/Bg7x0zNzFZ2kp1j32SQbRJAXiEn9vz6FSDMIYGxjQ05kqoEGiFA/XV/3S3BQB37qxsMFvNvyIHeQJGc1UC9HCiKLJsW/RdJ1onE0OREMMOuwxplgjv3D2RCiUmXrgPDQkOCkEfcRVFooyCANinIAMboudhsirziVRdgBjnBnQ08XvrTdcg8fS5Jyg58gu5szi5t6eyAD9LGTBDOgYOecyL1aqCOMUIk2fJXY+hFg5eOS3LedcXRzY1hGJAmJ9z8a3sPo0/Sw83vv3788C2HwPBMS3p5At6SPTC7WbJORwCjZeDAqBNpoI6e5y6BdwtgR0efqc6pbE58j+7JdNMB4TxuG+1MXc8bIw/huCPVAdCa2lQeEoweYe6IYVtwMqVHZhg4Ln2e3DETAgNGV2Z3CjGBq8ohkDcDSxEJkQ8JXtMm7IRUFQyLisr2nzPNGqn+5jmlqumJgKFptuk6G3C6wcZ8h/FXTjn+OrQcTTT+ljJRDUM8/kPTkUyznfE3zO0KUM1jKSB4Ivc3n9XV31c/F2Pv/hH/M70/UzkFrAaK4ZFomJYTujmLNgkGD6mYXiZ35AP5yGkMOtSFsF2EyRzNUAC8q0w6Kuo0a/MVoQVT3gMkZM38IVgSVNSzBybQu0WhS1wUUJlNNQCpxkaRflzqvr6WGpCo4xHw3Jmw+QU7MEGEID/CDiqgQIp/qJyeiNe/2I3HO7+jnXM93b1c5Q9ertlefMMjYD/O4mdPuw5QADfY4oCkSNFQFp0/wDNIahSqiBGIoQeI+1TsTIQmduotcumtbJAr62YBuRwgQ7TENmRn5b1wqSOK7/qOkges6ShtiKbTEBLOADbPvL9NpMOTM4qnl2AUcOWSX7ntBJmmznYzZEY76zxMBYA2GyG6CxPc05QShNaofScL40zzAJM7yIErfAhDBGC1lzgeESFeOwFrBe6U14a71vDEPGiKU+ZA00ThUAc/LIUmejMHf4ExWOeaLaIGYfPrqJJUMoGkx9M7faM6EooQERlImp0Z/y2jrGUkZ5uZKXGmR/M3F1NTqTC9jTS5SocWyegjpYi8PVbSR0jhTS6iRDOyLQ+EoWun4mvXy6zdSRsxe6ESdvwlGnVTMmlq1EWbPmvJqNUtNl0Ra+1q02qtSW+9K6MnHoCP+u5phAdtgmJN3cAAYkB3ONvcKUMVpJg0w5/E6WcpYZ1aNrSFBYOJpWu6VAdQLk+tTkpL6rD4nFSWBTmpdayUVG8Ufdt+pUd9Q+nosdyX0jB20foVC3PI+Y7oEFLRmI4fRp5HR5mBZoILKEl5PBros4ToL2gDWVqsqkl+h7jgw+1n2oCaDJVI19fR9iKFQ1y9Zbf3sWlA8LKdjlKuhHXudUwOO104kio739+StYaWbPTKklU+Gq8kgbGHqMtsSAYUn6Q45M2dMKaB/B8wibuKlMLiItqvPhrol1vdca0T3509eEzUApsvXLM5I/ZEg/y0jDDxlM5VzTQKdiu3Eqdaoj6PH/DxlZMxQthxA0AgldPoLwinCPtdUcbMQ5MI2BL1EooUo/xk9DCdAj0GZ5keprZD9SWgB8ccTup9VXN60HoXlA5fKT207vbFFj7UhgWFqkdKRbnrKDXJZVt+ROVjh2qy8lGcQr6TVa2Z9ZisZPXckkX9Hp+w+KDoc0CHKozY6osbQJ8+lx0egQucoTBVmE4Vcarg0NxCN9rhArNcL+qcC2S+QtEnMtjMrGymWZpNUW/qv+dpyff3IKsMi8u/+xOP3LTEILdeY6igjhLONPM4MyBDTS9exzoFRfHxS0ZRCxAcHtjmeAyGZBvRZdL44u/8V3YtbRCgLBvFxRZn7TDgmSzk1RNU6PkC3z18DFybyr4lcCtU+lrmwUn8K+BBPfoV8aAR/7TDg1Z55lTvnAdfa81Mblo0k/tVNUvXFm9JRBL//F41q9Vsm2UzpVUvfAI/y1fGrhF+Bth5S4mGpnXuYF9r1Sn1m9sdrNUv
B8svl650sO+OtR7TrUS4hqm2EtKa54ZyqihWs7owYvHqrqZrg3PLgTdFgZpSQmFVGINbfklYcobDrxvLVoV17HiUtv3OXrUCrRTzl2oF29uX34kqL8XS1YPaq3Vrt1qLRviyZ7/m6VgknUzTJZN2b2SeTtf6FjIpL9G5vhLfajUN6noV06VR/4tCjNIFZDpb5XgCTLbO94clfhZHeV+A58d1eqoQcoTiZrfrAdXeEYlm9MctbHkHo2rCb+srfL2lEq1xAbZn9QG+Ii56nZpNkPkHFwk8OCUCh+q7jpPg8UQlApwYZCURbG6Ih0St8bVTJDAlVnPYt0iQFm7TVaknqBHwFdjbbLk4xY8blWDjnQ828CGmt6RI8QTYN4CfoOe7gRPybyG/7Cmwcrqhd74UROWXglQa/LEKgjtYe0Q4LKXdy3HvQOhSidANXlWqQFNH+9CFxtfS3zUVHzV6pql0Xvu9yN6kyF6l752ZUpP1c0lSVEsa6vRvcYmjVqftk89P6vw0TLlut636p2qi6t8lorwZVfGq3xvbVtDrhVuI5CzHUJWWHHrpjS3ZFLiJocBPaC34CeG3ow4HgSwEwdiDAI8RhvurvBgFpa4gFzIxUXNoiEK1YjBX+PRVpVNo/rEqSy4qXPSxKpG+j/atMNGq2zb0/R2EiwnE8bPfum9b6dx7mcdTOt3NPv6XMEP2CUX16n8=
================================================
FILE: doc/DominoRobotSoftwareArchitecture.drawio
================================================
7V1bc9o4FP41mek+pCNLvj4mtElv2WabXvdlR2ABao3FCNEk/fUrgwW2JYwBG5s2yUOsY9kYfed+jpwz1Js8XHM8Hd+wkERnEIQPZ+jFGYSW4/nyT0J5TCnAtpaUEadhSlsT7ugvoiam1DkNySw3UTAWCTrNEwcsjslA5GiYc3afnzZkUf5Tp3iUfiJYE+4GOCLatC80FOMl1Xcys18ROhqrT7ZAemaC1eSUMBvjkN1nSOjlGepxxsTyaPLQI1GyempdltddbTi7ejBOYlHlgv7s/T90dh1dzV5fvjl/IaZ/2/A8vctPHM3TL3yDZ4JwSXv2Dk8Fm/6VPr14VEvC2TwOSXJXcIYu78dUkLspHiRn7yUXSNpYTCI5suThT8IFlct5EdFRLGnyjpI6ZLFIsbZcOR5xHFL5PXosYnzxKai/+E3m0ijK0IdO8ivpM8HZD5I54y5+VmcUXHKhL/W1Ul9cPh55yJDStbsmbEIEf5RT0rPnloOW1zzmOfR+zRaeoo0zLOEoIk5ZcbS69xoteZACtgN40NGwIaHk3nTIuBizEYtx9HJNvcyjt57zjiXILDD7ToR4TOHBc8HyiJIHKr5mjr8lt3rupKMXD+mdF4NHNYjl9/2aHWSuSobryxYjdV2RTzbiOGNzPiAla5UulcB8RESZQKRgJQtZyhacRFjQn3k1UTvEjiafV5RIFQbBbYSlxuOHSadB7jyCXQJ0uQsx8YcDo9wNfNIfanKH6pI7kJc7V5c7GBjkLmhK7BSLZEC5/vT6twcCAq9jOEANhw/zWNAJkcQbHEvTfKB4mI3XCWClCQ0EOlouMKDVIFyBBleiw+pFaELDcGHmxozTX9J64BIJk6D1Fw5D0cXwB2RgRK3vO7YDGkMtKIDmGETM5GKsfMz6XQxd131gfemyQvA6ln7iMAHmODLWNbQs6G1FCznHRUt35xVavShZziNBFXgh8DyDOnSIH9omqHzYRwbfvS6onAJUQUXbZXmNQaUbr4+924aAWmlFA1ah46Eg1LEibghCYsIqCFzLd5rCquhmIKRjZRmxai7O8jQ4jhhnrWOrb5kz2+KsdWj1LXuu8TgLumdtBVqLSy84x4+ZCVNGYzHL3Pk2IazZDTkwx27nDijwy/KOa+5ZPdoBDOW3ylA7BO4tMIZTN18cJvuupqc/zRoIu4dD7ABDBDEcDkLJLgY1HHpBHzTm3awCBiUXlt12vAf1AEIXozi8SLK+cjSI8GxGl44h5kInZwApsvla93rQzYjLOXgOwJZUlxzcEi6j0CSNqqllG9lZxWzJ+6EtynkxKt5yf0FUYXN9GjrDESZ/V9F2U+SapoZBwYkrutHLb55elU2EF1W+nb+R5uQtV0a7UV3aHxnYtvPuhGu7+dQtAPY+fFuUtA2yuSBf0Ug9/f7sXjnxG3TK7iCkKbsbJqTuh+AFl5+eHDzrRQTzHuPkBIs0NdgoaZMKJRlTDO6ajBRqLAZXZrIt6Yb7RQuwKN3eSUg3qireyO2UeKsqeFtc4u3FJbafiyoTb0iqzd+LT7xO8QnSC4B3ci1xcmGPTSaz2gORzpcyLFS1ltFYJIL0oFDlWT+TiA2oSL7AKxyH0Z9YoZVmue1YUYlxBqF3dLhsbLkYCMriPwigYkr1uPhga353MfjPfRO+dT7h96N/73u+oe9ICdCzD3g27RO++AK39E/1bB0nB5ljqLYbu41cpynIdEu0qi3J5eQsOliUzGDtIiybBK5MSOvRgHm4fFMcYjJRqI6ihREuFUB2P8eccTDhTlWLSu6gcXGq5qJR7cnovYoU5xbKZ2MtEBSaPbdeAVH5FcWkWGH+wYWQMhwa8m6rN4fs0jqwqf2grGWhDh0TwOdOgQkMKXnLc3Qts2pKrl3L6G5W1hiApCH8KDahe3hpJsE3tIeYwFo11tUOlq+BdctmNHWGjwZVSavHpnaRshaTOqCCMAdVYGjttlyD9a6lk8cIVdv5oeZbDvY33oFuvM0xRmup
nLLHzjb8cPydDARbPMQ1iQnHiwx/IzZvlzaeTa1AZe1DdcihXZBDk32DBjlsLOYxtBi/Y3J16S+8VJv168vT9E3yzqbJ1pkdk8aQUym6090bA7dtjtlfg1qoogr169age4U/bt6TCtJxo5GJpddf3+JokvSrgysaLTOJZ9CN5PJd9uXAHSVHz64j1l/EL9K1qrssu0P80j0n69zL64gABLqOQCYd0ViKxNLDz8909se7wzAoyJvJHTZGLo25wyho1x3ep6hu5dU5Qtv0+f59XqXu71Yl31plvPSxM0J58UCldUi2eKeZhabyQifoI1sAwJadZLvdTHOnY1XLryiE7bWflT73U7S6URIR1BOy7ctiu91nO4Q9B8hU1QRQxwybngB6Mmzl4qQCv/bECT6Ztk1ro4DYbtpqzyIchKlhS3OlyH4VGjYZ2Z+koAbIEMofVU4NG5/vBBbzRLt+moZYNNOasj0PcwK9Kq4hvIemXpXmqshQT7Or3qI7wn8+gbepqGzCzrw5ujns7I2i11Lus4P1kUIG261a2GqsFQAa+srZ4AdJRO5mHgkq/34cc4JDefBFojOtWwhPGtBVoroMUHRUQPVc9sdllaXZ7swq79npWj806p4WbXdP6H7lynQT0REiDUMmu8vtmg7Ml7pcr7z1sjh/a6+mtoP5KM2aUM/MP3VrblIxnTMQ6oFOqYQGnltW7nUJycbUvTYcHqB8KmfwUd3K5zCHQM/g32AZS0UTGnfBqetecKVFxp5pm9RxI2PU7puC9twnHFgoK7XJPuHmXnJi5v6qNQJlelt2GWzlAKoOuqMYdL0i0cMTwnES/HEsw8GnxMuGxItJNaAjhwx6HvuOJk1LQIH45I1tCNc9w25KY4N/c96YYQMswfwJPFO9AWwHz/TeqebAs9s1ynvZZN9dvdFl9e6OI795DMKqRrmZOF5/pZhdiNJQgWU2vAls55dMqr6Q1avTy+N7aPtl83f1BuRw/W9GltPX/60Fvfwf
================================================
FILE: doc/KukaPsuedoCode.txt
================================================
KukaInit();
int tile_count = 0;
int num_tiles = 333;
int num_rows = 20;
int num_cols = 15;
while (tile_count < num_tiles) {
string line = ReadLineFromFile(tile_count);
vector colors = ParseColors(line);
int color_count = 0;
for (int row = 0; row < num_rows; row++) {
for (int col = 0; col < num_cols; col++) {
Pose pickup_pose = PickupPoseFromColor(colors[color_count]);
Pose dropoff_pose = DropoffPoseFromRowCol(row, col);
MoveToPickup(pickup_pose);
CloseGripper();
MoveToDropoff(dropoff_pose);
OpenGripper();
color_count++;
}
}
tile_count++;
}
================================================
FILE: doc/NetworkPrototcols.txt
================================================
Message spec
- All messages are sent as json encoded byte strings
- All messages will contain a 'type' field that will be a string denoting the command type
- All messages will contain a 'sn' field that will be an int denoting the message serial number
- All messages will contain a 'data' field that will house any additional data required by the message type. This field may be empty
Master to robot commands:
Move:
// Move to a waypoint in a straight line
float x
float y
float a
Place:
// Place dominoes on the floor
null
Dock:
// Dock at charging station
Undock:
// Undock from charging station
Dropoff:
// Drop off pallet on conveyor
Pickup:
// Pick up pallet from conveyor
Position:
// Fallback for sending position data if usb controller for marvelmind sensor doesn't work
float x
float y
float a
float time
Robot to master commands:
RobotStatus:
// Give a status update to server
float est_pos_x
float est_pos_y
float est_pos_a
float est_cur_vel_x
float est_cur_vel_y
float est_cur_vel_a
string cur_task
int cur_task_msg_sn
int bat_percent //Can we even get this?
string error_msg
bool error_present
//Add other fields/sensor statuses as needed
Master to base station commands:
NextTile:
// The next tile to fill
int[tile_x*tile_y] dominoes
int tile_id
Base Station to master commands:
BaseStatus:
// Status update for server
int cur_tile_id
float est_time_to_tile_full
// Other sensor statuses
General commands:
StatusRequest:
// Request status from robot
KeepAlive:
// Keep the connection alive
Ack:
//Acknowledge that command was received
int sn
Done:
//Let the server know the command was done
int sn
================================================
FILE: doc/SubsystemDiagram.drawio
================================================
7V1dk5o6GP41zvRc2OFbvNx1t+10tmec2jltrzoIUWmReCDu6vn1J0AikGQVFQjSzuyFhBDgeZ+8eb/CDvTJevc+cjarT9ADwUBTvN1Afxhommpr2iD5U7x91jIaGVnDMvI90ilvmPn/AdKokNat74G41BFBGCB/U250YRgCF5XanCiCL+VuCxiU77pxluSOSt4wc50AcN2++h5aZa22Wej9AfjLFb2zqpAza4d2Jg3xyvHgS6FJfxzokwhClP1a7yYgSMCjuGTXvXvl7OHBIhCiKhcMnS92bEPraWY8fbPd7ce75aehno3y7ARb8sLkYdGeIgA8DAg5hBFawSUMneAxb72P4Db0QHIbBR/lfZ4g3OBGFTf+BAjtiXSdLYK4aYXWATkbIydCd4m0cIMbOHHsu7T5nR/QbvwbExBiuI1ccOQ1KXOcaAnQkX7k7ZNXLtyA4PkewDVA0R53iEDgIP+5zBGHUG156JdLA/8gAjlDOConnE9OjECE26YTTk4vKx+B2cZJgXjBs7GMsRNvsvmx8HeJrO4XGNgJDGCUXq57JrA9I0U9gr9A4YytzXXLOob/M4gQ2B1FjJyl84ZoA1Unxy+FuUWaVoVpRdvqh1gK38HOR9+Sy9+OTHL4vXDqYUeGTg/25KDiJMGyifbZ4CY9/E7HSw7y0dMjOvzlk0vjJ5cffLHv3v38gb5+/Kz9+PkhiqbKkKr0+mZXeimGxNkXOmygH6K4MPI0acgpqI3KHNQsRm8y/fXx0f74R/YE4qsNlbl6zFA5g41cxbD5gEklgotBt26G4Jcyt/LE8PhOuJFbYBh+NjwpjFuYA5Z9xhyoi7q6KZe6WsvUDWEIGN4qneWtrkvhrX4eb4dqw8Q9hmHBbvsboBcY/UoE8OIjd4WHxURNTbl6rbjFYqG5rsiK86y5ZdZkxWlqx8w4Tb89VaGWFEWuN9pQFRf5UAqvGsQdzbrtvOu4YfzhRne4YXWLGzdoYvSXG3a3uHGLntMV3KjkOREC5dy4LnRQmRta7bGDqtwQe22qXG4UqaFUpEbZM1Frp0YLzomILkdCTdLitgrnAHyGc5j4JfiWygRipGEQJNa/FeDXuJ8nv5bJrzd3kbf1Q/hX3Y5BO+FdnYnvGrIdA2N8ewu85PBXo9Gucd1qXOz3MzTUWX5lL8QFXfn4ATuSoWvlkV4J3zYd2DAkhDUoFBJzg5fbr54Tr9LHOEr0pnk5pIJiBXeKT9UjW6c4fi4zh2fH3E5c0Aw5VYNbc3m2imhSIsb5vKuiPk+yqrA6moLVkbZdSb7Xgvhna0X7QhLXJmuTk/XXFUgLT9YQwSjmJJ9roURQJ0wpNqRqu0AcUp3bpmEedYDOCKlqZUzNUTXLSW/KcqKRmgLIT/4iqz6IEdhs0l8p3j2A26KRIGlw2zzcTpopUGIQPcM+YDyy5GJMn6dUUBM9g2Dthx5uvweOC8NakfYcYC+ESFuuDeaLepA2xqzy0CQjPeaQnsLYR74AXvyOiF16i1iRtbcILGlyAn8ZJos5Rg5rI/0+Qcx3neCOnFj7npdaoSKhlS3TOqTA2D2mxatwQyAFrSkp6LxNNMOam18eeyMCXR2VVY7Kq/V2RcCbKv2fCJwULF7xNyUFYdjhUIIrJQpEfxdi8s1G8jtRSGKTlPzptI+YS+2Eam2+ALpkErxSpXGV8dVKicaQcfZUU7KBa484pI/AqpwHK2F8LYu4WcJtzK/hh+qXIm6q2RhwvGdA8wYLmLrlOYLWv1tITwzjVDHd4Q4Yk11+kuYansASJCTPxsLPlg2Xnezv6mQYZQkfdmwURDxuc2aMBW7JCbf6zOnRmstnmAy2Bq91RqKV32wKW34PxQyEcc3gtuTlseAKdFO74GocuBO43uAVs15029mZ0jnq8qbJoYy0TsXQijnCoSudu6dXVboXka6bKr+EHhL4VVZhXbgKP0BsakJ8ehpgia1Beu/ZPkZgXVia5+wdf5vleshsVRBUFBgig4zN
SdRX+8MRhyYfQOhCD9Scfqi8tnCQCoCvHEFsMx4udtk5EP8U3x0pvmu5vErMpSvzkwabAmPjQVXzk6bCDDQelQdqeM8dv3D/AwLo+mjfaTV9lQJhY35tBr+FQuADrxP8zqjbwe/rdDiTf2gz+C0UAZ9B/gwSGBMDBzlZDPwGKiIvE4qECsiju66L9oq/8AdJ7t7bYiw15c3jbGprlnVrEB+KTsu+ha5V8y1GjUEu2Xjpf3W4GHZBxqGZPdfXsYM3D2h1uNZTXUgL0JiqVfm6kU+Q90w3WkbXdKMo7sIqy2brNMWK8riTV7uaklYEqpQJYdrGWybGVtXNYutJLZUhTcNu1gHxDiyzt7G1ox7+jisus5bUVZavQOuZau+YYpf7CYbLamzI9qzmrN4aI3Zi0LVbmIv0KQtz8cHHcyiM0xrymbz65qsmIPbz5NXsi4GW+zmDy+bg6PI52Hahmxh0o6U5mF567vYtZquOaZsMyxr4cJfGu1W06NUPl4P8I0g1Tvl20mTdm/GjPzO+5SXX4qf7kWyatDWXD75/SCoNbnr6cVnqFncUiVHmoxnTCMQxRpFFt79pJunV/YLNRn0XAlMUIz3bSjlxa/tcrnMAJG74EkuhQxGx3yjxRL8J0vxHU69jB++GC/LxSY9iDkq58aTTkN2GLD3rpPMJwJ6FJoea2rHopGArLK8qe512yma/rLTTUDNryztxn76pL/GUfpeZ/kOWrHv+b230x/8B
================================================
FILE: doc/SwClassHeirarchy.txt
================================================
- robot
- status updater
- status
- robot server
- socket multi thread wrapper
- robot controller
- serial comms
- localization
- Kalman filter
- controller mode
- position mode
- trajectory generator
- x,y,a controllers
- vision mode
- trajectory generator
- Camera tracker *
- Kalman filter
- x,y,a controllers
- tray controller
- serial comms
- marvelmind wrapper
- camera tracker
- side camera
- rear camera
- motor controller
- serial comms
- robot velocity handler
- lifter action handler
- master
- gui
- runtime
- plan
- robot interface
- robot client
- tcp client
- field planner
================================================
FILE: doc/clearcore_api.txt
================================================
Tentative thoughts on api with clearcore for motor control over serial comms
There's a few options here:
- Clearcore does all trajectory generation, kalman filtering, kinematics, and control
- Central processor sends it marvelmind position data and forwards command from master
- This requires a more extensive api to handle forwarding commands
- Clearcore receives global velocity command from central processor, handles frame transformation and kinematics only
- Sends back computed global velocity
- Central processor has to send global angle along with command
- Central processor handles kalman filtering and trajectory generation
- Relatively simple api
- Clearcore receives local velocity command from central processor, handles local kinematics only
- Sends back computed local velocity
- Central processor handles kf, traj gen, frame conversion
- Very simple api - seems like the best starting point
Starting with option 3
Send from central controller:
- Local velocity command [vx, vy, va]
Response from clearcore:
- Estimated local velocity [vx_est, vy_est, va_est]
Clearcore outline:
while true:
if new command:
decode command
do IK
send new motor commands
do FK
encode response
send response (est vel)
else:
pass
================================================
FILE: experimental_testing/ImageParser.py
================================================
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import skimage.transform as sktf
import numpy as np
# Configuration
image_name = 'MR.jpg'
desired_width = 300
desired_height = 300
# (name, RGB) pairs for the available domino colors. RGB components are in
# [0, 1] to match the float image produced by skimage.transform.resize below.
dominoes = np.array(
    [('black', (0,0,0)),
     ('red', (1,0,0)),
     ('blue', (0,0,1)),
     ('green', (0,1,0)),
     ('white', (1,1,1))
    ])

# Load original image
img = mpimg.imread(image_name)

# Scaled image (resize outputs floats in [0, 1] regardless of input dtype)
img_scaled = sktf.resize(img, (desired_height, desired_width), anti_aliasing=False)

# Parse image into domino IDs by choosing the 'closest' color.
# Vectorized: compute the Euclidean distance from every pixel to every domino
# color in one shot, then take the nearest color per pixel. This replaces the
# original O(pixels * colors) Python loop; ties resolve to the lowest color
# id, exactly as the original strict '<' comparison did.
domino_colors = np.array([c for _, c in dominoes], dtype=float)  # (num_colors, 3)
pixels = img_scaled.reshape(-1, 3)                               # (num_pixels, 3)
distances = np.linalg.norm(pixels[:, None, :] - domino_colors[None, :, :], axis=2)
img_parsed_ids = np.argmin(distances, axis=1)
img_parsed_color = domino_colors[img_parsed_ids]
img_parsed_ids = img_parsed_ids.reshape((desired_height, desired_width))
img_parsed_color = img_parsed_color.reshape((desired_height, desired_width, 3))

# Output some metrics
print('Domino usage:')
print('Total number of dominoes: ' + str(img_parsed_ids.size))
print('Colors:')
unique_colors, counts = np.unique(img_parsed_ids, return_counts=True)
for i, id in enumerate(unique_colors):
    print(' ' + dominoes[id][0] + ': ' + str(counts[i]))

# Plot images
fig, axes = plt.subplots(nrows=1, ncols=3)
ax = axes.ravel()
ax[0].imshow(img)
ax[0].set_title('Original')
ax[1].imshow(img_scaled)
ax[1].set_title('Scaled')
ax[2].imshow(img_parsed_color)
ax[2].set_title('Dominoes')
plt.tight_layout()
plt.show()
================================================
FILE: experimental_testing/RobotArduinoTesting/StepperDriverMotion_4Motor_Accel/StepperDriverMotion_4Motor_Accel.ino
================================================
#include
// https://github.com/DIMRobotics/ArduinoStepperDriver
/* Axis identifiers inside the library; on them then
Can be controlled specific. You can do several axles,
The maximum number in the native assembly is 3. It is treated by editing
headerfile, there is a constant NUM_AXSIS
*/
axis_t bl, br, fl, fr;
float MaxSpeed = 3.14; // rad/s
#define PulseRev 3200
const float SECONDS_TO_MICROSECONDS = 1000000;
const float STEPPER_CONVERSION = SECONDS_TO_MICROSECONDS * 2 * PI / PulseRev;
#define bl_pulse 49
#define bl_dir 48
#define br_pulse 51
#define br_dir 50
#define fl_pulse 47
#define fl_dir 46
#define fr_pulse 45
#define fr_dir 44
#define PIN_ENABLE_ALL 40
// One-time hardware setup: open the debug serial port, create the four
// stepper axes (back-left/right, front-left/right), enable them, and dump
// the AVR timer-2 registers the stepper library uses, for debugging.
void setup()
{
  Serial.begin(115200);
  StepperDriver.init();
  delay(100);  // brief pause after library init before creating axes
  // Serial.print("rad conversion: ");
  // Serial.println(RAD_PER_SEC_TO_STEPS_PER_SEC);
  // Serial.print("sec conversion: ");
  // Serial.println(SECONDS_TO_MICROSECONDS);
  // newAxis(pulse pin, dir pin, 255, pulses per rev).
  // NOTE(review): the meaning of the third argument (255) is not visible
  // here — confirm against the ArduinoStepperDriver library.
  bl = StepperDriver.newAxis(bl_pulse, bl_dir, 255, PulseRev);
  br = StepperDriver.newAxis(br_pulse, br_dir, 255, PulseRev);
  fr = StepperDriver.newAxis(fr_pulse, fr_dir, 255, PulseRev);
  fl = StepperDriver.newAxis(fl_pulse, fl_dir, 255, PulseRev);
  StepperDriver.enable(bl);
  StepperDriver.enable(br);
  StepperDriver.enable(fl);
  StepperDriver.enable(fr);
  // NOTE(review): PIN_ENABLE_ALL is written without a pinMode(..., OUTPUT)
  // call anywhere in this sketch — the pin may still be an input. LOW here
  // suggests an active-low driver enable line; confirm wiring.
  digitalWrite(PIN_ENABLE_ALL, LOW);
  // Dump timer registers so the interrupt configuration can be inspected.
  Serial.println("TCCR2A: ");
  Serial.println(TCCR2A, BIN);
  Serial.println("TCCR2B: ");
  Serial.println(TCCR2B, BIN);
  Serial.println("TIMSK2: ");
  Serial.println(TIMSK2, BIN);
}
void writeAll(float vel)
{
Serial.print("Speed: ");
Serial.println(vel, 4);
Serial.print("Delays: [ ");
writeSpeed(fl, vel);
writeSpeed(fr, vel);
writeSpeed(br, vel);
writeSpeed(bl, vel);
Serial.println("]");
}
// Convert a signed wheel velocity [rad/s] into a step direction and an
// inter-step delay [us], log the delay, and send both to the given axis.
// A delay of 0 stops the axis (covers the vel == 0 case).
void writeSpeed(axis_t motor, float vel)
{
  uint16_t delay_us = 0; // This works for if vel is 0
  // Compute motor direction from the sign, then work with the magnitude.
  if(vel > 0)
  {
    StepperDriver.setDir(motor, FORWARD);
  }
  else
  {
    vel = -vel;
    StepperDriver.setDir(motor, BACKWARD);
  }
  // Compute delay between steps to achieve desired velocity
  if(vel != 0)
  {
    float delay_float = STEPPER_CONVERSION / vel;
    // Clamp to the uint16_t range: very small velocities would otherwise
    // overflow the cast and wrap to a tiny delay (a very fast motor).
    if(delay_float > 65535.0f)
    {
      delay_float = 65535.0f;
    }
    // Restored the cast's target type, which had been stripped from the
    // original (`static_cast(...)` does not compile).
    delay_us = static_cast<uint16_t>(delay_float);
  }
  Serial.print(delay_us);
  Serial.print(", ");
  // Update motor
  StepperDriver.setDelay(motor, delay_us);
}
// Exercise the drivetrain forever: full speed forward 2 s, stop 0.5 s,
// full speed reverse 2 s, stop 0.5 s.
void loop()
{
  const float velocities[4] = {MaxSpeed, 0.0f, -MaxSpeed, 0.0f};
  const unsigned long pauses_ms[4] = {2000, 500, 2000, 500};
  for (int step = 0; step < 4; step++)
  {
    writeAll(velocities[step]);
    delay(pauses_ms[step]);
  }
}
================================================
FILE: experimental_testing/RobotArduinoTesting/TestMotor/TestMotor.ino
================================================
#define PIN_ENABLE 52
#define PIN_BL_PWM 6
#define PIN_BL_DIR 48
#define FW 1
#define BW 0
#define SPEED_SLOW 50
#define SPEED_FAST 255
// Configure the enable, PWM and direction pins as outputs and open the
// serial port for debug logging.
void setup() {
  const int output_pins[3] = {PIN_ENABLE, PIN_BL_PWM, PIN_BL_DIR};
  for (int i = 0; i < 3; i++) {
    pinMode(output_pins[i], OUTPUT);
  }
  Serial.begin(9600);
}
// One motor test pulse: log a label, apply the given PWM duty and
// direction, run for 2 s with the driver enabled, then disable for 0.5 s.
static void runMotorStep(const char* label, int pwm, int dir) {
  Serial.print(label);
  analogWrite(PIN_BL_PWM, pwm);
  digitalWrite(PIN_BL_DIR, dir);
  digitalWrite(PIN_ENABLE, HIGH);
  // Run for a little bit
  delay(2000);
  // Stop and rest
  digitalWrite(PIN_ENABLE, LOW);
  delay(500);
}

// Alternate forever between a slow forward pulse and a fast reverse pulse.
void loop() {
  runMotorStep("Forward", SPEED_SLOW, FW);
  runMotorStep("Reverse", SPEED_FAST, BW);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/WifiServerWithESP/RobotServer.cpp
================================================
#include // This has to be before ArduinoJson.h to fix compiler issues
#include "RobotServer.h"
#include
// Construct the server. `serial` is the link to the ESP8266 wifi bridge,
// `debug` is the host/debug console. Opens the ESP link at 115200 baud;
// connection-state flags start false until the ESP reports otherwise.
// (The debug port is expected to be opened by the sketch's setup().)
RobotServer::RobotServer(HardwareSerial& serial, HardwareSerial& debug)
: serial_(serial),
  debug_(debug),
  clientConnected_(false),
  wifiConnected_(false)
{
    serial_.begin(115200);
}
RobotServer::oneLoop()
{
COMMAND cmd = COMMAND::NONE;
String newMsg = getAnyIncomingMessage();
if(newMsg.length() != 0)
{
debug_.print("[RobotServer] ");
debug_.print("RCV: ");
debug_.println(newMsg);
if(newMsg == "Client connected")
{
clientConnected_ = true;
wifiConnected_ = true;
}
else if(newMsg == "Client disconnected")
{
clientConnected_ = false;
wifiConnected_ = true;
}
else if(newMsg == "Connecting..")
{
wifiConnected_ = false;
clientConnected_ = false;
}
else if(newMsg.startsWith("Connected to WiFi."))
{
clientConnected_ = false;
wifiConnected_ = true;
}
else if(newMsg == "Waiting for client connection")
{
clientConnected_ = false;
wifiConnected_ = true;
}
else
{
cmd = getCommand(cleanString(newMsg));
}
}
return cmd;
}
// Strip serial noise surrounding a JSON payload by keeping only the text
// from the first '{' through the last '}' (inclusive).
String RobotServer::cleanString(String message)
{
    const int first_brace = message.indexOf("{");
    const int past_last_brace = message.lastIndexOf("}") + 1;
    return message.substring(first_brace, past_last_brace);
}
// Return everything currently buffered on the ESP serial link, or an
// empty String when no bytes are waiting.
String RobotServer::getAnyIncomingMessage()
{
    if(!serial_.available())
    {
        return "";
    }
    return serial_.readString();
}
// Parse a JSON command string and map its "type" field onto a COMMAND
// value. As a side effect the matching reply is sent back over the link:
// an ack echoing the type for recognized commands, a status payload for
// "status", or an error reply for bad/missing input.
RobotServer::COMMAND RobotServer::getCommand(String message)
{
    COMMAND cmd = COMMAND::NONE;
    // 256 bytes of stack JSON storage; incoming commands are small.
    StaticJsonDocument<256> doc;
    DeserializationError err = deserializeJson(doc, message);
    debug_.print("[RobotServer] GetCommand(): ");
    debug_.println(message);
    if(err)
    {
        // Message was not valid JSON at all.
        debug_.print("[RobotServer] Error parsing JSON: ");
        debug_.println(err.c_str());
        cmd = COMMAND::ERROR_BAD_JSON;
        sendErr("bad_json");
    }
    else
    {
        // NOTE(review): the "" branch below assumes a missing "type" key
        // converts to an empty String — confirm for this ArduinoJson version.
        String type = doc["type"];
        if(type == "move")
        {
            debug_.println("[RobotServer] Got MOVE command ");
            cmd = COMMAND::MOVE;
            sendAck(type);
        }
        else if(type == "place")
        {
            debug_.println("[RobotServer] Got PLACE command ");
            cmd = COMMAND::PLACE;
            sendAck(type);
        }
        else if(type == "dock")
        {
            debug_.println("[RobotServer] Got DOCK command ");
            cmd = COMMAND::DOCK;
            sendAck(type);
        }
        else if(type == "undock")
        {
            debug_.println("[RobotServer] Got UNDOCK command ");
            cmd = COMMAND::UNDOCK;
            sendAck(type);
        }
        else if(type == "dropoff")
        {
            debug_.println("[RobotServer] Got DROPOFF command ");
            cmd = COMMAND::DROPOFF;
            sendAck(type);
        }
        else if(type == "pickup")
        {
            debug_.println("[RobotServer] Got PICKUP command ");
            cmd = COMMAND::PICKUP;
            sendAck(type);
        }
        else if(type == "position")
        {
            debug_.println("[RobotServer] Got POSITION command ");
            cmd = COMMAND::POSITION;
            sendAck(type);
        }
        else if(type == "status")
        {
            // Status requests get the status payload instead of a plain ack.
            debug_.println("[RobotServer] Got STATUS command ");
            cmd = COMMAND::STATUS;
            sendStatus();
        }
        else if(type == "")
        {
            debug_.println("[RobotServer] ERROR: Type field empty or not specified ");
            cmd = COMMAND::ERROR_NO_TYPE;
            sendErr("no_type");
        }
        else
        {
            // "Unkown" spelling is part of the wire/debug strings; left as-is.
            debug_.println("[RobotServer] ERROR: Unkown type field ");
            cmd = COMMAND::ERROR_UNKOWN_TYPE;
            sendErr("unkown_type");
        }
    }
    return cmd;
}
// Low-level send: write the message to the ESP link terminated with a NUL
// byte (the receiver's end-of-payload marker), then echo it to debug.
void RobotServer::sendMsg(String msg)
{
    String framed = msg + '\0';
    serial_.print(framed);
    debug_.print("[RobotServer] Send: ");
    debug_.println(msg);
}
// Reply with an acknowledgement: {"type":"ack","data":<data>}, where data
// is the command type being acknowledged.
void RobotServer::sendAck(String data)
{
    StaticJsonDocument<256> reply;
    reply["type"] = "ack";
    reply["data"] = data;
    String serialized;
    serializeJson(reply, serialized);
    sendMsg(serialized);
}
// Reply with an error message; `data` names the failure (e.g. "bad_json").
// NOTE(review): this sends "type":"ack" exactly like sendAck() — only the
// data payload distinguishes an error from an ack on the wire. Presumably
// this was meant to be "type":"error"; confirm what the master expects
// before changing it.
void RobotServer::sendErr(String data)
{
    StaticJsonDocument<256> doc;
    doc["type"] = "ack";
    doc["data"] = data;
    String msg;
    serializeJson(doc, msg);
    sendMsg(msg);
}
// Reply to a status request. Currently a placeholder: the data field is
// the literal string "not implimented" (typo included — it is part of the
// wire payload and is left untouched here in case the master matches on it).
void RobotServer::sendStatus()
{
    StaticJsonDocument<256> doc;
    doc["type"] = "status";
    doc["data"] = "not implimented";
    String msg;
    serializeJson(doc, msg);
    sendMsg(msg);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/WifiServerWithESP/RobotServer.h
================================================
/*
* Robot server that handles gettings messages from master
* and responding correctly
*/
#ifndef RobotServer_h
#define RobotServer_h
#include
class RobotServer
{
  public:
    // Result of one polling pass. NONE means nothing actionable arrived;
    // ERROR_* values report malformed messages; the remainder mirror the
    // master's command set (see doc/NetworkPrototcols.txt).
    enum COMMAND
    {
        NONE,
        ERROR_NO_TYPE,
        ERROR_UNKOWN_TYPE,
        ERROR_BAD_JSON,
        MOVE,
        PLACE,
        DOCK,
        UNDOCK,
        DROPOFF,
        PICKUP,
        POSITION,
        STATUS
    };

    // Constructor
    // serial: link to the ESP8266 wifi bridge. debug: host/debug console.
    RobotServer(HardwareSerial& serial, HardwareSerial& debug);

    // Poll the serial link once; handles at most one message and sends any
    // required reply. Returns a COMMAND value (as an int).
    int oneLoop();

  private:
    HardwareSerial& serial_;   // link to the ESP8266 bridge
    HardwareSerial& debug_;    // debug console
    bool clientConnected_;     // true while the master's TCP client is connected
    bool wifiConnected_;       // true while the ESP reports wifi is up

    // Read whatever is buffered on serial_, or "" if nothing is waiting.
    String getAnyIncomingMessage();
    // Keep only the first '{' through the last '}' of a noisy payload.
    String cleanString(String message);
    // Parse a JSON command and send the matching ack/err/status reply.
    COMMAND getCommand(String message);
    // Low-level send: NUL-terminated message plus debug echo.
    void sendMsg(String msg);
    void sendAck(String data);
    void sendErr(String data);
    void sendStatus();
};
#endif
================================================
FILE: experimental_testing/RobotArduinoTesting/WifiServerWithESP/WifiServerWithESP.ino
================================================
#include "RobotServer.h"
// Server bridging the ESP8266 (Serial3) and the debug console (Serial).
RobotServer server = RobotServer(Serial3, Serial);

// One-time init: open the host-computer serial port and announce startup.
void setup() {
  Serial.begin(9600);
  Serial.println("Wifi client starting");
}
// Poll the server roughly every 50 ms. The returned command value is not
// acted on yet in this test sketch.
void loop() {
  const int cmd = server.oneLoop();
  (void)cmd;  // no command handling yet
  delay(50);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/WifiTesting/WifiTesting.ino
================================================
// Arduino pin 15 (RX3) to ESP8266 TX
// Arduino pin 14 (TX3) to voltage divider then to ESP8266 RX
// Connect GND from the Arduino to GND on the ESP8266
// Pull ESP8266 CH_PD HIGH
// When a command is entered in to the serial monitor on the computer
// the Arduino will relay it to the ESP8266
// Open both serial links: USB to the host computer, and Serial3 to the
// ESP8266, then print usage hints to the monitor.
void setup()
{
  Serial.begin(9600);      // communication with the host computer
  Serial3.begin(115200);   // serial link to the ESP8266
  Serial.println("");
  Serial.println("Remember to to set Both NL & CR in the serial monitor.");
  Serial.println("Ready");
  Serial.println("");
}
// Transparent byte bridge: forward ESP8266 output to the serial monitor,
// and monitor input to the ESP8266, one byte per direction per pass.
void loop()
{
  if (Serial3.available())
  {
    Serial.write(Serial3.read());
  }
  if (Serial.available())
  {
    Serial3.write(Serial.read());
  }
}
================================================
FILE: experimental_testing/RobotArduinoTesting/all_motors/all_motors.ino
================================================
// Pinouts
#define PIN_ENABLE 52
#define PIN_FR_PWM 3
#define PIN_FR_DIR 49
#define PIN_FL_PWM 2
#define PIN_FL_DIR 51
#define PIN_BR_PWM 5
#define PIN_BR_DIR 50
#define PIN_BL_PWM 6
#define PIN_BL_DIR 48
// Motor index
#define FR 0
#define FL 1
#define BR 2
#define BL 3
// Motor arrays
const int PWM_ARRAY[4] = {PIN_FR_PWM, PIN_FL_PWM, PIN_BR_PWM, PIN_BL_PWM};
const int DIR_ARRAY[4] = {PIN_FR_DIR, PIN_FL_DIR, PIN_BR_DIR, PIN_BL_DIR};
const int SPEED_ARRAY[4] = {130, 75, 75, 100};
const int NUM_DIRECTIONS = 4;
// Direction vectors
const int MOTOR_DIRECTIONS[NUM_DIRECTIONS][4] =
{
{1, 1, 1, 1}, //Forward
{0, 0, 0, 0}, //Backward
{1, 0, 0, 1}, //Left
{0, 1, 1, 0} //Right
// {1, 0, 1, 0}, //Forward-Left
// {1, 0, 0, 1}, //Forward-Right
// {0, 1, 0, 1}, //Backward-Left
// {0, 1, 1, 0} //Backward-Right
};
// Drive all four motors: apply each motor's PWM duty and the direction
// pattern for the requested direction index, then raise the shared enable.
void commandMotors(int direction)
{
  for(int motor = 0; motor < 4; motor++)
  {
    analogWrite(PWM_ARRAY[motor], SPEED_ARRAY[motor]);
    digitalWrite(DIR_ARRAY[motor], MOTOR_DIRECTIONS[direction][motor]);
  }
  digitalWrite(PIN_ENABLE, HIGH);
}
// Configure the enable, PWM, and direction pins as outputs and start serial.
void setup()
{
  pinMode(PIN_ENABLE, OUTPUT);
  for (int motor = 0; motor < 4; ++motor)
  {
    pinMode(PWM_ARRAY[motor], OUTPUT);
    pinMode(DIR_ARRAY[motor], OUTPUT);
  }
  Serial.begin(9600);
}
// Index of the current test direction (row into MOTOR_DIRECTIONS).
int stage = 0;

// Drive the motors in the current test direction for 1 s, cut the PWM and
// wait 1.5 s, then advance to the next direction, wrapping around.
void loop()
{
  // Command motors on
  commandMotors(stage);
  Serial.print("On: ");
  Serial.println(stage);

  // Let the motors run for a little bit
  delay(1000);

  // Zero the PWM so the motors stop being driven.
  // NOTE(review): the original comment said "turn motors off and let coast",
  // but the enable pin is (re)written HIGH here, which keeps the driver
  // enabled -- confirm whether this was meant to be LOW.
  digitalWrite(PIN_ENABLE,HIGH);
  for(int i = 0; i < 4; i++)
  {
    analogWrite(PWM_ARRAY[i],0);
  }
  Serial.println("Off");

  // Pause between stages
  delay(1500);

  // Advance to the next direction, wrapping back to 0 at the end
  // (replaces the explicit if/else increment with the modulo idiom)
  stage = (stage + 1) % NUM_DIRECTIONS;
}
================================================
FILE: experimental_testing/RobotArduinoTesting/all_motors_speed_control/Motor.cpp
================================================
#include "Motor.h"
#include <Arduino.h>
// Construct a velocity-controlled motor. Wires the PID controller to the
// filtered velocity (input), the PWM command (output), and the velocity
// setpoint, and configures the PWM/direction pins.
Motor::Motor(int pwmPin, int dirPin, int encPinA, int encPinB, double Kp, double Ki, double Kd, float velFilterFreq)
  : pwmPin_(pwmPin),
    dirPin_(dirPin),
    inputVel_(0.0),
    currentVelRaw_(0.0),
    currentVelFiltered_(0.0),
    outputCmd_(0.0),
    prevCount_(0),
    prevMillis_(millis()),
    enc_(encPinA, encPinB),
    // Restored "&currentVelFiltered_" -- the address-of operator had been
    // garbled into an HTML currency entity in this copy of the file
    controller_(&currentVelFiltered_, &outputCmd_, &inputVel_, Kp, Ki, Kd, DIRECT),
    velFilter_(LOWPASS, velFilterFreq)
{
  pinMode(pwmPin_, OUTPUT);
  pinMode(dirPin_, OUTPUT);
  controller_.SetMode(AUTOMATIC);
  controller_.SetOutputLimits(-255, 255);  // full reverse .. full forward PWM
}
// Set the velocity setpoint (revs/sec) that the PID loop in runLoop() tracks.
void Motor::setCommand(double vel)
{
  inputVel_ = vel;
}
// Run one iteration of the velocity controller: measure speed from the
// encoder, lowpass-filter it, run the PID, and write PWM/direction outputs.
void Motor::runLoop()
{
  // Read current values
  long curCount = enc_.read();
  unsigned long curMillis = millis();

  // Compute deltas since the previous call
  long deltaCount = curCount - prevCount_;
  // Counts -> revolutions requires DIVIDING by counts-per-rev; the original
  // multiplied, yielding counts^2/rev. (The sibling implementation in
  // single_motor_speed_control divides.) Also restores the template
  // argument that was garbled out of the cast.
  double deltaRevs = static_cast<double>(deltaCount) / COUNTS_PER_REV;
  unsigned long deltaMillis = curMillis - prevMillis_;

  // Guard against division by zero when called twice within the same ms
  if(deltaMillis == 0)
  {
    return;
  }

  // Compute current velocity in revs/second. ms -> s means MULTIPLYING the
  // rate by 1000; the original divided by (deltaMillis * 1000.0), which is
  // off by a factor of 10^6.
  currentVelRaw_ = 1000.0 * deltaRevs / static_cast<double>(deltaMillis);
  currentVelFiltered_ = velFilter_.input(currentVelRaw_);

  // Run PID controller (reads currentVelFiltered_/inputVel_, writes outputCmd_)
  controller_.Compute();

  // Update output direction from the sign of the command, then magnitude
  if(outputCmd_ < 0)
  {
    digitalWrite(dirPin_,0);
  }
  else
  {
    digitalWrite(dirPin_,1);
  }
  analogWrite(pwmPin_, abs(outputCmd_));

  // Copy current values into previous for the next iteration
  prevCount_ = curCount;
  prevMillis_ = curMillis;
}
================================================
FILE: experimental_testing/RobotArduinoTesting/all_motors_speed_control/Motor.h
================================================
/* Requires libraries:
* Filters: https://github.com/JonHub/Filters
* PID: https://playground.arduino.cc/Code/PIDLibrary/
* Encoder: https://www.pjrc.com/teensy/td_libs_Encoder.html
*/
#ifndef Motor_h
#define Motor_h
#include <PID_v1.h>
#include <Encoder.h>
#include <Filters.h>
#define COUNTS_PER_REV 835
// Closed-loop speed controller for one DC motor: encoder feedback ->
// lowpass filter -> PID -> PWM magnitude + direction pin.
class Motor
{
  public:
    /*
     * Motor object constructor
     * pwmPin - which pin the motor PWM is connected to
     * dirPin - which pin the motor direction is connected to
     * encPinA - which pin the encoder wire A is connected to. This should be a pin capable of handling interrupts
     * encPinB - which pin the encoder wire B is connected to. This should ideally (but not required) be a pin capable of handling interrupts
     * Kp - Proportional gain
     * Ki - Integral gain
     * Kd - Derivative gain
     * velFilterFreq - frequency to use for velocity lowpass filter
     */
    Motor(int pwmPin, int dirPin, int encPinA, int encPinB, double Kp, double Ki, double Kd, float velFilterFreq);

    /*
     * Set the desired velocity in revs/second
     */
    void setCommand(double vel);

    /*
     * Run the actual controller. Make sure this gets called reasonably quickly (i.e. every 20 ms or so)
     */
    void runLoop();

  private:
    // Pins
    int pwmPin_;
    int dirPin_;

    // Values for computation
    double inputVel_;           // Desired velocity in revs/sec
    double currentVelRaw_;      // Raw current velocity in revs/sec
    double currentVelFiltered_; // Lowpass-filtered velocity fed to the PID controller
    double outputCmd_;          // Output command in [-255, 255]
    long prevCount_;            // Encoder count from previous loop

    // Timer
    unsigned long prevMillis_;  // Timer from previous loop

    // Other objects
    Encoder enc_;               // Encoder object
    PID controller_;            // PID controller
    FilterOnePole velFilter_;   // Velocity lowpass filter
};
#endif // Motor_h
================================================
FILE: experimental_testing/RobotArduinoTesting/all_motors_speed_control/all_motors_speed_control.ino
================================================
/* Requires libraries:
* Filters: https://github.com/JonHub/Filters
* PID: https://playground.arduino.cc/Code/PIDLibrary/
* Encoder: https://www.pjrc.com/teensy/td_libs_Encoder.html
*/
#include "Motor.h"
// Pinouts
#define PIN_ENABLE 52
#define PIN_FR_PWM 3
#define PIN_FR_DIR 49
#define PIN_FL_PWM 2
#define PIN_FL_DIR 51
#define PIN_BR_PWM 5
#define PIN_BR_DIR 50
#define PIN_BL_PWM 6
#define PIN_BL_DIR 48
#define PIN_FR_ENC_A 20
#define PIN_FR_ENC_B 35
#define PIN_FL_ENC_A 21
#define PIN_FL_ENC_B 41
#define PIN_BR_ENC_A 18
#define PIN_BR_ENC_B 23
#define PIN_BL_ENC_A 19
#define PIN_BL_ENC_B 29
#define VEL_FILTER_FREQ 0.5 // HZ
const double Kp = 100;
const double Ki = 1;
const double Kd = 1;
Motor allMotors[4] = {
Motor(PIN_FR_PWM, PIN_FR_DIR, PIN_FR_ENC_A, PIN_FR_ENC_B, Kp, Ki, Kd, VEL_FILTER_FREQ),
Motor(PIN_FL_PWM, PIN_FL_DIR, PIN_FL_ENC_A, PIN_FL_ENC_B, Kp, Ki, Kd, VEL_FILTER_FREQ),
Motor(PIN_BR_PWM, PIN_BR_DIR, PIN_BR_ENC_A, PIN_BR_ENC_B, Kp, Ki, Kd, VEL_FILTER_FREQ),
Motor(PIN_BL_PWM, PIN_BL_DIR, PIN_BL_ENC_A, PIN_BL_ENC_B, Kp, Ki, Kd, VEL_FILTER_FREQ) };
const int MOTOR_DIRECTIONS[4][4] =
{
{1, 1, 1, 1}, //Forward
{-1, -1, -1, -1}, //Backward
{1, -1, -1, 1}, //Left
{-1, 1, 1, -1} //Right
// {1, 0, 1, 0}, //Forward-Left
// {1, 0, 0, 1}, //Forward-Right
// {0, 1, 0, 1}, //Backward-Left
// {0, 1, 1, 0} //Backward-Right
};
const float SPEED[4] = {2, 1, 1, 0.5}; // scale factor for rev/s
// Configure the motor driver enable pin and open the debug serial port.
void setup()
{
  pinMode(PIN_ENABLE, OUTPUT);
  Serial.begin(250000);
}
// Block until the user sends one of '1'..'4' over serial.
// Returns the selected motion number (1-4). Any other byte is ignored.
int waitForInput()
{
  while(true)
  {
    if(Serial.available() > 0)
    {
      int incomingByte = Serial.read();
      // Compare against character literals instead of the raw ASCII values
      // 49..52 the original used -- same bytes, clearer intent.
      if(incomingByte >= '1' && incomingByte <= '4')
      {
        return incomingByte - '0';
      }
      // Anything else: keep waiting
    }
  }
}
// Load speed commands for the selected motion (1-4) into all four motor
// controllers, logging each command, then enable the drivers.
void setMotorCommands(int motion)
{
  for (int motor = 0; motor < 4; ++motor)
  {
    int dir = MOTOR_DIRECTIONS[motion-1][motor];
    float speed = dir * SPEED[motor];
    Serial.print("Motor ");
    Serial.print(motor);
    Serial.print(", Direction ");
    Serial.print(dir);
    Serial.print(", Speed ");
    Serial.println(speed);
    allMotors[motor].setCommand(speed);
  }
  digitalWrite(PIN_ENABLE, 1);
}
// Service every motor's control loop roughly every 20 ms for 2 seconds.
void runMotors()
{
  const int loopPeriodMs = 20;
  const int runTimeMs = 2000;
  unsigned long start = millis();
  unsigned long lastLoop = start;
  while (millis() - start < runTimeMs)
  {
    if (millis() - lastLoop > loopPeriodMs)
    {
      lastLoop = millis();
      for (int motor = 0; motor < 4; ++motor)
      {
        allMotors[motor].runLoop();
      }
    }
  }
}
// Main cycle: wait for a motion command, run it for a fixed time, then
// disable the motor drivers.
void loop()
{
  int motion = waitForInput();
  setMotorCommands(motion);
  runMotors();
  digitalWrite(PIN_ENABLE, 0);  // stop motors
}
================================================
FILE: experimental_testing/RobotArduinoTesting/kalman_test/KalmanFilter.cpp
================================================
#include "KalmanFilter.h"
// Construct the filter from its model matrices. Dimensions are taken from
// the matrices themselves: m = number of measurements (rows of C),
// n = number of states (rows of A). No work happens until init() is called.
KalmanFilter::KalmanFilter(
    double dt,
    const mat& A,
    const mat& B,
    const mat& C,
    const mat& Q,
    const mat& R,
    const mat& P)
  : A(A), B(B), C(C), Q(Q), R(R), P0(P),
    m(C.get_rows()), n(A.get_rows()), dt(dt),
    // NOTE(review): x_hat is constructed as (1,n) here but init() assigns
    // mat::zeros(n,1) -- confirm which orientation the mat library expects.
    I(mat::identity(n)), x_hat(1,n), x_hat_new(1,n)
{
}
// Initialize the filter with a caller-supplied initial state x0 and start
// time t0; resets the covariance to the constructor-supplied P0.
void KalmanFilter::init(double t0, const mat& x0)
{
  x_hat = x0;
  P = P0;
  this->t0 = t0;
  t = t0;
}
// Initialize the filter with zero state and t0 = 0.
void KalmanFilter::init()
{
  // NOTE(review): zeros(n,1) is a column vector while the constructor sized
  // x_hat as (1,n) -- verify the intended orientation in the mat library.
  x_hat = mat::zeros(n,1);
  P = P0;
  t0 = 0;
  t = t0;
}
// Propagate the estimate one step using a (possibly time-varying) dynamics
// matrix A and control input u. Stores A and dt for this step, advances the
// clock, and leaves the a-priori estimate in both x_hat_new and x_hat.
void KalmanFilter::predict(double dt, const mat& A, const mat& u)
{
  this->A = A;                    // adopt the caller-supplied dynamics
  this->dt = dt;
  x_hat_new = A * x_hat + B * u;  // a-priori state estimate
  P = A*P*A.t() + Q;              // a-priori covariance
  t += dt;
  x_hat = x_hat_new;
}
// Correct the predicted state with measurement y (standard Kalman update).
void KalmanFilter::update(const mat& y)
{
  // Kalman gain: K = P C^T (C P C^T + R)^-1
  K = P*C.t()*(C*P*C.t() + R).inv();
  // x_hat_new still holds the prediction (predict() copied it into x_hat
  // as well), so adding the innovation term forms the a-posteriori estimate
  x_hat_new += K * (y - C*x_hat);
  P = (I - K*C)*P;
  x_hat = x_hat_new;
}
================================================
FILE: experimental_testing/RobotArduinoTesting/kalman_test/KalmanFilter.h
================================================
// Loosely based on kalman-cpp: https://github.com/hmartiro/kalman-cpp
#ifndef KalmanFilter_h
#define KalmanFilter_h
#include "LinearAlgebra.h"
// Discrete-time Kalman filter for a linear system x' = A x + B u, y = C x,
// with caller-supplied noise covariances. The `mat` type comes from the
// LinearAlgebra library used by the accompanying sketch.
class KalmanFilter
{
  public:
    /**
     * Create a Kalman filter with the specified matrices.
     *   A - System dynamics matrix
     *   B - Control matrix
     *   C - Output matrix
     *   Q - Process noise covariance
     *   R - Measurement noise covariance
     *   P - Estimate error covariance
     */
    KalmanFilter(
        double dt,
        const mat& A,
        const mat& B,
        const mat& C,
        const mat& Q,
        const mat& R,
        const mat& P
    );

    /**
     * Initialize the filter with initial states as zero.
     */
    void init();

    /**
     * Initialize the filter with a guess for initial states.
     */
    void init(double t0, const mat& x0);

    /**
     * Predict estimated state based on the time step, dynamics matrix, and control input
     */
    void predict(double dt, const mat& A, const mat& u);

    /**
     * Update the estimated state based on measured values.
     */
    void update(const mat& y);

    /**
     * Return the current state and time.
     */
    mat state() { return x_hat; };
    double time() { return t; };

  private:
    // Matrices for computation (P0 keeps the initial covariance so init()
    // can reset P)
    mat A, B, C, Q, R, P, K, P0;
    // System dimensions: m = measurement count, n = state count
    uint8_t m, n;
    // Initial and current time
    double t0, t;
    // Discrete time step
    double dt;
    // n-size identity
    mat I;
    // Estimated states (x_hat_new holds the a-priori prediction)
    mat x_hat, x_hat_new;
};
#endif
================================================
FILE: experimental_testing/RobotArduinoTesting/kalman_test/kalman_test.ino
================================================
#include "KalmanFilter.h"
#include "LinearAlgebra.h"
mat A = mat::identity(6);
mat B = mat::zeros(6,3);
mat C = mat::zeros(3,6);
mat Q = mat::identity(6);
mat R = mat::zeros(3,3);
mat P = mat::identity(6);
KalmanFilter kf = KalmanFilter(0.1, A, B, C, Q, R, P);
// Fill in the constant parts of the model matrices, then initialize the
// filter. From the shapes, the state appears to be [position(3); velocity(3)]:
// B routes the 3 control inputs into the last three states and C measures
// the first three states directly. (Inferred from the matrix layout -- confirm.)
void setup()
{
  // Clear the diagonal entries for the last three (velocity) states, which
  // mat::identity() set to 1
  A(3,3) = 0;
  A(4,4) = 0;
  A(5,5) = 0;
  // Control input drives the velocity states one-to-one
  B(3,0) = 1;
  B(4,1) = 1;
  B(5,2) = 1;
  // Measurements observe the first three states directly
  C(0,0) = 1;
  C(1,1) = 1;
  C(2,2) = 1;
  kf.init();
}
// Run one filter cycle with a constant dt, unit control input, and unit
// measurement, then format the resulting state estimate into a string.
void loop()
{
  double dt = 0.1;
  // Refresh the position-from-velocity coupling terms of the dynamics matrix
  A(0,3) = dt;
  A(1,4) = dt;
  // Bug fix: A is 6x6, so valid indices are 0-5; the original wrote A(2,6),
  // one past the end. The pattern (0,3),(1,4),(2,5) makes the intent clear.
  A(2,5) = dt;
  // NOTE(review): B is 6x3, so B*u needs u to be 3x1; mat::ones(1,3) looks
  // transposed -- confirm against the mat library's conventions.
  mat u = mat::ones(1,3);
  mat y = mat::ones(1,3);
  kf.predict(dt, A, u);
  kf.update(y);
  mat x = kf.state();
  String s;
  x.print(s);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/lifter_nano_test/lifter_nano_test.ino
================================================
#define LIFTER_PIN_0 1
#define LIFTER_PIN_1 2
#define LIFTER_PIN_2 3
#define FEEDBACK_PIN 4
void setup ()
{
Serial.begin(115200);
pinMode(LIFTER_PIN_0, OUTPUT);
pinMode(LIFTER_PIN_1, OUTPUT);
pinMode(LIFTER_PIN_2, OUTPUT);
pinMode(FEEDBACK_PIN, INPUT_PULLUP);
}
int getCommand()
{
String instr = "";
int outCmd = 0;
if(Serial.available())
{
instr = Serial.readString();
outCmd = instr.toInt();
}
return outCmd;
}
// Encode position command `num` onto the three lifter output pins as a
// 3-bit binary value (pin 0 = least significant bit).
void sendPos(int num)
{
  // Decompose num into bits via successive divide/modulo
  int p2 = num / 4;   // bit 2 (for num >= 8 this exceeds 1; digitalWrite treats any nonzero value as HIGH)
  int r = num % 4;
  int p1 = r / 2;     // bit 1
  r = r % 2;
  int p0 = r / 1;     // bit 0 (the /1 is a no-op, presumably kept for symmetry)
  digitalWrite(LIFTER_PIN_0, p0);
  digitalWrite(LIFTER_PIN_1, p1);
  digitalWrite(LIFTER_PIN_2, p2);
}
bool getFeedback()
{
return !digitalRead(FEEDBACK_PIN);
}
// Poll for a new command at 10 Hz and forward it to the lifter, but only
// when the lifter reports idle and the command is non-zero.
void loop()
{
  int cmd = getCommand();
  bool busy = getFeedback();
  if (!busy && cmd > 0)
  {
    sendPos(cmd);
  }
  delay(100);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/motor_driver_test_script/motor_driver_test_script.ino
================================================
#include "ClearCore.h"
#define LIFTER_MOTOR ConnectorM3
#define INCREMENTAL_UP_PIN DI7
#define INCREMENTAL_DOWN_PIN DI6
#define LATCH_SERVO_PIN IO0 // Only IO0 does pwm
#define HOMING_SWITCH_PIN IO4
#define LIFTER_STEPS_PER_REV 800
#define LIFTER_MAX_VEL 7 // revs/sec
#define LIFTER_MAX_ACC 10 // rev/sec^2
// Max num steps ~51000
#define SAFETY_MAX_POS 120 // Revs, Sanity check on desired position to make sure it isn't larger than this
#define SAFETY_MIN_POS 0 // Revs, Sanity check on desired position to make sure it isn't less than this
#define LATCH_ACTIVE_MS 2000
#define LATCH_OPEN_DUTY_CYCLE 50
#define LATCH_CLOSE_DUTY_CYCLE 200
// Configure the ClearCore IO and the lifter motor (step-and-direction mode
// with velocity/acceleration limits), then open serial for status output.
void setup()
{
  pinMode(INCREMENTAL_UP_PIN, INPUT);
  pinMode(INCREMENTAL_DOWN_PIN, INPUT);
  pinMode(HOMING_SWITCH_PIN, INPUT);
  pinMode(LATCH_SERVO_PIN, OUTPUT);

  // Sets the input clocking rate. This normal rate is ideal for ClearPath
  // step and direction applications.
  MotorMgr.MotorInputClocking(MotorManager::CLOCK_RATE_LOW);

  // Sets all motor connectors into step and direction mode.
  MotorMgr.MotorModeSet(MotorManager::MOTOR_ALL, Connector::CPM_MODE_STEP_AND_DIR);

  // Limits are specified in revs and converted to steps here
  LIFTER_MOTOR.VelMax(LIFTER_MAX_VEL*LIFTER_STEPS_PER_REV);
  LIFTER_MOTOR.AccelMax(LIFTER_MAX_ACC*LIFTER_STEPS_PER_REV);
  LIFTER_MOTOR.HlfbMode(MotorDriver::HLFB_MODE_STATIC);
  // Invert step/direction polarity -- presumably to match the motor wiring;
  // confirm against the hardware
  LIFTER_MOTOR.PolarityInvertSDEnable(true);
  LIFTER_MOTOR.EnableRequest(true);

  Serial.begin(115200);
  Serial.println("Test script starting");
}
// Jog the lifter from the two incremental inputs: up pin -> full speed up,
// down pin -> full speed down, neither -> hard stop. Prints the pin states
// and, while moving, the commanded position.
void test_motor()
{
  bool up = digitalRead(INCREMENTAL_UP_PIN);
  bool down = digitalRead(INCREMENTAL_DOWN_PIN);
  Serial.print("Up: ");
  Serial.print(up);
  Serial.print(", down: ");
  Serial.println(down);
  if (up)
  {
    LIFTER_MOTOR.MoveVelocity(LIFTER_MAX_VEL*LIFTER_STEPS_PER_REV);
    Serial.println(LIFTER_MOTOR.PositionRefCommanded());
  }
  else if (down)
  {
    LIFTER_MOTOR.MoveVelocity(-1*LIFTER_MAX_VEL*LIFTER_STEPS_PER_REV);
    Serial.println(LIFTER_MOTOR.PositionRefCommanded());
  }
  else
  {
    LIFTER_MOTOR.MoveStopAbrupt();
  }
}
void test_homing_switch()
{
bool switch_active = !digitalRead(HOMING_SWITCH_PIN);
if(switch_active) {
Serial.print("Homing switch: ");
Serial.println(switch_active);
LIFTER_MOTOR.PositionRefSet(0);
}
}
// Cycle the latch servo once: open duty cycle for 1 s, then closed for 1 s.
void test_servo()
{
  Serial.println("Servo open");
  analogWrite(LATCH_SERVO_PIN, LATCH_OPEN_DUTY_CYCLE);
  delay(1000);
  Serial.println("Servo closed");
  analogWrite(LATCH_SERVO_PIN, LATCH_CLOSE_DUTY_CYCLE);
  delay(1000);
}
// Exercise the motor and homing-switch tests at ~10 Hz.
// The servo test is currently disabled.
void loop()
{
  test_motor();
  test_homing_switch();
  //test_servo();
  delay(100);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/new_dc_motor_tuning/Motor.cpp
================================================
#include "Motor.h"
#include "constants.h"
constexpr double COUNTS_TO_RADS = 2.0 * PI / static_cast<double>(COUNTS_PER_OUTPUT_SHAFT_REV);
// Construct a velocity-controlled motor. Wires the PID controller to the
// filtered velocity (input), the incremental PID output (pidOut_), and the
// velocity setpoint, and configures the PWM/direction pins.
Motor::Motor(int pwmPin, int dirPin, int encPinA, int encPinB, double Kp, double Ki, double Kd)
  : pwmPin_(pwmPin),
    dirPin_(dirPin),
    inputVel_(0.0),
    currentVelRaw_(0.0),
    currentVelFiltered_(0.0),
    pidOut_(0.0),
    outputCmd_(0.0),
    prevCount_(0),
    prevMicros_(micros()),
    enc_(encPinA, encPinB),
    // Restored "&currentVelFiltered_" -- the address-of operator had been
    // garbled into an HTML currency entity in this copy of the file
    controller_(&currentVelFiltered_, &pidOut_, &inputVel_, Kp, Ki, Kd, DIRECT),
    velFilter_(LOWPASS, VEL_FILTER_FREQ)
{
  pinMode(pwmPin_, OUTPUT);
  pinMode(dirPin_, OUTPUT);
  controller_.SetMode(AUTOMATIC);
  controller_.SetOutputLimits(-255, 255);  // full reverse .. full forward PWM
  controller_.SetSampleTime(5);            // PID recomputes at most every 5 ms
}
// Set the velocity setpoint (rad/sec) that the PID loop in runLoop() tracks.
void Motor::setCommand(double vel)
{
  inputVel_ = vel;
}
// Current filtered motor velocity in rad/s, narrowed to float for callers.
float Motor::getCurrentVelocity()
{
  // Restored the template argument that was garbled out of this cast
  return static_cast<float>(currentVelFiltered_);
}
// Raw encoder count for debugging. Note: runLoop() negates enc_.read(),
// but this accessor does not -- the sign conventions differ.
long Motor::getCounts()
{
  return enc_.read();
}
// Run one iteration of the velocity controller: measure speed from the
// encoder, run the PID, and integrate its output into the PWM command.
// When `pub` is true, prints telemetry in "name:value" pairs for plotting.
void Motor::runLoop(bool pub)
{
  // Read current values. The encoder reading is negated so that a positive
  // command corresponds to positive measured velocity on this wiring.
  long curCount = -1*enc_.read();
  unsigned long curMicros = micros();

  // Compute deltas since the previous call.
  long deltaCount = curCount - prevCount_;
  // Restored the template argument that was garbled out of this cast
  double deltaRads = static_cast<double>(deltaCount) * COUNTS_TO_RADS;
  unsigned long deltaMicros = curMicros - prevMicros_;
  if(deltaMicros == 0)
  {
    // This will break things due to div by 0, so just skip
    return;
  }

  // Copy current values into previous
  prevCount_ = curCount;
  prevMicros_ = curMicros;

  // Do a check for large time deltas. This likely means the controller
  // hasn't run for a while and we should ignore this value
  if(deltaMicros > 100000)
  {
    return;
  }

  // Compute current velocity in rads/second (micros -> seconds)
  currentVelRaw_ = 1000000.0 * deltaRads / static_cast<double>(deltaMicros);
  currentVelFiltered_ = currentVelRaw_; // velFilter_.input(currentVelRaw_);

  // Run PID controller
  controller_.Compute();

  /* Use output from PID to update our current command. Since this is a velocity controller, when the error is 0
   * and the PID controller drives the output to 0, we actually want to maintain a certain PWM value. Hence, the
   * PID output is used to modify our control signal and not drive it directly.
   */
  outputCmd_ += int(pidOut_);
  //outputCmd_ = static_cast<int>(pidOut_ + inputVel_ * 10);

  // Set a deadband region based on input vel to avoid integral windup
  if(fabs(inputVel_) < 0.001)
  {
    outputCmd_ = 0;
  }

  // Make sure we don't exceed max/min power
  if(outputCmd_ > 255)
  {
    outputCmd_ = 255;
  }
  else if(outputCmd_ < -255)
  {
    outputCmd_ = -255;
  }

  // Update output direction from the sign of the command
  if(outputCmd_ < 0)
  {
    digitalWrite(dirPin_,1);
  }
  else
  {
    digitalWrite(dirPin_,0);
  }

  // Actually write out the motor power
  analogWrite(pwmPin_, abs(outputCmd_));

  if (pub)
  {
    // Debugging prints (name:value pairs for the serial plotter)
    Serial.print("currentVelRaw_:");
    Serial.print(currentVelRaw_, 5);
    Serial.print(" ");
    Serial.print("currentVelFiltered_:");
    Serial.print(currentVelFiltered_, 5);
    Serial.print(" ");
    Serial.print("inputVel_:");
    Serial.print(inputVel_, 5);
    Serial.print(" ");
    Serial.print("outputCmd_:");
    Serial.print(outputCmd_/10.0);  // scaled down to fit on the same plot
    Serial.println("");
  }
}
================================================
FILE: experimental_testing/RobotArduinoTesting/new_dc_motor_tuning/Motor.h
================================================
#ifndef Motor_h
#define Motor_h
#include <PID_v1.h>
#include <Encoder.h>
#include <Filters.h>
// Closed-loop speed controller for one DC motor (rad/s units): encoder
// feedback -> PID -> incremental PWM command + direction pin.
class Motor
{
  public:
    /*
     * Motor object constructor
     * pwmPin - which pin the motor PWM is connected to
     * dirPin - which pin the motor direction is connected to
     * encPinA - which pin the encoder wire A is connected to. This should be a pin capable of handling interrupts
     * encPinB - which pin the encoder wire B is connected to. This should ideally (but not required) be a pin capable of handling interrupts
     * Kp - Proportional gain
     * Ki - Integral gain
     * Kd - Derivative gain
     */
    Motor(int pwmPin, int dirPin, int encPinA, int encPinB, double Kp, double Ki, double Kd);

    /*
     * Set the desired velocity in rad/second
     */
    void setCommand(double vel);

    /*
     * Run the actual controller. Make sure this gets called reasonably quickly (i.e. every 20 ms or so)
     * pub - when true, prints telemetry over serial
     */
    void runLoop(bool pub);

    /*
     * Get the current measured motor velocity in rad/s
     */
    float getCurrentVelocity();

    // Get number of counts from encoder - for debugging
    long getCounts();

  private:
    // Pins
    int pwmPin_;
    int dirPin_;

    // Values for computation
    double inputVel_;           // Desired velocity in rad/sec
    double currentVelRaw_;      // Raw current velocity in rad/sec
    double currentVelFiltered_; // Filtered velocity in rad/sec
    double pidOut_;             // Output from PID controller (added into outputCmd_ each loop)
    int outputCmd_;             // Output command in [-255, 255]
    long prevCount_;            // Encoder count from previous loop

    // Timer
    unsigned long prevMicros_;  // Timer from previous loop

    // Other objects
    Encoder enc_;               // Encoder object
    PID controller_;            // PID controller
    FilterOnePole velFilter_;   // Velocity lowpass filter (currently bypassed in runLoop)
};
#endif // Motor_h
================================================
FILE: experimental_testing/RobotArduinoTesting/new_dc_motor_tuning/constants.h
================================================
#ifndef Constants_h
#define Constants_h
// Pins
#define PIN_PWM_0 3
#define PIN_PWM_1 4
#define PIN_PWM_2 5
#define PIN_PWM_3 6
#define PIN_DIR_0 41
#define PIN_DIR_1 40
#define PIN_DIR_2 39
#define PIN_DIR_3 38
#define PIN_ENABLE_0 49
#define PIN_ENABLE_1 48
#define PIN_ENABLE_2 47
#define PIN_ENABLE_3 46
#define PIN_ENC_A_0 21
#define PIN_ENC_B_0 25
#define PIN_ENC_A_1 20
#define PIN_ENC_B_1 23
#define PIN_ENC_A_2 19
#define PIN_ENC_B_2 24
#define PIN_ENC_A_3 18
#define PIN_ENC_B_3 22
#define PIN_LATCH_SERVO_PIN 12
#define PIN_TRAY_STEPPER_LEFT_PULSE 46
#define PIN_TRAY_STEPPER_LEFT_DIR 47
#define PIN_TRAY_STEPPER_RIGHT_PULSE 44
#define PIN_TRAY_STEPPER_RIGHT_DIR 43
#define PIN_TRAY_HOME_SWITCH 53
// Velocitiy limits
#define MAX_TRANS_SPEED_FINE 0.08 // m/s
#define MAX_ROT_SPEED_FINE 0.5 // rad/2
#define MAX_TRANS_SPEED_COARSE 0.5 // m/s
#define MAX_ROT_SPEED_COARSE 1.0 // rad/2
// Acceleration limits
#define MAX_TRANS_ACC_FINE 0.1 // m/s^2
#define MAX_ROT_ACC_FINE 0.5 // rad/s^2
#define MAX_TRANS_ACC_COARSE 0.5 // m/s^2
#define MAX_ROT_ACC_COARSE 1.0 // rad/s^2
// Cartesian control gains
#define CART_TRANS_KP 2
#define CART_TRANS_KI 0.1
#define CART_TRANS_KD 0
#define CART_ROT_KP 3
#define CART_ROT_KI 0.5
#define CART_ROT_KD 0
// Motor control gains
#define MOTOR_KP 1
#define MOTOR_KI 0
#define MOTOR_KD 0
// Motor control constants
#define VEL_FILTER_FREQ 20 // HZ
#define COUNTS_PER_OUTPUT_SHAFT_REV 36000 // Manually measrued/estimated
// Physical dimensions
#define WHEEL_DIAMETER 0.152 // meters
#define WHEEL_DIST_FROM_CENTER 0.4572 // meters
// Scaling factors
#define TRAJ_MAX_FRACTION 0.7 // Only generate a trajectory to this fraction of max speed to give motors headroom to compensate
// Kalman filter scales
#define PROCESS_NOISE_SCALE 0.08
#define MEAS_NOISE_SCALE 0.01
#define MEAS_NOISE_VEL_SCALE_FACTOR 10000
// Possition accuracy targets
#define TRANS_POS_ERR_COARSE 0.10 // m
#define ANG_POS_ERR_COARSE 0.08 // rad
#define TRANS_VEL_ERR_COARSE 0.05 // m/s
#define ANG_VEL_ERR_COARSE 0.05 // rad/s
#define TRANS_POS_ERR_FINE 0.01 // m
#define ANG_POS_ERR_FINE 0.02 // rad
#define TRANS_VEL_ERR_FINE 0.01 // m/s
#define ANG_VEL_ERR_FINE 0.01 // rad/s
// Set debug printing, comment out to skip debug printing
#define PRINT_DEBUG true
// Tray stepper control values
#define TRAY_STEPPER_STEPS_PER_REV 200 // Steps per rev for tray servos
#define TRAY_DIST_PER_REV 1.8 // mm of linear travel per stepper revolution
#define TRAY_MAX_LINEAR_TRAVEL 300 // mm of total linear travel possible
// Note: all tray positions measured in mm from home pos
#define TRAY_DEFAULT_POS_MM 100 // Default position for driving in mm
#define TRAY_LOAD_POS_MM 50 // Loading position in mm
#define TRAY_PLACE_POS_MM 250 // Placing position in mm
#define TRAY_MAX_SPEED 1000 // Max tray speed in steps/sec
#define TRAY_MAX_ACCEL 2000 // Max tray acceleration in steps/sec/sec
// Tray servo control values
#define LATCH_CLOSE_POS 20 // Latch servo position for close in degrees
#define LATCH_OPEN_POS 150 // Latch servo position for open in degrees
#define TRAY_SERVO_MIN_PW 1000 // Min pulse witdh in microseconds corresponding to 0 position
#define TRAY_SERVO_MAX_PW 2000 // Max pulse witdh in microseconds corresponding to 180 position
#define TRAY_PLACEMENT_PAUSE_TIME 3000 // How many ms to wait after opening the latch for placement
// Command identifiers used by the robot controller (implicit values 0..9 in
// declaration order). Member comments are inferred from the names --
// confirm against the master-side protocol before relying on them.
enum COMMAND
{
    NONE,             // no command / idle
    MOVE,             // move to a position
    MOVE_REL,         // move relative to the current position
    MOVE_FINE,        // move using the fine speed/accuracy limits
    PLACE_TRAY,       // run the tray placement sequence
    LOAD_TRAY,        // move the tray to the loading position
    INITIALIZE_TRAY,  // home/initialize the tray mechanism
    POSITION,         // position update/report
    ESTOP,            // emergency stop
    LOAD_COMPLETE,    // loading-finished notification
};
#endif
================================================
FILE: experimental_testing/RobotArduinoTesting/new_dc_motor_tuning/new_dc_motor_tuning.ino
================================================
#include "constants.h"
#include "Motor.h"
double Kp = 14;
double Ki = 0.1;
double Kd = 0.08;
Motor m = Motor(PIN_PWM_2, PIN_DIR_2, PIN_ENC_A_2, PIN_ENC_B_2, Kp, Ki, Kd);
int step = 0;
unsigned long prevStepTime = millis();
unsigned long prevLoopTime = millis();
double cur_speed = 0;
double max_vel = 2;
double max_acc = 5;
double max_acc_time = 1000 * max_vel / max_acc;
// Sign function: returns +1, -1, or 0 matching the sign of val.
// (Restores the template parameter list that was garbled in this file.)
template <typename T> int sgn(T val) {
  return (T(0) < val) - (val < T(0));
}
// Enable the motor driver channel under test and open the serial port.
void setup()
{
  pinMode(PIN_ENABLE_3, OUTPUT);
  digitalWrite(PIN_ENABLE_3, HIGH);
  Serial.begin(115200);
  //Serial.println("Start motor tuning");
}
// Integrate acceleration `acc` into the global cur_speed over dt (in ms),
// clamping the result to +/-max_vel. Returns the updated speed.
// (The original was declared double but had no return statement, which is
// undefined behaviour in C++; callers ignore the value, so returning
// cur_speed is backward-compatible.)
double computeAcc(double dt, double acc)
{
  cur_speed += dt/1000.0 * acc;
  // NOTE(review): fabsf on a double narrows to float before comparing --
  // probably harmless here, but fabs() would be exact.
  if (fabsf(cur_speed) > max_vel)
  {
    cur_speed = sgn(cur_speed) * max_vel;
  }
  return cur_speed;
}
// One trajectory "step" that ramps the global cur_speed with constant
// acceleration `acc`. Once `waitTime` ms have elapsed since the step began,
// advances the global step counter and resets the timers; otherwise
// integrates the acceleration over the time since the previous call.
void runAccStep(int waitTime, double acc)
{
  unsigned long curTime = millis();
  unsigned long stepTime = curTime - prevStepTime;
  if (stepTime > waitTime)
  {
    // Step finished: move on and restart both timers
    step += 1;
    prevStepTime = millis();
    prevLoopTime = millis();
  }
  else
  {
    // Integrate acceleration over the time since the last loop iteration
    unsigned long loopTime = curTime - prevLoopTime;
    computeAcc(loopTime, acc);
    prevLoopTime = millis();
  }
}
// Constant-velocity trajectory step: hold the global cur_speed at `vel`
// until `waitTime` ms elapse, then advance the global step counter.
// Returns the current commanded speed. (The original was declared double
// but had no return statement -- undefined behaviour in C++; callers ignore
// the value, so returning cur_speed is backward-compatible.)
double runCVStep(int waitTime, double vel)
{
  unsigned long curTime = millis();
  unsigned long stepTime = curTime - prevStepTime;
  if (stepTime > waitTime)
  {
    // Step finished: move on and restart both timers
    step += 1;
    prevStepTime = millis();
    prevLoopTime = millis();
  }
  else
  {
    cur_speed = vel;
  }
  return cur_speed;
}
double runSteps()
{
if (step == 0)
{
int stepTime = max_acc_time;
double stepAcc = max_acc;
runAccStep(stepTime, stepAcc);
}
else if (step == 1)
{
int stepTime = 2000;
runCVStep(stepTime, max_vel);
}
else if (step == 2)
{
int stepTime = max_acc_time;
double stepAcc = -1*max_acc;
runAccStep(stepTime, stepAcc);
}
else if (step == 3)
{
int stepTime = 2000;
runCVStep(stepTime, 0);
}
else if (step == 4)
{
int stepTime = max_acc_time;
double stepAcc = -1 * max_acc;
runAccStep(stepTime, stepAcc);
}
else if (step == 5)
{
int stepTime = 2000;
runCVStep(stepTime, -1*max_vel);
}
else if (step == 6)
{
int stepTime = max_acc_time;
double stepAcc = max_acc;
runAccStep(stepTime, stepAcc);
}
else if (step == 7)
{
int stepTime = 2000;
runCVStep(stepTime, 0);
}
else if (step >= 8)
{
step = 0;
}
}
// Main tuning loop: advance the speed profile, command the motor, and run
// the controller roughly every 15 ms. Telemetry is muted during the two
// rest steps so the plots stay readable.
void loop()
{
  runSteps();
  m.setCommand(cur_speed);
  bool publish = (step != 3 && step != 7);
  m.runLoop(publish);
  delay(15);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/single_motor_speed_control/Motor.cpp
================================================
#include "Motor.h"
#include <Arduino.h>
// Construct a velocity-controlled motor. Wires the PID controller to the
// filtered velocity (input), the PWM command (output), and the velocity
// setpoint, and configures the PWM/direction pins.
Motor::Motor(int pwmPin, int dirPin, int encPinA, int encPinB, double Kp, double Ki, double Kd, float velFilterFreq)
  : pwmPin_(pwmPin),
    dirPin_(dirPin),
    inputVel_(0.0),
    currentVelRaw_(0.0),
    currentVelFiltered_(0.0),
    outputCmd_(0.0),
    prevCount_(0),
    prevMillis_(millis()),
    enc_(encPinA, encPinB),
    // Restored "&currentVelFiltered_" -- the address-of operator had been
    // garbled into an HTML currency entity in this copy of the file
    controller_(&currentVelFiltered_, &outputCmd_, &inputVel_, Kp, Ki, Kd, DIRECT),
    velFilter_(LOWPASS, velFilterFreq)
{
  pinMode(pwmPin_, OUTPUT);
  pinMode(dirPin_, OUTPUT);
  controller_.SetMode(AUTOMATIC);
  controller_.SetOutputLimits(-255, 255);  // full reverse .. full forward PWM
  controller_.SetSampleTime(5);            // PID recomputes at most every 5 ms
}
// Set the velocity setpoint (revs/sec) that the PID loop in runLoop() tracks.
void Motor::setCommand(double vel)
{
  inputVel_ = vel;
}
// Run one iteration of the velocity controller: measure speed from the
// encoder, lowpass-filter it, run the PID, write the PWM/direction outputs,
// and print telemetry (filtered velocity, setpoint, normalized command).
void Motor::runLoop()
{
  // Read current values
  long curCount = enc_.read();
  unsigned long curMillis = millis();

  // Compute deltas since the previous call. Restored the template argument
  // that was garbled out of this cast.
  long deltaCount = curCount - prevCount_;
  double deltaRevs = static_cast<double>(deltaCount) / COUNTS_PER_SHAFT_REV;
  unsigned long deltaMillis = curMillis - prevMillis_;

  // Copy current values into previous
  prevCount_ = curCount;
  prevMillis_ = curMillis;

  // Skip when no time has elapsed (avoids a divide by zero below) or when
  // the delta is large, which likely means the controller hasn't run for a
  // while and this sample should be ignored
  if(deltaMillis == 0 || deltaMillis > 100)
  {
    return;
  }

  // Compute current velocity in revs/second
  currentVelRaw_ = 1000.0 * deltaRevs / static_cast<double>(deltaMillis);
  currentVelFiltered_ = velFilter_.input(currentVelRaw_);

  // Run PID controller
  controller_.Compute();

  // Update direction pin from the sign of the command, then write magnitude
  if(outputCmd_ < 0)
  {
    digitalWrite(dirPin_,0);
  }
  else
  {
    digitalWrite(dirPin_,1);
  }
  analogWrite(pwmPin_, abs(outputCmd_));

  // Debug telemetry
  Serial.print(currentVelFiltered_);
  Serial.print(" ");
  Serial.print(inputVel_);
  Serial.print(" ");
  Serial.print(outputCmd_/255.0);
  Serial.println("");
}
================================================
FILE: experimental_testing/RobotArduinoTesting/single_motor_speed_control/Motor.h
================================================
/* Requires libraries:
* Filters: https://github.com/JonHub/Filters
* PID: https://playground.arduino.cc/Code/PIDLibrary/
* Encoder: https://www.pjrc.com/teensy/td_libs_Encoder.html
*/
#ifndef Motor_h
#define Motor_h
#include <PID_v1.h>
#include <Encoder.h>
#include <Filters.h>
const unsigned int COUNTS_PER_MOTOR_REV = 44;
const unsigned int MOTOR_GEAR_RATIO = 40;
const double FUDGE_FACTOR = 1.5;
const double COUNTS_PER_SHAFT_REV = COUNTS_PER_MOTOR_REV * MOTOR_GEAR_RATIO * FUDGE_FACTOR;
// Closed-loop speed controller for one DC motor: encoder feedback ->
// lowpass filter -> PID -> PWM magnitude + direction pin.
class Motor
{
  public:
    /*
     * Motor object constructor
     * pwmPin - which pin the motor PWM is connected to
     * dirPin - which pin the motor direction is connected to
     * encPinA - which pin the encoder wire A is connected to. This should be a pin capable of handling interrupts
     * encPinB - which pin the encoder wire B is connected to. This should ideally (but not required) be a pin capable of handling interrupts
     * Kp - Proportional gain
     * Ki - Integral gain
     * Kd - Derivative gain
     * velFilterFreq - frequency to use for velocity lowpass filter
     */
    Motor(int pwmPin, int dirPin, int encPinA, int encPinB, double Kp, double Ki, double Kd, float velFilterFreq);

    /*
     * Set the desired velocity in revs/second
     */
    void setCommand(double vel);

    /*
     * Run the actual controller. Make sure this gets called reasonably quickly (i.e. every 20 ms or so)
     */
    void runLoop();

  private:
    // Pins
    int pwmPin_;
    int dirPin_;

    // Values for computation
    double inputVel_;           // Desired velocity in revs/sec
    double currentVelRaw_;      // Raw current velocity in revs/sec
    double currentVelFiltered_; // Lowpass-filtered velocity fed to the PID controller
    double outputCmd_;          // Output command in [-255, 255]
    long prevCount_;            // Encoder count from previous loop

    // Timer
    unsigned long prevMillis_;  // Timer from previous loop

    // Other objects
    Encoder enc_;               // Encoder object
    PID controller_;            // PID controller
    FilterOnePole velFilter_;   // Velocity lowpass filter
};
#endif // Motor_h
================================================
FILE: experimental_testing/RobotArduinoTesting/single_motor_speed_control/single_motor_speed_control.ino
================================================
/* Requires libraries:
* Filters: https://github.com/JonHub/Filters
* PID: https://playground.arduino.cc/Code/PIDLibrary/
* Encoder: https://www.pjrc.com/teensy/td_libs_Encoder.html
*/
#include "Motor.h"
// Pinouts
#define PIN_ENABLE 52
#define PIN_BL_PWM 6
#define PIN_BL_DIR 48
#define PIN_BL_ENC_A 19
#define PIN_BL_ENC_B 29
#define VEL_FILTER_FREQ 10 // HZ
const double Kp = 200;
const double Ki = 2000;
const double Kd = 0;
Motor myMotor = Motor(PIN_BL_PWM, PIN_BL_DIR, PIN_BL_ENC_A, PIN_BL_ENC_B, Kp, Ki, Kd, VEL_FILTER_FREQ);
// Configure the motor driver enable pin and open the serial port.
void setup()
{
  pinMode(PIN_ENABLE, OUTPUT);
  Serial.begin(250000);
}
// Command the motor to `rps` revolutions per second (always forward in this
// test) and enable the driver.
void setMotorCommands(float rps)
{
  const int direction = 1;  // forward only
  myMotor.setCommand(direction * rps);
  digitalWrite(PIN_ENABLE, 1);
}
// Service the motor's control loop roughly every 15 ms for 3 seconds.
void runMotors()
{
  const int loopPeriodMs = 15;
  const int runTimeMs = 3000;
  unsigned long start = millis();
  unsigned long lastLoop = start;
  while (millis() - start < runTimeMs)
  {
    if (millis() - lastLoop > loopPeriodMs)
    {
      lastLoop = millis();
      myMotor.runLoop();
    }
  }
}
// Test cycle: every ~1 s, command a fixed 0.5 rev/s setpoint, run the
// control loop for 3 s, then disable the driver. The interactive
// speed-entry code below is currently disabled.
void loop()
{
// // Get revolutions per second
// Serial.println("Please enter target revolutions per second");
// bool gotInput = false;
// float rps = 0.0;
// while(!gotInput)
// {
// if(Serial.available() > 0)
// {
// rps = Serial.parseFloat();
// if(rps == 0)
// {
// continue;
// }
// else
// {
// break;
// }
// }
// delay(10);
// }
  delay(1000);
  float rps = 0.5;  // fixed test setpoint while the input code is disabled
  setMotorCommands(rps);
  runMotors();
  // Stop motors
  digitalWrite(PIN_ENABLE, 0);
}
================================================
FILE: experimental_testing/RobotArduinoTesting/stepper_nano_test/stepper_nano_test.ino
================================================
// Uses stepper driver library from here: https://www.airspayce.com/mikem/arduino/AccelStepper/classAccelStepper.html
#include <AccelStepper.h>
#define STEP_PIN 2
#define DIR_PIN 1 // Note that the DIR pin is actually controlled by the mster arduino so this won't be connected to anything
#define MILLIS_BETWEEN_PRINTING 100
const int max_vel = 800; //steps/second
AccelStepper motor;
unsigned long count = 0;
unsigned long prevMillisPrint;
// Configure the stepper driver (speed/acceleration limits and minimum step
// pulse width) and queue an initial 800-step relative move.
void setup()
{
  Serial.begin(115200);
  Serial.println("Starting...");
  // Replace the default-constructed motor with one bound to our pins
  motor = AccelStepper(AccelStepper::DRIVER, STEP_PIN, DIR_PIN);
  motor.setMaxSpeed(max_vel);
  motor.setAcceleration(max_vel);
  motor.setMinPulseWidth(10); // Min from docs is 2.5 microseconds
  prevMillisPrint = millis();
  motor.move(800);  // relative move to exercise the driver
}
void loop()
{
    // While the move is in progress, report speed and position every
    // MILLIS_BETWEEN_PRINTING milliseconds.
    if (millis() - prevMillisPrint > MILLIS_BETWEEN_PRINTING && motor.isRunning())
    {
        Serial.print(motor.speed());
        Serial.print(", ");
        Serial.println(motor.currentPosition());
        prevMillisPrint = millis();
        count = 0;
    }
    motor.run();
    ++count;
}
================================================
FILE: experimental_testing/RobotArduinoTesting/test_encoders/test_encoders.ino
================================================
// Pins
#define PIN_ENCA_1 21
#define PIN_ENCA_2 20
#define PIN_ENCA_3 19
#define PIN_ENCA_4 18
#define PIN_ENCB_1 25
#define PIN_ENCB_2 23
#define PIN_ENCB_3 24
#define PIN_ENCB_4 22
#include
Encoder e[4] = { Encoder(PIN_ENCA_1, PIN_ENCB_1),
Encoder(PIN_ENCA_2, PIN_ENCB_2),
Encoder(PIN_ENCA_3, PIN_ENCB_3),
Encoder(PIN_ENCA_4, PIN_ENCB_4) } ;
void setup() {
// Serial link used for printing the four encoder counts
Serial.begin(115200);
Serial.println("Encoder Test:");
}
void loop() {
    // Print all four encoder counts as a bracketed list
    Serial.print("Pos: [");
    for (int idx = 0; idx < 4; ++idx)
    {
        Serial.print(e[idx].read());
        Serial.print(", ");
    }
    Serial.println("]");
    // Any byte arriving on serial zeroes every encoder count
    if (Serial.available())
    {
        Serial.read();
        Serial.println("Reset position to zero");
        for (int idx = 0; idx < 4; ++idx)
        {
            e[idx].write(0);
        }
    }
    delay(100);
}
================================================
FILE: experimental_testing/TrajGen.py
================================================
# Testing out trajectory generation where I have a gui
import math
import numpy as np
from matplotlib import pyplot as plt
# Parameters
MAX_VEL = 3.0 # m/s
MAX_ACC = 2.0 # m/s^2
# Find min time and position change to reach full speed
T_ACC = MAX_VEL / MAX_ACC  # time to accelerate from rest to MAX_VEL [s]
print("T_ACC: {}".format(T_ACC))
P_ACC = 0.5 * MAX_ACC * T_ACC * T_ACC  # distance covered during that acceleration [m]
print("P_ACC: {}".format(P_ACC))
def sign(n):
    """Return 1, -1 or 0 matching the sign of n."""
    if n == 0:
        return 0
    return 1 if n > 0 else -1
def gen_traj2(p1, p2, dt):
    """Generate a trajectory from p1 to p2, choosing the profile shape by distance."""
    distance = abs(p2 - p1)
    # A trapezoid needs room to accelerate to MAX_VEL and decelerate again;
    # otherwise the profile is triangular.
    if distance < 2 * P_ACC:
        return gen_triangle_traj(p1, p2, dt)
    return gen_trap_traj(p1, p2, dt)
def gen_triangle_traj(p1, p2, dt):
    """
    Triangular velocity profile: accelerate at MAX_ACC to the midpoint of the
    move, then decelerate to p2. Returns a dict of time/pos/vel/acc lists.
    """
    # Fixed: the switch point must be measured from p1. The original used
    # (p2 - p1)/2, which is only the midpoint when p1 == 0.
    turn_pt = p1 + (p2 - p1) / 2
    direction = sign(p2 - p1)
    traj = {'time': [0], 'pos': [p1], 'vel': [0], 'acc': [direction * MAX_ACC]}
    gen_const_acc_until_pos(traj, dt, direction * MAX_ACC, turn_pt, direction)
    gen_const_acc_until_pos(traj, dt, -1 * direction * MAX_ACC, p2, direction)
    return traj
def gen_trap_traj(p1, p2, dt):
    """
    Trapezoidal velocity profile: accelerate to MAX_VEL, cruise, then
    decelerate to p2. Returns a dict of time/pos/vel/acc lists.
    """
    direction = sign(p2 - p1)
    # Fixed: the point where MAX_VEL is reached must be measured from p1.
    # The original used dir*P_ACC, which is only correct when p1 == 0.
    max_vel_pt = p1 + direction * P_ACC
    slow_down_pt = p2 - direction * P_ACC
    traj = {'time': [0], 'pos': [p1], 'vel': [0], 'acc': [direction * MAX_ACC]}
    gen_const_acc_until_pos(traj, dt, direction * MAX_ACC, max_vel_pt, direction)
    gen_const_acc_until_pos(traj, dt, 0, slow_down_pt, direction)
    gen_const_acc_until_pos(traj, dt, -1 * direction * MAX_ACC, p2, direction)
    return traj
def gen_const_acc_until_pos(traj, dt, acc, stop_pos, stop_dir):
    """
    Append constant-acceleration samples (explicit Euler: position advances
    using the previous step's velocity) to `traj`, a dict of 'time'/'pos'/
    'vel'/'acc' lists, until one of:
      - the position passes stop_pos in the direction stop_dir (+1/-1),
      - the velocity would reverse against stop_dir,
      - an iteration cap is exceeded (raises StopIteration).
    Mutates traj in place and also returns it.
    """
    n_iter = 0
    while True:
        new_vel = traj['vel'][-1] + acc * dt
        new_pos = traj['pos'][-1] + traj['vel'][-1] * dt
        new_time = traj['time'][-1] + dt
        # Stop if velocity would reverse against the direction of travel
        if stop_dir > 0 and new_vel < 0:
            break
        if stop_dir < 0 and new_vel > 0:
            break
        # Stop once the target position is reached
        if stop_dir > 0 and new_pos >= stop_pos:
            break
        # Fixed: the original tested `stop_pos < 0` here, so a negative-
        # direction move toward a non-negative stop position never terminated
        # on position and spun until the iteration cap.
        elif stop_dir < 0 and new_pos <= stop_pos:
            break
        elif n_iter > 100000:
            raise StopIteration("Too many iterations")
        traj['time'].append(new_time)
        traj['pos'].append(new_pos)
        traj['vel'].append(new_vel)
        traj['acc'].append(acc)
        n_iter += 1
    return traj
def plot_traj(pvt_data):
"""
Makes a plot of position, velocity, and acceleration versus time
"""
ax = plt.axes()
ax.plot(pvt_data["time"], pvt_data["pos"],'r',label='Position')
ax.plot(pvt_data["time"], pvt_data["vel"],'g',label='Velocity')
ax.plot(pvt_data["time"], pvt_data["acc"],'b',label='Acceleration')
ax.legend()
plt.show()
if __name__ == '__main__':
    # Demo: generate and plot a 20 m move from rest, sampled at 50 ms.
    p1 = 0
    p2 = 20
    dt = 0.05
    pvt = gen_traj2(p1, p2, dt)
    plot_traj(pvt)
================================================
FILE: experimental_testing/TrajGenv2.py
================================================
import math
import numpy as np
from matplotlib import pyplot as plt
# S-curve trajectory parameters: target distance and kinematic limits
p_target = 20.0 # m
v_max = 1.0 # m/s
a_max = 2.0 # m/s^2
j_max = 8.0 # m/s^3
alpha = 0.8 # velocity decay (limit relaxation factor per failed attempt)
beta = 0.8 # acceleration decay (limit relaxation factor per failed attempt)
loop_limit = 10 # max loops for convergence
plot_timestep = 0.01  # seconds per sample when plotting the profile
def generate(target_position):
    """
    Iteratively relax the kinematic limits until generate_once() finds a
    feasible S-curve for target_position. Returns the successful output
    dict, or {} if no trajectory is found within loop_limit attempts.
    """
    v_lim, a_lim, j_lim = v_max, a_max, j_max
    output = {'done': False}
    for attempt in range(loop_limit):
        print("Starting loop {}".format(attempt))
        output = generate_once(target_position, v_lim, a_lim, j_lim)
        if output['done']:
            break
        # Pick up the relaxed limits for the next attempt
        v_lim = output['v_lim']
        a_lim = output['a_lim']
        j_lim = output['j_lim']
    if output['done']:
        print("Trajectory found")
        return output
    print("Trajectory not found")
    return {}
def generate_once(p, v_lim, a_lim, j_lim):
    """
    Attempt to build a 7-phase S-curve (jerk/accel/jerk/cruise/jerk/accel/jerk)
    covering distance p under the given limits. On success returns a dict with
    'done' True and 't' holding the 8 phase-boundary times. If a phase has
    non-positive duration, returns 'done' False with the offending limit
    relaxed (by beta or alpha) for the caller to retry.
    """
    output = {}
    output['done'] = False
    output['v_lim'] = v_lim
    output['a_lim'] = a_lim
    output['j_lim'] = j_lim
    output['t'] = []
    # Constant jerk regions
    dt_j = a_lim / j_lim  # time to ramp acceleration from 0 to a_lim
    print("dt_j = {}".format(dt_j))
    dv_j = 0.5 * j_lim * dt_j ** 2 # Positive jerk region
    dp_j1 = 1/6.0 * j_lim * dt_j ** 3 # Positive jerk region
    dp_j2 = (v_lim - dv_j) * dt_j + 0.5 * a_lim * dt_j ** 2 - 1/6 * j_lim * dt_j ** 3 # Negative jerk region
    print("dv_j: {}, dp_j1: {}, dp_j2: {}".format(dv_j, dp_j1, dp_j2))
    # Constant accel region
    dt_a = (v_lim - 2*dv_j ) / a_lim
    print("dt_a = {}".format(dt_a))
    if dt_a <= 0:
        # Acceleration phase doesn't fit: relax the accel limit and retry
        output['a_lim'] = a_lim * beta
        print("New a_lim = {}".format(output['a_lim'] ))
        return output
    dp_a = dv_j * dt_a + 0.5 * a_lim * dt_a ** 2
    print("dp_a: {}".format(dp_a))
    # Constant vel region
    dt_v = (p - 2 * dp_j1 - 2 * dp_j2 - 2 * dp_a) / v_lim
    print("dt_v = {}".format(dt_v))
    if dt_v <= 0:
        # Cruise phase doesn't fit: relax the velocity limit and retry
        output['v_lim'] = alpha * v_lim
        print("New v_lim = {}".format(output['v_lim'] ))
        return output
    dp_v = v_lim * dt_v
    print("dp_v: {}".format(dp_v))
    # If we get here, the generation should be correct, so compute time regions and return
    output['done'] = True
    t = [0,0,0,0,0,0,0,0]
    t[0] = 0
    t[1] = t[0] + dt_j
    t[2] = t[1] + dt_a
    t[3] = t[2] + dt_j
    t[4] = t[3] + dt_v
    t[5] = t[4] + dt_j
    t[6] = t[5] + dt_a
    t[7] = t[6] + dt_j
    output['t'] = t
    print("Times: {}".format(t))
    return output
def generate_inverse(p, dt_j, dt_a, dt_v):
    """
    Recover the kinematic limits (v_lim, a_lim, j_lim) from the phase
    durations of a 7-phase S-curve covering distance p. Returns a dict with
    'done', the 8 phase-boundary times 't', and the solved limits.
    """
    output = {}
    output['done'] = True
    # Accumulate phase boundaries: jerk, accel, jerk, cruise, jerk, accel, jerk
    t = [0]
    for duration in (dt_j, dt_a, dt_j, dt_v, dt_j, dt_a, dt_j):
        t.append(t[-1] + duration)
    output['t'] = t
    # Solve system of equations to get kinematic limits [j, a, v]
    A = np.array([
        [dt_j, -1, 0 ],
        [dt_j ** 2, dt_a, -1 ],
        [(dt_j ** 2) * (dt_a - dt_j), dt_j ** 2 + dt_a ** 2, dt_v + 2* dt_j ]])
    b = np.array([0, 0, p])
    lims = np.linalg.solve(A, b)
    output['j_lim'] = lims[0]
    output['a_lim'] = lims[1]
    output['v_lim'] = lims[2]
    return output
def generate_profile_from_params(output, timestep):
    """
    Numerically integrate the 7-phase S-curve described by `output`
    (keys 'j_lim', 'a_lim', 'v_lim' and phase-boundary times 't') into
    sampled lists. Returns (t_vals, p, v, a, j).

    Entry i of each list is the state at time t_vals[i]; j[i] is the jerk
    applied over the interval starting at t_vals[i]. The position update is
    the exact constant-jerk kinematic step, so it is shared by every phase
    (the original duplicated it in all seven branches).
    """
    j_lim = output['j_lim']
    a_lim = output['a_lim']
    v_lim = output['v_lim']
    T = output['t']
    t_vals = np.arange(0, T[7], timestep)
    p = [0]
    v = [0]
    a = [0]
    j = [j_lim]
    for t in t_vals[1:]:
        # Position always advances by the previous step's full kinematic state
        p.append(p[-1] + v[-1] * timestep + 0.5 * a[-1] * timestep ** 2 + 1/6.0 * j[-1] * timestep ** 3)
        if T[3] <= t < T[4]:
            # Constant-velocity cruise: pin velocity to the limit exactly
            v.append(v_lim)
            a.append(0)
            j.append(0)
        else:
            # All other phases share the velocity update
            v.append(v[-1] + a[-1] * timestep + 0.5 * j[-1] * timestep ** 2)
            if t < T[1]:
                a.append(a[-1] + j[-1] * timestep)   # jerk up
                j.append(j_lim)
            elif t < T[2]:
                a.append(a_lim)                       # constant accel (pinned)
                j.append(0)
            elif t < T[3]:
                a.append(a[-1] + j[-1] * timestep)   # jerk down to cruise
                j.append(-j_lim)
            elif t < T[5]:
                a.append(a[-1] + j[-1] * timestep)   # jerk down out of cruise
                j.append(-j_lim)
            elif t < T[6]:
                a.append(-a_lim)                      # constant decel (pinned)
                j.append(0)
            else:
                a.append(a[-1] + j[-1] * timestep)   # jerk up to rest
                j.append(j_lim)
    return (t_vals, p, v, a, j)
def plot_data(t, p, v, a, j, output):
    """
    Plot the full kinematic profile (position/velocity/acceleration/jerk)
    versus time. `output` is accepted for callers that want phase-boundary
    annotations but is currently unused.
    """
    ax = plt.axes()
    series = ((p, 'r', 'Position'),
              (v, 'g', 'Velocity'),
              (a, 'b', 'Acceleration'),
              (j, 'k', 'Jerk'))
    for values, color, label in series:
        ax.plot(t, values, color, label=label, linewidth=4)
    ax.legend()
    ax.set_xlabel('Time (s)')
    ax.set_ylabel("Value (m, m/s, m/s^2, m/s^3)")
    ax.set_title("Trajectory Generation\nDist: {} m, Vel limit: {} m/s, Acc limit: {} m/s^2, Jerk limit: {} m/s^3".format(p_target, v_max, a_max, j_max))
    # An earlier variant drew dashed vertical lines at output['t'] with phase
    # labels; re-add here if phase annotations are wanted.
    plt.show()
def plot_trapazoidal_profile():
    """Plot an illustrative trapezoidal velocity profile with labeled phases."""
    n_divisions = 91
    a_steps = int(n_divisions / 3)
    times = np.linspace(0, 5, n_divisions)
    # Bang-bang acceleration: +1, coast, -1, with a trailing zero sample
    accel = [1] * a_steps + [0] * a_steps + [-1] * a_steps + [0]
    vel = [0]
    pos = [0]
    for i in range(1, n_divisions):
        dt = times[i] - times[i - 1]
        vel.append(vel[i - 1] + accel[i - 1] * dt)
        pos.append(pos[i - 1] + vel[i - 1] * dt + 0.5 * accel[i - 1] * dt * dt)
    ax = plt.axes()
    # ax.plot(times, pos,'r',label='Position')
    ax.plot(times, vel, 'g', label='Velocity', linewidth=5)
    # ax.plot(times, accel,'b',label='Acceleration')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel("Velocity (m/s)")
    ax.legend()
    # Dashed vertical lines at the phase boundaries
    boundaries = [times[0], times[a_steps], times[2 * a_steps], times[3 * a_steps]]
    y_span = np.linspace(-10.0, 10.0, 100)
    for boundary in boundaries:
        ax.plot([boundary] * 100, y_span, 'k--', linewidth=3)
    ax.set_ylim((-0.5, 3))
    ax.grid(True)
    ax.text(boundaries[0] + (boundaries[1] - boundaries[0]) / 3.0, -0.2, "Constant Accel", fontsize='x-large')
    ax.text(boundaries[1] + (boundaries[2] - boundaries[1]) / 3.0, -0.1, "Constant Velocity", fontsize='x-large')
    ax.text(boundaries[2] + (boundaries[3] - boundaries[2]) / 3.0, -0.2, "Constant Accel", fontsize='x-large')
    plt.show()
if __name__ == '__main__':
    # Generate an S-curve for the configured target distance, then round-trip
    # the result through generate_inverse() as a self-consistency check.
    output = generate(p_target)
    if output:
        # Test inverse
        dt_j = output['t'][1]
        dt_a = output['t'][2] - output['t'][1]
        dt_v = output['t'][4] - output['t'][3]
        output2 = generate_inverse(p_target, dt_j, dt_a, dt_v)
        eps = 0.01  # allowed relative error between forward and inverse limits
        all_valid = True
        if output2:
            if abs(output2['v_lim'] - output['v_lim']) / output['v_lim'] > eps:
                print("v_lim not close. Expected: {}, Actual: {}, Percent diff: {}".format(output['v_lim'], output2['v_lim'], abs(output2['v_lim'] - output['v_lim']) / output['v_lim'] ))
                all_valid = False
            if abs(output2['a_lim'] - output['a_lim']) / output['a_lim'] > eps:
                print("a_lim not close. Expected: {}, Actual: {}, Percent diff: {}".format(output['a_lim'], output2['a_lim'], abs(output2['a_lim'] - output['a_lim']) / output['a_lim'] ))
                all_valid = False
            if abs(output2['j_lim'] - output['j_lim']) / output['j_lim'] > eps:
                print("j_lim not close. Expected: {}, Actual: {}, Percent diff: {}".format(output['j_lim'], output2['j_lim'], abs(output2['j_lim'] - output['j_lim']) / output['j_lim'] ))
                all_valid = False
            if all_valid:
                print("All inverse values valid")
        else:
            print("Inverse failed")
        # Generate plot
        data = generate_profile_from_params(output, plot_timestep)
        plot_data(*data, output)
    # plot_trapazoidal_profile()
================================================
FILE: src/master/FieldPlanner.py
================================================
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import skimage.transform as sktf
import numpy as np
import matplotlib.patches as patches
import math
import enum
import logging
import os
import Utils
from Utils import ActionTypes
import config
import pickle
import csv
import copy
def generateVisionOffsetMap(cfg, max_x, max_y):
    """
    Build a map of (tile_x, tile_y) -> vision offset (x [m], y [m], angle [deg]).
    Every tile starts at cfg.default_vision_offset; rows of the CSV at
    cfg.vision_offset_file then add per-tile corrections. A literal 'X' in the
    tile-x or tile-y column applies the row to that whole row/column of tiles.
    CSV columns: tile_x, tile_y, offset_x [mm], offset_y [mm], offset_a [deg].
    """
    offsets = {(x, y): cfg.default_vision_offset
               for x in range(max_x) for y in range(max_y)}
    with open(cfg.vision_offset_file) as csvfile:
        for idx, row in enumerate(csv.reader(csvfile)):
            if idx == 0:
                continue  # skip header row
            xs = range(max_x) if row[0] == 'X' else [int(row[0])]
            ys = range(max_y) if row[1] == 'X' else [int(row[1])]
            # Offsets are stored in mm in the file; convert to meters
            delta = np.array((int(row[2]) / 1000.0,
                              int(row[3]) / 1000.0,
                              float(row[4])))
            for x in xs:
                for y in ys:
                    offsets[(x, y)] = offsets[(x, y)] + delta
    return offsets
class DominoField:
    """
    Data and methods for parsing an image and generating a field of tiles of dominos
    """
    def __init__(self, cfg):
        # cfg: configuration namespace (image name, tile/domino geometry, colors)
        self.cfg = cfg
        self.img = None                # Original image array
        self.img_scaled = None         # Image resized to the domino grid resolution
        self.img_parsed_ids = None     # Per-domino color ids (height x width)
        self.img_parsed_color = None   # Per-domino snapped colors (height x width x 3)
        self.n_tiles_x = 0
        self.n_tiles_y = 0
        self.tiles = []                # Tile objects, sorted by placement order

    def generate(self):
        """Parse the configured image and build the ordered tile list."""
        logging.info('Generating domino field from image...')
        self._generateField()
        logging.info('done.')
        logging.info('Generating tiles from field...')
        self._generateTiles()
        logging.info('done.')

    def printStats(self):
        """Log image sizes and per-color domino counts."""
        # Output some metrics
        logging.info("Original image size: {}".format(self.img.shape[:2]))
        logging.info("Scaled image size: {}".format(self.img_scaled.shape[:2]))
        logging.info('Domino usage:')
        logging.info('Total number of dominos: ' + str(self.img_parsed_ids.size))
        logging.info('Colors:')
        unique_colors, counts = np.unique(self.img_parsed_ids, return_counts=True)
        for i, id in enumerate(unique_colors):
            logging.info(' ' + self.cfg.dominos[id][0] + ': ' + str(counts[i]))

    def show_image_parsing(self):
        """Show original, scaled, and domino-parsed images side by side."""
        # Plot images
        fig, axes = plt.subplots(nrows=1, ncols=3)
        ax = axes.ravel()
        ax[0].imshow(self.img)
        ax[0].set_title('Original')
        ax[1].imshow(self.img_scaled)
        ax[1].set_title('Scaled')
        ax[2].imshow(self.img_parsed_color)
        ax[2].set_title('Dominos')
        plt.tight_layout()
        figManager = plt.get_current_fig_manager()
        figManager.window.state('zoomed')
        plt.show()

    def render_domino_image_tiles(self):
        """Render every tile into one image (with tile edges) and display it."""
        # Allocate memory for image
        tile_size_x_in_px = (self.cfg.domino_width_px + self.cfg.domino_spacing_width_px) * self.cfg.tile_width
        tile_size_y_in_px = (self.cfg.domino_height_px + self.cfg.domino_spacing_height_px) * self.cfg.tile_height
        array_size_x = tile_size_x_in_px * self.n_tiles_x
        array_size_y = tile_size_y_in_px * self.n_tiles_y
        image_array = np.zeros((array_size_x, array_size_y, 3))
        # Generate image
        for tile in self.tiles:
            tile.draw(image_array)
        # Modify array to show image correctly (x,y indexing -> row/col image)
        image_array = np.transpose(image_array, (1, 0, 2))
        image_array = np.flip(image_array, 0)
        # Actually show image
        plt.imshow(image_array)
        figManager = plt.get_current_fig_manager()
        figManager.window.state('zoomed')
        plt.show()

    def show_tile_ordering(self):
        """Display the tile placement order as a labeled heatmap."""
        # Build array of order to show and write
        order_array = np.zeros((self.n_tiles_x, self.n_tiles_y))
        for tile in self.tiles:
            order_array[tile.coordinate] = tile.order
        # Modify array to show image correctly
        order_array = np.transpose(order_array, (1, 0))
        # Show figure with colors
        fig = plt.figure()
        ax = fig.add_subplot(111)
        im = ax.imshow(order_array, origin='lower', cmap='cool', interpolation='None')
        for x in range(self.n_tiles_x):
            for y in range(self.n_tiles_y):
                label = int(order_array[y, x])
                ax.text(x, y, label, color='black', ha='center', va='center')
        fig.colorbar(im)
        plt.show()

    def _generateField(self):
        """Load the image, rescale it to the domino grid, and snap each pixel to the nearest domino color."""
        # Load original image
        img = mpimg.imread(self.cfg.image_name)
        # Skip A value for RGBA files
        # Fixed: was `img.shape[2] is 4` — identity comparison with an int
        # literal, which is a SyntaxWarning on Python 3.8+ and only works by
        # CPython's small-int caching.
        if img.shape[2] == 4:
            img = img[:,:,:3]
        # Do some modifications to MR logo to get it the right shape
        if self.cfg.MR_LOGO_PLAN:
            # Crop vertical
            vert_size = img.shape[0]
            crop_factor = 0.4
            lb = int(vert_size/2 - crop_factor*vert_size)
            ub = int(vert_size/2 + crop_factor*vert_size)
            img = img[lb:ub,:,:]
            # Pad horizontal
            hz_size = img.shape[1]
            pad_amount = int(0.1 * hz_size)
            img = np.pad(img, pad_width=[(0, 0), (pad_amount, pad_amount),(0, 0)], mode='constant')
        # Scaled image: one pixel per domino
        img_scaled = sktf.resize(img, (self.cfg.desired_height_dominos, self.cfg.desired_width_dominos), anti_aliasing=False)
        # Parse image into domino IDs by choosing 'closest' color
        img_parsed_color = np.zeros((self.cfg.desired_width_dominos * self.cfg.desired_height_dominos, 3))
        img_parsed_ids = np.zeros_like(img_parsed_color, dtype=np.int_)[:, 0]
        count = 0
        for row in img_scaled:
            for px in row:
                best_id = -1
                best_score = 9999999
                # Nearest color by Euclidean distance in RGB space
                for id, val in enumerate(self.cfg.dominos):
                    color = val[1]
                    score = np.linalg.norm(px - color)
                    if score < best_score:
                        best_score = score
                        best_id = id
                img_parsed_ids[count] = best_id
                img_parsed_color[count] = self.cfg.dominos[best_id][1]
                count = count + 1
        img_parsed_ids = img_parsed_ids.reshape((self.cfg.desired_height_dominos, self.cfg.desired_width_dominos))
        img_parsed_color = img_parsed_color.reshape((self.cfg.desired_height_dominos, self.cfg.desired_width_dominos, 3))
        self.img = img
        self.img_scaled = img_scaled
        self.img_parsed_ids = img_parsed_ids
        self.img_parsed_color = img_parsed_color

    def _addTile(self, tile_coordinate, tile_values, tile_order, vision_offset_map):
        """Create a Tile with its vision offset and append it to self.tiles."""
        vision_offset = vision_offset_map[tile_coordinate]
        new_tile = Tile(self.cfg, tile_coordinate, tile_values, tile_order, vision_offset)
        self.tiles.append(new_tile)

    def _generateTiles(self):
        """Split the parsed domino grid into tiles and sort them by placement order."""
        # Check sizes and make sure things line up
        if self.cfg.desired_height_dominos % self.cfg.tile_height != 0:
            raise ValueError('Field height is not evenly divisible by tile height!')
        if self.cfg.desired_width_dominos % self.cfg.tile_width != 0:
            raise ValueError('Field width is not evenly divisible by tile width!')
        # Determine number of tiles needed in x and y
        self.n_tiles_x = int(self.cfg.desired_width_dominos / self.cfg.tile_width)
        self.n_tiles_y = int(self.cfg.desired_height_dominos / self.cfg.tile_height)
        print("Generating tiles {} x {}".format(self.n_tiles_x, self.n_tiles_y))
        order_map = self._generateTileOrdering(self.n_tiles_x, self.n_tiles_y)
        vision_offset_map = generateVisionOffsetMap(self.cfg, self.n_tiles_x, self.n_tiles_y)
        # Loop over tiles and assign id and colors to each
        for i in range(self.n_tiles_x):
            x_start_idx = i * self.cfg.tile_width
            x_end_idx = (i + 1) * self.cfg.tile_width
            for j in range(self.n_tiles_y):
                # Need to account for flipped y axis with array (row 0 is the
                # top of the image but tile y=0 is the bottom of the field)
                y_start_idx = - j * self.cfg.tile_height - 1
                y_end_idx = - (j + 1) * self.cfg.tile_height - 1
                tile_values = np.copy(self.img_parsed_ids[y_start_idx:y_end_idx:-1, x_start_idx:x_end_idx])
                tile_coord = (i,j)
                tile_order = order_map[tile_coord]
                self._addTile(tile_coord, tile_values, tile_order, vision_offset_map)
        # Sort tile array so they are in order
        self.tiles = sorted(self.tiles, key = lambda tile: tile.order)

    @classmethod
    def _generateTileOrdering(cls, num_x, num_y):
        """
        Generates an ordering that maps x y tile coordinate -> order number
        """
        # return cls._generateTileOrderingDiagonal(num_x, num_y)
        return cls._generateTileOrderingColumns(num_x, num_y)

    @classmethod
    def _generateTileOrderingColumns(cls, num_x, num_y):
        """Column-major ordering: top-to-bottom within a column, columns right-to-left."""
        def coordToOrderLR(coord, num_x, num_y):
            # Left-to-right column ordering (unused alternative)
            return coord[0]*num_y + num_y - coord[1] -1
        def coordToOrderRL(coord, num_x, num_y):
            # Right-to-left column ordering (the one in use)
            return (num_x - coord[0] - 1)*num_y + num_y - coord[1] -1
        all_coords = [(x,y) for x in range(num_x) for y in range(num_y)]
        order_map = {coord: coordToOrderRL(coord, num_x, num_y) for coord in all_coords}
        return order_map

    @classmethod
    def _generateTileOrderingDiagonal(cls, num_x, num_y):
        """Diagonal ordering starting at the top-right corner, sweeping toward (0, 0)."""
        coord = [num_x - 1, num_y - 1] # Starting at top right
        order = 0
        order_map = {tuple(coord): order}
        done = False
        next_pass = np.copy(coord)
        next_pass[1] = coord[1] - 1 # Next pass starts at far right, next row down
        order = order + 1
        while not done:
            # If you are at the top or left, go to next pass
            if coord[1] + 1 >= num_y or coord[0] - 1 < 0:
                coord = np.copy(next_pass)
                # Compute new value for next pass
                if next_pass[1] > 0:
                    next_pass[1] = next_pass[1] - 1 # Next pass starts at far right, next row down
                elif next_pass[1] == 0:
                    next_pass[0] = next_pass[0] - 1 # If we reach the bottom row, shift to the left instead
                else:
                    raise ValueError("Whoops! Shouldn't ever get here!")
            else:
                coord[0] = coord[0] - 1 # Move up and left
                coord[1] = coord[1] + 1
            # Save the order in the map for this coordinate
            order_map[tuple(coord)] = order
            order = order + 1
            # If we got to 0,0 then we are done
            if coord[0] == 0 and coord[1] == 0:
                done = True
        return order_map
class Tile:
    """
    Holds info and useful methods related to an individual domino tile
    """
    def __init__(self, cfg, coordinate, values, order, vision_offset):
        self.coordinate = coordinate        # (x, y) position in the tile grid
        self.vision_offset = vision_offset  # (x, y, angle) vision placement offset
        self.values = values                # 2D array of domino color ids
        self.cfg = cfg
        self.order = order                  # placement order index

    def getPlacementPositionInMeters(self):
        """Return the bottom-left corner of this tile relative to the field origin."""
        x_meters = self.coordinate[0] * self.cfg.tile_size_width_meters
        y_meters = self.coordinate[1] * self.cfg.tile_size_height_meters
        return (x_meters, y_meters)

    def draw(self, array):
        """
        Draw this tile into `array` at its grid location. The array is indexed
        as x,y (not rows/columns); the caller flips it for image display.
        """
        px_per_tile_x = (self.cfg.domino_width_px + self.cfg.domino_spacing_width_px) * self.cfg.tile_width
        px_per_tile_y = (self.cfg.domino_height_px + self.cfg.domino_spacing_height_px) * self.cfg.tile_height
        x0 = self.coordinate[0] * px_per_tile_x
        y0 = self.coordinate[1] * px_per_tile_y
        self.draw_single(array, x0, y0, x0 + px_per_tile_x, y0 + px_per_tile_y)

    def draw_single(self, array, tile_start_x_px, tile_start_y_px, tile_end_x_px, tile_end_y_px, draw_edge=True):
        """Render this tile's background, optional edge lines, and dominos into `array`."""
        # Background fill, one color channel at a time
        for ch in range(3):
            array[tile_start_x_px:tile_end_x_px, tile_start_y_px:tile_end_y_px, ch] = self.cfg.tile_background_color[ch]
        if draw_edge:
            # Only the start edges are drawn; the neighboring tile fills in the end edges
            for ch in range(3):
                array[tile_start_x_px:tile_end_x_px, tile_start_y_px, ch] = self.cfg.tile_edge_color[ch]
                array[tile_start_x_px, tile_start_y_px:tile_end_y_px, ch] = self.cfg.tile_edge_color[ch]
        # Draw each domino as a filled rectangle inside its spacing cell
        step_x = self.cfg.domino_width_px + self.cfg.domino_spacing_width_px
        step_y = self.cfg.domino_height_px + self.cfg.domino_spacing_height_px
        for i in range(self.cfg.tile_width):
            dx0 = tile_start_x_px + self.cfg.domino_spacing_width_px + step_x * i
            dx1 = dx0 + self.cfg.domino_width_px
            for j in range(self.cfg.tile_height):
                dy0 = tile_start_y_px + self.cfg.domino_spacing_height_px + step_y * j
                dy1 = dy0 + self.cfg.domino_height_px
                domino_color = self.cfg.dominos[self.values[j, i]][1]
                for ch in range(3):
                    array[dx0:dx1, dy0:dy1, ch] = domino_color[ch]
class Action:
    """Base class for all plan actions; draw() is a no-op unless overridden."""
    def __init__(self, action_type, name):
        self.action_type = action_type  # ActionTypes enum value
        self.name = name                # human-readable label

    def draw(self, ax, text=""):
        # Base actions have no visual representation
        pass
class SetPoseAction(Action):
    """Action that directly sets the robot pose estimate."""
    def __init__(self, action_type, name, x, y, a):
        """x, y are positions [m]; a is an angle given in degrees, stored in radians."""
        super().__init__(action_type, name)
        self.x = float(x)
        self.y = float(y)
        self.a = math.radians(float(a))
class WaitAction(Action):
    """Action that pauses the plan for a fixed duration."""
    def __init__(self, action_type, name, time):
        """time is the wait duration in seconds."""
        super().__init__(action_type, name)
        self.time = time
class MoveConstVelAction(Action):
    """Action that drives at a constant velocity for a fixed time."""
    def __init__(self, action_type, name, vx, vy, va, t):
        """vx, vy are linear velocities [m/s]; va is angular rate [rad/s]; t is duration [s]."""
        super().__init__(action_type, name)
        self.vx = float(vx)
        self.vy = float(vy)
        self.va = float(va)
        self.t = float(t)
class MoveAction(Action):
    """Action that moves the robot to an absolute pose."""
    def __init__(self, action_type, name, x, y, a):
        # action_type (enum)
        # string name
        # X position [m]
        # Y position [m]
        # Angle [deg] (stored internally in radians)
        super().__init__(action_type, name)
        self.x = float(x)
        self.y = float(y)
        self.a = math.radians(float(a))

    def getPos(self):
        """Return (x, y) as a numpy array."""
        return np.array([self.x, self.y])

    def getAngleDegrees(self):
        return math.degrees(self.a)

    def getAngleRadians(self):
        return self.a

    def draw(self, ax, text="", show_label=True):
        """Draw the target pose as an oriented triangle with an optional label."""
        # Vision/relative moves have no fixed global pose to draw
        if self.action_type in [ActionTypes.MOVE_WITH_VISION, ActionTypes.MOVE_REL, ActionTypes.MOVE_REL_SLOW]:
            return
        # Base triangle at 0 degrees
        scale = 0.3
        p1 = np.array([scale, 0])
        s = math.sin(45*math.pi/180.0)
        c = math.cos(45 * math.pi / 180.0)
        p2 = np.array([-scale*s, scale*c])
        p3 = np.array([-scale*s, -scale*c])
        points = np.vstack((p1, p2 ,p3))
        # Rotate for orientation
        s = math.sin(self.getAngleRadians())
        c = math.cos(self.getAngleRadians())
        R = np.array([[c, -s],[s, c]])
        for i in range(3):
            # Do local rotation
            points[i,:] = np.matmul(R, np.reshape(points[i,:],(2,1))).ravel()
            # Then offset for position
            points[i, :] = points[i, :] + self.getPos()
        ax.add_patch(patches.Polygon(points,
                                     fill=True,
                                     edgecolor='c',
                                     facecolor='c'))
        text_point = points[0]
        text_to_show = self.name
        # Fixed: was `text is not ""` — identity comparison against a string
        # literal (SyntaxWarning on Python 3.8+, works only by interning)
        if text != "":
            text_to_show = text
        if show_label:
            ax.annotate(text_to_show, xy=text_point[:2], xytext=[1, 1], textcoords="offset points", fontsize=8, color="green")
def generate_full_action_sequence(cfg, tile):
    """
    Standard sequence:
    - Move to load
    - Do load
    - Move out of load
    - Move to field entry
    - Move to coarse drop off
    - Wait for localization
    - Move to fine drop off
    - Drop off
    - Move to coarse drop off
    - Move to field exit
    - Move to near load
    """
    # Setup positions in field frame
    tile_pos_in_field_frame = np.array(tile.getPlacementPositionInMeters())
    robot_placement_pos_field_frame = tile_pos_in_field_frame + Utils.TransformPos(cfg.tile_to_robot_offset, [0,0], cfg.field_to_robot_frame_angle)
    robot_placement_fine_pos_field_frame = robot_placement_pos_field_frame + Utils.TransformPos(cfg.tile_placement_fine_offset, [0,0], cfg.field_to_robot_frame_angle)
    robot_placement_coarse_pos_field_frame = robot_placement_fine_pos_field_frame + Utils.TransformPos(cfg.tile_placement_coarse_offset, [0,0], cfg.field_to_robot_frame_angle)
    # Convert positions to global frame
    robot_placement_coarse_pos_global_frame = Utils.TransformPos(robot_placement_coarse_pos_field_frame, cfg.domino_field_origin, cfg.domino_field_angle)
    robot_placement_fine_pos_global_frame = Utils.TransformPos(robot_placement_fine_pos_field_frame, cfg.domino_field_origin, cfg.domino_field_angle)
    robot_field_angle = cfg.domino_field_angle + cfg.field_to_robot_frame_angle
    # Setup various entry and intermediate positions
    intermediate_entry_pos_global_frame = np.array([cfg.highway_x, cfg.intermediate_entry_hz_y])
    entry_y = robot_placement_coarse_pos_global_frame[1]+cfg.enter_position_distance
    field_entry_pos_global_frame = np.array([cfg.highway_x+0.75, entry_y])
    intermediate_place_pos_global_frame = np.array([robot_placement_coarse_pos_global_frame[0], entry_y])
    # Make pose adjustments based on config
    coord = tile.coordinate
    x_offset_row = cfg.x_offset_rows[coord[1]]
    y_offset_row = cfg.y_offset_rows[coord[1]]
    y_offset_col = cfg.y_offset_cols[coord[0]]
    extra_y_offset_fine = y_offset_col + y_offset_row
    robot_placement_fine_pos_global_frame[0] += x_offset_row
    robot_placement_fine_pos_global_frame[1] += extra_y_offset_fine
    robot_placement_coarse_pos_global_frame[1] += extra_y_offset_fine
    intermediate_place_pos_global_frame[1] += extra_y_offset_fine
    field_entry_pos_global_frame[1] += extra_y_offset_fine
    # Figure out if intermediate steps are needed
    # NOTE(review): these flags are only consumed by the commented-out
    # intermediate moves below; kept for when those are re-enabled
    intermediate_hz = robot_placement_coarse_pos_global_frame[1] < cfg.intermediate_entry_hz_y - 1
    intermediate_vt = robot_placement_coarse_pos_global_frame[0] > cfg.intermediate_place_vt_x + 1
    # Tiles near the back wall don't have enough space for backwards offset
    relative_tile_offset = copy.deepcopy(cfg.tile_placement_coarse_offset)
    if robot_placement_coarse_pos_global_frame[0] < 2:
        intermediate_place_pos_global_frame[0] = robot_placement_fine_pos_global_frame[0]
        robot_placement_coarse_pos_global_frame[0] = robot_placement_fine_pos_global_frame[0]
        relative_tile_offset = np.asarray([0, 1])
    actions = []
    name = "Move to load waypoint - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, cfg.load_waypoint[0], cfg.load_waypoint[1], cfg.highway_angle))
    # name = "Wait for localization"
    # actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    name = "Move to near load area - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, cfg.base_station_prep_pos[0], cfg.base_station_prep_pos[1], cfg.base_station_target_angle))
    # name = "Pause plan for load"
    # actions.append(Action(ActionTypes.PAUSE_PLAN, name))
    # name = "Start cameras"
    # actions.append(Action(ActionTypes.START_CAMERAS, name))
    # name = "Wait for localization"
    # actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    # name = "Move to load prep - fine"
    # actions.append(MoveAction(ActionTypes.MOVE_FINE, name, cfg.base_station_prep_pos[0], cfg.base_station_prep_pos[1], cfg.base_station_target_angle))
    # name = "Move to load prep - vision"
    # actions.append(MoveAction(ActionTypes.MOVE_WITH_VISION, name, cfg.base_station_prep_vision_offset[0], cfg.base_station_prep_vision_offset[1], cfg.base_station_prep_vision_offset[2]))
    # name = "Move to load - relative slow"
    # actions.append(MoveAction(ActionTypes.MOVE_REL_SLOW, name, cfg.base_station_relative_offset[0], cfg.base_station_relative_offset[1], cfg.base_station_relative_offset[2]))
    # name = "Align with load"
    # actions.append(MoveAction(ActionTypes.MOVE_WITH_VISION, name, cfg.base_station_vision_offset[0], cfg.base_station_vision_offset[1], cfg.base_station_vision_offset[2]))
    # name = "Stop cameras"
    # actions.append(Action(ActionTypes.STOP_CAMERAS, name))
    name = "Load tile"
    actions.append(Action(ActionTypes.LOAD, name))
    # name = "Move away from load - relative slow"
    # actions.append(MoveAction(ActionTypes.MOVE_REL_SLOW, name, -1.3*cfg.base_station_relative_offset[0], -1.3*cfg.base_station_relative_offset[1], -1.3*cfg.base_station_relative_offset[2]))
    # name = "Pause plan for QC"
    # actions.append(Action(ActionTypes.PAUSE_PLAN, name))
    name = "Move to load waypoint - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, cfg.load_waypoint[0], cfg.load_waypoint[1], cfg.highway_angle))
    # name = "Wait for localization"
    # actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    # if intermediate_hz:
    #     name = "Move to intermediate enter - coarse"
    #     actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, intermediate_entry_pos_global_frame[0], intermediate_entry_pos_global_frame[1], cfg.highway_angle))
    #     name = "Wait for motor cooldown (long)"
    #     actions.append(WaitAction(ActionTypes.WAIT, name, 20))
    name = "Move to enter - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, field_entry_pos_global_frame[0], field_entry_pos_global_frame[1], robot_field_angle))
    name = "Wait for localization"
    actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    # name = "Wait for motor cooldown"
    # actions.append(WaitAction(ActionTypes.WAIT, name, 10))
    # if intermediate_vt:
    #     name = "Move to intermediate place - coarse"
    #     actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, intermediate_place_pos_global_frame[0], intermediate_place_pos_global_frame[1], robot_field_angle))
    #     name = "Wait for localization"
    #     actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    #     name = "Wait for motor cooldown"
    #     actions.append(WaitAction(ActionTypes.WAIT, name, 10))
    name = "Move to near place - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, robot_placement_coarse_pos_global_frame[0], robot_placement_coarse_pos_global_frame[1], robot_field_angle))
    name = "Start cameras"
    actions.append(Action(ActionTypes.START_CAMERAS, name))
    name = "Wait for localization"
    actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    name = "Move to place - fine"
    actions.append(MoveAction(ActionTypes.MOVE_FINE_STOP_VISION, name, robot_placement_fine_pos_global_frame[0], robot_placement_fine_pos_global_frame[1], robot_field_angle + cfg.angle_adjust_fine))
    name = "Move to place - vision"
    actions.append(MoveAction(ActionTypes.MOVE_WITH_VISION, name, tile.vision_offset[0], tile.vision_offset[1], tile.vision_offset[2]))
    name = "Stop cameras"
    actions.append(Action(ActionTypes.STOP_CAMERAS, name))
    # name = "Pause plan for QC"
    # actions.append(Action(ActionTypes.PAUSE_PLAN, name))
    name = "Place tile"
    actions.append(Action(ActionTypes.PLACE, name))
    name = "Move away from place - relative slow"
    actions.append(MoveAction(ActionTypes.MOVE_REL_SLOW, name, relative_tile_offset[0], relative_tile_offset[1], 0))
    # if intermediate_vt:
    #     name = "Move to intermediate place - coarse"
    #     actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, intermediate_place_pos_global_frame[0], intermediate_place_pos_global_frame[1], robot_field_angle))
    #     name = "Wait for motor cooldown"
    #     actions.append(WaitAction(ActionTypes.WAIT, name, 10))
    name = "Move to intermediate place - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, intermediate_place_pos_global_frame[0], intermediate_place_pos_global_frame[1], robot_field_angle))
    name = "Move to exit - coarse"
    actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, field_entry_pos_global_frame[0], field_entry_pos_global_frame[1], cfg.highway_angle))
    name = "Wait for localization"
    actions.append(Action(ActionTypes.WAIT_FOR_LOCALIZATION, name))
    # name = "Wait for motor cooldown"
    # actions.append(WaitAction(ActionTypes.WAIT, name, 10))
    # if intermediate_hz:
    #     name = "Move to intermediate exit - coarse"
    #     actions.append(MoveAction(ActionTypes.MOVE_COARSE, name, intermediate_entry_pos_global_frame[0], intermediate_entry_pos_global_frame[1], cfg.highway_angle))
    #     name = "Wait for motor cooldown (long)"
    #     actions.append(WaitAction(ActionTypes.WAIT, name, 20))
    return actions
def generate_hax_action_sequence(cfg, tile):
    """Hard-coded shakedown sequence: rotate through three headings at two
    field positions, pausing after each set.

    cfg and tile are unused; they are accepted only so this function matches
    the signature of the other cycle generator functions.
    """
    x_pose = 7
    return [
        MoveAction(ActionTypes.MOVE_COARSE, "Move 1 - coarse 90", x_pose, 5, 90),
        MoveAction(ActionTypes.MOVE_COARSE, "Move 1 - coarse 0", x_pose, 5, 0),
        MoveAction(ActionTypes.MOVE_COARSE, "Move 1 - coarse -90", x_pose, 5, -90),
        WaitAction(ActionTypes.WAIT, "Wait 1", 5),
        MoveAction(ActionTypes.MOVE_COARSE, "Move 2 - coarse -90", x_pose, -3.5, -90),
        MoveAction(ActionTypes.MOVE_COARSE, "Move 2 - coarse 0 ", x_pose, -3.5, 0),
        MoveAction(ActionTypes.MOVE_COARSE, "Move 2 - coarse 90", x_pose, -3.5, 90),
        WaitAction(ActionTypes.WAIT, "Wait 2", 5),
    ]
def generate_small_testing_action_sequence(cfg, tile):
    """
    Short sequence for testing
    Load
    Move to near place - coarse
    Wait for localization
    Move to place - fine
    Place
    Move away from place - fine
    Move to load - coarse
    """
    # Placement poses, first expressed in the field frame.
    tile_pos_field = np.array(tile.getPlacementPositionInMeters())
    fine_pos_field = tile_pos_field + Utils.TransformPos(cfg.tile_to_robot_offset, [0, 0], cfg.field_to_robot_frame_angle)
    coarse_pos_field = fine_pos_field + Utils.TransformPos(cfg.tile_placement_coarse_offset, [0, 0], cfg.field_to_robot_frame_angle)
    enter_prep_field = [coarse_pos_field[0], - cfg.prep_position_distance]
    exit_prep_field = [-cfg.exit_position_distance, coarse_pos_field[1]]

    def to_global(pos_field):
        # Field frame -> global frame via the configured field origin/angle.
        return Utils.TransformPos(pos_field, cfg.domino_field_origin, cfg.domino_field_angle)

    tile_pos_global = to_global(tile_pos_field)      # kept for debug inspection
    coarse_pos_global = to_global(coarse_pos_field)
    fine_pos_global = to_global(fine_pos_field)
    enter_prep_global = to_global(enter_prep_field)
    exit_prep_global = to_global(exit_prep_field)    # computed but not used by this short sequence
    robot_field_angle = cfg.domino_field_angle + cfg.field_to_robot_frame_angle

    return [
        Action(ActionTypes.LOAD, "Load tile"),
        MoveAction(ActionTypes.MOVE_COARSE, "Move to prep - coarse", enter_prep_global[0], enter_prep_global[1], robot_field_angle),
        Action(ActionTypes.WAIT_FOR_LOCALIZATION, "Wait for localization"),
        MoveAction(ActionTypes.MOVE_COARSE, "Move to near place - coarse", coarse_pos_global[0], coarse_pos_global[1], robot_field_angle),
        Action(ActionTypes.WAIT_FOR_LOCALIZATION, "Wait for localization"),
        Action(ActionTypes.START_CAMERAS, "Start cameras"),
        MoveAction(ActionTypes.MOVE_FINE, "Move to place - fine", fine_pos_global[0], fine_pos_global[1], robot_field_angle),
        MoveAction(ActionTypes.MOVE_WITH_VISION, "Move to place - vision", tile.vision_offset[0], tile.vision_offset[1], tile.vision_offset[2]),
        Action(ActionTypes.STOP_CAMERAS, "Stop cameras"),
        Action(ActionTypes.PLACE, "Place tile"),
        MoveAction(ActionTypes.MOVE_COARSE, "Move away from place - fine", coarse_pos_global[0], coarse_pos_global[1], robot_field_angle),
        MoveAction(ActionTypes.MOVE_COARSE, "Move to exit - coarse", enter_prep_global[0], enter_prep_global[1], robot_field_angle),
        MoveAction(ActionTypes.MOVE_COARSE, "Move to near load - coarse", cfg.load_pose[0], cfg.load_pose[1], cfg.load_pose[2]),
    ]
def draw_env(cfg):
    """
    Draws a figure of the environment
    """
    fig, ax = plt.subplots(1)

    def outline(corner, opposite, **patch_kwargs):
        # Add a rectangle spanning from its bottom-left corner to the
        # opposite corner, forwarding any matplotlib patch styling.
        ax.add_patch(patches.Rectangle(corner,
                                       opposite[0] - corner[0],
                                       opposite[1] - corner[1],
                                       **patch_kwargs))

    # Overall map boundaries (blue outline)
    outline(cfg.robot_boundaries[0], cfg.robot_boundaries[1], fill=False, edgecolor='b')
    # Domino field boundaries (red outline)
    outline(cfg.domino_field_origin, cfg.domino_field_top_right, fill=False, edgecolor='r')
    # Base station (solid black)
    outline(cfg.base_station_boundaries[0], cfg.base_station_boundaries[1], fill=True, edgecolor='k', facecolor='k')
    ax.set_xlim(left=-1, right=cfg.robot_boundaries[1][0] + 1)
    ax.set_ylim(bottom=-1, top=cfg.robot_boundaries[1][1] + 1)
    ax.axis('equal')
    return ax
class Cycle:
    """One placement cycle: a tile plus the action sequence a robot runs for it."""

    def __init__(self, id, cfg, robot_id, tile, action_sequence):
        self.id = id
        self.cfg = cfg
        self.robot_id = robot_id
        self.tile = tile
        self.action_sequence = action_sequence

    def draw_cycle(self, ax):
        """Draw every action in this cycle onto the given axes."""
        for step in self.action_sequence:
            step.draw(ax)

    def draw_action(self, ax, idx, text=""):
        """Draw the single action at idx, optionally labeled with text."""
        self.action_sequence[idx].draw(ax, text)
def generate_standard_cycles(cfg, field, cycle_generator_fn):
    """Build one Cycle per tile, assigning robots round-robin.

    Robot ids cycle through robot1..robotN where N = len(cfg.ip_map), in
    tile order. The cycle id is the tile's placement order.
    """
    n_robots = len(cfg.ip_map)
    cycles = []
    for idx, tile in enumerate(field.tiles):
        # Round-robin assignment: robot numbers are 1-based.
        robot_name = "robot{}".format((idx % n_robots) + 1)
        cycles.append(Cycle(tile.order, cfg, robot_name, tile, cycle_generator_fn(cfg, tile)))
    return cycles
class BasePlan:
    """Minimal plan interface: an ordered list of cycles with safe lookups."""

    def __init__(self, cycles):
        self.cycles = cycles

    def get_cycle(self, cycle_num):
        """Return the cycle at cycle_num, or None when the index is out of range."""
        try:
            return self.cycles[cycle_num]
        except IndexError:
            return None

    def get_action(self, cycle_id, action_id):
        """Return the action at (cycle_id, action_id), or None when either index is invalid."""
        cycle = self.get_cycle(cycle_id)
        if not cycle:
            return None
        try:
            return cycle.action_sequence[action_id]
        except IndexError:
            return None
class RealPlan(BasePlan):
    """Plan backed by a real config and domino field, with drawing helpers."""

    def __init__(self, cfg, field, cycles):
        self.cfg = cfg
        self.field = field
        super().__init__(cycles)

    def draw_cycle(self, cycle_num):
        """Draw the environment plus every action of the given cycle."""
        ax = draw_env(self.cfg)
        self.get_cycle(cycle_num).draw_cycle(ax)
        plt.show()

    def find_pose_move_idx(self, cycle):
        """Return the index of the move action that positions the robot for placement.

        Searches backwards from the PLACE action for the nearest MOVE_FINE or
        MOVE_COARSE action. Raises ValueError when either the PLACE action or
        a preceding move cannot be found.
        """
        # Figure out what id the tile pose is
        place_idx = -1
        for i, action in enumerate(cycle.action_sequence):
            if action.action_type == ActionTypes.PLACE:
                place_idx = i
                break
        if place_idx == -1:
            raise ValueError("Couldn't find placement index")
        tile_pose_move_idx = -1
        # Walk backwards from the placement down to and including index 0.
        # (Bug fix: the previous range(place_idx, 0, -1) never examined the
        # first action of the sequence, so a move at index 0 raised instead
        # of being found.)
        for j in range(place_idx, -1, -1):
            action = cycle.action_sequence[j]
            if action.action_type == ActionTypes.MOVE_FINE or action.action_type == ActionTypes.MOVE_COARSE:
                tile_pose_move_idx = j
                break
        if tile_pose_move_idx == -1:
            raise ValueError("Couldn't find movement index")
        return tile_pose_move_idx

    def draw_all_tile_poses(self):
        """Draw the environment with the placement move of every cycle, labeled by tile order."""
        ax = draw_env(self.cfg)
        for cycle in self.cycles:
            tile_pose_move_idx = self.find_pose_move_idx(cycle)
            cycle.draw_action(ax, tile_pose_move_idx, text=cycle.tile.order)
        plt.show()
class Plan(RealPlan):
    """RealPlan generated directly from a config: builds the field, then the cycles."""

    def __init__(self, cfg, cycle_generator_fn):
        field = DominoField(cfg)
        field.generate()
        logging.info('Generating robot actions...')
        generated_cycles = generate_standard_cycles(cfg, field, cycle_generator_fn)
        logging.info('done.')
        super().__init__(cfg, field, generated_cycles)
class SubsectionPlan(RealPlan):
    """RealPlan restricted to the tiles whose coordinates fall inside the
    config's [start_coords, end_coords] window (inclusive on both ends)."""

    def __init__(self, full_plan):
        self.full_plan = full_plan
        self.start_coords = full_plan.cfg.start_coords
        self.end_coords = full_plan.cfg.end_coords
        self.delta_coords = (self.end_coords[0] - self.start_coords[0],
                             self.end_coords[1] - self.start_coords[1])

        new_field = DominoField(full_plan.cfg)
        # NOTE(review): the filter below is inclusive on both ends, so the
        # selected tile count per axis is delta + 1 — confirm whether
        # n_tiles_x/y should account for that.
        new_field.n_tiles_x = self.delta_coords[0]
        new_field.n_tiles_y = self.delta_coords[1]
        new_field.tiles = []

        new_cycles = []
        counter = 0
        for i in range(len(full_plan.field.tiles)):
            tile = full_plan.field.tiles[i]
            cycle = full_plan.cycles[i]
            coords = tile.coordinate
            inside = (self.start_coords[0] <= coords[0] <= self.end_coords[0] and
                      self.start_coords[1] <= coords[1] <= self.end_coords[1])
            if not inside:
                continue
            # Deep-copy the tile and its cycle, renumbering into the subsection.
            new_tile = copy.deepcopy(tile)
            new_tile.order = counter
            new_field.tiles.append(new_tile)
            new_cycle = copy.deepcopy(cycle)
            new_cycle.id = counter
            new_cycle.tile = new_tile
            new_cycles.append(new_cycle)
            counter += 1

        super().__init__(self.full_plan.cfg, new_field, new_cycles)
class TestPlan(BasePlan):
    """
    Test plan used for debugging and testing various action sequences
    """

    def __init__(self):
        # Note: all three cycles intentionally share this same action list.
        actions = [
            MoveAction(ActionTypes.MOVE_COARSE, "TestMoveCoarse", 0.5, 0.5, 0),
            MoveAction(ActionTypes.MOVE_FINE, 'TestMoveFine', 1, 1, 0),
            MoveAction(ActionTypes.MOVE_COARSE, 'Blah', 0, 0, 3.14),
        ]
        cycles = [Cycle(i, None, 'robot1', 'TestCycle{}'.format(i), actions) for i in range(3)]
        super().__init__(cycles)
def RunFieldPlanning(autosave=False):
    """Generate the field plan from the current config and return it.

    Sets up file+console logging, picks the cycle generator based on the
    config flags, optionally restricts to a subsection, and (when autosave
    is True) pickles the plan to <plans_dir>/autosaved.p.
    """
    cfg = config.Config()
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler(os.path.join(cfg.log_folder, "planner.log")),
            logging.StreamHandler()
        ]
    )
    generator_fn = generate_full_action_sequence
    if cfg.USE_SMALL_TESTING_CONFIG:
        generator_fn = generate_small_testing_action_sequence
    plan = Plan(cfg, generator_fn)
    if cfg.USE_SUBSECTION:
        plan = SubsectionPlan(plan)
    if autosave:
        fname = os.path.join(cfg.plans_dir, "autosaved.p")
        with open(fname, 'wb') as f:
            pickle.dump(plan, f)
        logging.info("Saved plan to {}".format(fname))
    return plan
def GeneratePDF(plan):
    """Render every tile of the plan as one page of a landscape-letter PDF.

    Each page shows the tile index/coordinate as a title, the rendered
    domino image centered on the page, and an orientation note. The file is
    written into plan.cfg.plans_dir. Requires reportlab and PIL (imported
    lazily so the planner works without them).
    """
    from reportlab.lib.pagesizes import letter
    from reportlab.pdfgen import canvas
    from PIL import Image
    logging.info("Generating PDF")
    # Initialize pdf
    name = "domino_plan.pdf"
    if plan.cfg.MR_LOGO_PLAN:
        name = "domino_plan_logo.pdf"  # bug fix: was misspelled "domnino_plan_logo.pdf"
    full_path = os.path.join(plan.cfg.plans_dir, name)
    page_height, page_width = letter # Flipped to get landscape
    c = canvas.Canvas(full_path, pagesize=(page_width, page_height))
    # Pre allocate image array dimensions (pixels per tile, incl. spacing)
    tile_size_x_in_px = (plan.cfg.domino_width_px + plan.cfg.domino_spacing_width_px) * plan.cfg.tile_width
    tile_size_y_in_px = (plan.cfg.domino_height_px + plan.cfg.domino_spacing_height_px) * plan.cfg.tile_height
    for i in range(len(plan.field.tiles)):
        # Get tile to draw on this page
        tile = plan.field.tiles[i]
        # Draw title
        text_width = 0.5 * page_width
        text_height = 0.9 * page_height
        text = "Tile {}, Coordinate: ({}, {})".format(i, tile.coordinate[0], tile.coordinate[1])
        c.setFont("Helvetica", 20)
        c.drawCentredString(text_width, text_height, text)
        # Draw orientation note
        text_width = 0.5 * page_width
        text_height = 0.1 * page_height
        text = "This side towards robot body"
        c.drawCentredString(text_width, text_height, text)
        # Draw image: render into a float array, then transpose/flip it into
        # image orientation before converting to an 8-bit RGB image.
        image_array = np.zeros((tile_size_x_in_px, tile_size_y_in_px, 3))
        tile.draw_single(image_array, 0, 0, tile_size_x_in_px, tile_size_y_in_px, draw_edge=False)
        image_array = np.transpose(image_array, (1, 0, 2))
        image_array = np.flip(image_array, 0)
        im = Image.fromarray(np.uint8(255 * image_array), mode='RGB')
        # Center the image on the page, leaving a 15% margin on each side.
        image_fraction = 0.7
        start_width = (1 - image_fraction) / 2.0 * page_width
        start_height = (1 - image_fraction) / 2.0 * page_height
        image_width = image_fraction * page_width
        image_height = image_fraction * page_height
        c.drawInlineImage(im, start_width, start_height, width=image_width, height=image_height)
        # Complete page
        c.showPage()
    c.save()
if __name__ == '__main__':
    import PySimpleGUI as sg
    # Build the plan from the current config (set autosave=True to pickle it).
    plan = RunFieldPlanning(autosave=False)
    # plan.field.printStats()
    # plan.field.show_image_parsing()
    # plan.field.render_domino_image_tiles()
    # plan.field.show_tile_ordering()
    # Preview a few representative cycles and all tile placement poses.
    plan.draw_cycle(0)
    plan.draw_cycle(10)
    plan.draw_cycle(19)
    plan.draw_all_tile_poses()
    # GeneratePDF(plan)
    # sg.change_look_and_feel('Dark Blue 3')
    # clicked_value = sg.popup_yes_no('Save plan to file?')
    # if clicked_value == "Yes":
    #     fname = sg.popup_get_file("Location to save", save_as=True)
    #     with open(fname, 'wb') as f:
    #         pickle.dump(plan, f)
    #     logging.info("Saved plan to {}".format(fname))
================================================
FILE: src/master/MarvelMindHandler.py
================================================
"""
Python wrapper for Marvelmind C API
"""
import ctypes
import logging
from collections import defaultdict
from Utils import NonBlockingTimer
class MarvelmindWrapper:
    """Thin ctypes wrapper around the Marvelmind beacon C API DLL.

    Loads the shared library from cfg.mm_api_path, opens the serial port on
    construction, and manages waking/sleeping the beacon addresses listed in
    cfg.device_map. NOTE(review): uses ctypes.windll, so this appears to be
    Windows-only — confirm.
    """

    def __init__(self, cfg):
        self.lib = ctypes.windll.LoadLibrary(cfg.mm_api_path)
        self.devices = defaultdict(dict)  # beacon address -> {'sleep': bool}
        self.expected_devices = []
        self.wake_timer = None  # set while beacons warm up after a wake request
        # Flatten cfg.device_map ({name: [beacon addresses]}) into one flat list.
        for name,beacons in cfg.device_map.items():
            for b in beacons:
                self.expected_devices.append(b)
        self._open_serial_port()

    def __del__(self):
        # Best-effort cleanup of the serial connection.
        self._close_serial_port()

    def _open_serial_port(self):
        """
        Establish communication with marvelmind router. Required for all other functions.
        """
        logging.info("Opening communication with MarvelMind")
        fn = self.lib.mm_open_port
        fn.restype = ctypes.c_bool
        fn.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
        # NULL argument; the returned success flag is currently ignored.
        status = fn(None)

    def _close_serial_port(self):
        """
        Close communication with marvelmind router.
        """
        fn = self.lib.mm_close_port
        fn.restype = ctypes.c_bool
        fn.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
        # Success flag is currently ignored.
        status = fn(None)

    def check_all_devices_status(self):
        """
        Checks to see if all devices are awake and ready
        """
        logging.info("Getting device list")
        fn = self.lib.mm_get_devices_list
        fn.restype = ctypes.c_bool
        fn.argtypes = [ctypes.POINTER(ctypes.c_uint8)]
        buff_size = 255
        dataptr = (ctypes.c_uint8*buff_size)()
        dataptr = ctypes.cast(dataptr, ctypes.POINTER(ctypes.c_uint8))
        status = fn(dataptr)
        # Buffer layout: byte 0 is the device count, followed by one
        # fixed-size record per device.
        num_devices = dataptr[0]
        logging.info("Num devices found: " + str(num_devices))
        idx = 1
        device_offset = 9   # record stride in bytes — assumed from API layout, TODO confirm
        addr_offset = 0     # address byte within a record
        sleep_offset = 2    # sleep-state byte within a record
        for i in range(num_devices):
            addr = dataptr[idx + addr_offset]
            sleep = dataptr[idx + sleep_offset]
            self.devices[addr] = {'sleep': bool(sleep)}
            idx += device_offset
        ready = True
        if self.wake_timer:
            # Still inside the post-wake warmup window: report not ready.
            # NOTE(review): the timer is never cleared once set, so after it
            # expires this branch always reports ready without re-checking the
            # per-device flags below — confirm this is intended.
            if not self.wake_timer.check():
                ready = False
        else:
            for d in self.expected_devices:
                if d not in self.devices.keys():
                    ready = False
                    logging.warning("Could not find expected device {}".format(d))
                    continue
                # NOTE(review): this warns "not awake" when the 'sleep' flag is
                # False, which reads inverted relative to the key name — confirm
                # the Marvelmind API's sleep-byte semantics.
                if self.devices[d]['sleep'] is False:
                    ready = False
                    logging.warning("Device {} is not awake".format(d))
                    continue
        return ready

    def wake_all_devices(self):
        """Send a wake request to every expected beacon."""
        for d in self.expected_devices:
            self.wake_device(d)

    def wake_all_devices_only_if_needed(self):
        """Wake the beacons only when the status check says they are not ready.

        Starts a 30 second warmup timer so subsequent status checks wait for
        the beacons to come up.
        """
        ok = self.check_all_devices_status()
        if not ok:
            logging.info("Triggering marvelmind device wakeup. Will wait for a moment to let them warmup")
            self.wake_timer = NonBlockingTimer(30)
            self.wake_all_devices()

    def sleep_all_devices(self):
        """Send a sleep request to every expected beacon."""
        for d in self.expected_devices:
            self.sleep_device(d)

    def wake_device(self, address):
        """
        Wake up the device with the given address
        """
        logging.info("Waking device: {}".format(address))
        fn = self.lib.mm_wake_device
        fn.restype = ctypes.c_bool
        fn.argtypes = [ctypes.c_uint8]
        status = fn(address)
        if not status:
            logging.info("Warning: unable to wake device {}".format(address))

    def sleep_device(self, address):
        """
        Sleep the device with the given address
        """
        logging.info("Sleeping device: {}".format(address))
        fn = self.lib.mm_send_to_sleep_device
        fn.restype = ctypes.c_bool
        fn.argtypes = [ctypes.c_uint8]
        status = fn(address)
        if not status:
            logging.info("Warning: unable to sleep device {}".format(address))

    def get_metrics(self):
        """Return the most recently observed device status map (address -> {'sleep': bool})."""
        return self.devices
class MockMarvelmindWrapper:
    """Drop-in stand-in for MarvelmindWrapper when no hardware is attached.

    Every operation is a no-op; status checks always report ready.
    """

    def __init__(self, cfg):
        # cfg accepted for interface parity; nothing to set up.
        pass

    def check_all_devices_status(self):
        # Pretend every beacon is awake and ready.
        return True

    def wake_all_devices_only_if_needed(self):
        pass

    def wake_all_devices(self):
        pass

    def sleep_all_devices(self):
        pass

    def wake_device(self, address):
        pass

    def sleep_device(self, address):
        pass

    def get_metrics(self):
        # No real devices, so no metrics to report.
        return {}
================================================
FILE: src/master/MasterMain.py
================================================
import time
import math
import copy
import config
import os
import sys
import logging
import PySimpleGUI as sg
import traceback
from FieldPlanner import *
from Runtime import RuntimeManager, PlanStatus
import Utils
# Background colors for the GUI status panels: green when a target reports
# healthy status, red when it is offline or in error.
STATUS_PANEL_OK_COLOR = "green"
STATUS_PANEL_BAD_COLOR = "red"
def status_panel(name):
    """Return the layout rows for one status panel: a title plus a text box.

    The text box key is '_<NAME>_STATUS_' so it can be updated later; it
    starts out red and reading "<name> offline" until real status arrives.
    """
    width = 40
    height = 20
    title_row = [sg.Text("{} status".format(name))]
    body_row = [sg.Text("{} offline".format(name),
                        size=(width, height),
                        relief=sg.RELIEF_RIDGE,
                        key='_{}_STATUS_'.format(name.upper()),
                        background_color=STATUS_PANEL_BAD_COLOR)]
    return [title_row, body_row]
def setup_gui_layout(config, panel_names, target_names):
    """Build the 3-column PySimpleGUI layout.

    Column 1: one status panel per name plus the plan cycle/action buttons.
    Column 2: the graph canvas, manual-command widgets, and plan/ESTOP buttons.
    Column 3: a scrolling output pane that echoes stdout/stderr.
    """
    plan_button_size = [9,2]
    plan_button_pad = (2, 10)
    # Left hand column with status panels
    col1 = []
    for name in panel_names:
        col1 += status_panel(name)
    # Plan cycle/action modification buttons (disabled until a plan is loaded)
    incremenet_cycle_button = sg.Button('Cycle +', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_INC_CYCLE_', disabled=True)
    decremenet_cycle_button = sg.Button('Cycle -', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_DEC_CYCLE_', disabled=True)
    incremenet_action_button = sg.Button('Action +', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_INC_ACTION_', disabled=True)
    decremenet_action_button = sg.Button('Action -', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_DEC_ACTION_', disabled=True)
    set_cycle_field = sg.Input(key='_CYCLE_FIELD_', size=(5,1), pad=(0,20))
    set_action_field = sg.Input(key='_ACTION_FIELD_', size=(5,1), pad=(0,20))
    set_cycle_button = sg.Button('Set Cycle', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_SET_CYCLE_', disabled=True)
    set_action_button = sg.Button('Set Action', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_SET_ACTION_', disabled=True)
    cycle_buttons = [[sg.Column([[decremenet_cycle_button], [decremenet_action_button]]), sg.Column([[incremenet_cycle_button], [incremenet_action_button]]),
                     sg.Column([[set_cycle_field], [set_action_field]]), sg.Column([[set_cycle_button], [set_action_button]]) ]]
    col1 += cycle_buttons
    # Middle column with plot and buttons
    target_element = [ [sg.Text("Target: ")], [sg.Combo(target_names, key='_TARGET_', default_value='robot1')] ]
    actions = [a for a in ActionTypes]
    action_element = [ [sg.Text("Action: ")], [sg.Combo(actions, key='_ACTION_')] ]
    data_element = [ [sg.Text('Data:')], [sg.Input(key='_ACTION_DATA_')] ]
    # Hidden input backing the FileBrowse button so file picks arrive as events.
    plan_file_field = sg.Input(key='_PLAN_FILE_', visible=False, enable_events=True)
    load_plan_button = sg.FileBrowse(button_text='Load Plan', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, \
        key='_LOAD_PLAN_', file_types=(('Robot Plans', ('*.json','*.p')),))
    run_plan_button = sg.Button('Run Plan', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_RUN_PLAN_', disabled=True)
    pause_plan_button = sg.Button('Pause Plan', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_PAUSE_PLAN_', disabled=True)
    abort_plan_button = sg.Button('Abort Plan', button_color=('white','blue'), size=plan_button_size, pad=plan_button_pad, key='_ABORT_PLAN_', disabled=True)
    plan_buttons = [[sg.Column([[plan_file_field, load_plan_button], [run_plan_button]]), sg.Column([[pause_plan_button], [abort_plan_button]])]]
    button_size = [20,6]
    button_pad = (2,10)
    estop_button = [[sg.Button('ESTOP', button_color=('white','red'), size=button_size, pad=button_pad) ]]
    manual_button = [[sg.Button('Send Command', button_color=('white','green'), size=button_size, pad=button_pad) ]]
    clear_error_button = [[sg.Button('Clear Error', button_color=('white','orange red'), size=[10,6], pad=button_pad)]]
    col2 = [[sg.Graph(canvas_size=(700,700), graph_bottom_left=config.graph_bottom_left, graph_top_right=config.graph_top_right, float_values=True, key="_GRAPH_", background_color="light grey") ],
            [sg.Column(target_element), sg.Column(action_element), sg.Column(data_element)],
            [sg.Column(plan_buttons), sg.Column(estop_button), sg.Column(manual_button), sg.Column(clear_error_button)] ]
    # Right hand column with text ouput
    col3 = [[sg.Output(size=(70, 50), echo_stdout_stderr=True)]]
    return [[ sg.Column(col1), sg.Column(col2), sg.Column(col3)]]
class CmdGui:
    def __init__(self, config):
        """Build the PySimpleGUI window and the drawing/debounce state."""
        self.config = config
        sg.change_look_and_feel('DarkBlack')
        # One status panel per robot in the config, plus one for the plan.
        panel_names = ["{}".format(n) for n in self.config.ip_map]
        panel_names += ['plan']
        # The plan panel is not a valid manual-command target.
        target_names = copy.deepcopy(panel_names)
        target_names.remove('plan')
        layout = setup_gui_layout(config, panel_names, target_names)
        self.window = sg.Window('Robot Controller', layout, return_keyboard_events=True, use_default_focus=False)
        self.window.finalize()
        # Maps viz keys to lists of graph figure handles so stale drawings can be erased.
        self.viz_figs = {}
        # Prevents a double-click/Enter from sending the same manual action twice.
        self.manual_action_debounce_timer = Utils.NonBlockingTimer(1.0)
        self.drawn_plan_grid = False
        self.last_cycle_number_drawn = -1
        self._draw_environment()
    def close(self):
        """Close the GUI window."""
        self.window.close()
    def update(self):
        """Poll the GUI once and translate the raw event into a command tuple.

        Returns (event, data) where event is one of: 'Exit'/'ExitMM',
        'Action', 'Run', 'Load', 'Pause', 'Abort', a cycle/action button key,
        'ESTOP', or (None, None) when nothing actionable happened.
        """
        event, values = self.window.read(timeout=20)
        # if event != "__TIMEOUT__":
        #     print(event)
        #     print(values)

        # At exit, check if we should keep marvelmind on
        if event is None or event == 'Exit':
            if self.config.OFFLINE_TESTING or self.config.SKIP_MARVELMIND:
                return 'Exit', None
            else:
                clicked_value = sg.popup_yes_no('Do you want to keep the Marvelmind running')
                if clicked_value == "Yes":
                    return "ExitMM", None
                else:
                    return 'Exit', None

        # Sending a manual action (via button or pressing enter)
        if event in ('Send Command', "Clear Error", '\r', '\n'):
            manual_action = None
            # Debounce: ignore repeats within the timer window.
            if self.manual_action_debounce_timer.check():
                if event == "Clear Error":
                    manual_action = (values['_TARGET_'], Action(ActionTypes.CLEAR_ERROR, "ClearError"))
                else:
                    manual_action = self._parse_manual_action(values)
            if manual_action is not None:
                # Clear the data field and re-arm the debounce timer.
                self.window['_ACTION_DATA_'].update("")
                self.manual_action_debounce_timer = Utils.NonBlockingTimer(1.0)
                return 'Action', manual_action

        # Pressing the run plan button
        if event == "_RUN_PLAN_":
            clicked_value = sg.popup_yes_no('Ready to start plan?')
            if clicked_value == "Yes":
                return "Run", None
        if event == "_PLAN_FILE_":
            return "Load", values["_PLAN_FILE_"]
        if event == "_PAUSE_PLAN_":
            return "Pause", None
        if event == "_ABORT_PLAN_":
            clicked_value = sg.popup_yes_no('Abort plan?')
            if clicked_value == "Yes":
                return "Abort", None
        if event in ["_INC_CYCLE_","_DEC_CYCLE_","_INC_ACTION_","_DEC_ACTION_"]:
            return event, None
        if event in ["_SET_CYCLE_", "_SET_ACTION_"]:
            return event, (values['_CYCLE_FIELD_'], values["_ACTION_FIELD_"])
        # The space bar doubles as a keyboard ESTOP shortcut.
        if event == "ESTOP" or event == " ":
            return "ESTOP", None
        return None, None
def update_status_panels(self, metrics):
for key, metric in metrics.items():
if key == 'mm' or key == 'base':
continue
elif key == 'plan':
self._update_plan_panel(metric)
else:
self._update_robot_panel(key, metric)
def _parse_manual_action(self, values):
target = values['_TARGET_']
action_type = ActionTypes(values['_ACTION_'])
data_str = values['_ACTION_DATA_']
name = 'ManualAction'
action = None
if action_type in [ActionTypes.MOVE_COARSE, ActionTypes.MOVE_REL, ActionTypes.MOVE_REL_SLOW,
ActionTypes.MOVE_FINE, ActionTypes.MOVE_FINE_STOP_VISION]:
data = data_str.split(',')
data = [x.strip() for x in data]
if len(data) != 3:
logging.warning("Invalid data: {}".format(data))
return None
action = MoveAction(action_type, name, data[0], data[1], data[2])
elif action_type in [ActionTypes.MOVE_CONST_VEL]:
data = data_str.split(',')
data = [x.strip() for x in data]
if len(data) != 4:
logging.warning("Invalid data: {}".format(data))
return None
action = MoveConstVelAction(action_type, name, data[0], data[1], data[2], data[3])
elif action_type in [ActionTypes.SET_POSE]:
data = data_str.split(',')
data = [x.strip() for x in data]
if len(data) != 3:
logging.warning("Invalid data: {}".format(data))
return None
action = SetPoseAction(action_type, name, data[0], data[1], data[2])
elif action_type == ActionTypes.MOVE_WITH_VISION:
data = data_str.split(',')
data = [x.strip() for x in data]
if len(data) != 3:
data = (0,0,0)
logging.warning("Assuming position of (0,0,0) for vision move")
action = MoveAction(action_type, name, data[0], data[1], data[2])
elif action_type == ActionTypes.WAIT:
data = data_str.split(',')
data = [x.strip() for x in data]
action = WaitAction(action_type, name, float(data[0]))
else:
action = Action(action_type, name)
return (target, action)
def _udpate_cycle_button_status(self, disabled):
self.window['_INC_CYCLE_'].update(disabled=disabled)
self.window['_DEC_CYCLE_'].update(disabled=disabled)
self.window['_INC_ACTION_'].update(disabled=disabled)
self.window['_DEC_ACTION_'].update(disabled=disabled)
self.window['_CYCLE_FIELD_'].update(disabled=disabled)
self.window['_ACTION_FIELD_'].update(disabled=disabled)
self.window['_SET_CYCLE_'].update(disabled=disabled)
self.window['_SET_ACTION_'].update(disabled=disabled)
def _update_plan_button_status(self, plan_status):
if plan_status == PlanStatus.NONE:
self.window['_RUN_PLAN_'].update(text='Run', disabled=True)
self.window['_LOAD_PLAN_'].update(disabled=False)
self.window['_PAUSE_PLAN_'].update(disabled=True)
self.window['_ABORT_PLAN_'].update(disabled=True)
self._udpate_cycle_button_status(disabled=True)
elif plan_status == PlanStatus.LOADED or plan_status == PlanStatus.DONE:
self.window['_RUN_PLAN_'].update(text='Run', disabled=False)
self.window['_LOAD_PLAN_'].update(disabled=False)
self.window['_PAUSE_PLAN_'].update(disabled=True)
self.window['_ABORT_PLAN_'].update(disabled=True)
self._udpate_cycle_button_status(disabled=False)
elif plan_status == PlanStatus.RUNNING:
self.window['_RUN_PLAN_'].update(text='Run', disabled=True)
self.window['_LOAD_PLAN_'].update(disabled=True)
self.window['_PAUSE_PLAN_'].update(disabled=False)
self.window['_ABORT_PLAN_'].update(disabled=False)
self._udpate_cycle_button_status(disabled=True)
elif plan_status == PlanStatus.PAUSED:
self.window['_RUN_PLAN_'].update(text='Resume', disabled=False)
self.window['_LOAD_PLAN_'].update(disabled=True)
self.window['_PAUSE_PLAN_'].update(disabled=True)
self.window['_ABORT_PLAN_'].update(disabled=False)
self._udpate_cycle_button_status(disabled=False)
elif plan_status == PlanStatus.ABORTED:
self.window['_RUN_PLAN_'].update(text='Restart', disabled=False)
self.window['_LOAD_PLAN_'].update(disabled=False)
self.window['_PAUSE_PLAN_'].update(disabled=True)
self.window['_ABORT_PLAN_'].update(disabled=True)
self._udpate_cycle_button_status(disabled=False)
else:
logging.warning("Unhandled plan status for button state: {}".format(plan_status))
    def _update_plan_panel(self, status_dict):
        """Render the plan status dict into the '_PLAN_STATUS_' panel.

        Also syncs the plan-control buttons to the reported plan state. A
        malformed dict is displayed raw instead of crashing the GUI.
        """
        status_str = "Plan is not running"
        color_str = STATUS_PANEL_BAD_COLOR
        if status_dict:
            try:
                self._update_plan_button_status(status_dict['status'])
                status_str = ""
                # e.g. "PlanStatus.RUNNING" -> "RUNNING"
                plan_state_str = "{}".format(status_dict['status']).split('.')[1]
                status_str += "Plan status: {}\n".format(plan_state_str)
                status_str += "Plan filename: {}\n".format(status_dict['filename'])
                status_str += "Next cycle num: {}\n".format(status_dict['next_cycle_number'])
                status_str += "Idle bots: {}\n".format(status_dict['idle_bots'])
                for id, data in status_dict['robots'].items():
                    needs_restart_str = ''
                    if data['needs_restart']:
                        needs_restart_str = "(Needs Restart)"
                    status_str += "{}{}:\n".format(id, needs_restart_str)
                    status_str += " Cycle id: {}\n".format(data["cycle_id"])
                    status_str += " Action id: {}\n".format(data["action_id"])
                    status_str += " Action name: {}\n".format(data["action_name"])
                    status_str += " Tile Coordinate: {}\n".format(data["tile_coordinate"])
                    status_str += " Vision offset: {}\n".format(data["vision_offset"])
                # Set panel coloring based on state
                if plan_state_str == "PAUSED" or plan_state_str == "ABORTED":
                    color_str = STATUS_PANEL_BAD_COLOR
                elif plan_state_str != "NONE":
                    color_str = STATUS_PANEL_OK_COLOR
            except Exception as e:
                # Deliberate catch-all: show whatever we received instead of crashing.
                status_str = "Bad dict: " + str(status_dict)
        self.window['_PLAN_STATUS_'].update(status_str, background_color=color_str)
    def _update_robot_panel(self, robot_id, status_dict):
        """Render one robot's status dict into its panel and the graph.

        Updates the text panel, the robot marker, the last-marvelmind pose
        marker, and (when 'current_move_data' is present) the move target. A
        malformed or 'offline' dict is displayed as-is instead of raising.
        """
        status_str = "Cannot get {} status".format(robot_id)
        color_str = STATUS_PANEL_BAD_COLOR
        if status_dict:
            try:
                # Angles arrive in radians and are shown/drawn in degrees.
                robot_pose = [status_dict['pos_x'], status_dict['pos_y'], math.degrees(status_dict['pos_a'])]
                last_mm_pose = [status_dict['last_mm_x'],status_dict['last_mm_y'], math.degrees(status_dict['last_mm_a']), status_dict['last_mm_used']]
                status_str = ""
                status_str += "Position: [{0:.3f} m, {1:.3f} m, {2:.2f} deg]\n".format(robot_pose[0], robot_pose[1], robot_pose[2])
                status_str += "Last MM Pose: [{:.3f} m, {:.3f} m, {:.2f} deg]\n".format(
                    last_mm_pose[0], last_mm_pose[1], last_mm_pose[2])
                status_str += "Velocity: [{0:.3f} m/s, {1:.3f} m/s, {2:.2f} deg/s]\n".format(status_dict['vel_x'],status_dict['vel_y'], math.degrees(status_dict['vel_a']))
                status_str += "Vision Pose : [{0:.3f} m, {1:.3f} m, {2:.2f} deg]\n".format(status_dict['vision_x'],status_dict['vision_y'], math.degrees(status_dict['vision_a']))
                status_str += "Camera Raw Pose : [{0:.3f} m, {1:.3f} m, {2:.2f} deg]\n".format(status_dict['cam_pose_x'],status_dict['cam_pose_y'], math.degrees(status_dict['cam_pose_a']))
                status_str += "Cam Status: Both-{} | Side-{} | Rear-{}\n".format(status_dict["cam_both_ok"],status_dict["cam_side_ok"], status_dict["cam_rear_ok"])
                status_str += "Raw Camera px: Side [{:d}, {:d}] Rear: [{:d}, {:d}]\n".format( \
                    int(status_dict['cam_side_u']),int(status_dict['cam_side_v']), int(status_dict['cam_rear_u']), int(status_dict['cam_rear_v']))
                status_str += "Localization total confidence: {:.1f}%\n".format(status_dict['localization_total_confidence']*100)
                status_str += " Axes confidence: [{:.1f}%, {:.1f}%, {:.1f}%]\n".format(
                    status_dict['localization_confidence_x']*100,status_dict['localization_confidence_y']*100,status_dict['localization_confidence_a']*100)
                status_str += "Localization position uncertainty: {:.2f}\n".format(status_dict['last_position_uncertainty'])
                status_str += "Controller timing: {} ms\n".format(status_dict['controller_loop_ms'])
                status_str += "Position timing: {} ms\n".format(status_dict['position_loop_ms'])
                status_str += "Camera timing: {} ms\n".format(status_dict['cam_loop_ms'])
                status_str += "Current action: {}\n".format(status_dict['current_action'].split('.')[-1])
                status_str += "Motion in progress: {}\n".format(status_dict["in_progress"])
                status_str += "Has error: {}\n".format(status_dict["error_status"])
                status_str += "Counter: {}\n".format(status_dict['counter'])
                # Also update the visualization position
                self._update_robot_viz_position(robot_id, robot_pose)
                self._update_last_mm_pose(robot_id, last_mm_pose)
                # If there is target position data populated, draw the target too
                if 'current_move_data' in status_dict.keys():
                    self._update_target_viz_position(robot_id, robot_pose, status_dict['current_move_data'])
                color_str = STATUS_PANEL_OK_COLOR
                if status_dict["error_status"]:
                    color_str = STATUS_PANEL_BAD_COLOR
            except Exception as e:
                # Deliberate catch-all so a bad status payload never kills the GUI.
                if "offline" in str(status_dict):
                    status_str = str(status_dict)
                else:
                    status_str = "Bad dict: " + str(status_dict)
        self.window['_{}_STATUS_'.format(robot_id.upper())].update(status_str, background_color=color_str)
def _update_robot_viz_position(self, robot_id, robot_pose):
if robot_id in self.viz_figs.keys():
for f in self.viz_figs[robot_id]:
self.window['_GRAPH_'].DeleteFigure(f)
self.viz_figs[robot_id] = self._draw_robot(robot_pose, use_target_color=False)
def _update_target_viz_position(self, robot_id, robot_pose, target_pose):
viz_key = "{}_target".format(robot_id)
if viz_key in self.viz_figs.keys():
for f in self.viz_figs[viz_key]:
self.window['_GRAPH_'].DeleteFigure(f)
if robot_pose and target_pose:
target_line = self.window['_GRAPH_'].draw_line(point_from=robot_pose[:2], point_to=target_pose[:2], color='yellow', width=2)
self.viz_figs[viz_key] = self._draw_robot(target_pose, use_target_color=True)
self.viz_figs[viz_key].append(target_line)
def _update_last_mm_pose(self, robot_id, last_mm_pose):
viz_key = "{}_last_mm".format(robot_id)
if viz_key in self.viz_figs.keys():
for f in self.viz_figs[viz_key]:
self.window['_GRAPH_'].DeleteFigure(f)
used = last_mm_pose[3]
if used:
color = 'green'
thickness = 2
else:
color = 'red'
thickness = 2
global_pose_2d = np.array(last_mm_pose[:2])
angle = last_mm_pose[2]
viz_handles = []
direction_pt = np.array([self.config.robot_direction_indicator_length, 0])
direction_arrow_x = self.config.robot_direction_indicator_arrow_length*math.cos(math.radians(self.config.robot_direction_indicator_arrow_angle))
direction_arrow_y = self.config.robot_direction_indicator_arrow_length*math.sin(math.radians(self.config.robot_direction_indicator_arrow_angle))
direction_arrow_left = direction_pt + np.array([-direction_arrow_x, direction_arrow_y])
direction_arrow_right = direction_pt + np.array([-direction_arrow_x, -direction_arrow_y])
global_direction_pt = Utils.TransformPos(direction_pt, global_pose_2d, angle)
viz_handles.append(self.window['_GRAPH_'].draw_line(point_from=list(global_pose_2d), point_to=list(global_direction_pt), color=color, width=thickness))
global_direction_arrow_left = Utils.TransformPos(direction_arrow_left, global_pose_2d, angle)
viz_handles.append(self.window['_GRAPH_'].draw_line(point_from=list(global_direction_pt), point_to=list(global_direction_arrow_left), color=color, width=thickness))
global_direction_arrow_right = Utils.TransformPos(direction_arrow_right, global_pose_2d, angle)
viz_handles.append(self.window['_GRAPH_'].draw_line(point_from=list(global_direction_pt), point_to=list(global_direction_arrow_right), color=color, width=thickness))
self.viz_figs[viz_key] = viz_handles
def _draw_robot(self, robot_pose, use_target_color):
robot_line_color = 'black'
robot_line_thickness = 2
direction_line_color = 'red'
direction_line_thickness = 1
if use_target_color:
robot_line_color = 'green'
robot_line_thickness = 2
direction_line_color = 'tomato'
direction_line_thickness = 1
robot_global_pos_2d = np.array([robot_pose[0],robot_pose[1]])
robot_angle = robot_pose[2]
robot_fig_handles = []
# Robot chassis points in robot frame
chassis_fl = np.array([self.config.robot_rotation_center_offset, self.config.robot_chassis_size/2.0])
chassis_fr = np.array([self.config.robot_rotation_center_offset, -self.config.robot_chassis_size/2.0])
chassis_bl = np.array([self.config.robot_rotation_center_offset - self.config.robot_chassis_size, self.config.robot_chassis_size/2.0])
chassis_br = np.array([self.config.robot_rotation_center_offset - self.config.robot_chassis_size, -self.config.robot_chassis_size/2.0])
# Tile points in robot frame
tile_bl = -self.config.tile_to_robot_offset
tile_br = tile_bl + np.array([0, -self.config.tile_size_width_meters])
tile_fl = tile_bl + np.array([self.config.tile_size_height_meters, 0])
tile_fr = tile_br + np.array([self.config.tile_size_height_meters, 0])
tile_front_center = (tile_fl + tile_fr) / 2.0
# Robot direction indicator
scale_factor = 0.7
direction_pt = np.array([scale_factor*self.config.robot_direction_indicator_length, 0])
direction_arrow_x = scale_factor*self.config.robot_direction_indicator_arrow_length*math.cos(math.radians(self.config.robot_direction_indicator_arrow_angle))
direction_arrow_y = scale_factor*self.config.robot_direction_indicator_arrow_length*math.sin(math.radians(self.config.robot_direction_indicator_arrow_angle))
direction_arrow_left = direction_pt + np.array([-direction_arrow_x, direction_arrow_y])
direction_arrow_right = direction_pt + np.array([-direction_arrow_x, -direction_arrow_y])
# Collect, transform, and draw chassis points
chassis_points_robot_frame = [chassis_fl, chassis_fr, chassis_br, chassis_bl] # order matters for drawing
chassis_points_global_frame = []
for pt in chassis_points_robot_frame:
global_pt = Utils.TransformPos(pt, robot_global_pos_2d, robot_angle)
chassis_points_global_frame.append(global_pt)
for i in range(4):
start_pt = list(chassis_points_global_frame[i])
end_pt = list(chassis_points_global_frame[(i+1)%4])
robot_fig_handles.append(self.window['_GRAPH_'].draw_line(point_from=start_pt, point_to=end_pt, color=robot_line_color, width = robot_line_thickness))
# Collect, transform, and draw tile points
tile_points_robot_frame = [tile_bl, tile_br, tile_fr, tile_fl] # order matters for drawing
tile_points_global_frame = []
for pt in tile_points_robot_frame:
global_pt = Utils.TransformPos(pt, robot_global_pos_2d, robot_angle)
tile_points_global_frame.append(global_pt)
for i in range(4):
start_pt = list(tile_points_global_frame[i])
end_pt = list(tile_points_global_frame[(i+1)%4])
robot_fig_handles.append(self.window['_GRAPH_'].draw_line(point_from=start_pt, point_to=end_pt, color=robot_line_color, width = robot_line_thickness))
# Draw angle indicator line
angle_root_pt = Utils.TransformPos(tile_front_center, robot_global_pos_2d, robot_angle)
global_direction_pt = Utils.TransformPos(direction_pt, angle_root_pt, robot_angle)
robot_fig_handles.append(self.window['_GRAPH_'].draw_line(point_from=list(angle_root_pt), point_to=list(global_direction_pt), color=direction_line_color, width=direction_line_thickness))
global_direction_arrow_left = Utils.TransformPos(direction_arrow_left, angle_root_pt, robot_angle)
robot_fig_handles.append(self.window['_GRAPH_'].draw_line(point_from=list(global_direction_pt), point_to=list(global_direction_arrow_left), color=direction_line_color, width=direction_line_thickness))
global_direction_arrow_right = Utils.TransformPos(direction_arrow_right, angle_root_pt, robot_angle)
robot_fig_handles.append(self.window['_GRAPH_'].draw_line(point_from=list(global_direction_pt), point_to=list(global_direction_arrow_right), color=direction_line_color, width=direction_line_thickness))
return robot_fig_handles
def _draw_environment(self):
# Robot boundaries
self.viz_figs["boundaires"] = self.window['_GRAPH_'].draw_rectangle(self.config.robot_boundaries[0], self.config.robot_boundaries[1], line_color='red')
# Domino field boundaries
bottom_left = self.config.domino_field_origin
rect_width_height = Utils.TransformPos(np.array([self.config.field_width, self.config.field_height]), [0,0], self.config.domino_field_angle)
top_left = (bottom_left[0], bottom_left[1]+rect_width_height[1])
bottom_right = (bottom_left[0] + rect_width_height[0], bottom_left[1])
self.viz_figs["field"] = self.window['_GRAPH_'].draw_rectangle(top_left, bottom_right, line_color='green', line_width=2)
# Base station
base_station_top_left = (self.config.base_station_boundaries[0][0], self.config.base_station_boundaries[1][1])
base_station_bottom_right = (self.config.base_station_boundaries[1][0], self.config.base_station_boundaries[0][1])
self.viz_figs["base"] = self.window['_GRAPH_'].draw_rectangle(base_station_top_left, base_station_bottom_right, line_color='blue')
# X axis
left_side = (self.config.graph_bottom_left[0], 0)
right_side = (self.config.graph_top_right[0], 0)
self.viz_figs["xaxis"] = self.window['_GRAPH_'].draw_line(left_side, right_side, color="black", width=2)
x_text_location = (right_side[0] - self.config.axes_label_offset, right_side[1] + self.config.axes_label_offset)
self.viz_figs["xaxis_label"] = self.window['_GRAPH_'].draw_text("X", x_text_location)
x_ticks = [i for i in range(0, int(right_side[0]), self.config.axes_tick_spacing)] + [i for i in range(0, int(left_side[0]), -self.config.axes_tick_spacing)]
x_ticks = np.sort(np.unique(np.array(x_ticks)))
for val in x_ticks:
bottom = (val, -self.config.axes_tick_size/2)
top = (val, self.config.axes_tick_size/2)
self.viz_figs["xtick_{}".format(val)] = self.window['_GRAPH_'].draw_line(bottom, top, color="black")
label_location = (bottom[0] + self.config.axes_label_offset, bottom[1])
self.viz_figs["xtick_label_{}".format(val)] = self.window['_GRAPH_'].draw_text("{}".format(val), label_location)
# Y axis
bottom_side = (0, self.config.graph_bottom_left[1])
top_side = (0, self.config.graph_top_right[1])
self.viz_figs["yaxis"] = self.window['_GRAPH_'].draw_line(bottom_side, top_side, color="black", width=2)
y_text_location = (top_side[0] + self.config.axes_label_offset, top_side[1] - self.config.axes_label_offset)
self.viz_figs["yaxis_label"] = self.window['_GRAPH_'].draw_text("Y", y_text_location)
y_ticks = [i for i in range(0, int(top_side[1]), self.config.axes_tick_spacing)] + [i for i in range(0, int(bottom_side[1]), -self.config.axes_tick_spacing)]
y_ticks = np.sort(np.unique(np.array(y_ticks)))
for val in y_ticks:
left = (-self.config.axes_tick_size/2, val)
right = (self.config.axes_tick_size/2, val)
self.viz_figs["ytick_{}".format(val)] = self.window['_GRAPH_'].draw_line(left, right, color="black")
label_location = (right[0], right[1]+self.config.axes_label_offset)
self.viz_figs["ytick_label_{}".format(val)] = self.window['_GRAPH_'].draw_text("{}".format(val), label_location)
def update_plan_display(self, plan_info):
if plan_info is None:
return
plan = plan_info[0]
plan_status = plan_info[1]
cycle_number = plan_info[2]
if not self.drawn_plan_grid:
rect_width_height = Utils.TransformPos(np.array([self.config.field_width, self.config.field_height]), [0,0], self.config.domino_field_angle)
bottom_left = self.config.domino_field_origin
bottom_right = (bottom_left[0], bottom_left[1]+rect_width_height[1])
top_left = (bottom_left[0] + rect_width_height[0], bottom_left[1])
for i in range(1, self.config.num_tiles_width):
dx = Utils.TransformPos(np.array([self.config.tile_size_width_meters, 0]), [0,0], self.config.domino_field_angle)
p1 = list(bottom_left + dx * i)
p2 = list(top_left + dx * i)
self.viz_figs["plan_vert_{}".format(i)] = self.window['_GRAPH_'].draw_line(p1, p2, color = "grey17")
for i in range(1, self.config.num_tiles_height):
dx = Utils.TransformPos(np.array([0, self.config.tile_size_height_meters]), [0,0], self.config.domino_field_angle)
p1 = list(bottom_left + dx * i)
p2 = list(bottom_right + dx * i)
self.viz_figs["plan_hz_{}".format(i)] = self.window['_GRAPH_'].draw_line(p1, p2, color = "grey17")
if type(plan) is SubsectionPlan:
origin = self.config.domino_field_origin
bottom_left = Utils.TransformPos(np.array(plan.field.tiles[-1].getPlacementPositionInMeters()), origin, self.config.domino_field_angle)
top_right = Utils.TransformPos(np.array(plan.field.tiles[0].getPlacementPositionInMeters())+
np.array((self.config.tile_size_width_meters,self.config.tile_size_height_meters)), origin, self.config.domino_field_angle)
self.viz_figs["subfield"] = self.window['_GRAPH_'].draw_rectangle(bottom_left, top_right, line_color='green', line_width=3)
self.drawn_plan_grid = True
if cycle_number != self.last_cycle_number_drawn:
tile_color = "red"
if plan_status in [PlanStatus.RUNNING, PlanStatus.PAUSED]:
tile_color = "blue"
if cycle_number <= len(plan.field.tiles):
if cycle_number < 0:
cycle_number = 0
if "plan_marker" in self.viz_figs:
self.window['_GRAPH_'].DeleteFigure(self.viz_figs["plan_marker"])
tile = plan.field.tiles[cycle_number]
location_field_frame = np.array(tile.getPlacementPositionInMeters()) + np.array([self.config.tile_size_width_meters, self.config.tile_size_height_meters])/2.0
location_global_frame = list(Utils.TransformPos(location_field_frame, self.config.domino_field_origin, self.config.domino_field_angle))
self.viz_figs["plan_marker"] = self.window['_GRAPH_'].draw_circle(location_global_frame, 0.25, fill_color=tile_color)
self.last_cycle_number_drawn = cycle_number
class Master:
    """Top-level coordinator that ties the runtime manager to the command GUI."""

    def __init__(self, cfg, gui_handle):
        # cfg        - global configuration object
        # gui_handle - CmdGui instance used for all display and user input
        self.cfg = cfg
        self.cmd_gui = gui_handle
        # When True, the marvelmind system is left running at shutdown
        self.keep_mm_running = False
        logging.info("Initializing Master")
        self.runtime_manager = RuntimeManager(self.cfg)
        self.runtime_manager.initialize()

    def loop(self):
        """Run the main update loop until the user requests exit, then clean up."""
        ready_to_exit = False
        while not ready_to_exit:
            try:
                self.runtime_manager.update()
                ready_to_exit = self.update_gui_and_handle_input()
            except Exception as e:
                # Keep the GUI responsive on unexpected errors, but pause any
                # running plan so the robots don't keep executing blindly
                logging.warning("Unhandled exception {}".format(str(e)))
                if self.runtime_manager.get_plan_status() is PlanStatus.RUNNING:
                    self.runtime_manager.set_plan_status(PlanStatus.PAUSED)
        # Clean up whenever loop exits
        self.runtime_manager.shutdown(self.keep_mm_running)
        self.cmd_gui.close()

    def update_gui_and_handle_input(self):
        """Process one round of GUI events and refresh the status/plan displays.

        Returns True when the application should exit, False otherwise.
        """
        # Handle any input from gui
        event_type, event_data = self.cmd_gui.update()
        if event_type == "Exit":
            return True
        if event_type == "ExitMM":
            # Exit but leave the marvelmind system running
            self.keep_mm_running = True
            return True
        if event_type == "Run":
            if self.runtime_manager.get_plan_status() == PlanStatus.PAUSED:
                logging.info("PLAN RESUMED")
            self.runtime_manager.set_plan_status(PlanStatus.RUNNING)
        if event_type == "Load":
            # Bugfix: previously `event_data is not ''` compared identity with
            # a string literal (a SyntaxWarning on modern Python and fragile);
            # use a value comparison instead.
            if event_data != '':
                self.runtime_manager.load_plan(event_data)
        if event_type == "Pause":
            logging.info("PLAN PAUSED")
            self.runtime_manager.estop()
            self.runtime_manager.set_plan_status(PlanStatus.PAUSED)
        if event_type == "Abort":
            logging.warning("PLAN ABORTED")
            self.runtime_manager.estop()
            self.runtime_manager.set_plan_status(PlanStatus.ABORTED)
        if event_type == "Action":
            self.runtime_manager.run_manual_action(event_data)
        if event_type == "_INC_CYCLE_":
            self.runtime_manager.increment_robot_cycle("robot1")
        if event_type == "_DEC_CYCLE_":
            self.runtime_manager.decrement_robot_cycle("robot1")
        if event_type == "_INC_ACTION_":
            self.runtime_manager.increment_robot_action("robot1")
        if event_type == "_DEC_ACTION_":
            self.runtime_manager.decrement_robot_action("robot1")
        if event_type == "_SET_CYCLE_":
            self.runtime_manager.set_cycle("robot1", int(event_data[0]))
        if event_type == "_SET_ACTION_":
            # Action index comes from the second element of event_data
            self.runtime_manager.set_action("robot1", int(event_data[1]))
        if event_type == "ESTOP":
            self.runtime_manager.estop()
            if self.runtime_manager.get_plan_status() == PlanStatus.RUNNING:
                self.runtime_manager.set_plan_status(PlanStatus.PAUSED)
                logging.warning("Pausing plan due to ESTOP event")
        # Get metrics and update the displayed info
        metrics = self.runtime_manager.get_all_metrics()
        self.cmd_gui.update_status_panels(metrics)
        self.cmd_gui.update_plan_display(self.runtime_manager.get_plan_info())
        return False
def configure_logging(path):
    """Configure the root logger to write INFO+ to both the console and master.log.

    path - directory for the log file; created (including missing parents) if
           it does not exist. The log file is truncated on each run ('w+').
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    # makedirs handles missing parent directories and avoids the
    # check-then-create race of the old isdir() + mkdir() pair
    os.makedirs(path, exist_ok=True)
    file_handler = logging.FileHandler(os.path.join(path, "master.log"), 'w+')
    file_handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
    root_logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter("%(message)s"))
    root_logger.addHandler(console_handler)
if __name__ == '__main__':
    # Build the configuration and GUI first - the GUI must exist before logging
    # is configured so its output pane captures log messages correctly.
    app_config = config.Config()
    command_gui = CmdGui(app_config)
    configure_logging(app_config.log_folder)
    # Hand everything to the master controller and run until exit
    Master(app_config, command_gui).loop()
================================================
FILE: src/master/README.md
================================================
# **Warning! This is probably out of date and might be inaccurate. Use at your own risk.**
# Master
This is the top level 'Master' software for controlling the Domino Robot System. It is the central hub for communication and control of all other robots, provides a GUI for visualization and debugging, and handles executing the top level plan to set up all the dominos.
There are two programs that can be run within the master software:
- `MasterMain.py` is the main program that runs the GUI and controls everything and is the one the rest of this README will primarily focus on.
- `FieldPlanner.py` will generate a plan using an input image, available domino colors, and many other configuration parameters (all defined in `config.py`). This plan can then be saved to a file (in the `src/master/plans/` folder) and loaded at a later point for execution. You probably shouldn't ever need to run this.
# Table of Contents
- [**Warning! This is probably out of date and might be inaccurate. Use at your own risk.**](#warning-this-is-probably-out-of-date-and-might-be-inaccurate-use-at-your-own-risk)
- [Master](#master)
- [Table of Contents](#table-of-contents)
- [Installation](#installation)
- [Usage](#usage)
- [Robot Setup](#robot-setup)
- [Running MasterMain](#running-mastermain)
- [Using the MasterMain GUI](#using-the-mastermain-gui)
- [Window Overview](#window-overview)
- [Running Commands](#running-commands)
- [Summary of available commands](#summary-of-available-commands)
# Installation
1. Ensure you have a version of Python 3 installed by installing from https://www.python.org/. This project was developed using Python 3.7, but any recent Python version should be fine. I've only used it on Windows 10 so the instructions here are for Windows. It may or may not work on other operating systems.
2. Open a new terminal window and navigate to the folder where you downloaded the DominoRobot repository
```
cd path\to\DominoRobot
```
3. Create a python virtual environment in the DominoRobot folder
```
python -m venv venv
```
4. Activate the virtual environment (your terminal should show `(venv)` at the front of the line to indicate it is active):
```
.\venv\Scripts\activate
```
5. Install required python packages in the virtual environment:
```
pip install -r src\master\requirements.txt
```
# Usage
## Robot Setup
Any robot you wish to connect to and control must be setup and running before the master program can do anything.
1. Ensure you are connected to the DominoNet wifi. Talk to Alex if you need help getting set up on the network.
2. Power on the robot by plugging in both sets of batteries and turning on all power switches. You can leave the hardware ESTOP button pressed for now while finishing the setup, but just make sure you release it before trying to send movement commands or it won't go anywhere!
3. Connect to the robot over SSH.
- If this is your first time using SSH, you may need to install an SSH program like [PuTTY](https://www.putty.org/) or setup [Windows SSH](https://www.pugetsystems.com/labs/hpc/How-To-Use-SSH-Client-and-Server-on-Windows-10-1470/)
- Figure out the IP address of the robot you are interested in connecting to (If this list becomes out of date, the up to date info will be in the `ip_map` variable in `config.py`)
| Target | IP |
|---------------|--------------|
| robot1 | 192.168.1.5 |
    - SSH to `pi@<robot IP>` (using the IP from the table above) and enter the password to connect
4. Once you are connected to the robot move to the DominoRobot directory:
```
cd DominoRobot
```
5. Ensure that the robot has the latest version of the correct software
```
git checkout master && git pull
```
6. Recompile the robot software (this could take a minute or two if it has to compile a lot of stuff)
```
make
```
7. Start the robot software
```
run_robot
```
8. This should start printing out a bunch of info on the screen and the robot should be ready to connect to the master (See the next section on [Running MasterMain](#running-mastermain) ). Note that you must keep this terminal window running as closing it will stop the robot software. If at any point you want to stop the software, just use `Ctrl+C` to stop it. You can then restart it with `run_robot` again.
9. **IMPORTANT**: When you are done and want to shut everything down, make sure you shut down the raspberry pi by running the shutdown command BEFORE turning off the power. If you just turn off the power without shutting down, there is a possibility that the SD card can get corrupted.
```
sudo shutdown -h now
```
## Running MasterMain
1. Open a new terminal window and navigate to the folder where you downloaded the DominoRobot repository (if you aren't there already from the installation step)
```
cd path\to\DominoRobot
```
2. Ensure that your virtual environment is active (your terminal should show `(venv)` at the front of the line to indicate it is active):
```
.\venv\Scripts\activate
```
3. Run the program:
```
python src\master\MasterMain.py
```
4. It may take a second for the window to show up, but as long as a window pops up and the commandline doesn't spit out a bunch of errors, you should be good to go. You can stop the program at any time with the X in the top right of the window, or using `Ctrl+C` on the commandline.
5. If you need to stop and re-run the program, you only need to re-run step 3 above as long as you don't close the terminal window (In most terminals you can just push the up arrow to get back the previous command you ran).
## Using the MasterMain GUI
### Window Overview
The window will look something like this when you launch it:

On the left are the status panels for each component connected to the system. They are all red right now because nothing is connected and will turn green and display information when that component is active, like this:

In the middle is a display that shows where the robot and other objects are in the scene. As components are connected and move around, this will update to show their location. On the bottom of the middle section are the control buttons which is your main way of interacting with the system:

On the right side of the window, the program prints out information about what it is doing. This information is also mirrored to the console and logged into `DominoRobot\log\master.log` (which is overwritten on each new run)
### Running Commands
In order to send a command to a robot a few prerequisites must be met.
1. The robot has to be connected to the master (i.e. the box should be green and the counter value should be changing). If you followed the [setup](##robot-setup) steps above, this probably should be the case as soon as you run `MasterMain` on your computer. If not, you'll have to debug why (check that your SSH connection is okay and that the robot program hasn't crashed)
2. The robot is programmed to only run a single command at a time, so it will ignore any commands that are sent while another one is active, so make sure a previous command isn't still running. If there is no motion from the robot/lifter and the "Motion in progress" value is False, it should let you run a command.
To actually run the command, you need 3 pieces of information.
1. The `Target` field is which robot/device to send the command to. Make sure you have the right target selected from the drop down menu.

2. The `Action` field is which action you would like to run on the target. See below for a summary of what the various actions do.

3. Some actions require some additional data to be specified. If additional data is needed for an action, you must specify it in the `Data` field by typing it in. See the table below for which actions require additional data. If an action does not require additional data any information (blank or otherwise) in the data field is ignored.
Once all the prerequisites are met and the required fields are filled in, you may use the `Enter` key or the `Send Command` button to run the command on the target. If everything is successful, you will see values change in the status panel of the target and text printed out in the pane on the right.
If something goes wrong, you can use the big red ESTOP button on the command window to send an emergency stop signal to all robots. Note that this is a software ESTOP and requires the master to be functional enough to send the ESTOP signal and the target functional enough to receive and act on the ESTOP signal. There are certain failure modes where this will not be the case and this button will not function (i.e. the master software crashes and you can't push the button). In that case, use the physical ESTOP buttons on the robots to cut power.
### Summary of available commands
| Name | Valid targets | Summary | Additional data|
|-------|---------------|---------|----------------|
|MOVE_COARSE | robot | Move the robot coarsely to an absolute position. | Comma separated x,y,a pose (Example: `1.0,2,0` would tell the robot to move to x=1, y=2, a=0). Units are meters, meters, radians. |
|MOVE_FINE | robot | Move the robot very precisely to an absolute position.| Comma separated x,y,a pose (Example: `0.1,-0.5,3.14` would tell the robot to move to x=0.1, y=-0.5, a=3.14). Units are meters, meters, radians.|
|MOVE_REL | robot |Move the robot coarsely to a position relative to its current position.|Comma separated x,y,a pose (Example: `1.0,2,0` would tell the robot to move to x=x_current+1, y=y_current+2, a=a_current+0). Units are meters, meters, radians. |
|MOVE_CONST_VEL |robot | Move the robot coarsely at a constant velocity for a specified time.|Comma separated x,y,a,t velocity and time (Example: `0.1,0.2,0,4` would tell the robot to move to at a velocity of x_vel=0.1, y_vel=0.2, a_vel=0 for t=4 seconds). Units are meters/sec, meters/sec, radians/sec, seconds. |
|NET | all |Check if the robot is connected to the network. | None |
|LOAD| robot | Run the tray loading sequence on the robot. *Note that the tray will pause in the "ready to load" position until a LOAD_COMPLETE action is received*.| None |
|PLACE| robot | Run the tray placement sequence on the robot.| None|
|TRAY_INIT | robot | Run the tray initialization sequence on the robot. | None|
|LOAD_COMPLETE|robot| Indicate to the robot that the base station has completed the loading process. | None |
|ESTOP|all| Tell the target to immediately stop any current action/motion. | None |
|WAIT_FOR_LOCALIZATION| robot | Tell the target to wait for the localization confidence to reach a good level. | None |
|CLEAR_ERROR |all| Clear an error state that is preventing the target from starting a new action.| None |
================================================
FILE: src/master/RobotClient.py
================================================
# This is a basic TCP client for a robot that wraps up sending
# and receiving various commands
import socket
import select
import json
import time
import logging
import copy
# TCP port that the robot / base station servers listen on
PORT = 8123
# Timeout used when establishing the TCP connection (see TcpClient.__init__)
NET_TIMEOUT = 0.1 # seconds
# Framing characters wrapping every message on the wire
START_CHAR = "<"
END_CHAR = ">"
class TcpClient:
    """Minimal non-blocking TCP client that frames messages with START_CHAR/END_CHAR."""

    def __init__(self, ip, port, timeout):
        """Open a connection to ip:port, then switch the socket to non-blocking.

        timeout applies only to the initial connect. Raises OSError if the
        connection cannot be established (create_connection raises on failure,
        so the old `if not socket` check - which actually tested the imported
        module object - was dead code and has been removed).
        """
        self.socket = socket.create_connection((ip, port), timeout)
        self.socket.setblocking(False)

    def send(self, msg, print_debug=True):
        """Send msg (a string) wrapped in the framing characters.

        Raises RuntimeError if the connection is broken.
        """
        if print_debug:
            logging.info("TX: " + msg)
        msg_bytes = (START_CHAR + msg + END_CHAR).encode()
        totalsent = 0
        # Bugfix: compare against the encoded byte length; the character count
        # can differ from the byte count for non-ASCII payloads.
        while totalsent < len(msg_bytes):
            sent = 0
            try:
                sent = self.socket.send(msg_bytes[totalsent:])
            except socket.error:
                # Non-blocking send may refuse; treated as no progress below
                pass
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent

    def recieve(self, timeout=0.1, print_debug=True):
        """Poll for one framed message and return its contents without framing.

        Returns "" if no complete message arrives within timeout seconds.
        Raises RuntimeError if the connection is broken.
        (Method name kept as 'recieve' [sic] for compatibility with callers.)
        """
        new_msg = ""
        new_msg_ready = False
        start_time = time.time()
        while not new_msg_ready and time.time() - start_time < timeout:
            # Get new data; the socket is non-blocking, so retry until timeout
            try:
                data = self.socket.recv(2048)
            except socket.error:
                continue
            if data == b'':
                raise RuntimeError("socket connection broken")
            # Decode data and accumulate until a full <...> frame is seen
            new_str = data.decode(encoding='UTF-8', errors='strict')
            start_idx = new_str.find(START_CHAR)
            end_idx = new_str.find(END_CHAR)
            if start_idx != -1 and end_idx != -1:
                # Complete frame contained within this chunk
                new_msg = new_str[start_idx + 1:end_idx]
                new_msg_ready = True
            elif start_idx != -1:
                # Frame starts here; keep accumulating on later iterations
                new_msg += new_str[start_idx + 1:]
            elif end_idx != -1:
                # End of a frame started in a previous chunk
                new_msg += new_str[:end_idx]
                new_msg_ready = True
            else:
                # Middle of a frame
                new_msg += new_str
        if print_debug and new_msg:
            logging.info("RX: " + new_msg)
        if not new_msg_ready:
            # Timed out without a complete frame
            new_msg = ""
        return new_msg
class ClientBase:
    # Base TCP client shared by the robot and base-station clients. Wraps a
    # TcpClient and implements the request/ack protocol plus a small queue for
    # messages that arrive out of order.
    def __init__(self, ip):
        # Connects immediately; TcpClient raises on failure.
        self.client = TcpClient(ip, PORT, NET_TIMEOUT)
        # Queue is used to help handle out of order messages received
        self.incoming_queue = []
    def wait_for_server_response(self, expected_msg_type, timeout=0.1, print_debug=True):
        """
        Waits for the specified time to get a reply from the server.
        Must set expected_msg_type to a string and this function will return the first message
        received matching that type. Other messages are added to the queue and checked the
        next time this function is called. If no matching message is received before timeout,
        this function returns None.
        """
        # First, check the existing queue to see if the message we are looking for is there
        for ix in range(len(self.incoming_queue)):
            # Since type field existence is checked before insertion into queue, it should
            # always be safe to check the field directly
            if self.incoming_queue[ix]['type'] == expected_msg_type:
                # Copy before deleting so the returned dict is detached from the
                # queue slot; returning immediately keeps the index loop safe
                # despite the del.
                msg_copy = copy.copy(self.incoming_queue[ix])
                del self.incoming_queue[ix]
                return msg_copy
        # If there was nothing in the queue matching the expected type, try to receive a message
        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                incoming_msg = self.client.recieve(timeout=0.1, print_debug=print_debug)
            except socket.timeout:
                break
            if incoming_msg:
                try:
                    new_msg = json.loads(incoming_msg)
                    if 'type' not in new_msg:
                        # Untyped messages cannot be routed; drop them
                        logging.warning("Discarded msg for missing type field: {}".format(new_msg))
                        continue
                    elif new_msg['type'] == expected_msg_type:
                        return new_msg
                    else:
                        # Not what we are waiting for; park it for a later call
                        self.incoming_queue.append(new_msg)
                except:
                    logging.warning("Error decoding json: {}".format(incoming_msg))
                    break
        # Will get here if timeout is reached or decode error happens
        return None
    def send_msg_and_wait_for_ack(self, msg, print_debug=False, timeout=0.1):
        """
        Sends msg and ensures that the correct ack is returned.
        Logs a warning if the ack is not received or an incorrect ack is received.
        Raises ValueError if no valid ack arrives after all retries.
        """
        self.client.send(json.dumps(msg,separators=(',',':')), print_debug=print_debug) # Make sure json dump is compact for transmission
        num_wait_checks = 3
        counter = 0
        resp_ok = False
        while counter < num_wait_checks and not resp_ok:
            counter += 1
            resp = self.wait_for_server_response(expected_msg_type='ack', print_debug=print_debug)
            if not resp:
                logging.warning("Did not recieve ack. Try {} of {}".format(counter, num_wait_checks))
                continue
            else:
                # NOTE(review): wait_for_server_response already filters on
                # type == 'ack', so this branch looks redundant - kept as a
                # defensive check.
                if resp['type'] != 'ack':
                    logging.warning("Expecting return type ack. Try {} of {}".format(counter, num_wait_checks))
                    continue
                elif resp['data'] != msg['type']:
                    # The ack's data field echoes the type of the message being acked
                    logging.warning("Incorrect ack type. Expecting: {}, Got: {}. Try {} of {}".format(
                        msg['type'], resp['data'], counter, num_wait_checks))
                    continue
            resp_ok = True
        if resp_ok:
            return resp
        else:
            raise ValueError("Bad/no response")
    def net_status(self):
        """ Check if the network connection is ok. Returns True/False, never raises. """
        msg = {'type': 'check'}
        status = False
        try:
            self.send_msg_and_wait_for_ack(msg)
            status = True
        except:
            # Any failure (send error, bad/missing ack) just means "not ok"
            pass
        finally:
            return status
    def request_status(self):
        """ Request status from server. Returns the status payload, or None on timeout. """
        msg = {'type' : 'status'}
        self.client.send(json.dumps(msg), print_debug=False)
        resp = self.wait_for_server_response(expected_msg_type='status', print_debug=False)
        if not resp:
            logging.warning("Did not recieve status response")
            return None
        return resp['data']
    def estop(self):
        """ Tell client to estop """
        msg = {'type': 'estop'}
        self.send_msg_and_wait_for_ack(msg)
class RobotClient(ClientBase):
    """Client for sending commands to a single robot over TCP.

    Every public method builds a small JSON command and blocks until the robot
    acknowledges it (ValueError is raised on a bad or missing ack, via
    send_msg_and_wait_for_ack).
    """

    def __init__(self, cfg, robot_id):
        super().__init__(cfg.ip_map[robot_id])
        self.robot_id = robot_id
        self.cfg = cfg

    def _send_cmd(self, msg_type, data=None):
        # Shared helper: assemble the command dict and wait for the matching ack
        msg = {'type': msg_type}
        if data is not None:
            msg['data'] = data
        self.send_msg_and_wait_for_ack(msg)

    def move(self, x, y, a):
        """ Tell robot to move to specific location """
        self._send_cmd('move', {'x': x, 'y': y, 'a': a})

    def move_rel(self, x, y, a):
        """ Tell robot to move to a relative location """
        self._send_cmd('move_rel', {'x': x, 'y': y, 'a': a})

    def move_rel_slow(self, x, y, a):
        """ Tell robot to move to a relative location but slowly """
        self._send_cmd('move_rel_slow', {'x': x, 'y': y, 'a': a})

    def move_fine(self, x, y, a):
        """ Tell robot to move to a specific location with fine precision """
        self._send_cmd('move_fine', {'x': x, 'y': y, 'a': a})

    def move_fine_stop_vision(self, x, y, a):
        """ Tell robot to move to a specific location with fine precision but stop if vision markers are detected """
        self._send_cmd('move_fine_stop_vision', {'x': x, 'y': y, 'a': a})

    def move_with_vision(self, x, y, a):
        """ Tell robot to move to a location relative to the vision measurements from cameras"""
        self._send_cmd('move_vision', {'x': x, 'y': y, 'a': a})

    def move_const_vel(self, vx, vy, va, t):
        """ Tell robot to move at constant velocity for a specific amount of time"""
        self._send_cmd('move_const_vel', {'vx': vx, 'vy': vy, 'va': va, 't': t})

    def place(self):
        """ Tell robot to place pallet """
        self._send_cmd('place')

    def load(self):
        """ Tell robot to load pallet """
        self._send_cmd('load')

    def tray_init(self):
        """ Tell robot to initialize tray """
        self._send_cmd('init')

    def load_complete(self):
        """ Tell robot that base station load is complete """
        self._send_cmd('lc')

    def clear_error(self):
        """ Tell robot to clear an existing error """
        self._send_cmd('clear_error')

    def wait_for_localization(self):
        """ Tell robot to wait for localization"""
        self._send_cmd('wait_for_loc')

    def set_pose(self, x, y, a):
        """ Sets the pose of the robot explicity bypassing any localization code """
        self._send_cmd('set_pose', {'x': x, 'y': y, 'a': a})

    def toggle_vision_debug(self):
        """ Tell robot to toggle vision debug output"""
        self._send_cmd('toggle_vision_debug')

    def start_cameras(self):
        """ Tell robot to start the cameras"""
        self._send_cmd('start_cameras')

    def stop_cameras(self):
        """ Tell robot to stop the cameras"""
        self._send_cmd('stop_cameras')
class BaseStationClient(ClientBase):
    """Network client for commanding the domino base station."""

    def __init__(self, cfg):
        super().__init__(cfg.base_station_ip)
        self.cfg = cfg

    def load(self):
        """Tell the station to load the next set of dominos into the robot."""
        self.send_msg_and_wait_for_ack({'type': 'load'})
# Hacky Mocks to use for testing
class MockRobotClient:
    """Drop-in stand-in for RobotClient used for offline testing.

    Implements every command RobotClient exposes as a no-op. Several methods
    (tray_init, load_complete, toggle_vision_debug, start_cameras,
    stop_cameras, set_pose, move_with_vision, move_const_vel,
    move_fine_stop_vision) were previously missing, which made
    RobotInterface._setup_action_map() raise AttributeError when running
    with OFFLINE_TESTING enabled.
    """

    def __init__(self, cfg, robot_id):
        super().__init__()
        self.robot_id = robot_id
        self.cfg = cfg

    def move(self, x, y, a):
        pass

    def move_rel(self, x, y, a):
        pass

    def move_rel_slow(self, x, y, a):
        pass

    def move_fine(self, x, y, a):
        pass

    def move_fine_stop_vision(self, x, y, a):
        pass

    def move_with_vision(self, x, y, a):
        pass

    def move_const_vel(self, vx, vy, va, t):
        pass

    def set_pose(self, x, y, a):
        pass

    def place(self):
        pass

    def net_status(self):
        # Mock network is always up.
        return True

    def estop(self):
        pass

    def load(self):
        pass

    def tray_init(self):
        pass

    def load_complete(self):
        pass

    def request_status(self):
        # Canned status that looks like a healthy, idle robot.
        return {"in_progress": False, "pos_x": 1, "pos_y": 2, "pos_a": 0}

    def clear_error(self):
        pass

    def wait_for_localization(self):
        pass

    def toggle_vision_debug(self):
        pass

    def start_cameras(self):
        pass

    def stop_cameras(self):
        pass

    def toggle_distance(self):
        # Kept for backward compatibility with existing callers.
        pass
class MockBaseStationClient:
    """Drop-in stand-in for BaseStationClient used for offline testing."""

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

    def load(self):
        """No-op load."""
        pass

    def net_status(self):
        """Mock network is always up."""
        return True

    def estop(self):
        """No-op estop."""
        pass

    def request_status(self):
        """Return an empty status payload."""
        return {}
if __name__== '__main__':
    # Simple manual test: connect to robot1 and poll its status.
    import config
    from MasterMain import configure_logging

    cfg = config.Config()
    configure_logging(cfg.log_folder)
    r = RobotClient(cfg, 'robot1')
    time.sleep(1)
    r.request_status()
    #r.request_status()
    time.sleep(1)
    #r.request_status()
    #time.sleep(1)

    # Interactive constant-speed move loop, kept for reference:
    # while(True):
    #     speed = input("Input move speed [x,y,a]: ").strip().split(',')
    #     if len(speed) != 3:
    #         logging.info("Need to provide comma separated values.")
    #     else:
    #         x = float(speed[0])
    #         y = float(speed[1])
    #         a = float(speed[2])
    #         r.move(x,y,a)
    #         time.sleep(3)
================================================
FILE: src/master/Runtime.py
================================================
import time
import logging
import enum
import pprint
import pickle
import os
import json
import copy
import math
from numpy.core.defchararray import add
from MarvelMindHandler import MarvelmindWrapper, MockMarvelmindWrapper
from RobotClient import RobotClient, BaseStationClient, MockRobotClient, MockBaseStationClient
from FieldPlanner import ActionTypes, Action, TestPlan, RunFieldPlanning
from Utils import write_file, NonBlockingTimer, ActionValidator
class RobotInterface:
    """Master-side wrapper around a single robot's network client.

    Tracks connection state, the robot's latest status report, and which
    action the robot is currently executing. Drive it by calling update()
    periodically and run_action() to dispatch work.
    """

    def __init__(self, config, robot_id):
        self.comms_online = False          # True once the wifi client is connected
        self.robot_client = None           # RobotClient (or mock) once online
        self.config = config
        self.robot_id = robot_id
        self.last_status_time = 0          # time.time() of the last status poll
        self.last_status = None            # latest status dict, or an "offline" string
        self.simple_action_map = {}        # ActionTypes -> zero-arg client method
        self.current_action = ActionTypes.NONE
        self.current_move_data = []        # [x, y, angle_deg] of the active move, if any
        self.current_action_timer = None   # delay before trusting in_progress == False
        self.wait_timer = None             # timer backing a WAIT action
        self.waiting_active = False        # True while a master-side WAIT is running

    def bring_online(self, use_mock=False):
        """Attempt to establish comms with the robot (mock client if requested)."""
        self._bring_comms_online(use_mock)

    def check_online(self):
        """Return True if comms with the robot are currently up."""
        return self.comms_online

    def get_robot_metrics(self):
        """Return the latest status, augmented with master-side action info."""
        # Shallow copy so callers can't mutate our cached status.
        metrics = copy.copy(self.last_status)
        if metrics and type(metrics) is dict:
            metrics['current_action'] = str(self.current_action)
            metrics['current_move_data'] = self.current_move_data
        # A master-side WAIT counts as "in progress" even though the robot
        # itself is idle during the wait.
        if metrics and self.waiting_active:
            metrics['in_progress'] = True
        return metrics

    def _setup_action_map(self):
        # Map simple (zero-argument) actions directly onto client methods.
        # Actions that carry data (moves, set_pose, ...) are dispatched
        # explicitly in run_action instead.
        self.simple_action_map = {
            ActionTypes.LOAD: self.robot_client.load,
            ActionTypes.PLACE: self.robot_client.place,
            ActionTypes.TRAY_INIT: self.robot_client.tray_init,
            ActionTypes.LOAD_COMPLETE: self.robot_client.load_complete,
            ActionTypes.ESTOP: self.robot_client.estop,
            ActionTypes.CLEAR_ERROR: self.robot_client.clear_error,
            ActionTypes.WAIT_FOR_LOCALIZATION: self.robot_client.wait_for_localization,
            ActionTypes.TOGGLE_VISION_DEBUG: self.robot_client.toggle_vision_debug,
            ActionTypes.START_CAMERAS: self.robot_client.start_cameras,
            ActionTypes.STOP_CAMERAS: self.robot_client.stop_cameras,
        }

    def _bring_comms_online(self, use_mock=False):
        # Construct the client and verify the link; any failure leaves
        # comms_online False so initialize() will retry later.
        try:
            logging.info("Attempting to connect to {} over wifi".format(self.robot_id))
            if use_mock:
                self.robot_client = MockRobotClient(self.config, self.robot_id)
            else:
                self.robot_client = RobotClient(self.config, self.robot_id)
            if self.robot_client.net_status():
                self.comms_online = True
                self._setup_action_map()
                logging.info("Connected to {} over wifi".format(self.robot_id))
        except Exception as e:
            logging.info("Couldn't connect to {} over wifi".format(self.robot_id))
            return

    def _get_status_from_robot(self):
        # Poll the robot for its status; may raise RuntimeError on comms loss.
        self.last_status = self.robot_client.request_status()
        self.last_status_time = time.time()
        # Reset current action and move data if the robot finished an action.
        # The timer check gives the robot time to actually start the action
        # before we believe in_progress == False means "finished".
        if self.current_action != ActionTypes.NONE and self.last_status is not None and \
            'in_progress' in self.last_status.keys() and not self.last_status['in_progress'] and self.current_action_timer.check():
            self.current_action = ActionTypes.NONE
            self.current_move_data = []

    def update(self):
        """Periodic tick: poll robot status and service any active WAIT action."""
        if self.comms_online:
            # Request a status update if needed
            if time.time() - self.last_status_time > self.config.robot_status_wait_time:
                try:
                    self._get_status_from_robot()
                except RuntimeError:
                    logging.info("Network connection with {} lost".format(self.robot_id))
                    self.comms_online = False
                    self.last_status = "{} offline".format(self.robot_id)
            # Expire a master-side WAIT action once its timer fires.
            if self.waiting_active:
                if self.wait_timer.check():
                    self.waiting_active = False
                    self.wait_timer = None

    def run_action(self, action):
        """Dispatch an Action to the robot. Silently ignored if comms are down."""
        if not self.comms_online:
            return
        logging.info("Running {} on {}".format(action.action_type, self.robot_id))
        try:
            if action.action_type is not None:
                self.current_action = action.action_type
            # Data-carrying actions are dispatched explicitly; simple ones go
            # through the action map built in _setup_action_map().
            if action.action_type == ActionTypes.MOVE_COARSE:
                self.robot_client.move(action.x, action.y, action.a)
                self.current_move_data = [action.x, action.y, action.getAngleDegrees()]
            elif action.action_type == ActionTypes.MOVE_REL:
                self.robot_client.move_rel(action.x, action.y, action.a)
            elif action.action_type == ActionTypes.MOVE_REL_SLOW:
                self.robot_client.move_rel_slow(action.x, action.y, action.a)
            elif action.action_type == ActionTypes.MOVE_FINE:
                self.robot_client.move_fine(action.x, action.y, action.a)
                self.current_move_data = [action.x, action.y, action.getAngleDegrees()]
            elif action.action_type == ActionTypes.MOVE_FINE_STOP_VISION:
                self.robot_client.move_fine_stop_vision(action.x, action.y, action.a)
                self.current_move_data = [action.x, action.y, action.getAngleDegrees()]
            elif action.action_type == ActionTypes.MOVE_WITH_VISION:
                self.robot_client.move_with_vision(action.x, action.y, action.a)
            elif action.action_type == ActionTypes.MOVE_CONST_VEL:
                self.robot_client.move_const_vel(action.vx, action.vy, action.va, action.t)
            elif action.action_type == ActionTypes.SET_POSE:
                self.robot_client.set_pose(action.x, action.y, action.a)
            elif action.action_type == ActionTypes.NET:
                status = self.robot_client.net_status()
                logging.info("Robot {} network status is: {}".format(self.robot_id, status))
            elif action.action_type == ActionTypes.WAIT:
                # WAIT is handled entirely master-side; nothing is sent to the robot.
                self.wait_timer = NonBlockingTimer(action.time)
                self.waiting_active = True
            elif action.action_type in self.simple_action_map.keys():
                self.simple_action_map[action.action_type]()
            else:
                logging.info("Unknown action: {}".format(action.action_type))
            # Delay before status polling is allowed to declare this action done.
            self.current_action_timer = NonBlockingTimer(self.config.robot_next_action_wait_time)
        except RuntimeError:
            logging.info("Network connection with {} lost".format(self.robot_id))
            self.comms_online = False
            self.last_status = "{} offline".format(self.robot_id)
class BaseStationInterface:
    """Master-side wrapper around the base station's network client."""

    def __init__(self, config):
        self.config = config
        self.client = None            # BaseStationClient (or mock) once online
        self.comms_online = False
        self.last_status = None       # latest status from the station
        self.last_status_time = 0     # time.time() of the last status poll

    def bring_online(self, use_mock=False):
        """Connect to the base station (mock client if requested)."""
        logging.info("Bringing BaseStation comms online")
        if use_mock:
            self.client = MockBaseStationClient(self.config)
        else:
            self.client = BaseStationClient(self.config)
        if self.client.net_status():
            self.comms_online = True

    def get_last_status(self):
        """Return the most recently fetched status."""
        return self.last_status

    def check_online(self):
        """Return True if comms with the base station are currently up."""
        return self.comms_online

    def update(self):
        """Periodic tick: poll base station status at the configured rate."""
        # Request a status update if needed
        if time.time() - self.last_status_time > self.config.base_station_status_wait_time:
            try:
                self._get_status_from_base_station()
            except RuntimeError:
                logging.info("Network connection with base station lost")
                self.comms_online = False
                self.last_status = "Base station offline"

    def _get_status_from_base_station(self):
        # Poll the station; may raise RuntimeError on comms loss.
        self.last_status = self.client.request_status()
        if self.last_status == None or self.last_status == "":
            self.last_status = "Base station status not available!"
        self.last_status_time = time.time()

    def run_action(self, action):
        """Dispatch an Action to the base station. Silently ignored if comms are down."""
        if not self.comms_online:
            return
        try:
            if action.action_type == ActionTypes.ESTOP:
                self.client.estop()
            elif action.action_type == ActionTypes.NET:
                status = self.client.net_status()
                logging.info("Base station network status is: {}".format(status))
            elif action.action_type == ActionTypes.LOAD:
                self.client.load()
            else:
                logging.info("Unknown action: {}".format(action.action_type))
        except RuntimeError:
            logging.info("Network connection with base station lost")
            self.comms_online = False
            self.last_status = "Base station offline"
class PlanStatus(enum.Enum):
    """Lifecycle states for an execution plan.

    Note: each member previously ended with a trailing comma, which silently
    made every .value a one-element tuple such as (0,) instead of an int.
    Members are compared by identity throughout the codebase, so identity
    checks are unaffected, but .value is now a plain int as intended.
    """
    NONE = 0
    LOADED = 1
    RUNNING = 2
    PAUSED = 3
    ABORTED = 4
    DONE = 5
class RuntimeManager:
    """Top-level coordinator for the master application.

    Owns the robot and base-station interfaces, the Marvelmind positioning
    wrapper, and the currently loaded plan. Call update() periodically to
    drive one tick of the whole system.
    """

    # Overall initialization states.
    STATUS_NOT_INITIALIZED = 0
    STATUS_PARTIALLY_INITIALIZED = 1
    STATUS_FULLY_INITIALIZED = 2

    def __init__(self, config):
        self.config = config

        # One interface per robot defined in the config's IP map.
        self.robots = {id: RobotInterface(config, id) for id in self.config.ip_map.keys()}
        self.base_station = BaseStationInterface(config)

        self.initialization_status = RuntimeManager.STATUS_NOT_INITIALIZED
        # Per-component online flags: one per robot, plus marvelmind and base station.
        self.component_initialization_status = {id: False for id in self.config.ip_map.keys()}
        self.component_initialization_status['mm'] = False
        self.component_initialization_status['base_station'] = False

        if self.config.OFFLINE_TESTING or self.config.SKIP_MARVELMIND:
            self.mm_wrapper = MockMarvelmindWrapper(config)
        else:
            self.mm_wrapper = MarvelmindWrapper(config)

        self.last_metrics = {}
        self.idle_bots = set([n for n in self.robots.keys()])
        self.initialization_timer = None   # backoff timer between initialize() retries
        # Per-robot execution state for the active plan.
        self.cycle_tracker = {n: {'action_id': None, 'cycle_id': None, 'timer': None, 'needs_restart': False, 'action_validator': ActionValidator()} for n in self.robots.keys()}
        self.next_cycle_number = 0

        self.plan = None
        self.plan_path = ""
        self.plan_status = PlanStatus.NONE
        if self.config.USE_TEST_PLAN:
            self._load_plan_from_file('')
        elif self.config.REGEN_PLAN:
            logging.info("Regenerating and loading plan")
            plan = RunFieldPlanning(autosave=True)
            self._load_plan_from_object(plan, os.path.join(self.config.plans_dir, "autosaved.p"))
        elif self.config.AUTO_LOAD_PLAN and self.config.AUTO_LOAD_PLAN_NAME:
            plan_path = os.path.join(self.config.plans_dir, self.config.AUTO_LOAD_PLAN_NAME)
            self._load_plan_from_file(plan_path)

    def initialize(self):
        """Try to bring all components online; retries are rate-limited."""
        # Don't retry until the backoff timer expires.
        if self.initialization_timer and not self.initialization_timer.check():
            return

        # Handle testing/mock case
        use_base_station_mock = False
        use_robot_mock = False
        if self.config.OFFLINE_TESTING:
            use_base_station_mock = True
            use_robot_mock = True
        if self.config.SKIP_BASE_STATION:
            use_base_station_mock = True

        # Bring everything online if needed
        if not self.component_initialization_status['base_station']:
            self.base_station.bring_online(use_base_station_mock)
        if not self.component_initialization_status['mm']:
            self.mm_wrapper.wake_all_devices_only_if_needed()
        for id, robot in self.robots.items():
            if not self.component_initialization_status[id]:
                robot.bring_online(use_robot_mock)

        # Check the status of initialization
        self._check_initialization_status()
        if self.initialization_status != RuntimeManager.STATUS_FULLY_INITIALIZED:
            self.initialization_timer = NonBlockingTimer(3)
            logging.info("Unable to fully initialize RuntimeManager, will try again in 3 seconds")
            logging.info("Current component initialization status:")
            logging.info(pprint.pformat(self.component_initialization_status, indent=2, width=10))

    def shutdown(self, keep_mm_awake):
        """Shut down the manager, optionally leaving Marvelmind beacons awake."""
        logging.info("Shutting down")
        if self.initialization_status != RuntimeManager.STATUS_NOT_INITIALIZED:
            if not keep_mm_awake:
                self.mm_wrapper.sleep_all_devices()

    def get_initialization_status(self):
        """Re-check and return the overall initialization status."""
        self._check_initialization_status()
        return self.initialization_status

    def get_all_metrics(self):
        """Return the metrics gathered during the most recent update()."""
        return self.last_metrics

    def update(self):
        """Run one tick: initialization retry, comms updates, metrics, plan progress."""
        self._check_initialization_status()
        # Bug fix: this previously compared the *bound method* object
        # `self.get_initialization_status` (missing call parentheses) against
        # an int, which is always unequal, so initialize() ran on every tick.
        # _check_initialization_status() just refreshed the attribute, so we
        # compare that directly.
        if self.initialization_status != RuntimeManager.STATUS_FULLY_INITIALIZED:
            self.initialize()

        self.base_station.update()
        for robot in self.robots.values():
            robot.update()

        self._update_all_metrics()

        # Only drive plan execution (and persist its state) while running.
        if self.plan_status == PlanStatus.RUNNING:
            self._update_plan()
            self._update_cycle_actions()
            self._cycle_state_to_file()

    def run_manual_action(self, manual_action):
        """Run a (target, action) pair requested manually from the UI."""
        target = manual_action[0]
        action = manual_action[1]
        self._run_action(target, action)

    def estop(self):
        """Emergency stop: halt the base station and every robot immediately."""
        logging.warning("ESTOP")
        self._run_action('base', Action(ActionTypes.ESTOP, 'ESTOP'))
        for robot in self.robots.values():
            robot.run_action(Action(ActionTypes.ESTOP, 'ESTOP'))

    def load_plan(self, plan_file):
        """Load a plan file.

        '.json' files are resumed cycle-state snapshots; anything else is a
        pickled plan. Bug fix: the extension was previously derived with
        basename(...).split('.')[1], which raised IndexError for names
        without a dot (e.g. 'testplan') and misread dotted filenames;
        os.path.splitext handles both correctly.
        """
        if plan_file and os.path.splitext(plan_file)[1] == '.json':
            self._load_cycle_state_from_file(plan_file)
        else:
            self._load_plan_from_file(plan_file)

    def set_plan_status(self, status):
        """Change the plan status, cleaning up tracker state where needed."""
        # Clean up if we don't want to save the plan state
        if status == PlanStatus.ABORTED:
            self.next_cycle_number = 0
            self.cycle_tracker = {n: {'action_id': None, 'cycle_id': None, 'timer': None, 'needs_restart': False, 'action_validator': ActionValidator()} for n in self.robots.keys()}
            self.idle_bots = set([n for n in self.robots.keys()])
        elif status == PlanStatus.PAUSED:
            # Mark every robot so its current action is re-sent on resume.
            for data in self.cycle_tracker.values():
                data['needs_restart'] = True
        self.plan_status = status

    def increment_robot_cycle(self, target):
        """Advance the target robot to the next cycle."""
        self._modify_cycle_state(target, add_cycle_id=1)

    def decrement_robot_cycle(self, target):
        """Move the target robot back one cycle."""
        self._modify_cycle_state(target, add_cycle_id=-1)

    def increment_robot_action(self, target):
        """Advance the target robot to the next action in its cycle."""
        self._modify_cycle_state(target, add_action_id=1)

    def decrement_robot_action(self, target):
        """Move the target robot back one action in its cycle."""
        self._modify_cycle_state(target, add_action_id=-1)

    def set_cycle(self, target, cycle_num):
        """Set the target robot's cycle number absolutely."""
        self._modify_cycle_state(target, add_cycle_id=cycle_num, absolute=True)

    def set_action(self, target, action_num):
        """Set the target robot's action number absolutely."""
        self._modify_cycle_state(target, add_action_id=action_num, absolute=True)

    def get_plan_status(self):
        """Return the current PlanStatus."""
        return self.plan_status

    def get_plan_info(self):
        """Return (plan, status, last dispatched cycle number), or None if no plan."""
        if not self.plan:
            return None
        else:
            return (self.plan, self.plan_status, self.next_cycle_number-1)

    def _load_plan_from_object(self, plan, plan_path=""):
        # Install a plan object and reset every robot to cycle 0 / action 0.
        self.plan = plan
        self.plan_status = PlanStatus.LOADED
        self.plan_path = plan_path
        for key in self.robots.keys():
            self.set_cycle(key, 0)
            self.set_action(key, 0)

    def _load_plan_from_file(self, plan_file):
        # Load a pickled plan (or the built-in test plan) and reset tracking.
        if self.config.USE_TEST_PLAN or plan_file == "testplan":
            self._load_plan_from_object(TestPlan(), "testplan")
        else:
            with open(plan_file, 'rb') as f:
                plan = pickle.load(f)
                self._load_plan_from_object(plan, plan_file)
                logging.info("Loaded plan from {}".format(plan_file))
        for key in self.robots.keys():
            self.set_cycle(key, 0)
            self.set_action(key, 0)

    def _update_plan(self):
        # If we have an idle robot, send it the next cycle to execute
        if self.plan_status == PlanStatus.RUNNING and self._any_idle_bots():
            next_cycle = self.plan.get_cycle(self.next_cycle_number)
            # If we get none, that means we are done with the plan
            if next_cycle is None:
                self.next_cycle_number = 0
                logging.info("Completed plan!")
                self.plan_status = PlanStatus.DONE
                self._erase_cycle_state_file()
            else:
                logging.info("Sending cycle {} for execution".format(self.next_cycle_number))
                self.next_cycle_number += 1
                self._assign_new_cycle(next_cycle)

    def _any_idle_bots(self):
        # True if at least one robot has no cycle assigned.
        return len(self.idle_bots) > 0

    def _assign_new_cycle(self, cycle):
        # Hand a cycle to the specific robot the plan expects to run it.
        expected_robot = cycle.robot_id
        cycle_id = cycle.id
        if expected_robot not in self.idle_bots:
            raise ValueError("Expected cycle {} to be run with {} but only available robots are {}".format(cycle_id, expected_robot, self.idle_bots))
        else:
            logging.info("Assigning cycle {} to {}".format(cycle_id, expected_robot))
            self.cycle_tracker[expected_robot]['cycle_id'] = cycle_id
            # action_id None means "first action not started yet".
            self.cycle_tracker[expected_robot]['action_id'] = None

    def _run_action(self, target, action):
        # Route an action to the base station or the named robot.
        if action.action_type == ActionTypes.PAUSE_PLAN:
            self.set_plan_status(PlanStatus.PAUSED)
            self.increment_robot_action(target)
        elif target == 'base':
            self.base_station.run_action(action)
        elif 'robot' in target:
            self.robots[target].run_action(action)
            # A robot with an action in flight is no longer idle.
            if target in self.idle_bots:
                self.idle_bots.remove(target)
        else:
            logging.info("Unknown target: {}".format(target))

    def _check_initialization_status(self):
        # Refresh per-component flags and derive the overall status.
        self.component_initialization_status['base_station'] = self.base_station.check_online()
        self.component_initialization_status['mm'] = self.mm_wrapper.check_all_devices_status()
        for id,robot in self.robots.items():
            self.component_initialization_status[id] = robot.check_online()
        ready = [i for i in self.component_initialization_status.values()]
        if all(ready):
            self.initialization_status = RuntimeManager.STATUS_FULLY_INITIALIZED
        elif any(ready):
            self.initialization_status = RuntimeManager.STATUS_PARTIALLY_INITIALIZED

    def _update_all_metrics(self):
        # Gather a fresh snapshot of all component metrics for the UI.
        self.last_metrics = {}
        self.last_metrics['mm'] = self.mm_wrapper.get_metrics()
        self.last_metrics['base'] = self.base_station.get_last_status()
        self.last_metrics['plan'] = self._get_plan_metrics()
        for robot in self.robots.values():
            robot_metrics = robot.get_robot_metrics()
            self.last_metrics[str(robot.robot_id)] = robot_metrics
            # Check if the robot has an error that would require pausing the plan
            robot_has_error = robot_metrics and "error_status" in robot_metrics and robot_metrics["error_status"]
            plan_running = self.plan_status == PlanStatus.RUNNING
            if plan_running and robot_has_error:
                logging.warning("Pausing plan due to error on {}. Please address before proceeding.".format(robot.robot_id))
                self.set_plan_status(PlanStatus.PAUSED)

    def _get_plan_metrics(self):
        # Build the plan-level metrics dict, including per-robot progress.
        plan_metrics = {}
        plan_metrics['status'] = self.plan_status
        plan_metrics['filename'] = os.path.basename(self.plan_path)
        plan_metrics['next_cycle_number'] = self.next_cycle_number
        plan_metrics['idle_bots'] = list(self.idle_bots)
        robot_metrics = {}
        for id, data in self.cycle_tracker.items():
            # Default everything to the string 'None' for display purposes.
            robot_metrics[id] = {'cycle_id': 'None', 'action_name': 'None', 'action_id': 'None', 'vision_offset': 'None','tile_coordinate': 'None'}
            if data['cycle_id'] is not None:
                robot_metrics[id]['cycle_id'] = data['cycle_id']
                robot_metrics[id]['tile_coordinate'] = self.plan.field.tiles[data['cycle_id']].coordinate
            if data['action_id'] is not None:
                robot_metrics[id]['action_id'] = data['action_id']
                action = self.plan.get_action(data['cycle_id'], data['action_id'])
                if action:
                    robot_metrics[id]['action_name'] = action.name
                if action and action.action_type == ActionTypes.MOVE_WITH_VISION:
                    robot_metrics[id]['vision_offset'] = (action.x, action.y, math.degrees(action.a))
                else:
                    robot_metrics[id]['vision_offset'] = 'None'
            robot_metrics[id]['needs_restart'] = data['needs_restart']
        plan_metrics['robots'] = robot_metrics
        return plan_metrics

    def _update_cycle_actions(self):
        # Use metrics to update in progress actions
        for robot_id, metric in self.last_metrics.items():
            # No updates needed for pos tracker, base station, or plan entries.
            if robot_id in ['mm', 'plan', 'base']:
                continue
            if metric is None or 'in_progress' not in metric.keys():
                continue

            # Figure out if we need to do any updates on the running actions
            tracker_data = self.cycle_tracker[robot_id]
            if tracker_data['cycle_id'] is not None:
                cycle = self.plan.get_cycle(tracker_data['cycle_id'])

                # Check if this action needs to be restarted due to being paused or interrupted
                if tracker_data['needs_restart']:
                    tracker_data['timer'] = NonBlockingTimer(self.config.robot_next_action_wait_time)
                    action = cycle.action_sequence[tracker_data['action_id']]
                    logging.info("Re-starting action {} ({}) on {}".format(tracker_data['action_id'], action.name, robot_id))
                    self._run_action(robot_id, action)
                    tracker_data['action_validator'].update_expected_action(action.action_type)
                    tracker_data['needs_restart'] = False

                # The timer check here is to give a delay for the robot to actually start the action
                # before master checks if it is finished
                action_timer_ready = tracker_data['timer'] is None or tracker_data['timer'].check()

                # If we got a new cycle but haven't started an action yet, start the first action
                start_next_action = False
                if tracker_data['action_id'] is None:
                    start_next_action = True

                # If the robot was doing an action and is now finished, start the next one
                action_assigned = tracker_data['action_id'] is not None
                action_finished = not metric['in_progress']
                previous_action_validated = tracker_data['action_validator'].update_action_validation(metric)
                if action_assigned and action_finished and action_timer_ready:
                    start_next_action = True
                    if not previous_action_validated:
                        logging.warning("Everything else finished but previous action not validated")

                if start_next_action:
                    # Check if there is a new action to run for this cycle, if not, end the cycle
                    if tracker_data['action_id'] is not None and (tracker_data['action_id'] + 1) >= len(cycle.action_sequence):
                        logging.info("{} finished cycle {}".format(robot_id, cycle.id))
                        tracker_data['cycle_id'] = None
                        tracker_data['action_id'] = None
                    else:
                        # If there is a new action to run, start it
                        if tracker_data['action_id'] is None:
                            tracker_data['action_id'] = 0
                        else:
                            tracker_data['action_id'] += 1
                        tracker_data['timer'] = NonBlockingTimer(self.config.robot_next_action_wait_time)
                        next_action = cycle.action_sequence[tracker_data['action_id']]
                        logging.info("Starting action {} ({}) on {}".format(tracker_data['action_id'], next_action.name, robot_id))
                        self._run_action(robot_id, next_action)
                        tracker_data['action_validator'].update_expected_action(next_action.action_type)

        # Update if any robots are idle
        for robot, data in self.cycle_tracker.items():
            if data['action_id'] == None and data['cycle_id'] == None:
                self.idle_bots.add(robot)

    def _modify_cycle_state(self, target, add_cycle_id=None, add_action_id=None, absolute=False):
        """Adjust (or set, when absolute=True) a robot's cycle/action counters.

        Only allowed while the plan is not actively running; marks the
        action for restart so it is re-sent when the plan resumes.
        """
        if self.plan_status == PlanStatus.RUNNING:
            logging.warning("Cannot update cycle state while plan is running.")
            return
        if target not in self.cycle_tracker.keys():
            logging.warning("Invalid target: {}".format(target))
            return

        # Work out the new cycle id, treating "no current cycle" as 0.
        cur_cycle_id = self.cycle_tracker[target]["cycle_id"]
        if cur_cycle_id is None:
            cur_cycle_id = 0
        new_cycle_id = cur_cycle_id
        if add_cycle_id is not None:
            if absolute:
                new_cycle_id = add_cycle_id
            else:
                new_cycle_id = cur_cycle_id + add_cycle_id
            # Parentheses make the original and/or precedence explicit: the
            # upper bound is only checked when a plan is loaded.
            if (self.plan and new_cycle_id >= len(self.plan.cycles)) or new_cycle_id < 0:
                logging.warning("Cycle id {} out of bounds".format(new_cycle_id))
                return

        # Work out the new action id the same way.
        cur_action_id = self.cycle_tracker[target]["action_id"]
        if cur_action_id is None:
            cur_action_id = 0
        new_action_id = cur_action_id
        if add_action_id is not None:
            if absolute:
                new_action_id = add_action_id
            else:
                new_action_id = cur_action_id + add_action_id
            if (self.plan and new_action_id >= len(self.plan.get_cycle(new_cycle_id).action_sequence)) or new_action_id < 0:
                logging.warning("Action id {} out of bounds".format(new_action_id))
                return

        # Commit the new state and force the action to restart from here.
        data = self.cycle_tracker[target]
        data['action_id'] = new_action_id
        data['cycle_id'] = new_cycle_id
        data['timer'] = None
        data['needs_restart'] = True
        data['action_validator'] = ActionValidator()
        self.next_cycle_number = new_cycle_id + 1
        # The robot now has work assigned, so it is no longer idle.
        # (discard replaces the previous bare try/except around set.remove.)
        self.idle_bots.discard(target)

    def _cycle_state_to_file(self):
        # Persist plan progress so an interrupted run can be resumed.
        if self.plan_status == PlanStatus.RUNNING:
            # Copy data and prepare to write
            robot_cycle_states = copy.deepcopy(self.cycle_tracker)
            for tracker_data in robot_cycle_states.values():
                # Timers and validators are not JSON-serializable and are
                # rebuilt on load.
                del tracker_data['timer']
                del tracker_data['action_validator']
            data_to_dump = {}
            data_to_dump['plan_path'] = self.plan_path
            data_to_dump['next_cycle_number'] = self.next_cycle_number
            data_to_dump['robots'] = robot_cycle_states
            with open(self.config.cycle_state_file, 'w') as f:
                json.dump(data_to_dump, f)

    def _load_cycle_state_from_file(self, filepath):
        """Resume a previously interrupted plan from a JSON state snapshot."""
        logging.info("Loading cycle state information from {}".format(filepath))
        with open(filepath, 'r') as f:
            loaded_data = json.load(f)

        # Set the various bits of data from the file
        self.plan_path = loaded_data['plan_path']
        self.next_cycle_number = loaded_data['next_cycle_number']
        self.cycle_tracker = loaded_data['robots']

        # Need to handle robot states carefully
        for robot, data in self.cycle_tracker.items():
            # Need to re-add timer state and tell the controller the action needs to be restarted
            data['timer'] = None
            data['needs_restart'] = True
            data['action_validator'] = ActionValidator()
            # Need to remove the robot from the idle list if it was actually doing something at the time it stopped
            if data['action_id'] is not None or data['cycle_id'] is not None:
                self.idle_bots.remove(robot)

        # Reload the plan data and start the status as PAUSED so that it can be resumed
        self._load_plan_from_file(self.plan_path)
        self.plan_status = PlanStatus.PAUSED

    def _erase_cycle_state_file(self):
        # Remove the persisted state once a plan completes successfully.
        fname = self.config.cycle_state_file
        if os.path.exists(fname):
            os.remove(fname)
================================================
FILE: src/master/Utils.py
================================================
import time
import numpy as np
import math
import enum
class ActionTypes(enum.Enum):
    """All action types the master can dispatch to robots or the base station.

    Note: each member previously ended with a trailing comma, which silently
    made every .value a one-element tuple such as (0,) instead of an int.
    Members are compared by identity (and via str()) throughout the codebase,
    so those checks are unaffected, but .value is now a plain int as intended.
    """
    MOVE_COARSE = 0
    MOVE_FINE = 1
    MOVE_REL = 2
    NET = 3
    LOAD = 4
    PLACE = 5
    TRAY_INIT = 6
    LOAD_COMPLETE = 7
    ESTOP = 8
    WAIT_FOR_LOCALIZATION = 9
    MOVE_CONST_VEL = 10
    CLEAR_ERROR = 11
    NONE = 12
    SET_POSE = 13
    MOVE_WITH_VISION = 14
    TOGGLE_VISION_DEBUG = 15
    START_CAMERAS = 16
    STOP_CAMERAS = 17
    MOVE_REL_SLOW = 18
    WAIT = 19
    MOVE_FINE_STOP_VISION = 20
    PAUSE_PLAN = 21
class NonBlockingTimer:
    """One-shot timer that can be polled without blocking."""

    def __init__(self, trigger_time):
        # Timestamp the moment of construction; check() measures from here.
        self.start_time = time.time()
        self.trigger_time = trigger_time

    def check(self):
        """Return True once more than trigger_time seconds have elapsed."""
        elapsed = time.time() - self.start_time
        return elapsed > self.trigger_time
def write_file(filename, text):
    """Write text to filename, creating the file if it does not exist."""
    with open(filename, 'w+') as out:
        out.write(text)
# Pos and offset expected as 2 long numpy vector, Angle is expected in degrees
# Returns 2 length numpy vector
def TransformPos(pos_2d, frame_offset_2d, frame_angle):
    """Transform a 2D point through a frame rotated by frame_angle degrees
    and translated by frame_offset_2d, returning a length-2 numpy vector."""
    theta = math.radians(frame_angle)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Homogeneous 2D transform: rotation by theta followed by translation.
    tf = np.array([[cos_t, -sin_t, frame_offset_2d[0]],
                   [sin_t,  cos_t, frame_offset_2d[1]],
                   [0,      0,     1]])
    homogeneous = np.concatenate((pos_2d, [1])).reshape((3,))
    transformed = np.matmul(tf, homogeneous)
    return transformed[:2].reshape((2,))
class ActionValidator:
    """Tracks whether a robot actually started the action master sent it.

    Validation works by watching the robot's status metrics: once the robot
    reports the expected action as its current action (and in-progress, for
    actions that report progress), the action is considered validated.
    """

    def __init__(self):
        # expected_action is always stored as the *string* form of the enum
        # member (e.g. 'ActionTypes.NONE'), matching the 'current_action'
        # field in robot metrics.
        self.expected_action = str(ActionTypes.NONE)
        self.action_validated = False

    def update_expected_action(self, expected_action):
        """Record the action we expect the robot to start and reset validation."""
        self.expected_action = str(expected_action)
        self.action_validated = False

    def update_action_validation(self, metric):
        """Update validation from the latest robot metrics; return the result.

        Returns:
            bool: True once the expected action has been observed running.
        """
        if metric is not None and \
           'in_progress' in metric and \
           'current_action' in metric and \
           metric['current_action'] == self.expected_action:
            # Camera start/stop never reports in_progress, so simply seeing
            # the action echoed back validates it. Bug fix: compare against
            # the *string* form of the members — expected_action is always a
            # string, so the old comparison to raw enum members never matched.
            if self.expected_action in [str(ActionTypes.START_CAMERAS), str(ActionTypes.STOP_CAMERAS)]:
                self.action_validated = True
            elif metric['in_progress'] == True:
                self.action_validated = True
        return self.action_validated
if __name__ == '__main__':
    # Quick sanity check of TransformPos against a hand-computed answer.
    point = np.array([1, 1])
    offset = np.array([2, 2])
    angle_deg = 90
    expected = np.array([1, 3])
    actual = TransformPos(point, offset, angle_deg)
    print("Actual: {}, expected: {}".format(actual, expected))
================================================
FILE: src/master/__init__.py
================================================
================================================
FILE: src/master/camera_utils.py
================================================
# Helper script to get camera images and debug info from raspi
import paramiko
from scp import SCPClient
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import config
from datetime import datetime
def scp_image(ip, remote_path, local_path):
    """Copy an image file from the raspi at `ip` to `local_path` via SCP.

    Args:
        ip: IP address of the raspi (logs in as pi/dominobot).
        remote_path: path of the file on the raspi.
        local_path: destination path on this machine.

    Raises:
        ValueError: if the file does not exist locally after the copy.
    """
    ssh_client = paramiko.SSHClient()
    ssh_client.load_system_host_keys()
    ssh_client.connect(ip, username="pi", password='dominobot')
    try:
        with SCPClient(ssh_client.get_transport()) as scp:
            scp.get(remote_path, local_path)
    finally:
        # Always release the SSH connection; it previously leaked on every call.
        ssh_client.close()
    if not os.path.exists(local_path):
        raise ValueError("SCP image did not complete successfully")
def display_debug_image(local_path):
    """Show a single saved debug image, maximized on screen."""
    image = mpimg.imread(local_path)
    plt.imshow(image)
    # Maximize the plot window (TkAgg/Windows-specific call).
    manager = plt.get_current_fig_manager()
    manager.window.state('zoomed')
    plt.show()
def get_and_display_multiple_images(cam_name, remote_ip, remote_path, local_path, img_data):
    """Fetch a set of debug images from one camera and show them in a 2x3 grid.

    img_data is a list of dicts with 'file', 'title', and 'color' keys.
    """
    fig, axes = plt.subplots(nrows=2, ncols=3)
    fig.suptitle("{} camera".format(cam_name), fontsize=16)
    ax = axes.ravel()
    for i, data in enumerate(img_data):
        # Timestamp each downloaded file so repeated runs don't overwrite.
        cur_time_str = datetime.now().strftime("%Y%m%d%H%M%S")
        remote_file = remote_path + data['file']
        local_file = os.path.join(local_path, cur_time_str + "_" + cam_name + "_" + data['file'])
        scp_image(remote_ip, remote_file, local_file)
        image = mpimg.imread(local_file)
        if data["color"]:
            ax[i].imshow(image)
        else:
            # Grayscale debug images use the full 8-bit range.
            ax[i].imshow(image, cmap='gray', vmin=0, vmax=255)
        ax[i].set_title(data['title'])
    plt.tight_layout()
    # Maximize the plot window (TkAgg/Windows-specific call).
    manager = plt.get_current_fig_manager()
    manager.window.state('zoomed')
    plt.show()
if __name__ == '__main__':
    cfg = config.Config()
    robot_ip = cfg.ip_map['robot1']
    remote_path = "/home/pi/images/debug/"
    local_path = os.path.join(cfg.log_folder, "images")

    # Debug images produced by the vision pipeline, in processing order.
    img_data = [
        {"file": "img_raw.jpg", "title": "raw", "color": False},
        {"file": "img_undistorted.jpg", "title": "undistorted", "color": False},
        {"file": "img_thresh.jpg", "title": "threshold", "color": False},
        {"file": "img_keypoints.jpg", "title": "detections", "color": False},
        {"file": "img_best_keypoint.jpg", "title": "best", "color": False},
    ]

    DISPLAY_SIDE = True
    DISPLAY_REAR = True
    if DISPLAY_SIDE:
        side_remote_path = remote_path + 'side/'
        get_and_display_multiple_images("side", robot_ip, side_remote_path, local_path, img_data)
    if DISPLAY_REAR:
        rear_remote_path = remote_path + 'rear/'
        get_and_display_multiple_images("rear", robot_ip, rear_remote_path, local_path, img_data)
================================================
FILE: src/master/config.py
================================================
import numpy as np
import os
import Utils
class Config:
    """Central master-side configuration: debug flags, file paths, robot and
    network info, plan-generation parameters, field/environment geometry, GUI
    rendering constants, and runtime timing. All values are class attributes,
    so every Config() instance shares them."""

    # Various debug/test flags
    # Set to override config values for home network
    USING_HOME_NETWORK = False
    # Set for laptop vs desktop
    USING_DESKTOP = False
    # Set to override config values for small scale testing
    USE_SMALL_TESTING_CONFIG = False
    # Set to skip connecting to robot
    OFFLINE_TESTING = False
    # Set to skip connecting to base station
    SKIP_BASE_STATION = True
    # Set to skip connecting to Marvelmind
    SKIP_MARVELMIND = True
    # Set to use fake plan instead of loading a generated one
    USE_TEST_PLAN = False
    # MR LOGO plan
    MR_LOGO_PLAN = False
    # Set to auto-load this plan on master startup
    AUTO_LOAD_PLAN = False
    AUTO_LOAD_PLAN_NAME = "FullPlan_DominoBros.p"
    # Set to regenerate and auto load plan on master startup
    REGEN_PLAN = True
    # Set to true to use just a subsection of the overal plan
    USE_SUBSECTION = True

    # ====== PATHS ========
    root_path = "C:\\Users\\alexb\\Documents\\Github\\DominoRobot\\" # Laptop
    if USING_DESKTOP:
        root_path = "C:\\Users\\alexb\\Data\\Github\\DominoRobot\\" # Desktop
    mm_api_relative_path = "marvelmind_SW_20202_04_19\\API\\api_windows_64bit\\dashapi.dll"
    config_dir_path = os.path.dirname(os.path.realpath(__file__))
    mm_api_path = os.path.join(root_path, mm_api_relative_path)
    log_folder = os.path.join(root_path, 'log')
    plans_dir = os.path.join(config_dir_path, 'plans')
    cycle_state_file = os.path.join(plans_dir, 'previous_plan_state.json')

    # ====== ROBOT CONFIG ========
    # Maps robot (or static) to sets of marvel mind beacons
    device_map = {
        "static": (11, 12),
        "robot1": (1, 2)
    }
    # Specifies which IP address each robot has
    ip_map = {'robot1': '10.0.0.3'} # Workshop
    if USING_HOME_NETWORK:
        ip_map = {'robot1': '192.168.1.5'} # Home
    base_station_ip = '10.0.0.100'

    # ====== PLAN GENERATION ========
    # Image configuration
    image_name = os.path.join(config_dir_path, 'DominoDesign-Questions.psd')
    num_tiles_width = 18
    num_tiles_height = 19
    # Available domino colors as (name, rgb) pairs, rgb as 0-1 floats
    dominos = np.array(
        [('black', (0,0,0)),
         ('red', (1,0,0)),
         ('blue', (0.188,0.5,0.886)),
         ('green', (0,1,0)),
         ('white', (1,1,1)),
         ('brown', (1,0.51,0)),
         ('yellow', (1,0.867,0)),
        ], dtype=object)
    if USE_SMALL_TESTING_CONFIG:
        num_tiles_width = 2
        num_tiles_height = 4
    if MR_LOGO_PLAN:
        # Logo plan uses a different source image, a smaller field, and only two colors
        image_name = os.path.join(config_dir_path, 'logo.jpg')
        num_tiles_width = 5
        num_tiles_height = 5
        dominos = np.array(
            [('black', (0,0,0)),
             ('white', (1,1,1)),
            ], dtype=object)

    # Physical dimensions of dominos
    domino_width = 0.025 # meters
    domino_height = 0.010 # meters
    domino_spacing_width = 0.037 # meters
    domino_spacing_height = 0.024 # meters

    # Spacing for drawing dominos as pixels instead of rectangles
    meters_per_pixel = 0.008
    domino_width_px = round(domino_width / meters_per_pixel)
    domino_height_px = round(domino_height / meters_per_pixel)
    domino_spacing_width_px = round(domino_spacing_width / meters_per_pixel)
    domino_spacing_height_px = round(domino_spacing_height / meters_per_pixel)

    # Tile configuration (a tile is a tile_width x tile_height block of dominos)
    tile_width = 15
    tile_height = 20
    tile_background_color = (0.9,0.9,0.9)
    tile_edge_color = (0,0,1)
    tile_size_width_meters = 0.930 #tile_width * (domino_spacing_width + domino_width)
    tile_size_height_meters = 0.665 #tile_height * (domino_spacing_height + domino_height)
    desired_width_dominos = tile_width * num_tiles_width
    desired_height_dominos = tile_height * num_tiles_height

    # Map configureation
    grid_top_left = np.array([13.2, 6.9])
    grid_top_right = np.array([13.2, -10.07])
    grid_num_cols = 19
    grid_bottom_left = np.array([1.18,6.9])
    grid_bottom_right = np.array([1.18,-10.07])
    grid_num_rows = 19
    # Columns span the y axis and rows span the x axis, hence the [1]/[0] indices
    grid_tile_width = ((grid_top_left - grid_top_right)/grid_num_cols)[1]
    grid_tile_height = ((grid_top_left - grid_bottom_left)/grid_num_rows)[0]
    # Override the nominal tile width with the measured grid spacing
    tile_size_width_meters = grid_tile_width
    # tile_size_height_meters = grid_tile_height

    # Vision offset configuration
    default_vision_offset = np.array((0,0,-0.5))
    vision_offset_file = os.path.join(plans_dir, 'vision_offsets_full_plan.csv')

    # ====== ENVIRONMENT CONFIGURATION ========
    # Map configuration (distances in meters, angles in degrees)
    robot_boundaries = np.array([[1,-11],[15,11]]) # Bottom left, top right, global frame
    highway_x = 2.5 # "Highway" coordinate
    load_waypoint = np.array([highway_x, -8.5]) # xya (global frame) for waypoint to go to first before load prep
    highway_angle = -90
    base_station_boundaries = np.array([[2.5,10],[3.5,11]]) # Bottom left, top right, global frame
    base_station_target_angle = -90 # Target angle (deg) for base station in global frame
    base_station_relative_offset = np.array([0.9, 0, 0]) # Relative position of base station from prep pos - robot frame (x,y,a)
    base_station_vision_offset = np.array([0.01,0.010,-0.5]) # Vision offset for base station alignment
    base_station_prep_pos = np.array([highway_x, -10.5]) # Pose outside of base station to align with before going in to dock
    base_station_prep_vision_offset = np.array([0,0.01,-1]) # Vision offset to use for base station prep pose
    robot_pose_top_left = grid_top_left # Robot pose in global frame for top left of tile position of domino field
    domino_field_angle = -90 # Domino field angle (deg), global frame
    tile_placement_fine_offset = np.array([0, 0]) # Offset position for fine tile placement [x,y], in robot coordinate frame (to avoid hitting next column)
    tile_placement_coarse_offset = np.array([-0.5,0.5]) # Offset position for tile placement [x,y], in robot coordinate frame
    tile_to_robot_offset = np.array([-0.3, -tile_size_width_meters/2.0]) # Offset from bottom left of tile to robot center [x,y], in robot coordinate frame
    enter_position_distance = 0.25 # How far out of field boundaries to do robot prep move
    intermediate_entry_hz_y = 0 # Y coordinate for horizontal intermediate position
    intermediate_place_vt_x = 8 # X coordinate for vertical intermediate position
    field_to_robot_frame_angle = 90 # In case robot frame and field frame ever need to be rotated relative to each other

    # Used for testing sub-sections of the larger pattern
    if USE_SUBSECTION:
        start_coords = (6, 5)
        end_coords = (18, 6)
        # start_coords = (8, 4)
        # end_coords = (18, 4)

    # Left side
    # if USE_SMALL_TESTING_CONFIG:
    #     load_pose = np.array([7.5,7.5,0])
    #     robot_pose_top_left = np.array([12.20,9.472])
    #     domino_field_angle = -90
    #     tile_placement_coarse_offset = np.array([-0.5,-0.5])
    #     tile_to_robot_offset = np.array([-0.3, -tile_size_width_meters/2.0 ])

    # Right side
    if USE_SMALL_TESTING_CONFIG:
        load_pose = np.array([8,-6.5,0])
        robot_pose_top_left = np.array([12.74,-6.94])
        domino_field_angle = -90
        tile_placement_coarse_offset = np.array([-0.5,-0.5])
        tile_to_robot_offset = np.array([-0.3, -tile_size_width_meters/2.0 ])

    # Fine motion y offset adjustments
    # y_offset_cols = np.linspace(0.05, 0, num_tiles_height)
    y_offset_cols = np.linspace(0.51, 0, num_tiles_width)
    y_offset_rows = np.linspace(0, 0.0, num_tiles_height)
    x_offset_rows = np.linspace(0, 0.3, num_tiles_height)
    # First two rows get no x offset
    x_offset_rows[0:2] = 0

    # Angle adjustment for fine motion to try and prevent wheel from hitting
    angle_adjust_fine = 4 # degrees

    # Computed - don't change
    field_width = tile_size_width_meters * desired_width_dominos/tile_width
    field_height = tile_size_height_meters * desired_height_dominos/tile_height
    # Fix me
    domino_field_top_left = robot_pose_top_left + np.array([tile_size_height_meters-tile_to_robot_offset[0], -tile_to_robot_offset[1]]) \
        - np.array([0, 2*tile_size_width_meters])
    #Utils.TransformPos(np.array([tile_size_height_meters-tile_to_robot_offset[0], -tile_to_robot_offset[1]]), [0,0], domino_field_angle)
    domino_field_origin = domino_field_top_left + Utils.TransformPos(np.array([0,-field_height]), [0,0], domino_field_angle)
    domino_field_top_right = domino_field_origin + Utils.TransformPos(np.array([field_width,field_height]), [0,0], domino_field_angle)
    domino_field_boundaries = np.array([domino_field_origin, domino_field_top_right])

    # ====== GUI CONFIGURATION ========
    graph_padding = 0.5 # How many meters of padding to have around edge of graph
    axes_label_offset = 0.25 # How many meters to offset labels on axes
    axes_tick_spacing = 5 # How many meters of space between tick marks
    axes_tick_size = 0.5 # How many meters long the tick marks should be
    robot_chassis_size = 0.6 # How many meters wide and long the robot chassis is
    robot_rotation_center_offset = 0.245 # How many meters from front of robot to center of rotation
    robot_direction_indicator_length = 0.8 # How many meters long to make the direction line for the robot render
    robot_direction_indicator_arrow_length = 0.4 # How many meters long to make direction arrow lines
    robot_direction_indicator_arrow_angle = 25 # How many degrees to make arrow angle lines

    # Computed - don't change
    graph_bottom_left = np.array((
        min(robot_boundaries[0][0], 0) - graph_padding,
        robot_boundaries[0][1] - graph_padding
    ))
    graph_size = np.max(robot_boundaries[1] - robot_boundaries[0]) + 2*graph_padding
    graph_top_right = graph_bottom_left + np.array([graph_size,graph_size])

    # ====== RUNTIME CONFIGURATION ========
    robot_status_wait_time = 0.2 # How many seconds to wait between status requests for each robot
    base_station_status_wait_time = 1 # How many seconds to wait between status requests for the base station
    robot_next_action_wait_time = 2.0 # How many seconds to wait before checking if robot is finished with current plan action
================================================
FILE: src/master/plans/vision_offsets small_testing_area.csv
================================================
#tile_x, tile_y, offset_x (millimeters), offset_y(millimeters), offset_a(degrees), add to default (True) or exact (False)
0,0,5,5,0,True
1,1,0,15,0,True
1,0,10,25,0,True
================================================
FILE: src/master/plans/vision_offsets_full_plan.csv
================================================
#tile_x, tile_y, offset_x (millimeters), offset_y(millimeters), offset_a(degrees)
X,17,0,-4,0
================================================
FILE: src/master/plot_logs2.py
================================================
import paramiko
from scp import SCPClient
import os
import matplotlib.pyplot as plt
import config
import csv
import numpy as np
import datetime
# Row names that can appear in a parsed motion log: a shared 'time' row plus
# an x/y/a triplet for each logged quantity.
possible_rows = ['time'] + [
    '{}_{}'.format(prefix, axis)
    for prefix in ('pos', 'vel', 'target_pos', 'target_vel', 'control_vel')
    for axis in ('x', 'y', 'a')
]
def scp_last_motion_log(ip, remote_path, local_path):
    """Copy the latest motion log from the robot to a local file via SCP.

    Raises ValueError if the local file does not exist after the transfer.
    """
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect(ip, username="pi", password='dominobot')
    with SCPClient(client.get_transport()) as scp_conn:
        scp_conn.get(remote_path, local_path)
    if not os.path.exists(local_path):
        raise ValueError("SCP image did not complete successfully")
def init_data():
    """Create an empty parsed-log structure: one list per known row name,
    plus 'title' and 'start_time' metadata entries."""
    data = {}
    for name in possible_rows:
        data[name] = []
    data['title'] = ""
    data['start_time'] = None
    return data
def parse_log_file(path):
    """Parse the motion log CSV at `path` into a dict of row name -> values.

    Log format (semicolon-delimited): line 0 is a header (skipped), line 1
    carries the log title in its last field, and every later line is a data
    row handled by parse_row().

    Bug fix: this previously opened the module-level `local_file` instead of
    the `path` argument, so the argument was silently ignored.
    """
    data = init_data()
    with open(path) as csvfile:
        reader = csv.reader(csvfile, delimiter=";")
        for id, row in enumerate(reader):
            if id == 0:
                # Header line - nothing useful to parse
                continue
            elif id == 1:
                # Title line: last field, with surrounding quotes stripped
                data['title'] = row[-1].replace('"', '')
            else:
                parse_row(row, data)
    return data
def parse_row(row, data):
    """Parse one log row into `data`.

    The row's last field is a quoted payload of the form "name,v1,v2,v3" and
    row[1] is a %H:%M:%S.%f timestamp. A 'time' entry appends seconds elapsed
    since the first time entry (0 for the first); any other entry appends its
    three values to the '<name>_x', '<name>_y' and '<name>_a' lists.
    """
    payload = row[-1].replace('"', '').split(",")
    name = payload[0]
    values = payload[1:]
    stamp = datetime.datetime.strptime(row[1], '%H:%M:%S.%f')
    if name != "time":
        # Three-axis entry: append one value per axis
        for idx, axis in enumerate(['x', 'y', 'a']):
            data[name + '_' + axis].append(float(values[idx]))
    elif data["start_time"]:
        # Seconds since the first 'time' entry
        elapsed = (stamp - data['start_time']) / datetime.timedelta(seconds=1)
        data["time"].append(elapsed)
    else:
        # First 'time' entry establishes the reference timestamp
        data["start_time"] = stamp
        data["time"].append(0)
def plot_data(data, rows_to_plot=None):
    """Plot the requested rows of a parsed log against time on one axis.

    If rows_to_plot is falsy, every known row except 'time' is plotted.
    """
    selected = rows_to_plot if rows_to_plot else possible_rows[1:]
    plt.figure()
    axis = plt.gca()
    for name in selected:
        # Use matplotlib's data-keyword form so 'time' and name index into data
        axis.plot('time', name, data=data, label=name)
    axis.legend()
    axis.set_title(data['title'])
    plt.xlabel('Time (s)')
    plt.show()
def plot_rows_axes(data, rows, axes):
    """Plot the cross product of rows and axes, e.g. rows=['pos'],
    axes=['x','y'] plots pos_x and pos_y."""
    selected = []
    for axis in axes:
        for row in rows:
            selected.append(row + '_' + axis)
    plot_data(data, selected)
if __name__ == '__main__':
    cfg = config.Config()

    # GET_FILE: fetch the newest log off the robot; otherwise reuse a
    # previously downloaded local file.
    GET_FILE = True
    EXISTING_LOCAL_FILENAME = "WiggleVision.csv"

    if GET_FILE:
        robot_ip = cfg.ip_map['robot1']
        remote_file = "/home/pi/DominoRobot/src/robot/log/last_motion_log.csv"
        local_file = os.path.join(cfg.log_folder, "last_motion_log.csv")
        scp_last_motion_log(robot_ip, remote_file, local_file)
    else:
        local_file = os.path.join(cfg.log_folder, EXISTING_LOCAL_FILENAME)

    parsed_data = parse_log_file(local_file)
    # plot_data(parsed_data, rows_to_plot=['control_vel_x','control_vel_y','control_vel_a'])
    plot_rows_axes(parsed_data, ['pos', 'vel', 'target_pos', 'target_vel', 'control_vel'], ['a'])
================================================
FILE: src/master/requirements.txt
================================================
cycler==0.10.0
decorator==4.4.2
imageio==2.9.0
kiwisolver==1.3.1 #Was originally 1.2.0, but wasn't building with Python 3.9.6
matplotlib==3.3.0
networkx==2.5
numpy==1.21.0 #Changed from 1.19.1
opencv-python==4.5.1.48
paramiko==2.7.2
Pillow==8.2.0
pyparsing==2.4.7
pyserial==3.4
PySimpleGUI==4.32.1
python-dateutil==2.8.1
PyWavelets==1.1.1
rope==0.18.0
scikit-image==0.18.0 #0.17.2
scipy==1.7.0 #1.5.2
scp==0.13.3
six==1.15.0
tifffile==2020.9.3
virtualenv==16.7.8
================================================
FILE: src/robot/Makefile
================================================
# Started with https://spin.atomicobject.com/2016/08/26/makefile-c-projects/
# Additional inspiration from https://codereview.stackexchange.com/questions/157780/generic-makefile-handling-unit-testing-and-folder-structure

# Output binary names and directory layout (overridable from the environment)
TARGET_EXEC ?= robot-main
TEST_EXEC ?= test-main
BUILD_DIR ?= build
SRC_DIRS ?= src
TEST_DIRS ?= test

# All sources and their derived object/dependency files
SRCS := $(shell find $(SRC_DIRS) -name *.cpp -or -name *.c -or -name *.s)
OBJS := $(SRCS:%=$(BUILD_DIR)/%.o)
DEPS := $(OBJS:.o=.d)

# Include paths and compiler/linker flags
INC_DIRS := ./lib ./src /usr/local/include
INC_FLAGS := $(addprefix -I,$(INC_DIRS))
# -MMD -MP generate .d dependency files so header edits trigger rebuilds
CPPFLAGS ?= $(INC_FLAGS) -MMD -MP
# -g for debug symbols, -O2 for run speed
CXXFLAGS ?= -std=c++17 -Wall -Wextra -Wwrite-strings -Wno-parentheses -pthread -O3 `pkg-config --cflags opencv4`
LDFLAGS += -ldl -lconfig++ -lstdc++fs `pkg-config --libs opencv4`
LIBS += /usr/local/lib/libserial.a ./lib/marvelmind/marvelmind.a

# Test build reuses all of the main objects except main.cpp's
TEST_SRCS := $(shell find $(TEST_DIRS) -name *.cpp -or -name *.c -or -name *.s)
TEST_OBJS := $(TEST_SRCS:%=$(BUILD_DIR)/%.o) $(filter-out build/src/main.cpp.o, $(OBJS))
vpath %.cpp $(SRC_DIRS)

.PHONY: all
all: $(BUILD_DIR)/$(TARGET_EXEC)
.PHONY: test
test: $(BUILD_DIR)/$(TEST_EXEC)

# Target file
$(BUILD_DIR)/$(TARGET_EXEC): $(OBJS)
	$(CXX) $(CXXFLAGS) $(OBJS) -o $@ $(LIBS) $(LDFLAGS)

# Test file
$(BUILD_DIR)/$(TEST_EXEC): $(TEST_OBJS)
	$(CXX) $(CXXFLAGS) $(TEST_OBJS) -o $@ $(LIBS) $(LDFLAGS)

# Source files
$(BUILD_DIR)/%.cpp.o: %.cpp
	$(MKDIR_P) $(dir $@)
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@

.PHONY: clean
clean:
	$(RM) -r $(BUILD_DIR)

# Pull in generated dependency files; '-' ignores them if not yet built
-include $(DEPS)
MKDIR_P ?= mkdir -p
.DELETE_ON_ERROR:
================================================
FILE: src/robot/README.md
================================================
# **Warning! This is probably out of date and might be inaccurate. Use at your own risk.**
## Libraries needed for installation on Raspberry Pi
Required header libraries at src/robot/lib
- ArduinoJson
- Catch (for tests)
- Eigen
- plog
- marvelmind library, compiled as .a - https://marvelmind.com/download/ I used 2020_04_10_C_example. This required the addition of `#include ` in `marvelmind.h` to compile correctly
Required system libraries at /usr/local/include
- libSerial: https://github.com/crayzeewulf/libserial (have to build from source due to outdated apt package)
Required libraries at /usr/local/lib
- libserial.a
- opencv: (following instructions here: https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html and fix from here: https://github.com/cggos/DIPDemoQt/issues/1)
Required libraries installed with apt-get
- libconfig++-dev
## Build and run
Go to `DominoRobot/src/robot` folder and run `make` to build. You can build tests with `make test` and remove all outputs with `make clean`.
Run main or test using `build/robot-main` or `build/test-main` respectively (the names set by `TARGET_EXEC` and `TEST_EXEC` in the Makefile). The Raspberry Pi on the robot has these aliased to `run_robot` and `run_test` for ease of use.
================================================
FILE: src/robot/src/KalmanFilter.cpp
================================================
#include "KalmanFilter.h"
// Construct a filter from explicit system matrices:
//   A: state transition, B: control input, C: observation,
//   Q: process noise covariance, R: measurement noise covariance.
// Delegates to the size-only constructor (n from A's rows, m from C's rows)
// so the remaining internals (P, K, S, I) get sane defaults, then overwrites
// the system matrices with the caller's values.
KalmanFilter::KalmanFilter(
    Eigen::MatrixXf A,
    Eigen::MatrixXf B,
    Eigen::MatrixXf C,
    Eigen::MatrixXf Q,
    Eigen::MatrixXf R )
: KalmanFilter(A.rows(), C.rows())
{
    A_ = A;
    B_ = B;
    C_ = C;
    Q_ = Q;
    R_ = R;
}
// Size-only constructor: n = state dimension, m = measurement dimension.
// Initializes an identity system (A = I, B = 0, C = 0) with identity
// covariances and a zero state; callers can overwrite via the other ctor.
KalmanFilter::KalmanFilter(int n, int m)
: n_(n),
  m_(m),
  A_(Eigen::MatrixXf::Identity(n,n)),
  B_(Eigen::MatrixXf::Zero(n,n)),
  C_(Eigen::MatrixXf::Zero(m,n)),
  I_(Eigen::MatrixXf::Identity(n,n)),
  K_(Eigen::MatrixXf::Zero(n,m)),
  P_(Eigen::MatrixXf::Identity(n,n)),
  Q_(Eigen::MatrixXf::Identity(n,n)),
  R_(Eigen::MatrixXf::Identity(m,m)),
  S_(Eigen::MatrixXf::Identity(m,m)),
  x_hat_(Eigen::VectorXf::Zero(n))
{}
// Prediction step with control input u:
//   x_hat = A*x_hat + B*u   (propagate state through the system model)
//   P     = A*P*A^T + Q     (grow covariance by the process noise)
void KalmanFilter::predict(const Eigen::VectorXf& u)
{
    x_hat_ = A_ * x_hat_ + B_ * u;
    P_ = A_ * P_ * A_.transpose() + Q_;
}
// Update step with a per-call measurement covariance. Note this overwrites
// the stored R_, so later calls to update(y) reuse this covariance.
void KalmanFilter::update(const Eigen::VectorXf& y, Eigen::MatrixXf R)
{
    R_ = R;
    update(y);
}
// Update step: fuse measurement y into the state estimate.
//   S = C*P*C^T + R           (innovation covariance)
//   K = P*C^T*S^-1            (Kalman gain)
//   x_hat += K*(y - C*x_hat)  (correct state by weighted innovation)
//   P = (I - K*C)*P           (shrink covariance)
void KalmanFilter::update(const Eigen::VectorXf& y)
{
    S_ = C_ * P_ * C_.transpose() + R_;
    K_ = P_ * C_.transpose() * S_.inverse();
    x_hat_ = x_hat_ + K_ * (y - C_ * x_hat_);
    P_ = (I_ - K_ * C_) * P_;
}
================================================
FILE: src/robot/src/KalmanFilter.h
================================================
/**
* Kalman filter loosely based on
* https://github.com/hmartiro/kalman-cpp
* and
* https://github.com/tysik/kalman_filters
*/
#ifndef KalmanFilter_h
#define KalmanFilter_h
#include
class KalmanFilter
{
  public:
    // Construct from explicit system matrices:
    //   A: state transition, B: control input, C: observation,
    //   Q: process noise covariance, R: measurement noise covariance
    KalmanFilter(
        Eigen::MatrixXf A,
        Eigen::MatrixXf B,
        Eigen::MatrixXf C,
        Eigen::MatrixXf Q,
        Eigen::MatrixXf R
    );

    // Size only simplify initializing in a class
    // (n = state dimension, m = measurement dimension; defaults to identity system)
    KalmanFilter(int n, int m);

    // Prediction step with input u
    void predict(const Eigen::VectorXf& u);

    // Update the estimated state based on measured values.
    void update(const Eigen::VectorXf& y);
    // Same, but with a one-off measurement covariance R (R is stored for later updates)
    void update(const Eigen::VectorXf& y, Eigen::MatrixXf R);

    // Get current state estimate
    Eigen::VectorXf state() { return x_hat_; };
    // Get current estimate covariance
    Eigen::MatrixXf covariance() { return P_; };
    // Overwrite the estimate covariance (e.g. to reset part of it)
    void update_covariance(Eigen::MatrixXf P) {P_ = P;}

  private:
    // System dimensions
    int n_;  // state dimension
    int m_;  // measurement dimension

    // Matrices for computation
    Eigen::MatrixXf A_;  // state transition
    Eigen::MatrixXf B_;  // control input
    Eigen::MatrixXf C_;  // observation
    Eigen::MatrixXf I_;  // identity (n x n)
    Eigen::MatrixXf K_;  // Kalman gain
    Eigen::MatrixXf P_;  // estimate covariance
    Eigen::MatrixXf Q_;  // process noise covariance
    Eigen::MatrixXf R_;  // measurement noise covariance
    Eigen::MatrixXf S_;  // innovation covariance

    // Estimated state
    Eigen::VectorXf x_hat_;
};
#endif //KalmanFilter_h
================================================
FILE: src/robot/src/Localization.cpp
================================================
#include "Localization.h"
#include
#include
#include
#include "constants.h"
// Loads all localization parameters from the global config and builds a
// 3-state (x, y, angle) Kalman filter: identity transition/input/observation
// matrices with config-driven process (Q) and measurement (R) covariances.
Localization::Localization()
: pos_(0,0,0),
  vel_(0,0,0),
  mm_x_offset_(cfg.lookup("localization.mm_x_offset")),
  mm_y_offset_(cfg.lookup("localization.mm_y_offset")),
  variance_ref_trans_(cfg.lookup("localization.variance_ref_trans")),
  variance_ref_angle_(cfg.lookup("localization.variance_ref_angle")),
  meas_trans_cov_(cfg.lookup("localization.kf_meas_trans_cov")),
  meas_angle_cov_(cfg.lookup("localization.kf_meas_angle_cov")),
  localization_uncertainty_scale_(cfg.lookup("localization.kf_uncertainty_scale")),
  min_vel_uncertainty_(cfg.lookup("localization.min_vel_uncertainty")),
  vel_uncertainty_slope_(cfg.lookup("localization.vel_uncertainty_slope")),
  max_vel_uncetainty_(cfg.lookup("localization.max_vel_uncetainty")),
  vel_uncertainty_decay_time_(cfg.lookup("localization.vel_uncertainty_decay_time")),
  metrics_(),
  time_since_last_motion_(),
  kf_(3,3)
{
    // Simple kinematic model: the prediction input is (velocity * dt), so
    // A = B = C = I on the (x, y, a) state.
    Eigen::MatrixXf A = Eigen::MatrixXf::Identity(3,3);
    Eigen::MatrixXf B = Eigen::MatrixXf::Identity(3,3);
    Eigen::MatrixXf C = Eigen::MatrixXf::Identity(3,3);
    // Process noise: same variance for x/y, separate one for angle
    Eigen::MatrixXf Q(3,3);
    float predict_trans_cov = cfg.lookup("localization.kf_predict_trans_cov");
    float predict_angle_cov = cfg.lookup("localization.kf_predict_angle_cov");
    Q << predict_trans_cov,0,0,
         0,predict_trans_cov,0,
         0,0,predict_angle_cov;
    // Base measurement noise (inflated per-reading in updatePositionReading)
    Eigen::MatrixXf R(3,3);
    R << meas_trans_cov_,0,0,
         0,meas_trans_cov_,0,
         0,0,meas_angle_cov_;
    kf_ = KalmanFilter(A,B,C,Q,R);
}
// Incorporates an absolute global position measurement (from the marvelmind
// beacons) into the Kalman filter, after shifting it from the beacon-pair
// center to the robot's center of rotation. The measurement covariance is
// inflated based on recent motion, since the beacons are less accurate
// while the robot is moving.
void Localization::updatePositionReading(Point global_position)
{
    // Guard against callers passing degrees (or otherwise unwrapped angles)
    if (fabs(global_position.a) > 3.2)
    {
        PLOGE << "INVALID ANGLE - should be in radians between +/- pi";
        return;
    }

    // The x,y,a here is from the center of the marvlemind pair, so we need to transform it to the actual center of the
    // robot (i.e. center of rotation)
    Eigen::Vector3f raw_measured_position = {global_position.x, global_position.y, global_position.a};
    Eigen::Vector3f adjusted_measured_position = marvelmindToRobotCenter(raw_measured_position);

    // Generate an uncertainty based on the current velocity and time since motion since we know the beacons are less accurate when moving
    const float position_uncertainty = computePositionUncertainty();
    metrics_.last_position_uncertainty = position_uncertainty;
    // Measurement covariance: base covariance plus the scaled motion uncertainty on every diagonal entry
    Eigen::MatrixXf R(3,3);
    R << meas_trans_cov_ + position_uncertainty*localization_uncertainty_scale_,0,0,
         0,meas_trans_cov_+ position_uncertainty*localization_uncertainty_scale_,0,
         0,0,meas_angle_cov_+ position_uncertainty*localization_uncertainty_scale_;
    // PLOGI << "position_uncertainty: " << position_uncertainty;
    // PLOGI << "R: " << R;
    // PLOGI << "cov1: " << kf_.covariance();
    kf_.update(adjusted_measured_position, R);

    // Pull the fused estimate back out; keep heading wrapped to +/- pi
    Eigen::VectorXf est = kf_.state();
    pos_.x = est[0];
    pos_.y = est[1];
    pos_.a = wrap_angle(est[2]);
    // PLOGI << "cov2: " << kf_.covariance();

    PLOGI_(LOCALIZATION_LOG_ID).printf("\nPosition update:");
    PLOGI_(LOCALIZATION_LOG_ID).printf(" Raw input: [%4.3f, %4.3f, %4.3f]",
        raw_measured_position(0), raw_measured_position(1), raw_measured_position(2));
    PLOGI_(LOCALIZATION_LOG_ID).printf(" Adjusted input: [%4.3f, %4.3f, %4.3f]",
        adjusted_measured_position(0), adjusted_measured_position(1), adjusted_measured_position(2));
    PLOGI_(LOCALIZATION_LOG_ID).printf(" position_uncertainty: %4.3f", position_uncertainty);
    PLOGI_(LOCALIZATION_LOG_ID).printf(" Current position: %s\n", pos_.toString().c_str());
}
// Incorporates an odometry velocity measurement: rotates the robot-frame
// cartesian velocity into the global frame using the current heading
// estimate, runs the filter's prediction step over timestep dt, and
// refreshes the covariance-based confidence metrics.
void Localization::updateVelocityReading(Velocity local_cart_vel, float dt)
{
    // Convert local cartesian velocity to global cartesian velocity using the last estimated angle
    float cA = cos(pos_.a);
    float sA = sin(pos_.a);
    vel_.vx = cA * local_cart_vel.vx - sA * local_cart_vel.vy;
    vel_.vy = sA * local_cart_vel.vx + cA * local_cart_vel.vy;
    vel_.va = local_cart_vel.va;

    // Apply prediction step with velocity to get estimated position
    Eigen::Vector3f u = {vel_.vx, vel_.vy, vel_.va};
    Eigen::Vector3f udt = dt*u;
    kf_.predict(udt);
    // PLOGI << "cov3: " << kf_.covariance();
    time_since_last_motion_.reset();

    Eigen::VectorXf est = kf_.state();
    pos_.x = est[0];
    pos_.y = est[1];
    pos_.a = wrap_angle(est[2]);

    // Using the covariance matrix from kf, using those values to estimate a fractional 'confidence'
    // in our positioning relative to some reference amout.
    // As long as the variance isn't larger than the reference values these
    // fall between 0-1, with 1 meaning low variance (most confident) and
    // 0 meaning variance at/above the reference (least confident).
    Eigen::MatrixXf cov = kf_.covariance();
    metrics_.confidence_x = std::max(1-cov(0,0)/variance_ref_trans_, 0.0f);
    metrics_.confidence_y = std::max(1-cov(1,1)/variance_ref_trans_, 0.0f);
    metrics_.confidence_a = std::max(1-cov(2,2)/variance_ref_angle_, 0.0f);
    metrics_.total_confidence = (metrics_.confidence_x + metrics_.confidence_y + metrics_.confidence_a)/3;
}
// Transforms a pose measured at the midpoint of the marvelmind beacon pair
// into the pose of the robot's center of rotation. The configured offsets are
// in millimeters, so they are scaled to meters, rotated into the global frame
// by the measured heading, and subtracted from the raw measurement.
Eigen::Vector3f Localization::marvelmindToRobotCenter(Eigen::Vector3f mm_global_position)
{
    const float heading = mm_global_position(2);
    const float c = cos(heading);
    const float s = sin(heading);

    // Beacon-pair offset from the robot center, converted mm -> m (angle unchanged)
    const Eigen::Vector3f offset_robot_frame = {mm_x_offset_/1000.0f, mm_y_offset_/1000.0f, 0.0f};

    // Planar rotation about z by the measured heading
    Eigen::Matrix3f global_from_robot;
    global_from_robot << c, -s, 0.0f,
                         s, c, 0.0f,
                         0.0f, 0.0f, 1.0f;

    return mm_global_position - global_from_robot * offset_robot_frame;
}
// Computes an uncertainty value for the current position reading based on how
// fast the robot is moving (the marvelmind beacons are less accurate while in
// motion). While moving, the uncertainty grows linearly with speed from
// min_vel_uncertainty_, capped at max_vel_uncetainty_. Once stopped, it
// decays linearly to zero over vel_uncertainty_decay_time_ seconds.
float Localization::computePositionUncertainty()
{
    Eigen::Vector3f v = {vel_.vx, vel_.vy, vel_.va};
    float total_v = v.norm();
    float position_uncertainty = 0.0f;
    if(total_v > 0.01)
    {
        // Bug fix: this previously used std::max, which turned the configured
        // maximum into a floor (the result was always >= max_vel_uncetainty_,
        // making the min/slope terms irrelevant below it). std::min clamps
        // the velocity-based uncertainty at the maximum as intended.
        position_uncertainty = std::min(min_vel_uncertainty_ + vel_uncertainty_slope_*total_v, max_vel_uncetainty_);
    }
    else
    {
        // Stopped: decay the uncertainty linearly to zero over the decay time
        position_uncertainty = std::max(min_vel_uncertainty_ * (vel_uncertainty_decay_time_ - time_since_last_motion_.dt_s()) / vel_uncertainty_decay_time_, 0.0f);
    }
    PLOGD_(LOCALIZATION_LOG_ID).printf("\nPosition Uncertainty:");
    PLOGD_(LOCALIZATION_LOG_ID).printf(" Total v: %4.3f, time since motion: %4.3f, position_uncertainty: %4.3f", total_v, time_since_last_motion_.dt_s(), position_uncertainty);
    return position_uncertainty;
}
void Localization::resetAngleCovariance()
{
Eigen::MatrixXf covariance = kf_.covariance();
covariance(2,2) = variance_ref_angle_;
kf_.update_covariance(covariance);
}
================================================
FILE: src/robot/src/Localization.h
================================================
#ifndef Localization_h
#define Localization_h
#include "utils.h"
#include
#include "KalmanFilter.h"
// Fuses absolute marvelmind beacon position readings with odometry velocity
// readings via a 3-state (x, y, angle) Kalman filter.
class Localization
{
  public:
    Localization();

    // Incorporate an absolute position measurement (global frame).
    // The angle must be in radians within +/- pi.
    void updatePositionReading(Point global_position);

    // Incorporate a robot-frame cartesian velocity measurement over timestep dt (seconds)
    void updateVelocityReading(Velocity local_cart_vel, float dt);

    // Latest fused position estimate (global frame)
    Point getPosition() {return pos_; };

    // Latest velocity estimate (global frame)
    Velocity getVelocity() {return vel_; };

    // Zero out the velocity estimate (e.g. when the robot is known to be stopped)
    void forceZeroVelocity() {vel_ = {0,0,0}; };

    // Force the position to a specific value, bypassing localization algorithms (used for testing/debugging)
    void forceSetPosition(Point global_position) {pos_ = global_position;};

    // Confidence/uncertainty metrics derived from the filter covariance
    LocalizationMetrics getLocalizationMetrics() { return metrics_; };

    // Reset the filter's angle variance back to its reference value
    void resetAngleCovariance();

  private:
    // Convert position reading from marvelmind frame to robot frame
    Eigen::Vector3f marvelmindToRobotCenter(Eigen::Vector3f mm_global_position);

    // Compute a multiplier based on the current velocity that informs how trustworthy the current reading might be
    float computePositionUncertainty();

    // Current position and velocity
    Point pos_;
    Velocity vel_;

    // Parameters for localization algorithms (all loaded from config)
    float mm_x_offset_;                    // beacon-pair x offset from robot center, mm
    float mm_y_offset_;                    // beacon-pair y offset from robot center, mm
    float variance_ref_trans_;             // reference translational variance for confidence metric
    float variance_ref_angle_;             // reference angular variance for confidence metric
    float meas_trans_cov_;                 // base measurement covariance, translation
    float meas_angle_cov_;                 // base measurement covariance, angle
    float localization_uncertainty_scale_; // scale applied to motion-based uncertainty
    float min_vel_uncertainty_;            // base uncertainty while moving
    float vel_uncertainty_slope_;          // uncertainty growth per unit speed
    float max_vel_uncetainty_;             // maximum velocity-based uncertainty (name typo preserved from config)
    float vel_uncertainty_decay_time_;     // seconds for uncertainty to decay after stopping
    LocalizationMetrics metrics_;
    Timer time_since_last_motion_;         // time since last velocity update
    KalmanFilter kf_;
};
#endif //Localization_h
================================================
FILE: src/robot/src/MarvelmindWrapper.cpp
================================================
#include "MarvelmindWrapper.h"
#include "constants.h"
#include "plog/Log.h"
#include
// Finds the marvelmind hedge on one of the known USB device paths and starts
// the vendor library's background polling. If no device path exists, ready_
// stays false and getPositions() will return no data.
MarvelmindWrapper::MarvelmindWrapper()
: hedge_(createMarvelmindHedge()),
  ready_(false)
{
    // I expect 2 devices to be connected, but because of the fun of linux and udev rules, I don't know
    // which of the three possible names the devices could have. I could later change this to first
    // do an `ls /dev/ | grep marvelmind` and find the right ports, but this should work fine too.
    if(std::filesystem::exists(MARVELMIND_USB_0))
    {
        hedge_->ttyFileName = MARVELMIND_USB_0;
        PLOGI << "Found device " << MARVELMIND_USB_0;
    }
    else if(std::filesystem::exists(MARVELMIND_USB_1))
    {
        hedge_->ttyFileName = MARVELMIND_USB_1;
        PLOGI << "Found device " << MARVELMIND_USB_1;
    }
    else if(std::filesystem::exists(MARVELMIND_USB_2))
    {
        hedge_->ttyFileName = MARVELMIND_USB_2;
        PLOGI << "Found device " << MARVELMIND_USB_2;
    }
    else
    {
        // Leave ready_ false so the rest of the class degrades gracefully
        PLOGW << "No marvelmind devices connected";
        return;
    }

    // Now actually try to connect
    hedge_->verbose = true;
    startMarvelmindHedge(hedge_);
    ready_ = true;
}
// Polls the hedge for the latest position sample. Returns an empty vector
// when the device isn't ready or no valid sample is available; otherwise
// returns {x, y, angle} with x/y converted from millimeters to meters.
// NOTE(review): the std::vector element type appears to have been lost in
// extraction here (likely <float> or <double> given the /1000.0 division) -
// confirm against the original header.
std::vector MarvelmindWrapper::getPositions()
{
    PositionValue position_data;
    std::vector output;
    if (!ready_)
    {
        // No device was found at construction time
        return output;
    }
    bool ok = getPositionFromMarvelmindHedge(hedge_, &position_data);
    if (ok)
    {
        //PLOGI.printf("Position data - ready: %i, addr: %i, ts: %i, x: %i, y: %i, a:%f", position_data.ready, position_data.address, position_data.timestamp, position_data.x, position_data.y, position_data.angle);
        if (position_data.ready)
        {
            // Vendor reports x/y in mm; convert to meters
            output.push_back(position_data.x/1000.0);
            output.push_back(position_data.y/1000.0);
            output.push_back(position_data.angle);
        }
    }
    return output;
}
// Stops the background polling (only if it was actually started) and frees
// the vendor handle.
MarvelmindWrapper::~MarvelmindWrapper()
{
    if(ready_){
        stopMarvelmindHedge (hedge_);
    }
    destroyMarvelmindHedge (hedge_);
}
// MarvelmindWrapper::MarvelmindWrapper()
// {
// marvelmindAPILoad();
// PLOGI << "Marvelmind API loaded";
// // I expect to connect to 2 devices, but because of the fun of linux and udev rules, I don't know
// // which of the three possible names the devices could have. I could later change this to first
// // do an `ls /dev/ | grep marvelmind` and find the right ports, but this should work fine too.
// // char portName0[64] = MARVELMIND_USB_0;
// // char portName1[64] = MARVELMIND_USB_1;
// // char portName2[64] = MARVELMIND_USB_2;
// // PLOGI << portName0;
// // bool success0 = mmOpenPortByName(portName0);
// // bool success1 = mmOpenPortByName(portName1);
// // bool success2 = mmOpenPortByName(portName2);
// // PLOGI << "S: " << success0 << success1 << success2;
// // int success = static_cast(success0) + static_cast(success1) + static_cast(success2);
// // if(success == 2)
// // {
// // PLOGI << "Successfully connected to 2 Marvelmind devices";
// // }
// // else if (success == 1)
// // {
// // PLOGW << "Only able to connect to 1 Marvelmind device";
// // }
// // else if (success == 0)
// // {
// // PLOGW << "Unable to connect to any Marvelmind devices";
// // }
// // else
// // {
// // PLOGE << "Managed to connect to " << success << "Marvelmind devices. That shouldn't happen!";
// // }
// // Open loop
// bool open = false;
// int count = 0;
// while(!open && count < 10000)
// {
// open = mmOpenPort();
// count++;
// usleep(1000);
// }
// PLOGI << "Open status: " << open;
// // Get usb loop
// bool usb_ready = false;
// count = 0;
// MarvelmindDeviceVersion usbDevVersion;
// while(!usb_ready && count < 10000)
// {
// usb_ready = mmGetVersionAndId(MM_USB_DEVICE_ADDRESS, &usbDevVersion);
// count++;
// usleep(1000);
// }
// PLOGI <<"USB ready: " << usb_ready;
// if(usb_ready)
// {
// PLOGI << "Device type: " << (usbDevVersion.fwVerDeviceType);
// PLOGI << "Device is hedge: " << mmDeviceIsHedgehog(usbDevVersion.fwVerDeviceType);
// }
// // bool device_list_ok = false;
// // count = 0;
// // MarvelmindDevicesList device_list;
// // while(!device_list_ok && count < 10)
// // {
// // device_list_ok = mmGetDevicesList(&device_list);
// // count++;
// // PLOGI << count;
// // usleep(1000);
// // }
// // PLOGI << "Device list status: " << device_list_ok;
// // if(!device_list_ok)
// // {
// // PLOGW << "Could not retrieve list of Marvelmind devices";
// // }
// // else
// // {
// // for(int i = 0; i < device_list.numDevices; i++)
// // {
// // MarvelmindDeviceInfo data = device_list.devices[i];
// // PLOGI << "Found device " << data.address;
// // // if(data.address == MARVELMIND_DEVICE_ID0 || data.address == MARVELMIND_DEVICE_ID1)
// // // {
// // // PLOGI << "Found device " << data.address;
// // // if (data.isSleeping)
// // // {
// // // PLOGI << "Device " << data.address << " is sleeping, triggering wake up";
// // // mmWakeDevice(data.address);
// // // }
// // // else
// // // {
// // // PLOGI << "Device " << data.address << " is already awake";
// // // }
// // // }
// // }
// // }
// }
================================================
FILE: src/robot/src/MarvelmindWrapper.h
================================================
#ifndef MarvelmindWrapper_h
#define MarvelmindWrapper_h
#include
extern "C" {
#include
}
// Thin C++ wrapper around the Marvelmind indoor-GPS C API (pulled in via extern "C").
// Owns the hedge handle for the lifetime of the object.
class MarvelmindWrapper
{
public:
MarvelmindWrapper();
~MarvelmindWrapper();
// Returns the latest position readings from the Marvelmind beacons.
// NOTE(review): the vector's element type was lost in extraction — confirm against the .cpp.
std::vector getPositions();
private:
MarvelmindHedge* hedge_; // Handle from the C API; presumably released in the destructor — confirm.
bool ready_; // Set once the device is usable — TODO confirm exact meaning in the .cpp.
};
#endif
================================================
FILE: src/robot/src/RobotController.cpp
================================================
#include "RobotController.h"
#include
#include
#include
#include "constants.h"
#include "serial/SerialCommsFactory.h"
#include "robot_controller_modes/RobotControllerModePosition.h"
#include "robot_controller_modes/RobotControllerModeVision.h"
#include "robot_controller_modes/RobotControllerModeStopFast.h"
// Constructs the controller: acquires the serial link to the ClearCore motor
// driver, loads loop/logging rates and motion flags from the global config,
// and seeds the velocity clamp with the coarse limits.
// Note: initializer order must match the member declaration order in the header.
RobotController::RobotController(StatusUpdater& statusUpdater)
: statusUpdater_(statusUpdater),
serial_to_motor_driver_(SerialCommsFactory::getFactoryInstance()->get_serial_comms(CLEARCORE_USB)),
localization_(),
prevOdomLoopTimer_(),
cartPos_(),
cartVel_(),
trajRunning_(false),
limits_mode_(LIMITS_MODE::FINE),
controller_rate_(cfg.lookup("motion.controller_frequency")),
logging_rate_(cfg.lookup("motion.log_frequency")),
log_this_cycle_(false),
fake_perfect_motion_(cfg.lookup("motion.fake_perfect_motion")),
fake_local_cart_vel_(0,0,0),
// Default clamp is the coarse limit set; setCartVelLimits() retunes it per move.
max_cart_vel_limit_({cfg.lookup("motion.translation.max_vel.coarse"),
cfg.lookup("motion.translation.max_vel.coarse"),
cfg.lookup("motion.rotation.max_vel.coarse")}),
loop_time_averager_(20) // 20-sample averaging window — confirm against TimeRunningAverage
{
if(fake_perfect_motion_) PLOGW << "Fake robot motion enabled";
}
void RobotController::moveToPosition(float x, float y, float a)
{
reset_last_motion_logger();
limits_mode_ = LIMITS_MODE::COARSE;
setCartVelLimits(limits_mode_);
Point goal_pos = Point(x,y,a);
PLOGI_(MOTION_CSV_LOG_ID).printf("MoveToPosition: %s",goal_pos.toString().c_str());
auto position_mode = std::make_unique(fake_perfect_motion_);
bool ok = position_mode->startMove(cartPos_, goal_pos, limits_mode_);
if (ok)
{
startTraj();
controller_mode_ = std::move(position_mode);
}
else { statusUpdater_.setErrorStatus(); }
}
void RobotController::moveToPositionRelative(float dx_local, float dy_local, float da_local)
{
reset_last_motion_logger();
limits_mode_ = LIMITS_MODE::COARSE;
setCartVelLimits(limits_mode_);
float dx_global = cos(cartPos_.a) * dx_local - sin(cartPos_.a) * dy_local;
float dy_global = sin(cartPos_.a) * dx_local + cos(cartPos_.a) * dy_local;
float da_global = da_local;
Point goal_pos = Point(cartPos_.x + dx_global, cartPos_.y + dy_global, wrap_angle(cartPos_.a + da_global));
PLOGI_(MOTION_CSV_LOG_ID).printf("MoveToPositionRelative: %s",goal_pos.toString().c_str());
auto position_mode = std::make_unique(fake_perfect_motion_);
bool ok = position_mode->startMove(cartPos_, goal_pos, limits_mode_);
if (ok)
{
startTraj();
controller_mode_ = std::move(position_mode);
}
else { statusUpdater_.setErrorStatus(); }
}
void RobotController::moveToPositionRelativeSlow(float dx_local, float dy_local, float da_local)
{
reset_last_motion_logger();
limits_mode_ = LIMITS_MODE::SLOW;
setCartVelLimits(limits_mode_);
float dx_global = cos(cartPos_.a) * dx_local - sin(cartPos_.a) * dy_local;
float dy_global = sin(cartPos_.a) * dx_local + cos(cartPos_.a) * dy_local;
float da_global = da_local;
Point goal_pos = Point(cartPos_.x + dx_global, cartPos_.y + dy_global, wrap_angle(cartPos_.a + da_global));
PLOGI_(MOTION_CSV_LOG_ID).printf("MoveToPositionRelativeSlow: %s",goal_pos.toString().c_str());
auto position_mode = std::make_unique(fake_perfect_motion_);
bool ok = position_mode->startMove(cartPos_, goal_pos, limits_mode_);
if (ok)
{
startTraj();
controller_mode_ = std::move(position_mode);
}
else { statusUpdater_.setErrorStatus(); }
}
void RobotController::moveToPositionFine(float x, float y, float a)
{
reset_last_motion_logger();
limits_mode_ = LIMITS_MODE::FINE;
setCartVelLimits(limits_mode_);
Point goal_pos = Point(x,y,a);
PLOGI_(MOTION_CSV_LOG_ID).printf("MoveToPositionFine: %s",goal_pos.toString().c_str());
auto position_mode = std::make_unique(fake_perfect_motion_);
bool ok = position_mode->startMove(cartPos_, goal_pos, limits_mode_);
if (ok)
{
startTraj();
controller_mode_ = std::move(position_mode);
}
else { statusUpdater_.setErrorStatus(); }
}
// Constant-velocity moves are not supported by this controller; parameters are
// intentionally discarded and an error is logged.
void RobotController::moveConstVel(float vx , float vy, float va, float t)
{
    (void) vx;
    (void) vy;
    (void) va;
    (void) t;
    PLOGE << "Not implemented"; // fixed typo: was "implimented"
}
void RobotController::moveWithVision(float x, float y, float a)
{
reset_last_motion_logger();
limits_mode_ = LIMITS_MODE::VISION;
setCartVelLimits(limits_mode_);
Point goal = Point(x,y,a);
PLOGI_(MOTION_CSV_LOG_ID).printf("MoveWithVision: %s",goal.toString().c_str());
auto vision_mode = std::make_unique(fake_perfect_motion_, statusUpdater_);
bool ok = vision_mode->startMove(goal);
if (ok)
{
startTraj();
controller_mode_ = std::move(vision_mode);
}
else { statusUpdater_.setErrorStatus(); }
}
void RobotController::stopFast()
{
reset_last_motion_logger();
limits_mode_ = LIMITS_MODE::FINE;
setCartVelLimits(limits_mode_);
PLOGI_(MOTION_CSV_LOG_ID).printf("Stop Fast");
auto stop_fast_mode = std::make_unique(fake_perfect_motion_);
stop_fast_mode->startMove(cartPos_, cartVel_);
startTraj();
controller_mode_ = std::move(stop_fast_mode);
}
// Marks a trajectory as active and powers the motors so update() starts
// streaming velocity commands on the next cycle.
void RobotController::startTraj()
{
trajRunning_ = true;
enableAllMotors();
PLOGI.printf("Starting move");
}
void RobotController::estop()
{
PLOGW.printf("Estopping robot control");
PLOGD_(MOTION_LOG_ID) << "\n====ESTOP====\n";
trajRunning_ = false;
limits_mode_ = LIMITS_MODE::FINE;
disableAllMotors();
}
// Main control loop tick. Rate-limited internally; call as fast as possible.
// Computes the target velocity from the active controller mode (or zero when
// idle), sends it to the motors, integrates odometry, and publishes status.
void RobotController::update()
{
// Ensures controller runs at approximately constant rate
if (!controller_rate_.ready()) { return; }
// Ensures logging doesn't get out of hand
log_this_cycle_ = logging_rate_.ready();
// Create a command based on the trajectory or not moving
Velocity target_vel;
if(trajRunning_)
{
target_vel = controller_mode_->computeTargetVelocity(cartPos_, cartVel_, log_this_cycle_);
// Check if we are finished with the trajectory
if (controller_mode_->checkForMoveComplete(cartPos_, cartVel_))
{
PLOGI.printf("Reached goal");
PLOGD_(MOTION_LOG_ID) << "Trajectory complete";
// Zero the command and cut power before clearing the running flag.
target_vel = {0,0,0};
disableAllMotors();
trajRunning_ = false;
// Bit of a hack for reseting angle covariance after vision motion
// This avoids weird rotations from error accumlated in the kalman filter
if (limits_mode_ == LIMITS_MODE::VISION)
{
localization_.resetAngleCovariance();
}
// Re-enable fine mode at the end of a trajectory
limits_mode_ = LIMITS_MODE::FINE;
}
}
else
{
// Force zero velocity
target_vel = {0,0,0};
// Force current velocity to 0
localization_.forceZeroVelocity();
// Force flags to default values
limits_mode_ = LIMITS_MODE::FINE;
}
// Run controller and odometry update
setCartVelCommand(target_vel);
computeOdometry();
// Update status
statusUpdater_.updatePosition(cartPos_.x, cartPos_.y, cartPos_.a);
statusUpdater_.updateVelocity(cartVel_.vx, cartVel_.vy, cartVel_.va);
loop_time_averager_.mark_point();
statusUpdater_.updateControlLoopTime(loop_time_averager_.get_ms());
statusUpdater_.updateLocalizationMetrics(localization_.getLocalizationMetrics());
}
// Power on the drive motors via the motor driver serial link.
// Skips (with a warning) when the driver is not connected.
void RobotController::enableAllMotors()
{
    if (!serial_to_motor_driver_->isConnected())
    {
        PLOGW << "No connection: Skipping motor enable";
        return;
    }
    serial_to_motor_driver_->send("base:Power:ON");
    PLOGI << "Motors enabled";
    PLOGD_(MOTION_LOG_ID) << "Motors enabled";
}
// Cut power to the drive motors via the motor driver serial link; motors
// coast to a stop. Skips (with a warning) when the driver is not connected.
void RobotController::disableAllMotors()
{
    if (!serial_to_motor_driver_->isConnected())
    {
        PLOGW << "No connection: Skipping motor disable";
        return;
    }
    serial_to_motor_driver_->send("base:Power:OFF");
    PLOGI << "Motors disabled";
    PLOGD_(MOTION_LOG_ID) << "Motors disabled";
}
// Feed a Marvelmind position measurement into the localization filter.
// Measurements are only fused in FINE/COARSE modes; in other modes (e.g.
// vision moves) they are recorded for status but ignored by the filter.
void RobotController::inputPosition(float x, float y, float a)
{
    const bool use_measurement =
        limits_mode_ == LIMITS_MODE::FINE || limits_mode_ == LIMITS_MODE::COARSE;
    if (use_measurement)
    {
        localization_.updatePositionReading({x, y, a});
        cartPos_ = localization_.getPosition();
    }
    statusUpdater_.updateLastMarvelmindPose({x, y, a}, use_measurement);
}
// Overwrite the localization state with an exact pose, bypassing the filter.
// Intended for testing/debugging only.
void RobotController::forceSetPosition(float x, float y, float a)
{
localization_.forceSetPosition({x,y,a});
cartPos_ = localization_.getPosition();
}
// Select the safety velocity clamp (used by setCartVelCommand) for the given
// limits mode. FINE and SLOW share the 'fine' limits; any other mode falls
// back to the 'coarse' limits.
void RobotController::setCartVelLimits(LIMITS_MODE limits_mode)
{
    const char* trans_key = "motion.translation.max_vel.coarse";
    const char* rot_key = "motion.rotation.max_vel.coarse";
    if (limits_mode == LIMITS_MODE::VISION)
    {
        trans_key = "motion.translation.max_vel.vision";
        rot_key = "motion.rotation.max_vel.vision";
    }
    else if (limits_mode == LIMITS_MODE::FINE || limits_mode == LIMITS_MODE::SLOW)
    {
        trans_key = "motion.translation.max_vel.fine";
        rot_key = "motion.rotation.max_vel.fine";
    }
    // The x and y limits share the translational config value.
    max_cart_vel_limit_ = { cfg.lookup(trans_key),
                            cfg.lookup(trans_key),
                            cfg.lookup(rot_key)};
}
// Poll the motor driver for a velocity feedback message. Fills decodedVelocity
// and returns true when a well-formed 3-element message was received; returns
// false when nothing arrived or the message failed to decode.
bool RobotController::readMsgFromMotorDriver(Velocity* decodedVelocity)
{
    std::string msg = "";
    // Restored missing template argument lost in extraction.
    std::vector<float> tmpVelocity = {0,0,0};
    if (serial_to_motor_driver_->isConnected())
    {
        // Retry a handful of times since the reply may not have arrived yet.
        int count = 0;
        while(msg.empty() && count++ < 10)
        {
            msg = serial_to_motor_driver_->rcv_base();
        }
    }
    if (msg.empty())
    {
        return false;
    }
    else
    {
        tmpVelocity = parseCommaDelimitedStringToFloat(msg);
        if(tmpVelocity.size() != 3)
        {
            PLOGW.printf("Decode failed");
            return false;
        }
    }
    decodedVelocity->vx = tmpVelocity[0];
    decodedVelocity->vy = tmpVelocity[1];
    decodedVelocity->va = tmpVelocity[2];
    return true;
}
// Integrate the latest local-frame velocity (real feedback from the motor
// driver, or the faked command when fake_perfect_motion_ is set) into the
// localization filter, then refresh the cached pose and velocity.
void RobotController::computeOdometry()
{
Velocity local_cart_vel = {0,0,0};
if(fake_perfect_motion_)
{
local_cart_vel = fake_local_cart_vel_;
}
// No feedback this cycle: skip the update entirely so dt keeps accumulating.
else if(!readMsgFromMotorDriver(&local_cart_vel)) { return; }
Velocity zero = {0,0,0};
// Only log when moving (or when feedback is unexpectedly nonzero while idle).
if(trajRunning_ || !(local_cart_vel == zero))
{
PLOGD_IF_(MOTION_LOG_ID, log_this_cycle_).printf("Decoded velocity: %.3f, %.3f, %.3f\n", local_cart_vel.vx, local_cart_vel.vy, local_cart_vel.va);
}
// Bypassing motor feedback and just using fake_local_cart_vel_ here may give better performance, but its hard to tell.
// Compute time since last odom update
float dt = prevOdomLoopTimer_.dt_s();
prevOdomLoopTimer_.reset();
localization_.updateVelocityReading(local_cart_vel, dt);
cartPos_ = localization_.getPosition();
cartVel_ = localization_.getVelocity();
}
// Convert a global-frame target velocity into the robot's local frame, clamp
// each axis to the active safety limit, and send the command to the motor
// driver (or capture it as the faked velocity when fake_perfect_motion_ is set).
void RobotController::setCartVelCommand(Velocity target_vel)
{
    if (trajRunning_)
    {
        PLOGD_IF_(MOTION_LOG_ID, log_this_cycle_).printf("CartVelCmd: [vx: %.4f, vy: %.4f, va: %.4f]", target_vel.vx, target_vel.vy, target_vel.va);
    }
    // Convert input global velocities to local velocities (rotate by -heading).
    Velocity local_cart_vel;
    float cA = cos(cartPos_.a);
    float sA = sin(cartPos_.a);
    local_cart_vel.vx = cA * target_vel.vx + sA * target_vel.vy;
    local_cart_vel.vy = -sA * target_vel.vx + cA * target_vel.vy;
    local_cart_vel.va = target_vel.va;
    // Cap velocity for safety; warn loudly since hitting this indicates the
    // planner/controller tried to exceed the configured limits.
    if(fabs(local_cart_vel.vx) > max_cart_vel_limit_.vx)
    {
        float clamped_vel = sgn(local_cart_vel.vx) * max_cart_vel_limit_.vx;
        PLOGW.printf("Attempted to command X velocity of %.3f m/s, clamping to %.3f m/s", local_cart_vel.vx, clamped_vel);
        PLOGW_(MOTION_LOG_ID).printf("Attempted to command X velocity of %.3f m/s, clamping to %.3f m/s", local_cart_vel.vx, clamped_vel);
        local_cart_vel.vx = clamped_vel;
    }
    if(fabs(local_cart_vel.vy) > max_cart_vel_limit_.vy)
    {
        float clamped_vel = sgn(local_cart_vel.vy) * max_cart_vel_limit_.vy;
        PLOGW.printf("Attempted to command Y velocity of %.3f m/s, clamping to %.3f m/s", local_cart_vel.vy, clamped_vel);
        PLOGW_(MOTION_LOG_ID).printf("Attempted to command Y velocity of %.3f m/s, clamping to %.3f m/s", local_cart_vel.vy, clamped_vel);
        local_cart_vel.vy = clamped_vel;
    }
    if(fabs(local_cart_vel.va) > max_cart_vel_limit_.va)
    {
        float clamped_vel = sgn(local_cart_vel.va) * max_cart_vel_limit_.va;
        PLOGW.printf("Attempted to command A velocity of %.3f rad/s, clamping to %.3f rad/s", local_cart_vel.va, clamped_vel);
        PLOGW_(MOTION_LOG_ID).printf("Attempted to command A velocity of %.3f rad/s, clamping to %.3f rad/s", local_cart_vel.va, clamped_vel);
        local_cart_vel.va = clamped_vel;
    }
    // Prep velocity data to send to motor driver.
    // snprintf (bounded) instead of sprintf protects against buffer overrun.
    char buff[100];
    snprintf(buff, sizeof(buff), "base:%.4f,%.4f,%.4f",local_cart_vel.vx, local_cart_vel.vy, local_cart_vel.va);
    std::string s = buff;
    if (local_cart_vel.vx != 0 || local_cart_vel.vy != 0 || local_cart_vel.va != 0 )
    {
        PLOGD_IF_(MOTION_LOG_ID, log_this_cycle_).printf("Sending to motors: [%s]", s.c_str());
    }
    if(fake_perfect_motion_)
    {
        fake_local_cart_vel_ = local_cart_vel;
    }
    else if (serial_to_motor_driver_->isConnected())
    {
        serial_to_motor_driver_->send(s);
    }
}
================================================
FILE: src/robot/src/RobotController.h
================================================
#ifndef RobotController_h
#define RobotController_h
#include "SmoothTrajectoryGenerator.h"
#include "StatusUpdater.h"
#include "serial/SerialComms.h"
#include "utils.h"
#include "Localization.h"
#include "robot_controller_modes/RobotControllerModeBase.h"
// Top-level motion controller for the robot base. Owns localization, the
// serial link to the motor driver, and the currently-active controller mode.
// update() must be called continuously; all move* methods only set up state.
class RobotController
{
public:
// Constructor
RobotController(StatusUpdater& statusUpdater);
// Command robot to move a specific position with low accuracy
void moveToPosition(float x, float y, float a);
// Command robot to move a specific position relative to current position with low accuracy
void moveToPositionRelative(float dx_local, float dy_local, float da_local);
// Command robot to move a specific position relative to current position with low speed
void moveToPositionRelativeSlow(float dx_local, float dy_local, float da_local);
// Command robot to move to a specific position with high accuracy
void moveToPositionFine(float x, float y, float a);
// Command robot to move with a constant velocity for some amount of time
// NOTE(review): not implemented in the .cpp — it only logs an error.
void moveConstVel(float vx , float vy, float va, float t);
// Command a camera-guided move to a specific position (VISION limits).
void moveWithVision(float x, float y, float a);
// Bring the robot to a controlled rapid stop from its current state.
void stopFast();
// Main update loop. Should be called as fast as possible
void update();
// Enable all motors at once
void enableAllMotors();
// Disable all motors at once. This will cause motors to coast to a stop
void disableAllMotors();
// Provide a position reading from the MarvelMind sensors
void inputPosition(float x, float y, float a);
// Force the position to a specific value, bypassing localization algorithms (used for testing/debugging)
void forceSetPosition(float x, float y, float a);
// Indicates if a trajectory is currently active
bool isTrajectoryRunning() { return trajRunning_; };
// Stops the currently running motion
void estop();
Point getCurrentPosition() { return cartPos_; };
private:
//Internal methods
// Set the global cartesian velocity command
void setCartVelCommand(Velocity target_vel);
// Update loop for motor objects
// NOTE(review): declared but no definition is visible in RobotController.cpp — possibly dead.
void updateMotors();
// Calculate wheel odometry
void computeOdometry();
// Sets up everything to start the trajectory running
void startTraj();
// Reads an incoming message from the motor driver and fills the decoded
// velocity in the pointer, if available. Returns true if velocity is filled, false otherwise
bool readMsgFromMotorDriver(Velocity* decodedVelocity);
// Selects max_cart_vel_limit_ from config for the given limits mode.
void setCartVelLimits(LIMITS_MODE limits_mode);
// Member variables
StatusUpdater& statusUpdater_; // Reference to status updater object to input status info about the controller
SerialCommsBase* serial_to_motor_driver_; // Serial connection to motor driver
Localization localization_; // Object that handles localization
Timer prevOdomLoopTimer_; // Timer for odom loop
Point cartPos_; // Current cartesian position
Velocity cartVel_; // Current cartesian velocity
bool trajRunning_; // If a trajectory is currently active
LIMITS_MODE limits_mode_; // Which limits mode is being used.
RateController controller_rate_; // Rate limit controller loops
RateController logging_rate_ ; // Rate limit logging to file
bool log_this_cycle_; // Trigger for logging this cycle
bool fake_perfect_motion_; // Flag used for testing to enable perfect motion without clearcore
Velocity fake_local_cart_vel_; // Commanded local cartesian velocity used to fake perfect motion
Velocity max_cart_vel_limit_; // Maximum velocity allowed, used to limit commanded velocity
TimeRunningAverage loop_time_averager_; // Handles keeping average of the loop timing
// Active controller mode (position / vision / stop-fast); null until a move starts.
std::unique_ptr controller_mode_;
};
#endif
================================================
FILE: src/robot/src/RobotServer.cpp
================================================
#include "RobotServer.h"
#include
#include
#include "sockets/SocketMultiThreadWrapperFactory.h"
#include
// Constructs the server: zero-initializes command payload buffers, resets the
// framing-parser state, and acquires the shared socket wrapper.
RobotServer::RobotServer(StatusUpdater& statusUpdater)
: moveData_(),
positionData_(),
velocityData_(),
statusUpdater_(statusUpdater),
recvInProgress_(false),
recvIdx_(0),
buffer_(""),
socket_(SocketMultiThreadWrapperFactory::getFactoryInstance()->get_socket())
{
}
// Parse one JSON message from the master and translate it into a COMMAND.
// Side effects: fills moveData_/positionData_/velocityData_ for commands that
// carry a payload, and always replies with an ack/err/status message.
// Returns COMMAND::NONE for query-only messages (status/check) and on errors.
COMMAND RobotServer::getCommand(std::string message)
{
    COMMAND cmd = COMMAND::NONE;
    StaticJsonDocument<256> doc;
    DeserializationError err = deserializeJson(doc, message);
    if(err)
    {
        printIncomingCommand(message);
        PLOGI.printf("Error parsing JSON: ");
        // Pass the error text as an argument, never as the format string
        // (a '%' in it would previously trigger undefined behavior).
        PLOGI.printf("%s", err.c_str());
        sendErr("bad_json");
        return cmd;
    }
    std::string type = doc["type"];
    // Parse an x/y/a payload into moveData_, log the message, and ack.
    auto handleMove = [&](COMMAND move_cmd)
    {
        cmd = move_cmd;
        moveData_.x = doc["data"]["x"];
        moveData_.y = doc["data"]["y"];
        moveData_.a = doc["data"]["a"];
        printIncomingCommand(message);
        sendAck(type);
    };
    // Parse an x/y/a payload into positionData_ and ack. These arrive at high
    // rate, so (matching original behavior) they are not logged.
    auto handlePose = [&](COMMAND pose_cmd)
    {
        cmd = pose_cmd;
        positionData_.x = doc["data"]["x"];
        positionData_.y = doc["data"]["y"];
        positionData_.a = doc["data"]["a"];
        sendAck(type);
    };
    // Payload-free command: log the message and ack.
    auto handleSimple = [&](COMMAND simple_cmd)
    {
        cmd = simple_cmd;
        printIncomingCommand(message);
        sendAck(type);
    };
    // Payload-free command that is acked but not logged.
    auto handleQuiet = [&](COMMAND quiet_cmd)
    {
        sendAck(type);
        cmd = quiet_cmd;
    };
    if(type == "move")                      { handleMove(COMMAND::MOVE); }
    else if(type == "move_rel")             { handleMove(COMMAND::MOVE_REL); }
    else if(type == "move_rel_slow")        { handleMove(COMMAND::MOVE_REL_SLOW); }
    else if(type == "move_fine")            { handleMove(COMMAND::MOVE_FINE); }
    else if(type == "move_fine_stop_vision"){ handleMove(COMMAND::MOVE_FINE_STOP_VISION); }
    else if(type == "move_vision")          { handleMove(COMMAND::MOVE_WITH_VISION); }
    else if(type == "move_const_vel")
    {
        cmd = COMMAND::MOVE_CONST_VEL;
        velocityData_.vx = doc["data"]["vx"];
        velocityData_.vy = doc["data"]["vy"];
        velocityData_.va = doc["data"]["va"];
        velocityData_.t = doc["data"]["t"];
        printIncomingCommand(message);
        sendAck(type);
    }
    else if(type == "place")                { handleSimple(COMMAND::PLACE_TRAY); }
    else if(type == "load")                 { handleSimple(COMMAND::LOAD_TRAY); }
    else if(type == "init")                 { handleSimple(COMMAND::INITIALIZE_TRAY); }
    else if(type == "p")                    { handlePose(COMMAND::POSITION); }
    else if(type == "set_pose")             { handlePose(COMMAND::SET_POSE); }
    else if(type == "estop")                { handleSimple(COMMAND::ESTOP); }
    else if(type == "lc")                   { handleSimple(COMMAND::LOAD_COMPLETE); }
    else if(type == "status")
    {
        // Pure query: respond with status, no command generated.
        sendStatus();
    }
    else if (type == "check")
    {
        // Liveness ping: ack only.
        sendAck(type);
    }
    else if (type == "clear_error")
    {
        statusUpdater_.clearErrorStatus();
        sendAck(type);
    }
    else if (type == "wait_for_loc")        { handleQuiet(COMMAND::WAIT_FOR_LOCALIZATION); }
    else if (type == "toggle_vision_debug") { handleQuiet(COMMAND::TOGGLE_VISION_DEBUG); }
    else if (type == "start_cameras")       { handleQuiet(COMMAND::START_CAMERAS); }
    else if (type == "stop_cameras")        { handleQuiet(COMMAND::STOP_CAMERAS); }
    else if(type == "")
    {
        printIncomingCommand(message);
        PLOGI.printf("ERROR: Type field empty or not specified ");
        sendErr("no_type");
    }
    else
    {
        printIncomingCommand(message);
        // Log typo fixed ("Unkown"); the wire token below is part of the
        // protocol the master matches on, so it is left unchanged.
        PLOGI.printf("ERROR: Unknown type field ");
        sendErr("unkown_type");
    }
    return cmd;
}
// Returns the x/y/a payload from the most recent move-type command.
RobotServer::PositionData RobotServer::getMoveData()
{
return moveData_;
}
// Returns the x/y/a payload from the most recent "p"/"set_pose" command.
RobotServer::PositionData RobotServer::getPositionData()
{
return positionData_;
}
// Returns the vx/vy/va/t payload from the most recent "move_const_vel" command.
RobotServer::VelocityData RobotServer::getVelocityData()
{
return velocityData_;
}
// Sends the current robot status JSON to the master (without TX debug logging,
// since status replies are frequent).
void RobotServer::sendStatus()
{
std::string msg = statusUpdater_.getStatusJsonString();
sendMsg(msg, false);
}
// Poll the socket once; if a complete framed message arrived, parse and
// dispatch it. Returns COMMAND::NONE when there is nothing to do.
COMMAND RobotServer::oneLoop()
{
    const std::string incoming = getAnyIncomingMessage();
    if (incoming.length() == 0)
    {
        return COMMAND::NONE;
    }
    PLOGD.printf("RX: %s", incoming.c_str());
    return getCommand(cleanString(incoming));
}
// Extract the first {...} JSON object from a received payload, trimming any
// leading/trailing framing garbage. Returns the message unchanged when no
// bracket pair is found.
std::string RobotServer::cleanString(std::string message)
{
    // Use size_t/npos rather than storing find() results in int.
    const size_t idx_start = message.find('{');
    const size_t idx_end = message.find('}');
    if (idx_start == std::string::npos || idx_end == std::string::npos || idx_end < idx_start)
    {
        PLOGW.printf("Could not find brackets in message");
        return message;
    }
    // Length includes the closing brace. (Fixes previous off-by-one where the
    // substring length was one larger than the bracketed span.)
    return message.substr(idx_start, idx_end - idx_start + 1);
}
// Incrementally assemble one START_CHAR...END_CHAR framed message from the
// socket. Parser state (recvInProgress_, buffer_) persists across calls so a
// message split over multiple reads is handled. Returns the payload between
// the markers, or "" if no complete message is available yet.
std::string RobotServer::getAnyIncomingMessage()
{
bool newData = false;
std::string new_msg = "";
while (socket_->dataAvailableToRead() && newData == false)
{
std::string data = socket_->getData();
for (auto c : data)
{
if (recvInProgress_ == true)
{
// A second START_CHAR mid-message restarts the frame.
if (c == START_CHAR)
{
buffer_ = "";
}
else if (c != END_CHAR)
{
buffer_ += c;
}
else
{
// END_CHAR closes the frame: hand back the buffered payload.
recvInProgress_ = false;
newData = true;
new_msg = buffer_;
buffer_ = "";
}
}
else if (c == START_CHAR)
{
recvInProgress_ = true;
}
}
}
// NOTE(review): any characters read after the END_CHAR in the same getData()
// chunk are still consumed by this loop (and buffered if a new frame started).
return new_msg;
}
// Frame msg with START_CHAR/END_CHAR and write it to the socket.
// print_debug controls TX logging (disabled for high-rate status replies).
// NOTE(review): when msg is empty AND print_debug is false, this still sends
// an empty frame "<>" — confirm whether that is intended.
void RobotServer::sendMsg(std::string msg, bool print_debug)
{
if (msg.length() == 0 && print_debug)
{
PLOGI.printf("Nothing to send!!!\n");
}
else
{
if(print_debug)
{
PLOGD.printf("TX: %s", msg.c_str());
}
std::string send_msg = START_CHAR + msg + END_CHAR;
socket_->sendData(send_msg);
}
}
// Log a received message verbatim.
// The message is passed as an argument rather than as the format string:
// feeding externally-supplied text to printf's format parameter is a
// format-string vulnerability (undefined behavior if it contains '%').
void RobotServer::printIncomingCommand(std::string message)
{
    PLOGI.printf("%s", message.c_str());
}
// Reply to the master with {"type":"ack","data":<data>} where data echoes the
// command type being acknowledged.
void RobotServer::sendAck(std::string data)
{
StaticJsonDocument<64> doc;
doc["type"] = "ack";
doc["data"] = data;
std::string msg;
serializeJson(doc, msg);
sendMsg(msg);
}
// Reply with an error token in the data field.
// NOTE(review): the type is "ack", identical to sendAck — possibly intended to
// be "err". The master presumably distinguishes errors by the data token;
// confirm before changing, as this is protocol-facing.
void RobotServer::sendErr(std::string data)
{
StaticJsonDocument<64> doc;
doc["type"] = "ack";
doc["data"] = data;
std::string msg;
serializeJson(doc, msg);
sendMsg(msg);
}
================================================
FILE: src/robot/src/RobotServer.h
================================================
/*
* Robot server that handles gettings messages from master
* and responding correctly
*/
#ifndef RobotServer_h
#define RobotServer_h
#include
#include
#include "constants.h"
#include "sockets/SocketMultiThreadWrapperBase.h"
#include "StatusUpdater.h"
#define START_CHAR '<'
#define END_CHAR '>'
// Handles framed JSON messaging with the master: receives commands, exposes
// their payloads, and sends acks/errors/status replies over the socket.
class RobotServer
{
public:
// x/y/a payload for move and position commands.
struct PositionData
{
float x;
float y;
float a;
};
// vx/vy/va plus duration t for constant-velocity commands.
struct VelocityData
{
float vx;
float vy;
float va;
float t;
};
RobotServer(StatusUpdater& statusUpdater);
// Polls the socket once and returns the decoded command (NONE if nothing new).
COMMAND oneLoop();
RobotServer::PositionData getMoveData();
RobotServer::PositionData getPositionData();
RobotServer::VelocityData getVelocityData();
private:
PositionData moveData_; // Payload of the last move-type command
PositionData positionData_; // Payload of the last "p"/"set_pose" command
VelocityData velocityData_; // Payload of the last "move_const_vel" command
StatusUpdater& statusUpdater_; // Source of status replies / error clearing
bool recvInProgress_; // Framing parser: inside a START/END span
// NOTE(review): recvIdx_ is never used in the visible .cpp — likely dead.
int recvIdx_;
std::string buffer_; // Partial message accumulated across socket reads
SocketMultiThreadWrapperBase* socket_; // Shared socket wrapper (not owned)
COMMAND getCommand(std::string message);
void sendMsg(std::string msg, bool print_debug=true);
void sendAck(std::string data);
void sendErr(std::string data);
std::string getAnyIncomingMessage();
std::string cleanString(std::string message);
void printIncomingCommand(std::string message);
void sendStatus();
};
#endif
================================================
FILE: src/robot/src/SmoothTrajectoryGenerator.cpp
================================================
#include "SmoothTrajectoryGenerator.h"
#include
#include "constants.h"
#include "utils.h"
// Precomputed 1/6, used in the j*t^3/6 terms when integrating constant jerk.
constexpr float d6 = 1/6.0;
// Loads the iterative S-curve solver's tuning parameters from config and
// marks the (empty) current trajectory as not valid/complete.
SmoothTrajectoryGenerator::SmoothTrajectoryGenerator()
: currentTrajectory_()
{
currentTrajectory_.complete = false;
solver_params_.num_loops = cfg.lookup("trajectory_generation.solver_max_loops");
solver_params_.beta_decay = cfg.lookup("trajectory_generation.solver_beta_decay");
solver_params_.alpha_decay = cfg.lookup("trajectory_generation.solver_alpha_decay");
solver_params_.exponent_decay = cfg.lookup("trajectory_generation.solver_exponent_decay");
}
// Sample the current trajectory at the given time: evaluates the 1D
// translational and rotational S-curves, maps them through the stored
// direction vectors, and returns the resulting position/velocity/time point.
PVTPoint SmoothTrajectoryGenerator::lookup(float time)
{
    // Restored missing std::vector<float> template arguments lost in extraction.
    // Each lookup_1D result is {position, velocity} along the 1D profile.
    std::vector<float> trans_values = lookup_1D(time, currentTrajectory_.trans_params);
    std::vector<float> rot_values = lookup_1D(time, currentTrajectory_.rot_params);
    // Map translational trajectory into XY space with direction vector
    Eigen::Vector2f trans_pos_delta = trans_values[0] * currentTrajectory_.trans_direction;
    Eigen::Vector2f trans_vel = trans_values[1] * currentTrajectory_.trans_direction;
    // Map rotational trajectory into angular space with direction
    float rot_pos_delta = rot_values[0] * currentTrajectory_.rot_direction;
    float rot_vel = rot_values[1] * currentTrajectory_.rot_direction;
    // Build and return pvtpoint
    PVTPoint pvt;
    pvt.position = {currentTrajectory_.initialPoint.x + trans_pos_delta(0),
                    currentTrajectory_.initialPoint.y + trans_pos_delta(1),
                    wrap_angle(currentTrajectory_.initialPoint.a + rot_pos_delta) };
    pvt.velocity = {trans_vel(0), trans_vel(1), rot_vel};
    pvt.time = time;
    return pvt;
}
std::vector lookup_1D(float time, const SCurveParameters& params)
{
// Handle time before start of trajectory
if(time <= params.switch_points[0].t)
{
return {params.switch_points[0].p, params.switch_points[0].v};
}
// Handle time after the end of the trajectory
else if (time > params.switch_points[7].t)
{
return {params.switch_points[7].p, params.switch_points[7].v};
}
// Handle times within the trajectory
else
{
// Look for correct region
for (int i = 1; i <= 7; i++)
{
// Once region is found, compute position and velocity from previous switch point
if(params.switch_points[i-1].t < time && time <= params.switch_points[i].t)
{
float dt = time - params.switch_points[i-1].t;
std::vector values = computeKinematicsBasedOnRegion(params, i, dt);
return {values[2], values[1]};
}
}
}
PLOGE << "Should not get to this point in lookup";
return {0,0};
}
// Build and store a point-to-point trajectory between two poses using the
// limits for the given mode. Returns true when a valid (complete) trajectory
// was generated; the result is cached in currentTrajectory_ for lookup().
bool SmoothTrajectoryGenerator::generatePointToPointTrajectory(Point initialPoint, Point targetPoint, LIMITS_MODE limits_mode)
{
// Print to logs
PLOGI.printf("Generating trajectory");
PLOGI.printf("Starting point: %s", initialPoint.toString().c_str());
PLOGI.printf("Target point: %s", targetPoint.toString().c_str());
PLOGD_(MOTION_LOG_ID).printf("\nGenerating trajectory");
PLOGD_(MOTION_LOG_ID).printf("Starting point: %s", initialPoint.toString().c_str());
PLOGD_(MOTION_LOG_ID).printf("Target point: %s", targetPoint.toString().c_str());
MotionPlanningProblem mpp = buildMotionPlanningProblem(initialPoint, targetPoint, limits_mode, solver_params_);
currentTrajectory_ = generateTrajectory(mpp);
PLOGI << currentTrajectory_.toString();
PLOGD_(MOTION_LOG_ID) << currentTrajectory_.toString();
// 'complete' reflects whether the solver succeeded end-to-end.
return currentTrajectory_.complete;
}
// TODO Implement a more accurate version of this if needed
// Note that this implimentation is a hack and isn't guaranteed to give an accurate constant velocity - so use with caution.
bool SmoothTrajectoryGenerator::generateConstVelTrajectory(Point initialPoint, Velocity velocity, float moveTime, LIMITS_MODE limits_mode)
{
// Print to logs
PLOGI.printf("Generating constVel (sort of) trajectory");
PLOGI.printf("Starting point: %s", initialPoint.toString().c_str());
PLOGI.printf("Target velocity: %s", velocity.toString().c_str());
PLOGI.printf("Move time: %d", moveTime);
PLOGD_(MOTION_LOG_ID).printf("Generating constVel (sort of) trajectory");
PLOGD_(MOTION_LOG_ID).printf("Starting point: %s", initialPoint.toString().c_str());
PLOGD_(MOTION_LOG_ID).printf("Target velocity: %s", velocity.toString().c_str());
PLOGD_(MOTION_LOG_ID).printf("Move time: %d", moveTime);
// This will undershoot the target velocity because we aren't consider accel/jerk here so the
// solver will not quite reach this velocity - especially if the move time specified is small.
Point targetPoint;
targetPoint.x = initialPoint.x + velocity.vx * moveTime;
targetPoint.y = initialPoint.y + velocity.vy * moveTime;
targetPoint.a = initialPoint.a + velocity.va * moveTime;
MotionPlanningProblem mpp = buildMotionPlanningProblem(initialPoint, targetPoint, limits_mode, solver_params_);
currentTrajectory_ = generateTrajectory(mpp);
PLOGI << currentTrajectory_.toString();
PLOGD_(MOTION_LOG_ID) << currentTrajectory_.toString();
return currentTrajectory_.complete;
}
// Assemble a MotionPlanningProblem: packs start/goal poses, selects the
// dynamic limits for the requested mode (FINE and SLOW share 'fine'; anything
// else falls back to 'coarse'), scales them down by the configured headroom
// fraction, and attaches the solver parameters.
MotionPlanningProblem buildMotionPlanningProblem(Point initialPoint, Point targetPoint, LIMITS_MODE limits_mode, const SolverParameters& solver)
{
    MotionPlanningProblem mpp;
    mpp.initialPoint = {initialPoint.x, initialPoint.y, initialPoint.a};
    mpp.targetPoint = {targetPoint.x, targetPoint.y, targetPoint.a};
    DynamicLimits translationalLimits;
    DynamicLimits rotationalLimits;
    if(limits_mode == LIMITS_MODE::VISION)
    {
        PLOGI << "Setting trajectory limits mode to LIMITS_MODE::VISION";
        translationalLimits = { cfg.lookup("motion.translation.max_vel.vision"),
                                cfg.lookup("motion.translation.max_acc.vision"),
                                cfg.lookup("motion.translation.max_jerk.vision")};
        rotationalLimits = { cfg.lookup("motion.rotation.max_vel.vision"),
                             cfg.lookup("motion.rotation.max_acc.vision"),
                             cfg.lookup("motion.rotation.max_jerk.vision")};
    }
    else if(limits_mode == LIMITS_MODE::FINE || limits_mode == LIMITS_MODE::SLOW)
    {
        PLOGI << "Setting trajectory limits mode to LIMITS_MODE::FINE/SLOW";
        translationalLimits = { cfg.lookup("motion.translation.max_vel.fine"),
                                cfg.lookup("motion.translation.max_acc.fine"),
                                cfg.lookup("motion.translation.max_jerk.fine")};
        rotationalLimits = { cfg.lookup("motion.rotation.max_vel.fine"),
                             cfg.lookup("motion.rotation.max_acc.fine"),
                             cfg.lookup("motion.rotation.max_jerk.fine")};
    }
    else
    {
        PLOGI << "Setting trajectory limits mode to LIMITS_MODE::COARSE";
        translationalLimits = { cfg.lookup("motion.translation.max_vel.coarse"),
                                cfg.lookup("motion.translation.max_acc.coarse"),
                                cfg.lookup("motion.translation.max_jerk.coarse")};
        rotationalLimits = { cfg.lookup("motion.rotation.max_vel.coarse"),
                             cfg.lookup("motion.rotation.max_acc.coarse"),
                             cfg.lookup("motion.rotation.max_jerk.coarse")};
    }
    // This scaling makes sure to give some headroom for the controller to go a bit faster than the planned limits
    // without actually violating any hard constraints
    mpp.translationalLimits = translationalLimits * cfg.lookup("motion.limit_max_fraction");
    mpp.rotationalLimits = rotationalLimits * cfg.lookup("motion.limit_max_fraction");
    mpp.solver_params = solver;
    // Return by value: 'return std::move(mpp)' would defeat NRVO and force a move.
    return mpp;
}
// Sanity check: true when the solved S-curve's peak velocity/accel/jerk all
// stay within the given dynamic limits.
bool sCurveWithinLimits(const SCurveParameters& params, const DynamicLimits& limits)
{
return params.v_lim <= limits.max_vel && params.a_lim <= limits.max_acc && params.j_lim <= limits.max_jerk;
}
// Solve the full planar trajectory: independent S-curves for translation
// magnitude and rotation magnitude, synchronized to share start/end times,
// then validated against the limits. On any failure an incomplete trajectory
// (complete == false) is returned.
Trajectory generateTrajectory(MotionPlanningProblem problem)
{
// Figure out delta that the trajectory needs to cover
Eigen::Vector3f deltaPosition = problem.targetPoint - problem.initialPoint;
deltaPosition(2) = wrap_angle(deltaPosition(2));
// Begin building trajectory object
Trajectory traj;
traj.complete = false;
traj.initialPoint = {problem.initialPoint(0), problem.initialPoint(1), problem.initialPoint(2)};
// NOTE(review): normalized() of a zero-length vector yields NaNs; for a
// pure-rotation move (no translation) confirm downstream handling.
traj.trans_direction = deltaPosition.head(2).normalized();
traj.rot_direction = sgn(deltaPosition(2));
// Solve translational component
float dist = deltaPosition.head(2).norm();
SCurveParameters trans_params;
bool ok = generateSCurve(dist, problem.translationalLimits, problem.solver_params, &trans_params);
if(!ok)
{
PLOGW << "Failed to generate translational trajectory";
return traj;
}
// Solve rotational component
SCurveParameters rot_params;
dist = fabs(deltaPosition(2));
ok = generateSCurve(dist, problem.rotationalLimits, problem.solver_params, &rot_params);
if(!ok)
{
PLOGW << "Failed to generate rotational trajectory";
return traj;
}
// Syncronize trajectories so they both start and end at the same time
ok = synchronizeParameters(&trans_params, &rot_params);
if (!ok)
{
PLOGW << "Failed to synchronize trajectory parameters";
return traj;
}
// Synchronization can change the profiles, so re-validate both against limits.
if(!sCurveWithinLimits(trans_params, problem.translationalLimits))
{
PLOGW << "Generated translational trajectory violates limits";
return traj;
}
if(!sCurveWithinLimits(rot_params, problem.rotationalLimits))
{
PLOGW << "Generated rotational trajectory violates limits";
return traj;
}
traj.trans_params = trans_params;
traj.rot_params = rot_params;
traj.complete = true;
return traj;
}
// Solves for 1-D S-curve parameters covering 'dist' within the given dynamic
// limits. Iteratively relaxes the acceleration/velocity limits (per the
// solver's decay factors) until a feasible 7-region profile is found or the
// loop budget runs out. Returns true on success, with 'params' fully
// populated (including all switch points).
bool generateSCurve(float dist, DynamicLimits limits, const SolverParameters& solver, SCurveParameters* params)
{
    // Handle case where distance is very close to 0
    float min_dist = cfg.lookup("trajectory_generation.min_dist_limit");
    if (fabs(dist) < min_dist)
    {
        // Degenerate trajectory: zero limits and all-zero switch points
        params->v_lim = 0;
        params->a_lim = 0;
        params->j_lim = 0;
        for (int i = 0; i < 8; i++)
        {
            params->switch_points[i].t = 0;
            params->switch_points[i].p = 0;
            params->switch_points[i].v = 0;
            params->switch_points[i].a = 0;
        }
        return true;
    }

    // Initialize parameters
    float v_lim = limits.max_vel;
    float a_lim = limits.max_acc;
    float j_lim = limits.max_jerk;

    bool solution_found = false;
    int loop_counter = 0;
    while(!solution_found && loop_counter < solver.num_loops)
    {
        loop_counter++;
        PLOGI << "Trajectory generation loop " << loop_counter;

        // Constant jerk region
        // (d6 is presumably a 1/6 constant defined elsewhere for the cubic
        // jerk term - TODO confirm)
        float dt_j = a_lim / j_lim;
        float dv_j = 0.5 * j_lim * std::pow(dt_j, 2);
        float dp_j1 = d6 * j_lim * std::pow(dt_j, 3);
        float dp_j2 = (v_lim - dv_j) * dt_j + 0.5 * a_lim * std::pow(dt_j, 2) - d6 * j_lim * std::pow(dt_j, 3);

        // Constant accel region
        float dt_a = (v_lim - 2 * dv_j) / a_lim;
        if (dt_a <= 0)
        {
            // If dt_a is negative, it means we couldn't find a solution
            // so adjust accel parameter and try loop again
            a_lim *= std::pow(solver.beta_decay, 1 + solver.exponent_decay * loop_counter);
            PLOGI << "dt_a: " << dt_a << ", trying new accel limit: " << a_lim;
            continue;
        }
        float dp_a = dv_j * dt_a + 0.5 * a_lim * std::pow(dt_a, 2);

        // Constant velocity region
        float dt_v = (dist - 2 * dp_j1 - 2 * dp_j2 - 2 * dp_a) / v_lim;
        if (dt_v <= 0)
        {
            // If dt_v is negative, it means we couldn't find a solution
            // so adjust velocity parameter and try loop again
            v_lim *= std::pow(solver.alpha_decay, 1 + solver.exponent_decay * loop_counter);
            PLOGI << "dt_v: " << dt_v << ", trying new velocity limit: " << v_lim;
            continue;
        }

        // If we get here, it means we found a valid solution and can populate the rest of the
        // switch time parameters
        solution_found = true;
        params->v_lim = v_lim;
        params->a_lim = a_lim;
        params->j_lim = j_lim;
        PLOGI << "Trajectory solution found";
        populateSwitchTimeParameters(params, dt_j, dt_a, dt_v);
    }

    return solution_found;
}
// Fills in the 8 region-boundary switch points of an S-curve, given the
// durations of the jerk (dt_j), acceleration (dt_a) and velocity (dt_v)
// regions. Assumes the limit values in 'params' are already set.
void populateSwitchTimeParameters(SCurveParameters* params, float dt_j, float dt_a, float dt_v)
{
    // Fill first point with all zeros
    params->switch_points[0].t = 0;
    params->switch_points[0].p = 0;
    params->switch_points[0].v = 0;
    params->switch_points[0].a = 0;

    for (int region = 1; region < 8; region++)
    {
        // Pick the duration matching this region's type
        float dt;
        switch (region)
        {
            case 2:
            case 6:
                // Constant acceleration regions
                dt = dt_a;
                break;
            case 4:
                // Constant velocity region
                dt = dt_v;
                break;
            default:
                // Constant jerk regions (1, 3, 5, 7)
                dt = dt_j;
                break;
        }

        // Integrate kinematics across this region and record the boundary state
        const auto values = computeKinematicsBasedOnRegion(*params, region, dt);
        params->switch_points[region].a = values[0];
        params->switch_points[region].v = values[1];
        params->switch_points[region].p = values[2];
        params->switch_points[region].t = params->switch_points[region-1].t + dt;
    }
}
// Makes both 1-D parameter sets finish at the same time by stretching
// whichever one currently ends sooner. Returns false if the stretch could
// not be solved.
bool synchronizeParameters(SCurveParameters* params1, SCurveParameters* params2)
{
    const float end_time_1 = params1->switch_points[7].t;
    const float end_time_2 = params2->switch_points[7].t;

    // Slow down the faster (shorter) profile to match the slower one
    return (end_time_1 > end_time_2)
        ? slowDownParamsToMatchTime(params2, end_time_1)
        : slowDownParamsToMatchTime(params1, end_time_2);
}
// Stretches an S-curve so it ends at time_to_match by distributing the
// extra time evenly across the 7 regions, then re-deriving the limit
// values from the new switch times. Returns false if that inverse solve fails.
bool slowDownParamsToMatchTime(SCurveParameters* params, float time_to_match)
{
    // Extra time we need to absorb
    const float dt = time_to_match - params->switch_points[7].t;

    // Spread it evenly: switch point i has accumulated i regions of stretch
    const float dt_per_region = dt / 7.0;
    for (int i = 1; i < 8; i++)
    {
        params->switch_points[i].t += i * dt_per_region;
    }

    // Then recompute limits based on updated times
    return solveInverse(params);
}
// Given switch TIMES already stored in 'params' plus the final position,
// recomputes the limit values (j_lim, a_lim, v_lim) that reproduce exactly
// those times. Used after stretching a profile to a longer duration.
// Returns false if the linear system has no sufficiently accurate solution.
bool solveInverse(SCurveParameters* params)
{
    // Gather parameters needed
    const float dt_j = params->switch_points[1].t - params->switch_points[0].t;
    const float dt_a = params->switch_points[2].t - params->switch_points[1].t;
    const float dt_v = params->switch_points[4].t - params->switch_points[3].t;
    const float deltaPosition = params->switch_points[7].p;

    // Build linear system A * x = b where x = [j_lim, a_lim, v_lim]
    Eigen::Matrix3f A;
    Eigen::Vector3f b;
    A << dt_j, -1, 0 ,
         std::pow(dt_j, 2), dt_a , -1 ,
         std::pow(dt_j, 2) * (dt_a - dt_j), std::pow(dt_j, 2) + std::pow(dt_a, 2), dt_v + 2* dt_j;
    b << 0, 0, deltaPosition;

    // Solve system and check results
    // Eigen::Vector3f lims = A.fullPivHouseholderQr().solve(b);
    Eigen::Vector3f lims = A.bdcSvd(Eigen::ComputeFullU | Eigen::ComputeFullV).solve(b);
    // Reject solutions where A is too ill-conditioned to reproduce b
    float relative_error = (A*lims - b).norm() / b.norm();
    if (relative_error > 1e-5)
    {
        PLOGW << "Could not find feasible inverse parameter mapping";
        return false;
    }

    // Unknowns come back in the order [jerk, accel, velocity]
    params->j_lim = lims(0);
    params->a_lim = lims(1);
    params->v_lim = lims(2);
    populateSwitchTimeParameters(params, dt_j, dt_a, dt_v);
    return true;
}
std::vector computeKinematicsBasedOnRegion(const SCurveParameters& params, int region, float dt)
{
float j, a, v, p;
bool need_a = true;
bool need_v = true;
// Positive jerk
if (region == 1 || region == 7)
{
j = params.j_lim;
}
// Negative jerk
else if (region == 3 || region == 5)
{
j = -1 * params.j_lim;
}
// Constant positive acceleration
else if (region == 2)
{
j = 0;
a = params.a_lim;
need_a = false;
}
// Constant negative acceleration
else if ( region == 6)
{
j = 0;
a = -1*params.a_lim;
need_a = false;
}
// Constant velocity region
else if (region == 4)
{
j = 0;
a = 0;
v = params.v_lim;
need_a = false;
need_v = false;
}
else
{
// Error
PLOGE << "Invalid region value: " << region;
return {0,0,0,0};
}
// Compute remaining values
if (need_a)
{
a = params.switch_points[region-1].a + j * dt;
}
if (need_v)
{
v = params.switch_points[region-1].v +
params.switch_points[region-1].a * dt +
0.5 * j * std::pow(dt, 2);
}
p = params.switch_points[region-1].p +
params.switch_points[region-1].v * dt +
0.5 * params.switch_points[region-1].a * std::pow(dt, 2) +
d6 * j * std::pow(dt, 3);
return {a,v,p};
}
================================================
FILE: src/robot/src/SmoothTrajectoryGenerator.h
================================================
#ifndef SmoothTrajectoryGenerator_h
#define SmoothTrajectoryGenerator_h
#include
#include "utils.h"
// Return structure for a trajectory point lookup that contains all the info about a point in time the controller
// needs to drive the robot
struct PVTPoint
{
    Point position;    // Target pose at this time
    Velocity velocity; // Target velocity at this time
    float time;        // Seconds from the start of the trajectory

    // Human-readable summary for logging
    std::string toString() const
    {
        char s[200];
        // snprintf so an oversized position/velocity string cannot overrun
        // the stack buffer (was unbounded sprintf)
        snprintf(s, sizeof(s), "[Position: %s, Velocity: %s, T: %.3f]", position.toString().c_str(), velocity.toString().c_str(), time);
        return std::string(s);
    }
};
// Contains info about the maximum dynamic limits of a trajectory
struct DynamicLimits
{
    float max_vel;
    float max_acc;
    float max_jerk;

    // Uniformly scale all limits by c (e.g. to leave the controller some
    // headroom). Marked const so scaling works on const objects/references.
    DynamicLimits operator* (float c) const
    {
        DynamicLimits rtn;
        rtn.max_vel = c * max_vel;
        rtn.max_acc = c * max_acc;
        rtn.max_jerk = c * max_jerk;
        return rtn;
    }
};
// A fully defined point for switching from one region of the trajectory
// to another - needed for efficient lookup without building a huge table
struct SwitchPoint
{
    float t;  // Time of the switch, seconds from trajectory start
    float p;  // 1-D position along the trajectory at the switch
    float v;  // Velocity at the switch
    float a;  // Acceleration at the switch
};
// Parameters defining a 1-D S-curve trajectory
struct SCurveParameters
{
    float v_lim;  // Velocity reached in the constant-velocity region
    float a_lim;  // Acceleration reached in the constant-accel regions
    float j_lim;  // Jerk magnitude used in the constant-jerk regions
    // Kinematic state at each of the 8 region boundaries
    SwitchPoint switch_points[8];

    // Human-readable summary for logging
    std::string toString() const
    {
        char s[256];
        // snprintf bounds the write - the formatted output can approach
        // the buffer size (was unbounded sprintf)
        snprintf(s, sizeof(s), "  Limits: [v: %.3f, a: %.3f, j: %.3f]\n  Switch times: [%.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f]", v_lim, a_lim, j_lim,
            switch_points[0].t,switch_points[1].t,switch_points[2].t,switch_points[3].t,switch_points[4].t,switch_points[5].t,switch_points[6].t,switch_points[7].t);
        return std::string(s);
    }
};
// Everything needed to define a point to point s-curve trajectory in X, Y, and angle
struct Trajectory
{
    Eigen::Vector2f trans_direction;  // Unit direction of the XY motion
    int rot_direction;                // Sign of the rotation (+1/0/-1)
    Point initialPoint;               // Starting pose of the trajectory
    SCurveParameters trans_params;    // 1-D profile along trans_direction
    SCurveParameters rot_params;      // 1-D profile for the rotation
    bool complete;                    // True only if generation fully succeeded

    // Human-readable summary for logging
    std::string toString() const
    {
        char s[1000];
        // snprintf so nested toString() output cannot overrun the buffer
        // (was unbounded sprintf)
        snprintf(s, sizeof(s), "Trajectory Parameters:\nTranslation:\n Direction: [%.2f, %.2f]\n S-Curve:\n%s\nRotation:\n Direction: %i\n S-Curve:\n%s\n",
            trans_direction[0], trans_direction[1], trans_params.toString().c_str(), rot_direction, rot_params.toString().c_str());
        return std::string(s);
    }
};
// Tuning knobs for the iterative S-curve solver (see generateSCurve)
struct SolverParameters
{
    int num_loops;         // Max solver iterations before giving up
    float alpha_decay;     // Velocity-limit decay factor applied when dt_v is infeasible
    float beta_decay;      // Accel-limit decay factor applied when dt_a is infeasible
    float exponent_decay;  // Scales the decay exponent with the iteration count
};
// Selects which set of configured dynamic limits a motion uses. Each mode
// maps to its own motion.* config entries (see buildMotionPlanningProblem).
enum class LIMITS_MODE
{
    COARSE,
    FINE,
    VISION,
    SLOW,
};
// All the pieces needed to define the motion planning problem
struct MotionPlanningProblem
{
    Eigen::Vector3f initialPoint;       // Starting pose [x, y, angle]
    Eigen::Vector3f targetPoint;        // Goal pose [x, y, angle]
    DynamicLimits translationalLimits;  // Limits for the x/y component
    DynamicLimits rotationalLimits;     // Limits for the angular component
    SolverParameters solver_params;     // Iteration settings for the S-curve solver
};
// Helper methods - making public for easier testing
// Assembles the planning problem from poses, a limits mode, and solver settings
MotionPlanningProblem buildMotionPlanningProblem(Point initialPoint, Point targetPoint, LIMITS_MODE limits_mode, const SolverParameters& solver);
// Generates the full synchronized translation + rotation trajectory
Trajectory generateTrajectory(MotionPlanningProblem problem);
// Solves a single 1-D S-curve covering 'dist'; returns false on failure
bool generateSCurve(float dist, DynamicLimits limits, const SolverParameters& solver, SCurveParameters* params);
// Fills in the 8 switch points from the region durations
void populateSwitchTimeParameters(SCurveParameters* params, float dt_j, float dt_a, float dt_v);
// Stretches the faster of the two parameter sets to match the slower one
bool synchronizeParameters(SCurveParameters* params1, SCurveParameters* params2);
bool slowDownParamsToMatchTime(SCurveParameters* params, float time_to_match);
// Recomputes limit values from already-set switch times
bool solveInverse(SCurveParameters* params);
std::vector lookup_1D(float time, const SCurveParameters& params);
std::vector computeKinematicsBasedOnRegion(const SCurveParameters& params, int region, float dt);
// Top-level trajectory generation interface used by the motion controller.
class SmoothTrajectoryGenerator
{
  public:
    SmoothTrajectoryGenerator();

    // Generates a trajectory that starts at the initial point and ends at the target point. The limits_mode
    // adjusts the dynamic limits (e.g. tighter limits for a more accurate motion). Returns a bool indicating
    // if trajectory generation was successful
    bool generatePointToPointTrajectory(Point initialPoint, Point targetPoint, LIMITS_MODE limits_mode);

    // Generates a trajectory that attempts to maintain the target velocity for a specified time. Note that the current implementation
    // of this does not give a guarantee on the accuracy of the velocity if the specified velocity and move time would violate the dynamic
    // limits of the fine or coarse movement mode. Returns a bool indicating if trajectory generation was successful
    bool generateConstVelTrajectory(Point initialPoint, Velocity velocity, float moveTime, LIMITS_MODE limits_mode);

    // Looks up a point in the current trajectory based on the time, in seconds, from the start of the trajectory
    PVTPoint lookup(float time);

  private:
    // The current trajectory - this lets the generation class hold onto this and just provide a lookup method
    // since I don't have a need to pass the trajectory around anywhere
    Trajectory currentTrajectory_;

    // These need to be part of the class because they need to be loaded at construction time, not
    // program initialization time (i.e. as globals). This is because the config file is not
    // yet loaded at program start up time.
    SolverParameters solver_params_;
};
#endif
================================================
FILE: src/robot/src/StatusUpdater.cpp
================================================
#include "StatusUpdater.h"
#include "constants.h"
// Default-construct with an all-defaults Status (see Status's constructor)
StatusUpdater::StatusUpdater() :
    currentStatus_()
{
}
// Serialize the current status snapshot into its JSON wire format
std::string StatusUpdater::getStatusJsonString()
{
    // Return the status string
    return currentStatus_.toJsonString();
}
// Record the latest estimated robot pose (x, y, heading angle a)
void StatusUpdater::updatePosition(float x, float y, float a)
{
    currentStatus_.pos_x = x;
    currentStatus_.pos_y = y;
    currentStatus_.pos_a = a;
}
// Record the latest estimated robot velocity (x, y, angular)
void StatusUpdater::updateVelocity(float vx, float vy, float va)
{
    currentStatus_.vel_x = vx;
    currentStatus_.vel_y = vy;
    currentStatus_.vel_a = va;
}
// Flag whether a commanded action is currently running
void StatusUpdater::updateInProgress(bool in_progress)
{
    currentStatus_.in_progress = in_progress;
}
// Record the most recent controller loop duration, in milliseconds
void StatusUpdater::updateControlLoopTime(int controller_loop_ms)
{
    currentStatus_.controller_loop_ms = controller_loop_ms;
}
// Record the most recent position loop duration, in milliseconds
void StatusUpdater::updatePositionLoopTime(int position_loop_ms)
{
    currentStatus_.position_loop_ms = position_loop_ms;
}
// Record the latest localization quality metrics
// NOTE(review): takes the struct by value (copied); fine if small, but a
// const reference would avoid the copy - confirm against the header decl
void StatusUpdater::updateLocalizationMetrics(LocalizationMetrics localization_metrics)
{
    currentStatus_.localization_metrics = localization_metrics;
}
// Record whether the motor driver serial link is currently connected
void StatusUpdater::update_motor_driver_connected(bool connected)
{
    currentStatus_.motor_driver_connected = connected;
}
// Record whether the lifter driver serial link is currently connected
void StatusUpdater::update_lifter_driver_connected(bool connected)
{
    currentStatus_.lifter_driver_connected = connected;
}
// Record the latest pose reported by the vision controller
void StatusUpdater::updateVisionControllerPose(Point pose)
{
    currentStatus_.vision_x = pose.x;
    currentStatus_.vision_y = pose.y;
    currentStatus_.vision_a = pose.a;
}
// Record the last Marvelmind beacon pose and whether it was actually
// incorporated into the position estimate
void StatusUpdater::updateLastMarvelmindPose(Point pose, bool pose_used)
{
    currentStatus_.last_mm_x = pose.x;
    currentStatus_.last_mm_y = pose.y;
    currentStatus_.last_mm_a = pose.a;
    currentStatus_.last_mm_used = pose_used;
}
================================================
FILE: src/robot/src/StatusUpdater.h
================================================
#ifndef StatusUpdater_h
#define StatusUpdater_h
#include
#include "utils.h"
// Central collector for robot status. Subsystems push updates into the
// internal Status struct via the setters below; getStatusJsonString()
// serializes a snapshot for transmission.
class StatusUpdater
{
  public:
    StatusUpdater();
    // Serialize the current status to its JSON wire format
    std::string getStatusJsonString();

    // Setters called by the various subsystems as new data arrives
    void updatePosition(float x, float y, float a);
    void updateVelocity(float vx, float vy, float va);
    void updateControlLoopTime(int controller_loop_ms);
    void updatePositionLoopTime(int position_loop_ms);
    void updateInProgress(bool in_progress);
    bool getInProgress() const { return currentStatus_.in_progress; };
    void setErrorStatus() { currentStatus_.error_status = true;}
    void clearErrorStatus() {currentStatus_.error_status = false;};
    bool getErrorStatus() const {return currentStatus_.error_status;}
    void updateLocalizationMetrics(LocalizationMetrics localization_metrics);
    float getLocalizationConfidence() const {return currentStatus_.localization_metrics.total_confidence;};
    void update_motor_driver_connected(bool connected);
    void update_lifter_driver_connected(bool connected);
    void updateCameraDebug(CameraDebug camera_debug) {currentStatus_.camera_debug = camera_debug;};
    void updateVisionControllerPose(Point pose);
    void updateLastMarvelmindPose(Point pose, bool pose_used);

    // Plain-data snapshot of everything reported over the status channel
    struct Status
    {
        // Current position and velocity
        float pos_x;
        float pos_y;
        float pos_a;
        float vel_x;
        float vel_y;
        float vel_a;

        // Vision tracker pose
        float vision_x;
        float vision_y;
        float vision_a;

        // Last pose from mm
        float last_mm_x;
        float last_mm_y;
        float last_mm_a;
        bool last_mm_used;

        // Loop times
        int controller_loop_ms;
        int position_loop_ms;

        bool in_progress;
        bool error_status;
        uint8_t counter; // Just to show that the status is updating. Okay to roll over
        bool motor_driver_connected;
        bool lifter_driver_connected;
        LocalizationMetrics localization_metrics;
        CameraDebug camera_debug;

        //When adding extra fields, update toJsonString method to serialize and add additional capacity
        Status():
            pos_x(0.0),
            pos_y(0.0),
            pos_a(0.0),
            vel_x(0.0),
            vel_y(0.0),
            vel_a(0.0),
            vision_x(0.0),
            vision_y(0.0),
            vision_a(0.0),
            last_mm_x(0.0),
            last_mm_y(0.0),
            last_mm_a(0.0),
            last_mm_used(false),
            controller_loop_ms(999),
            position_loop_ms(999),
            in_progress(false),
            error_status(false),
            counter(0),
            motor_driver_connected(false),
            lifter_driver_connected(false),
            localization_metrics(),
            camera_debug()
        {
        }

        // Serialize all fields to a JSON string.
        // Note: non-const because it also increments 'counter' each call.
        std::string toJsonString()
        {
            // Size the object correctly
            const size_t capacity = JSON_OBJECT_SIZE(50); // Update when adding new fields
            DynamicJsonDocument root(capacity);

            // Format to match messages sent by server
            root["type"] = "status";
            JsonObject doc = root.createNestedObject("data");

            // Fill in data
            doc["pos_x"] = pos_x;
            doc["pos_y"] = pos_y;
            doc["pos_a"] = pos_a;
            doc["vel_x"] = vel_x;
            doc["vel_y"] = vel_y;
            doc["vel_a"] = vel_a;
            doc["controller_loop_ms"] = controller_loop_ms;
            doc["position_loop_ms"] = position_loop_ms;
            doc["in_progress"] = in_progress;
            doc["error_status"] = error_status;
            doc["counter"] = counter++;
            doc["motor_driver_connected"] = motor_driver_connected;
            doc["lifter_driver_connected"] = lifter_driver_connected;
            doc["localization_confidence_x"] = localization_metrics.confidence_x;
            doc["localization_confidence_y"] = localization_metrics.confidence_y;
            doc["localization_confidence_a"] = localization_metrics.confidence_a;
            doc["localization_total_confidence"] = localization_metrics.total_confidence;
            doc["last_position_uncertainty"] = localization_metrics.last_position_uncertainty;
            doc["cam_side_ok"] = camera_debug.side_ok;
            doc["cam_rear_ok"] = camera_debug.rear_ok;
            doc["cam_both_ok"] = camera_debug.both_ok;
            doc["cam_side_u"] = camera_debug.side_u;
            doc["cam_side_v"] = camera_debug.side_v;
            doc["cam_rear_u"] = camera_debug.rear_u;
            doc["cam_rear_v"] = camera_debug.rear_v;
            doc["cam_side_x"] = camera_debug.side_x;
            doc["cam_side_y"] = camera_debug.side_y;
            doc["cam_rear_x"] = camera_debug.rear_x;
            doc["cam_rear_y"] = camera_debug.rear_y;
            doc["cam_pose_x"] = camera_debug.pose_x;
            doc["cam_pose_y"] = camera_debug.pose_y;
            doc["cam_pose_a"] = camera_debug.pose_a;
            doc["cam_loop_ms"] = camera_debug.loop_ms;
            doc["vision_x"] = vision_x;
            doc["vision_y"] = vision_y;
            doc["vision_a"] = vision_a;
            doc["last_mm_x"] = last_mm_x;
            doc["last_mm_y"] = last_mm_y;
            doc["last_mm_a"] = last_mm_a;
            doc["last_mm_used"] = last_mm_used;

            // Serialize and return string
            std::string msg;
            serializeJson(root, msg);
            return msg;
        }
    };

    // Returns a copy of the current status snapshot
    Status getStatus() { return currentStatus_;};

  private:
    Status currentStatus_;
};
#endif
================================================
FILE: src/robot/src/TrayController.cpp
================================================
#include "TrayController.h"
#include "constants.h"
#include
#include "serial/SerialCommsFactory.h"
// Named tray lifter positions; each maps to a configured step count via getPos()
enum class LifterPosType
{
    DEFAULT,
    PLACE,
    LOAD,
};
// Converts a named lifter position into an absolute step count for the
// lifter driver, using the configured revolutions and steps-per-rev.
// (Fixed: restored the missing template argument on static_cast, which
// did not compile as written.)
int getPos(LifterPosType pos_type)
{
    float revs = 0.0;
    switch(pos_type)
    {
        case LifterPosType::DEFAULT:
            revs = cfg.lookup("tray.default_pos_revs");
            break;
        case LifterPosType::PLACE:
            revs = cfg.lookup("tray.place_pos_revs");
            break;
        case LifterPosType::LOAD:
            revs = cfg.lookup("tray.load_pos_revs");
            break;
    }
    int steps_per_rev = cfg.lookup("tray.steps_per_rev");
    float steps = steps_per_rev * revs;
    // Explicit narrowing conversion (truncates toward zero)
    return static_cast<int>(steps);
}
// Wires up the serial link to the lifter driver and loads tray config.
// Starts idle (ACTION::NONE) and not initialized.
TrayController::TrayController()
    : serial_to_lifter_driver_(SerialCommsFactory::getFactoryInstance()->get_serial_comms(CLEARCORE_USB)),
      action_step_running_(false),
      load_complete_(false),
      action_step_(0),
      fake_tray_motion_(cfg.lookup("tray.fake_tray_motions")),
      cur_action_(ACTION::NONE),
      controller_rate_(cfg.lookup("tray.controller_frequency")),
      is_initialized_(false)
{
    // Warn loudly when real motion is disabled by config
    if(fake_tray_motion_) PLOGW << "Fake tray motion enabled";
}
// Kicks off the INITIALIZE action; the sequence itself runs in update()
void TrayController::initialize()
{
    cur_action_ = ACTION::INITIALIZE;
    action_step_ = 0;
    PLOGI << "Starting tray action INITIALIZE";
}
// Starts the PLACE action. Returns false (and does nothing) if the tray
// has not been initialized, unless tray motion is faked.
bool TrayController::place()
{
    const bool ready = is_initialized_ || fake_tray_motion_;
    if (!ready)
    {
        PLOGE << "Tray is not initialized, aborting action";
        return false;
    }

    cur_action_ = ACTION::PLACE;
    action_step_ = 0;
    PLOGI << "Starting tray action PLACE";
    return true;
}
// Starts the LOAD action. Returns false (and does nothing) if the tray
// has not been initialized, unless tray motion is faked.
bool TrayController::load()
{
    const bool ready = is_initialized_ || fake_tray_motion_;
    if (!ready)
    {
        PLOGE << "Tray is not initialized, aborting action";
        return false;
    }

    cur_action_ = ACTION::LOAD;
    action_step_ = 0;
    PLOGI << "Starting tray action LOAD";
    return true;
}
// Immediately aborts any running action and commands the lifter to stop
void TrayController::estop()
{
    cur_action_ = ACTION::NONE;
    action_step_ = 0;
    action_step_running_ = false;
    PLOGW << "Estopping tray control";
    if (serial_to_lifter_driver_->isConnected())
    {
        serial_to_lifter_driver_->send("lift:stop");
    }
}
// Accepts the external "load complete" signal, but only while the LOAD
// action is waiting for it (step 1); otherwise logs and ignores it.
void TrayController::setLoadComplete()
{
    if(cur_action_ == ACTION::LOAD && action_step_ == 1)
    {
        load_complete_ = true;
    }
    else
    {
        // Fixed spelling in the operator-facing log message
        // ("Recieved" -> "Received")
        PLOGW << "Received LOAD_COMPLETE signal at incorrect time. Ignoring.";
    }
}
// Advances the active action's state machine. Rate-limited so serial
// traffic to the lifter driver stays bounded.
void TrayController::update()
{
    if (!controller_rate_.ready()) { return; }
    switch (cur_action_)
    {
        case ACTION::INITIALIZE:
            updateInitialize();
            break;
        case ACTION::LOAD:
            updateLoad();
            break;
        case ACTION::PLACE:
            updatePlace();
            break;
        default:
            // Just to make sure a serial buffer doesn't fill up somewhere
            std::string msg = "";
            msg = serial_to_lifter_driver_->rcv_lift();
            (void) msg;
            break;
    }
}
// Executes one step of an action: the first call sends 'data' to the lifter
// driver, and subsequent calls poll the driver's status until the step
// finishes, at which point action_step_ is advanced.
void TrayController::runStepAndWaitForCompletion(std::string data, std::string debug_print)
{
    if(!action_step_running_)
    {
        // First call for this step: issue the command once
        if (serial_to_lifter_driver_->isConnected())
        {
            serial_to_lifter_driver_->send(data);
        }
        action_step_running_ = true;
        PLOGI << debug_print;
        action_timer_.reset();
    }
    else
    {
        // Request status and wait for command to complete
        std::string msg = "";
        if (serial_to_lifter_driver_->isConnected())
        {
            serial_to_lifter_driver_->send("lift:status_req");
            msg = serial_to_lifter_driver_->rcv_lift();
        }

        // Initial delay for action
        // NOTE(review): 1000 ms is a hardcoded minimum step duration, and
        // "none" presumably means the driver has no command in flight -
        // confirm against the lifter driver protocol
        if(action_timer_.dt_ms() > 1000 && (msg == "none" || fake_tray_motion_)) {
            action_step_running_ = false;
            action_step_++;
        }
    }
}
// State machine for the INITIALIZE action; one step may advance per call
void TrayController::updateInitialize()
{
    /*
    Sequence:
    0 - Close latch
    1 - Do tray init
    2 - Move to default location
    */

    // 0 - Close latch
    if(action_step_ == 0)
    {
        std::string data = "lift:close";
        std::string debug = "Closing latch";
        runStepAndWaitForCompletion(data, debug);
    }
    // 1 - Do tray init
    if(action_step_ == 1)
    {
        std::string data = "lift:home";
        std::string debug = "Homing tray";
        runStepAndWaitForCompletion(data, debug);
    }
    // 2 - Move to default location
    if(action_step_ == 2)
    {
        int pos = getPos(LifterPosType::DEFAULT);
        std::string data = "lift:pos:" + std::to_string(pos);
        std::string debug = "Moving tray to default position";
        runStepAndWaitForCompletion(data, debug);
    }
    // 3 - Done with action
    if(action_step_ == 3)
    {
        cur_action_ = ACTION::NONE;
        PLOGI << "Done with initialization";
        is_initialized_ = true;
    }
}
// State machine for the PLACE action; one step may advance per call
void TrayController::updatePlace()
{
    /*
    Sequence:
    0 - Move tray to place
    1 - Open latch
    2 - Move tray to default
    3 - Close latch
    */

    // 0 - Move tray to place
    if(action_step_ == 0)
    {
        int pos = getPos(LifterPosType::PLACE);
        std::string data = "lift:pos:" + std::to_string(pos);
        std::string debug = "Moving tray to placement position";
        runStepAndWaitForCompletion(data, debug);
    }
    // 1 - Open latch
    if(action_step_ == 1)
    {
        std::string data = "lift:open";
        std::string debug = "Opening latch";
        runStepAndWaitForCompletion(data, debug);
    }
    // 2 - Move tray to default
    if(action_step_ == 2)
    {
        int pos = getPos(LifterPosType::DEFAULT);
        std::string data = "lift:pos:" + std::to_string(pos);
        std::string debug = "Moving tray to default position";
        runStepAndWaitForCompletion(data, debug);
    }
    // 3 - Close latch
    if(action_step_ == 3)
    {
        std::string data = "lift:close";
        std::string debug = "Closing latch";
        runStepAndWaitForCompletion(data, debug);
    }
    // 4 - Done with action
    if(action_step_ == 4)
    {
        cur_action_ = ACTION::NONE;
        PLOGI << "Done with place";
    }
}
// State machine for the LOAD action; one step may advance per call.
// Step 1 blocks until setLoadComplete() is called externally.
void TrayController::updateLoad()
{
    /*
    Sequence:
    0 - Move tray to load
    1 - Wait for load complete signal
    2 - Move to default
    */

    // 0 - Move tray to load
    if(action_step_ == 0)
    {
        int pos = getPos(LifterPosType::LOAD);
        std::string data = "lift:pos:" + std::to_string(pos);
        std::string debug = "Moving tray to load position";
        runStepAndWaitForCompletion(data, debug);
    }
    // 1 - Wait for load complete signal
    if(action_step_ == 1)
    {
        if(!action_step_running_)
        {
            action_step_running_ = true;
            PLOGI << "Waiting for load complete";
        }
        else
        {
            // load_complete_ is set asynchronously by setLoadComplete()
            if(load_complete_)
            {
                action_step_++;
                PLOGI << "Got load complete signal";
                load_complete_ = false;
                action_step_running_ = false;
            }
        }
    }
    // 2 - Move to default
    if(action_step_ == 2)
    {
        int pos = getPos(LifterPosType::DEFAULT);
        std::string data = "lift:pos:" + std::to_string(pos);
        std::string debug = "Moving tray to default position";
        runStepAndWaitForCompletion(data, debug);
    }
    // 3 - Done with action
    if(action_step_ == 3)
    {
        cur_action_ = ACTION::NONE;
        PLOGI << "Done with load";
    }
}
================================================
FILE: src/robot/src/TrayController.h
================================================
#ifndef TrayController_h
#define TrayController_h
#include "serial/SerialComms.h"
#include "utils.h"
// Sequences the domino tray lifter/latch through multi-step actions
// (initialize, place, load) over a serial link to the lifter driver.
class TrayController
{
  public:
    TrayController();

    // Starts the INITIALIZE (latch + homing) action
    void initialize();
    // Returns bool indicating if action started successfully
    bool place();
    // Returns bool indicating if action started successfully
    bool load();
    // True while any action sequence is still running
    bool isActionRunning() {return cur_action_ != ACTION::NONE;}
    // Advances the active action's state machine; call regularly
    void update();
    // Aborts the current action and stops the lifter
    void estop();
    // Signals that external loading is finished (consumed by the LOAD action)
    void setLoadComplete();

    // Used for testing
    void setTrayInitialized(bool value) {is_initialized_ = value;};

  private:
    enum ACTION
    {
        NONE,
        INITIALIZE,
        PLACE,
        LOAD,
    };

    SerialCommsBase* serial_to_lifter_driver_; // Serial connection to lifter driver
    bool action_step_running_;       // True while one step of an action is in flight
    bool load_complete_;             // Set by setLoadComplete() during LOAD step 1
    int action_step_;                // Index of the current step within the active action
    bool fake_tray_motion_;          // Config flag: skip waiting on real motion
    ACTION cur_action_;              // Action currently being executed (NONE if idle)
    RateController controller_rate_; // Rate limit controller loop
    Timer action_timer_;             // Times the minimum duration of each step
    bool is_initialized_;            // True once INITIALIZE has completed

    // Sends a command once, then polls the driver until the step completes
    void runStepAndWaitForCompletion(std::string data, std::string debug_print);
    void updateInitialize();
    void updateLoad();
    void updatePlace();
};
#endif
================================================
FILE: src/robot/src/camera_tracker/CameraPipeline.cpp
================================================
#include "CameraPipeline.h"
#include
#include "constants.h"
#include
// NOTE(review): file-scope global mutex - presumably guards shared pipeline
// output between the capture thread and consumers; confirm usage before
// narrowing its scope or making it a class member.
std::mutex data_mutex;
// Returns the pixel location of the "best" detected blob, defined as the
// keypoint with the largest size; ties keep the earliest keypoint. Returns
// (0, 0) when there are no detections.
// (Fixed: restored the missing std::vector element type, and take the
// vector by const reference to avoid a needless deep copy per frame.)
cv::Point2f getBestKeypoint(const std::vector<cv::KeyPoint>& keypoints)
{
    if(keypoints.empty())
    {
        return {0,0};
    }

    // Get keypoint with largest size (proxy for blob area)
    cv::KeyPoint best_keypoint = keypoints.front();
    for (const auto& k : keypoints) {
        // PLOGI << "Keypoint: " << k.class_id;
        // PLOGI << "Point: " << k.pt;
        // PLOGI << "Angle: " << k.angle;
        // PLOGI << "Size: " << k.size;
        if(k.size > best_keypoint.size)
        {
            best_keypoint = k;
        }
    }
    return best_keypoint.pt;
}
// Constructs the pipeline for one camera: loads calibration and detection
// settings from config, then optionally starts the worker thread.
CameraPipeline::CameraPipeline(CAMERA_ID id, bool start_thread)
    : use_debug_image_(cfg.lookup("vision_tracker.debug.use_debug_image")),
      output_debug_images_(cfg.lookup("vision_tracker.debug.save_camera_debug")),
      thread_running_(false),
      current_output_()
{
    initCamera(id);

    // Configure detection parameters
    threshold_ = cfg.lookup("vision_tracker.detection.threshold");
    // NOTE(review): min/max blob thresholds are hardcoded while the rest of
    // the detection parameters come from config - consider moving to config
    blob_params_.minThreshold = 10;
    blob_params_.maxThreshold = 200;
    blob_params_.filterByArea = cfg.lookup("vision_tracker.detection.blob.use_area");
    blob_params_.minArea = cfg.lookup("vision_tracker.detection.blob.min_area");
    blob_params_.maxArea = cfg.lookup("vision_tracker.detection.blob.max_area");
    blob_params_.filterByColor = false;
    blob_params_.filterByCircularity = cfg.lookup("vision_tracker.detection.blob.use_circularity");
    blob_params_.minCircularity = cfg.lookup("vision_tracker.detection.blob.min_circularity");
    blob_params_.maxCircularity = cfg.lookup("vision_tracker.detection.blob.max_circularity");
    blob_params_.filterByConvexity = false;
    blob_params_.filterByInertia = false;
    blob_detector_ = cv::SimpleBlobDetector::create(blob_params_);

    // Start thread
    if (start_thread) start();
}
// Ensure the pipeline is stopped before camera resources are torn down
CameraPipeline::~CameraPipeline()
{
    stop();
}
// Loads intrinsic calibration (K, D) from file, builds the extrinsic
// transform from configured offsets, and opens either the real camera
// device or a configured debug image depending on use_debug_image_.
void CameraPipeline::initCamera(CAMERA_ID id)
{
    // Get config strings
    std::string name = cameraIdToString(id);
    std::string calibration_path_config_name = "vision_tracker." + name + ".calibration_file";
    std::string camera_path_config_name = "vision_tracker." + name + ".camera_path";
    std::string debug_output_path_config_name = "vision_tracker." + name + ".debug_output_path";
    std::string debug_image_config_name = "vision_tracker." + name + ".debug_image";
    std::string x_offset_config_name = "vision_tracker.physical." + name + ".x_offset";
    std::string y_offset_config_name = "vision_tracker.physical." + name + ".y_offset";
    std::string z_offset_config_name = "vision_tracker.physical." + name + ".z_offset";
    std::string x_res_scale_config_name = "vision_tracker." + name + ".resolution_scale_x";
    std::string y_res_scale_config_name = "vision_tracker." + name + ".resolution_scale_y";

    // Initialize intrinsic calibration data
    camera_data_.id = id;
    std::string calibration_path = cfg.lookup(calibration_path_config_name);
    PLOGI.printf("Loading %s camera calibration from %s", name.c_str(), calibration_path.c_str());
    cv::FileStorage fs(calibration_path, cv::FileStorage::READ);
    if(fs["K"].isNone() || fs["D"].isNone())
    {
        PLOGE.printf("Missing %s calibration data", name.c_str());
        // NOTE(review): bare 'throw;' with no active exception calls
        // std::terminate - presumably intended as a fatal abort, but an
        // explicit exception type would be clearer
        throw;
    }
    fs["K"] >> camera_data_.K;
    fs["D"] >> camera_data_.D;

    // Handle scale factors: scale row 0 (x) and row 1 (y) of K,
    // presumably to adapt the calibration to the configured capture
    // resolution - confirm against the calibration workflow
    float x_res_scale = cfg.lookup(x_res_scale_config_name);
    camera_data_.K.at(0,0) = camera_data_.K.at(0,0) * x_res_scale;
    camera_data_.K.at(0,1) = camera_data_.K.at(0,1) * x_res_scale;
    camera_data_.K.at(0,2) = camera_data_.K.at(0,2) * x_res_scale;
    float y_res_scale = cfg.lookup(y_res_scale_config_name);
    camera_data_.K.at(1,0) = camera_data_.K.at(1,0) * y_res_scale;
    camera_data_.K.at(1,1) = camera_data_.K.at(1,1) * y_res_scale;
    camera_data_.K.at(1,2) = camera_data_.K.at(1,2) * y_res_scale;

    // Initialize extrinsic data
    float x_offset = cfg.lookup(x_offset_config_name);
    float y_offset = cfg.lookup(y_offset_config_name);
    float z_offset = cfg.lookup(z_offset_config_name);
    Eigen::Vector3f camera_pose = {x_offset, y_offset, z_offset};
    Eigen::Matrix3f camera_rotation;
    // Hardcoding rotation matrices here for simplicity
    if(id == CAMERA_ID::SIDE)
    {
        camera_rotation << 1,0,0, 0,-1,0, 0,0,-1;
    }
    else
    {
        camera_rotation << 0,1,0, 1,0,0, 0,0,-1;
    }
    // From https://ksimek.github.io/2012/08/22/extrinsic/
    camera_data_.t = -1 * camera_rotation * camera_pose;
    camera_data_.R = camera_rotation.transpose();
    // PLOGI << name << " R: " << camera_data_.R;
    // PLOGI << name << " t: " << camera_data_.t.transpose();

    // Precompute some inverse values for later re-use
    camera_data_.R_inv = camera_data_.R.inverse();
    Eigen::Matrix3f tmp;
    // Hack to get around problems with enabling eigen support in opencv
    tmp << camera_data_.K.at(0,0), camera_data_.K.at(0,1), camera_data_.K.at(0,2),
           camera_data_.K.at(1,0), camera_data_.K.at(1,1), camera_data_.K.at(1,2),
           camera_data_.K.at(2,0), camera_data_.K.at(2,1), camera_data_.K.at(2,2);
    camera_data_.K_inv = tmp.inverse();
    // PLOGI << name << " K: " << camera_data_.K;
    // PLOGI << name << " Ktmp: " << tmp;
    // PLOGI << name << " Kinv: " << camera_data_.K_inv;

    // Initialize camera calibration capture or debug data
    if(!use_debug_image_)
    {
        std::string camera_path = cfg.lookup(camera_path_config_name);
        camera_data_.capture = cv::VideoCapture(camera_path);
        if (!camera_data_.capture.isOpened())
        {
            PLOGE.printf("Could not open %s camera at %s", name.c_str(), camera_path.c_str());
            // NOTE(review): bare 'throw;' calls std::terminate (see above)
            throw;
        }
        PLOGI.printf("Opened %s camera at %s", name.c_str(), camera_path.c_str());
        camera_data_.capture.set(cv::CAP_PROP_FRAME_WIDTH, 320);
        camera_data_.capture.set(cv::CAP_PROP_FRAME_HEIGHT, 240);
        camera_data_.capture.set(cv::CAP_PROP_FPS, 30);
        int width = camera_data_.capture.get(cv::CAP_PROP_FRAME_WIDTH);
        int height = camera_data_.capture.get(cv::CAP_PROP_FRAME_HEIGHT);
        int fps = camera_data_.capture.get(cv::CAP_PROP_FPS);
        PLOGI.printf("Properties %s: resolution: %ix%i, fps: %i", name.c_str(), width, height, fps);
    }
    else
    {
        std::string image_path = cfg.lookup(debug_image_config_name);
        camera_data_.debug_frame = cv::imread(image_path, cv::IMREAD_GRAYSCALE);
        if(camera_data_.debug_frame.empty())
        {
            PLOGE.printf("Could not read %s debug image from %s", name.c_str(), image_path.c_str());
            // NOTE(review): bare 'throw;' calls std::terminate (see above)
            throw;
        }
        PLOGW.printf("Loading %s debug image from %s", name.c_str(), image_path.c_str());
    }

    // Initialze debug output path
    if(output_debug_images_)
    {
        camera_data_.debug_output_path = std::string(cfg.lookup(debug_output_path_config_name));
    }
}
// Maps a camera id to the short name used in config keys and log messages
std::string CameraPipeline::cameraIdToString(CAMERA_ID id)
{
    switch (id)
    {
        case CAMERA_ID::SIDE: return "side";
        case CAMERA_ID::REAR: return "rear";
    }
    PLOGE << "Unknown camera id";
    return "unk";
}
// Run the full detection pipeline on one raw grayscale frame:
// undistort -> binary threshold -> blob detection. Returns every keypoint the
// blob detector found (may be empty). When output_debug is true, a set of
// annotated intermediate images is also written to this camera's configured
// debug output directory.
std::vector CameraPipeline::allKeypointsInImage(cv::Mat img_raw, bool output_debug)
{
    // Timer t;
    // Undistort and crop
    cv::Mat img_undistorted;
    cv::Rect validPixROI;
    // PLOGI << "init memory time: " << t.dt_ms();
    // t.reset();
    // NOTE(review): newcameramtx and validPixROI are computed but never used --
    // undistort() below is called with the original K. Dead work unless an ROI
    // crop is planned for later; confirm before removing.
    cv::Mat newcameramtx = cv::getOptimalNewCameraMatrix(camera_data_.K, camera_data_.D, img_raw.size(), /*alpha=*/1, img_raw.size(), &validPixROI);
    // PLOGI << "new camera time: " << t.dt_ms();
    // t.reset();
    cv::undistort(img_raw, img_undistorted, camera_data_.K, camera_data_.D);
    // PLOGI << "undistort time: " << t.dt_ms();
    // t.reset();
    // Threshold: inverted binary, so pixels darker than threshold_ become
    // white foreground blobs for the detector
    cv::Mat img_thresh;
    cv::threshold(img_undistorted, img_thresh, threshold_, 255, cv::THRESH_BINARY_INV);
    // PLOGI <<" Threshold";
    // PLOGI << "threshold time: " << t.dt_ms();
    // t.reset();
    // Blob detection
    std::vector keypoints;
    keypoints.reserve(10);
    // PLOGI << "blob memory time: " << t.dt_ms();
    // t.reset();
    blob_detector_->detect(img_thresh, keypoints);
    // PLOGI <<"Num blobs " << keypoints.size();
    // PLOGI << "blob detect time: " << t.dt_ms();
    // t.reset();
    if(output_debug)
    {
        // Debug path is looked up from config here rather than using
        // camera_data_.debug_output_path (which is only populated when debug
        // output was enabled at construction time)
        std::string debug_path = camera_data_.id == CAMERA_ID::SIDE ?
            cfg.lookup("vision_tracker.side.debug_output_path") :
            cfg.lookup("vision_tracker.rear.debug_output_path");
        // Dump the raw pipeline stages
        cv::imwrite(debug_path + "img_raw.jpg", img_raw);
        cv::imwrite(debug_path + "img_undistorted.jpg", img_undistorted);
        cv::imwrite(debug_path + "img_thresh.jpg", img_thresh);
        // All detected keypoints drawn on the thresholded image
        cv::Mat img_with_keypoints;
        cv::drawKeypoints(img_thresh, keypoints, img_with_keypoints, cv::Scalar(0,0,255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
        cv::imwrite(debug_path + "img_keypoints.jpg", img_with_keypoints);
        // Best keypoint annotated with its robot-frame coordinates. Note this
        // cv::Mat assignment is a shallow copy, so the drawing below also marks
        // img_undistorted -- harmless since it was already written out above.
        cv::Mat img_with_best_keypoint = img_undistorted;
        cv::Point2f best_keypoint = getBestKeypoint(keypoints);
        cv::circle(img_with_best_keypoint, best_keypoint, 1, cv::Scalar(0,0,255), -1);
        cv::circle(img_with_best_keypoint, best_keypoint, 10, cv::Scalar(0,0,255), 5);
        Eigen::Vector2f best_point_m = cameraToRobot(best_keypoint);
        std::string label_text = "Best:" + std::to_string(best_point_m[0]) +"m,"+ std::to_string(best_point_m[1]) + "m";
        cv::putText(img_with_best_keypoint, //target image
            label_text, //text
            cv::Point(10, 20), //top-left position
            cv::FONT_HERSHEY_DUPLEX,
            0.5,
            CV_RGB(255,0,0), //font color
            1);
        // Expected target location (from config, robot frame) projected back
        // into the image for visual comparison against the detection
        Eigen::Vector2f target_point_world;
        if(camera_data_.id == CAMERA_ID::SIDE)
        {
            target_point_world << float(cfg.lookup("vision_tracker.physical.side.target_x")), float(cfg.lookup("vision_tracker.physical.side.target_y"));
        }
        else
        {
            target_point_world << float(cfg.lookup("vision_tracker.physical.rear.target_x")), float(cfg.lookup("vision_tracker.physical.rear.target_y"));
        }
        Eigen::Vector2f target_point_camera = robotToCamera(target_point_world);
        cv::Point2f pt{target_point_camera[0], target_point_camera[1]};
        cv::circle(img_with_best_keypoint, pt, 5, cv::Scalar(255,0,0), -1);
        std::string label_text2 = "Target:" + std::to_string(target_point_camera[0]) +"px, "+ std::to_string(target_point_camera[1]) + "px";
        cv::putText(img_with_best_keypoint, //target image
            label_text2, //text
            cv::Point(10, 40), //top-left position
            cv::FONT_HERSHEY_DUPLEX,
            0.5,
            CV_RGB(0,0,255), //font color
            1);
        cv::imwrite(debug_path + "img_best_keypoint.jpg", img_with_best_keypoint);
        PLOGI.printf("Writing debug images %s",cameraIdToString(camera_data_.id).c_str());
    }
    // PLOGI << "debug time: " << t.dt_ms();
    // t.reset();
    return keypoints;
}
// Launch the background capture/detection thread. No-op if already running.
void CameraPipeline::start()
{
    if (thread_running_) return;
    PLOGI.printf("Starting %s camera thread", cameraIdToString(camera_data_.id).c_str());
    thread_running_ = true;
    thread_ = std::thread(&CameraPipeline::threadLoop, this);
}
// Signal the worker thread to exit and wait for it. No-op if not running.
void CameraPipeline::stop()
{
    if (!thread_running_) return;
    PLOGI.printf("Stopping %s camera thread", cameraIdToString(camera_data_.id).c_str());
    thread_running_ = false;
    thread_.join();
}
// Worker-thread entry point: run iterations until stop() clears the flag.
void CameraPipeline::threadLoop()
{
    for (;;)
    {
        if (!thread_running_) break;
        oneLoop();
    }
}
// Execute one capture/detect/publish cycle: grab a frame (or use the static
// debug image), find keypoints, convert the best one to robot coordinates,
// then publish the result under the data mutex. Any cv::Exception is caught
// and reported as a failed detection so the worker thread keeps running.
void CameraPipeline::oneLoop()
{
    Eigen::Vector2f best_point_m = {0,0};
    cv::Point2f best_point_px = {0,0};
    bool detected = false;
    try
    {
        // Get latest frame
        cv::Mat frame;
        if (use_debug_image_) frame = camera_data_.debug_frame;
        else camera_data_.capture >> frame;
        // Perform keypoint detection
        std::vector keypoints = allKeypointsInImage(frame, output_debug_images_);
        // Do post-processing if the detection was successful
        detected = !keypoints.empty();
        if(detected)
        {
            best_point_px = getBestKeypoint(keypoints);
            best_point_m = cameraToRobot(best_point_px);
        }
    }
    catch (cv::Exception& e)
    {
        // Treat any OpenCV failure as "no detection" rather than crashing the thread
        PLOGW << "Caught cv::Exception: " << e.what();
        detected = false;
        best_point_m = {0,0};
        best_point_px = {0,0};
    }
    // Update output values in thread-safe manner
    {
        std::lock_guard read_lock(data_mutex);
        current_output_.ok = detected;
        current_output_.timestamp = ClockFactory::getFactoryInstance()->get_clock()->now();
        current_output_.point = best_point_m;
        current_output_.uv = {best_point_px.x, best_point_px.y};
    }
}
// Return the most recent pipeline output. The ok flag is cleared on read so
// each detection is consumed at most once. Thread-safe via data_mutex.
CameraPipelineOutput CameraPipeline::getData()
{
    std::lock_guard read_lock(data_mutex);
    CameraPipelineOutput snapshot = current_output_;
    current_output_.ok = false;
    return snapshot;
}
// Back-project an undistorted image point (pixels) onto the Z=0 ground plane
// and return its (x, y) position in robot/world coordinates (meters).
// Pinhole model: s * uv = K * (R * world + t); s is solved so that the
// resulting world point lands on Z = 0.
Eigen::Vector2f CameraPipeline::cameraToRobot(cv::Point2f cameraPt)
{
    // Homogeneous pixel coordinate
    Eigen::Vector3f uv_point = {cameraPt.x, cameraPt.y, 1};
    // Compute scaling factor for Z=0: ratio of the Z components of the
    // rotated translation and the rotated back-projected ray
    Eigen::Vector3f tmp1 = camera_data_.R_inv * camera_data_.K_inv * uv_point;
    Eigen::Vector3f tmp2 = camera_data_.R_inv * camera_data_.t;
    float s = tmp2(2,0) / tmp1(2,0);
    // Scale the camera-frame ray, then remove the extrinsic translation
    Eigen::Vector3f scaled_cam_frame = s * camera_data_.K_inv * uv_point;
    Eigen::Vector3f cam_frame_offset = scaled_cam_frame - camera_data_.t;
    // Convert to world coordinate
    Eigen::Vector3f world_point = camera_data_.R_inv * cam_frame_offset;
    // PLOGI << cameraIdToString(id);
    // PLOGI << "uv_point: " << uv_point.transpose();
    // PLOGI << "scaled_cam_frame: " << scaled_cam_frame.transpose();
    // PLOGI << "cam_frame_offset: " << cam_frame_offset.transpose();
    // PLOGI << "world_point: " << world_point.transpose();
    // Sanity check: by construction of s the result should lie on Z=0
    if(fabs(world_point(2)) > 1e-3) PLOGE.printf("Z value %f is not near zero", world_point(2));
    return {world_point(0),world_point(1)};
}
// Project a point on the robot's ground plane (meters, Z=0) into image pixel
// coordinates: uv ~ K * [R | t] * [X Y 0 1]^T, with the perspective divide
// applied at the end.
Eigen::Vector2f CameraPipeline::robotToCamera(Eigen::Vector2f robotPt)
{
    // Homogeneous 3D point on the Z=0 plane
    Eigen::Vector4f robot_point_3d = {robotPt[0], robotPt[1], 0, 1};
    // Build the 3x4 extrinsic matrix [R | t]
    Eigen::MatrixXf r_t(3,4);
    r_t << camera_data_.R, camera_data_.t;
    Eigen::Matrix3f K_tmp;
    // Hack to get around problems with enabling eigen support in opencv:
    // copy the cv::Mat intrinsics element-by-element into an Eigen matrix
    K_tmp << camera_data_.K.at(0,0), camera_data_.K.at(0,1), camera_data_.K.at(0,2),
        camera_data_.K.at(1,0), camera_data_.K.at(1,1), camera_data_.K.at(1,2),
        camera_data_.K.at(2,0), camera_data_.K.at(2,1), camera_data_.K.at(2,2);
    // Perspective divide converts the scaled homogeneous result to pixels
    Eigen::Vector3f camera_pt_scaled = K_tmp * r_t * robot_point_3d;
    return {camera_pt_scaled[0]/camera_pt_scaled[2], camera_pt_scaled[1]/camera_pt_scaled[2]};
}
================================================
FILE: src/robot/src/camera_tracker/CameraPipeline.h
================================================
#ifndef CameraPipeline_h
#define CameraPipeline_h
#include "utils.h"
#include
#include
#include
#include
// Identifies which physical camera a pipeline instance manages.
enum class CAMERA_ID
{
    REAR,
    SIDE
};
// One detection result published by a camera pipeline.
struct CameraPipelineOutput
{
    // True when a target was detected on the cycle that produced this output
    bool ok = false;
    // When the result was generated
    ClockTimePoint timestamp = ClockFactory::getFactoryInstance()->get_clock()->now();
    // Best detection in robot coordinates (meters)
    Eigen::Vector2f point = {0,0};
    // Best detection in image coordinates (pixels)
    Eigen::Vector2f uv = {0,0};
};
// Owns a single camera: grabs frames (or a static debug image), runs
// undistort/threshold/blob detection, converts the best keypoint to robot
// coordinates, and publishes results thread-safely from a worker thread.
class CameraPipeline
{
  public:
    CameraPipeline(CAMERA_ID id, bool start_thread);
    ~CameraPipeline();
    // Latest output; the ok flag is consumed on read
    CameraPipelineOutput getData();
    // Start/stop the background worker thread
    void start();
    void stop();
    // One capture/detect/publish iteration (public so it can be driven directly)
    void oneLoop();
    // Flip debug image dumping on/off at runtime
    void toggleDebugImageOutput() {output_debug_images_ = !output_debug_images_;};
    // Back-project a pixel onto the robot ground plane (meters)
    Eigen::Vector2f cameraToRobot(cv::Point2f cameraPt);
    // Project a robot ground-plane point into image pixels
    Eigen::Vector2f robotToCamera(Eigen::Vector2f robotPt);
  private:
    // Per-camera capture handle and calibration data
    struct CameraData
    {
        CAMERA_ID id;
        cv::VideoCapture capture;       // Live capture device (unused in debug-image mode)
        cv::Mat K;                      // Camera intrinsic matrix
        Eigen::Matrix3f K_inv;          // Inverse intrinsics (Eigen copy)
        cv::Mat D;                      // Lens distortion coefficients
        Eigen::Matrix3f R;              // Extrinsic rotation (world/robot -> camera)
        Eigen::Matrix3f R_inv;          // Inverse of R
        Eigen::Vector3f t;              // Extrinsic translation
        cv::Mat debug_frame;            // Static frame used when use_debug_image_ is set
        std::string debug_output_path;  // Directory for debug image dumps
    };
    void threadLoop();
    std::string cameraIdToString(CAMERA_ID id);
    void initCamera(CAMERA_ID id);
    std::vector allKeypointsInImage(cv::Mat img_raw, bool output_debug);
    CameraData camera_data_;
    bool use_debug_image_;              // Process a fixed image instead of live capture
    std::atomic output_debug_images_;   // Toggleable from other threads
    std::atomic thread_running_;        // Worker-thread run flag
    std::thread thread_;
    CameraPipelineOutput current_output_;
    // Params read from config
    cv::SimpleBlobDetector::Params blob_params_;
    cv::Ptr blob_detector_;
    int threshold_;                 // Binary threshold applied before blob detection
    float pixels_per_meter_u_;      // Config-loaded scale; not used in the code visible here
    float pixels_per_meter_v_;      // Config-loaded scale; not used in the code visible here
};
#endif //CameraPipeline_h
================================================
FILE: src/robot/src/camera_tracker/CameraTracker.cpp
================================================
#include "CameraTracker.h"
#include
#include "constants.h"
// Window size (samples) for the loop-time running average below.
constexpr int num_samples_to_average = 10;
// Construct the tracker and both camera pipelines. start_thread is forwarded
// to each CameraPipeline (presumably controls whether its worker thread is
// started at construction -- confirm in CameraPipeline). The expected target
// point for each camera (robot frame, meters) is read from the config file.
CameraTracker::CameraTracker(bool start_thread)
: camera_loop_time_averager_(num_samples_to_average),
rear_cam_(CAMERA_ID::REAR, start_thread),
side_cam_(CAMERA_ID::SIDE, start_thread),
output_({{0,0,0}, false, ClockFactory::getFactoryInstance()->get_clock()->now(), false}),
last_rear_cam_output_(),
last_side_cam_output_(),
side_cam_ok_filter_(0.5),
rear_cam_ok_filter_(0.5),
both_cams_ok_filter_(0.5),
running_(false)
{
    // Target points
    robot_P_side_target_ = {cfg.lookup("vision_tracker.physical.side.target_x"),
                            cfg.lookup("vision_tracker.physical.side.target_y")};
    robot_P_rear_target_ = {cfg.lookup("vision_tracker.physical.rear.target_x"),
                            cfg.lookup("vision_tracker.physical.rear.target_y")};
}
// No explicit cleanup; members release their resources in their own destructors.
CameraTracker::~CameraTracker() {}
// Pull the latest result from each camera pipeline, latch the most recent
// successful detection per camera, and -- once both cameras have an
// unconsumed detection -- solve for the robot pose and refresh the debug
// snapshot. Latched detections are marked consumed afterwards so a single
// detection never produces two pose updates.
void CameraTracker::update()
{
    CameraPipelineOutput side_output = side_cam_.getData();
    CameraPipelineOutput rear_output = rear_cam_.getData();
    // Unfiltered flag: both cameras detected on this exact cycle
    output_.raw_detection = side_output.ok && rear_output.ok;
    debug_.side_ok = side_cam_ok_filter_.update(side_output.ok);
    debug_.rear_ok = rear_cam_ok_filter_.update(rear_output.ok);
    bool new_output_pose_ready = false;
    // Latch the newest successful detection from each camera so a pose can be
    // formed even when the two cameras do not report on the same cycle
    if(rear_output.ok)
    {
        last_rear_cam_output_ = rear_output;
    }
    if(side_output.ok)
    {
        last_side_cam_output_ = side_output;
    }
    if(last_side_cam_output_.ok && last_rear_cam_output_.ok) new_output_pose_ready = true;
    // Disabled staleness check between the two cameras' timestamps
    // int time_delta_ms = std::chrono::duration_cast
    // (last_side_cam_output_.timestamp - last_rear_cam_output_.timestamp).count();
    // if(abs(time_delta_ms) > 400)
    // {
    // PLOGW << "Camera timestamp delta too large: " << time_delta_ms;
    // new_output_pose_ready = false;
    // }
    output_.ok = both_cams_ok_filter_.update(new_output_pose_ready);
    debug_.both_ok = output_.ok;
    if(!new_output_pose_ready) return;
    // Populate output
    output_.pose = computeRobotPoseFromImagePoints(last_side_cam_output_.point, last_rear_cam_output_.point);
    output_.timestamp = ClockFactory::getFactoryInstance()->get_clock()->now();
    camera_loop_time_averager_.mark_point();
    // Populate debug
    debug_.side_u = last_side_cam_output_.uv[0];
    debug_.side_v = last_side_cam_output_.uv[1];
    debug_.rear_u = last_rear_cam_output_.uv[0];
    debug_.rear_v = last_rear_cam_output_.uv[1];
    debug_.side_x = last_side_cam_output_.point[0];
    debug_.side_y = last_side_cam_output_.point[1];
    debug_.rear_x = last_rear_cam_output_.point[0];
    debug_.rear_y = last_rear_cam_output_.point[1];
    debug_.pose_x = output_.pose.x;
    debug_.pose_y = output_.pose.y;
    debug_.pose_a = output_.pose.a;
    debug_.loop_ms = camera_loop_time_averager_.get_ms();
    // Mark camera output values as not okay to make sure they are only used once
    last_rear_cam_output_.ok = false;
    last_side_cam_output_.ok = false;
}
// Return the latest fused pose output (copy; not cleared on read).
CameraTrackerOutput CameraTracker::getPoseFromCamera()
{
    return output_;
}
// Return the latest debug snapshot populated by update().
CameraDebug CameraTracker::getCameraDebug()
{
    return debug_;
}
// Start both camera pipelines and reset the loop timer so stale samples
// do not skew the average.
void CameraTracker::start()
{
    camera_loop_time_averager_ = TimeRunningAverage(num_samples_to_average);
    side_cam_.start();
    rear_cam_.start();
    running_ = true;
}
// Stop both camera pipelines and mark the tracker as not running.
void CameraTracker::stop()
{
    running_ = false;
    side_cam_.stop();
    rear_cam_.stop();
}
// Flip debug image dumping on both camera pipelines.
void CameraTracker::toggleDebugImageOutput()
{
    side_cam_.toggleDebugImageOutput();
    rear_cam_.toggleDebugImageOutput();
}
// Recover the robot pose (x, y, heading) from the two detected target points
// (robot-frame coordinates from the side and rear cameras). Solves the
// linear system that rotates each detected point by the heading and
// translates it onto its configured target location.
Point CameraTracker::computeRobotPoseFromImagePoints(Eigen::Vector2f p_side, Eigen::Vector2f p_rear)
{
    // Unknown vector is [tx, ty, sin(a), cos(a)]
    Eigen::Matrix4f coeffs;
    coeffs << 1, 0, -p_rear[1], p_rear[0],
              0, 1,  p_rear[0], p_rear[1],
              1, 0, -p_side[1], p_side[0],
              0, 1,  p_side[0], p_side[1];
    Eigen::Vector4f rhs;
    rhs << robot_P_rear_target_[0],
           robot_P_rear_target_[1],
           robot_P_side_target_[0],
           robot_P_side_target_[1];
    Eigen::Vector4f soln = coeffs.colPivHouseholderQr().solve(rhs);
    // atan2(sin, cos) recovers the heading; the QR solve effectively averages
    // the angle information from both cameras
    float heading = atan2(soln[2], soln[3]);
    return {soln[0], soln[1], heading};
}
================================================
FILE: src/robot/src/camera_tracker/CameraTracker.h
================================================
#ifndef CameraTracker_h
#define CameraTracker_h
#include "CameraTrackerBase.h"
#include "utils.h"
#include
#include "CameraPipeline.h"
// Fuses detections from the rear and side camera pipelines into a single
// robot pose estimate (see computeRobotPoseFromImagePoints).
class CameraTracker : public CameraTrackerBase
{
  public:
    // start_thread is forwarded to each CameraPipeline (presumably starts
    // their worker threads at construction -- confirm in CameraPipeline)
    CameraTracker(bool start_thread = false);
    virtual ~CameraTracker();
    virtual void start() override;
    virtual void stop() override;
    // Pull new data from both pipelines and refresh the fused pose
    virtual void update() override;
    virtual bool running() override { return running_; };
    virtual void toggleDebugImageOutput() override;
    virtual CameraTrackerOutput getPoseFromCamera() override;
    virtual CameraDebug getCameraDebug() override;
    // Below here exposed for testing only (yes, bad practice, but useful for now)
    Point computeRobotPoseFromImagePoints(Eigen::Vector2f p_side, Eigen::Vector2f p_rear);
    void oneLoop();
  private:
    TimeRunningAverage camera_loop_time_averager_;  // Time between pose updates
    CameraDebug debug_;                             // Debug snapshot refreshed by update()
    CameraPipeline rear_cam_;
    CameraPipeline side_cam_;
    CameraTrackerOutput output_;                    // Latest fused pose output
    Eigen::Vector2f robot_P_side_target_;           // Side target point, robot frame (from config)
    Eigen::Vector2f robot_P_rear_target_;           // Rear target point, robot frame (from config)
    CameraPipelineOutput last_rear_cam_output_;     // Latched most recent rear detection
    CameraPipelineOutput last_side_cam_output_;     // Latched most recent side detection
    LatchedBool side_cam_ok_filter_;                // Filters applied to the ok flags
    LatchedBool rear_cam_ok_filter_;
    LatchedBool both_cams_ok_filter_;
    bool running_;
};
#endif //CameraTracker_h
================================================
FILE: src/robot/src/camera_tracker/CameraTrackerBase.h
================================================
#ifndef CameraTrackerBase_h
#define CameraTrackerBase_h
#include "utils.h"
// Fused localization result produced by a camera tracker.
struct CameraTrackerOutput
{
    Point pose;             // Estimated robot pose (x, y, a)
    bool ok;                // True when the pose estimate is considered valid
    ClockTimePoint timestamp;
    bool raw_detection;     // Unfiltered "both cameras detected" flag
};
// Abstract interface for camera-based trackers (real hardware or mock).
// NOTE(review): no virtual destructor -- deleting a derived tracker through
// this base pointer (e.g. from the factory's smart pointer) is undefined
// behavior; consider adding `virtual ~CameraTrackerBase() = default;`.
class CameraTrackerBase
{
  public:
    virtual void start() = 0;
    virtual void stop() = 0;
    virtual void update() = 0;
    virtual bool running() = 0;
    virtual void toggleDebugImageOutput() = 0;
    virtual CameraTrackerOutput getPoseFromCamera() = 0;
    virtual CameraDebug getCameraDebug() = 0;
};
#endif //CameraTrackerBase_h
================================================
FILE: src/robot/src/camera_tracker/CameraTrackerFactory.cpp
================================================
#include "CameraTrackerFactory.h"
#include "CameraTracker.h"
#include "CameraTrackerMock.h"
#include
// Singleton instance pointer; created lazily by getFactoryInstance().
// nullptr instead of NULL: type-safe and the modern C++ idiom.
CameraTrackerFactory* CameraTrackerFactory::instance = nullptr;
// Lazily construct and return the process-wide factory instance.
// The instance is intentionally never freed; it lives for the whole program.
CameraTrackerFactory* CameraTrackerFactory::getFactoryInstance()
{
    if (instance == nullptr)
    {
        instance = new CameraTrackerFactory();
    }
    return instance;
}
// Select which tracker implementation build_camera_tracker() will create.
// Only affects trackers built after this call.
void CameraTrackerFactory::set_mode(CAMERA_TRACKER_FACTORY_MODE mode)
{
    mode_ = mode;
}
// Return the tracker, constructing it on first use according to the
// currently selected mode. The factory retains ownership.
CameraTrackerBase* CameraTrackerFactory::get_camera_tracker()
{
    if (camera_tracker_ == nullptr)
    {
        build_camera_tracker();
    }
    return camera_tracker_.get();
}
void CameraTrackerFactory::build_camera_tracker()
{
if(mode_ == CAMERA_TRACKER_FACTORY_MODE::STANDARD)
{
camera_tracker_ = std::make_unique();
PLOGI << "Built CameraTracker";
}
else if (mode_ == CAMERA_TRACKER_FACTORY_MODE::MOCK)
{
camera_tracker_ = std::make_unique();
PLOGI << "Built CameraTrackerMock";
}
}
// Private constructor (singleton); defaults to the standard hardware tracker
// with no tracker instance built yet.
CameraTrackerFactory::CameraTrackerFactory()
: mode_(CAMERA_TRACKER_FACTORY_MODE::STANDARD),
camera_tracker_()
{}
================================================
FILE: src/robot/src/camera_tracker/CameraTrackerFactory.h
================================================
#ifndef CameraTrackerFactory_h
#define CameraTrackerFactory_h
#include
#include