Showing preview only (801K chars total). Download the full file or copy to clipboard to get everything.
Repository: BerkeleyAutomation/gqcnn
Branch: master
Commit: 499a609fe9df
Files: 170
Total size: 754.8 KB
Directory structure:
gitextract_i8u5tpyn/
├── .flake8
├── .github/
│ └── ISSUE_TEMPLATE/
│ ├── bug-performance-issue--custom-images-.md
│ ├── bug-performance-issue--physical-robot-.md
│ ├── bug-performance-issue--replication-.md
│ ├── installation-issue.md
│ └── other-issue.md
├── .gitignore
├── .style.yapf
├── .travis.yml
├── CMakeLists.txt
├── LICENSE
├── README.md
├── cfg/
│ ├── examples/
│ │ ├── antipodal_grasp_sampling.yaml
│ │ ├── fc_gqcnn_pj.yaml
│ │ ├── fc_gqcnn_suction.yaml
│ │ ├── gqcnn_pj.yaml
│ │ ├── gqcnn_suction.yaml
│ │ ├── replication/
│ │ │ ├── dex-net_2.0.yaml
│ │ │ ├── dex-net_2.1.yaml
│ │ │ ├── dex-net_3.0.yaml
│ │ │ ├── dex-net_4.0_fc_pj.yaml
│ │ │ ├── dex-net_4.0_fc_suction.yaml
│ │ │ ├── dex-net_4.0_pj.yaml
│ │ │ └── dex-net_4.0_suction.yaml
│ │ └── ros/
│ │ ├── fc_gqcnn_pj.yaml
│ │ ├── fc_gqcnn_suction.yaml
│ │ ├── gqcnn_pj.yaml
│ │ └── gqcnn_suction.yaml
│ ├── finetune.yaml
│ ├── finetune_dex-net_4.0_pj.yaml
│ ├── finetune_dex-net_4.0_suction.yaml
│ ├── finetune_example_pj.yaml
│ ├── finetune_example_suction.yaml
│ ├── hyperparam_search/
│ │ ├── train_dex-net_4.0_fc_suction_hyperparam_search.yaml
│ │ └── train_hyperparam_search.yaml
│ ├── tools/
│ │ ├── analyze_gqcnn_performance.yaml
│ │ └── run_policy.yaml
│ ├── train.yaml
│ ├── train_dex-net_2.0.yaml
│ ├── train_dex-net_3.0.yaml
│ ├── train_dex-net_4.0_fc_pj.yaml
│ ├── train_dex-net_4.0_fc_suction.yaml
│ ├── train_dex-net_4.0_pj.yaml
│ ├── train_dex-net_4.0_suction.yaml
│ ├── train_example_pj.yaml
│ ├── train_example_suction.yaml
│ └── train_fc.yaml
├── ci/
│ └── travis/
│ └── format.sh
├── data/
│ ├── calib/
│ │ ├── phoxi/
│ │ │ ├── phoxi.intr
│ │ │ └── phoxi_to_world.tf
│ │ └── primesense/
│ │ ├── primesense.intr
│ │ └── primesense.tf
│ └── examples/
│ ├── clutter/
│ │ ├── phoxi/
│ │ │ ├── dex-net_4.0/
│ │ │ │ ├── depth_0.npy
│ │ │ │ ├── depth_1.npy
│ │ │ │ ├── depth_2.npy
│ │ │ │ ├── depth_3.npy
│ │ │ │ └── depth_4.npy
│ │ │ └── fcgqcnn/
│ │ │ ├── depth_0.npy
│ │ │ ├── depth_1.npy
│ │ │ ├── depth_2.npy
│ │ │ ├── depth_3.npy
│ │ │ └── depth_4.npy
│ │ └── primesense/
│ │ ├── depth_0.npy
│ │ ├── depth_1.npy
│ │ ├── depth_2.npy
│ │ ├── depth_3.npy
│ │ └── depth_4.npy
│ └── single_object/
│ └── primesense/
│ ├── depth_0.npy
│ ├── depth_1.npy
│ ├── depth_2.npy
│ ├── depth_3.npy
│ ├── depth_4.npy
│ ├── depth_5.npy
│ ├── depth_6.npy
│ ├── depth_7.npy
│ ├── depth_8.npy
│ └── depth_9.npy
├── docker/
│ ├── cpu/
│ │ └── Dockerfile
│ └── gpu/
│ └── Dockerfile
├── docs/
│ ├── Makefile
│ ├── gh_deploy.sh
│ ├── make.bat
│ └── source/
│ ├── api/
│ │ ├── analysis.rst
│ │ ├── gqcnn.rst
│ │ ├── policies.rst
│ │ └── training.rst
│ ├── benchmarks/
│ │ └── benchmarks.rst
│ ├── conf.py
│ ├── index.rst
│ ├── info/
│ │ └── info.rst
│ ├── install/
│ │ └── install.rst
│ ├── license/
│ │ └── license.rst
│ ├── replication/
│ │ └── replication.rst
│ └── tutorials/
│ ├── analysis.rst
│ ├── planning.rst
│ ├── training.rst
│ └── tutorial.rst
├── examples/
│ ├── antipodal_grasp_sampling.py
│ ├── policy.py
│ ├── policy_ros.py
│ └── policy_with_image_proc.py
├── gqcnn/
│ ├── __init__.py
│ ├── analysis/
│ │ ├── __init__.py
│ │ └── analyzer.py
│ ├── grasping/
│ │ ├── __init__.py
│ │ ├── actions.py
│ │ ├── constraint_fn.py
│ │ ├── grasp.py
│ │ ├── grasp_quality_function.py
│ │ ├── image_grasp_sampler.py
│ │ └── policy/
│ │ ├── __init__.py
│ │ ├── enums.py
│ │ ├── fc_policy.py
│ │ └── policy.py
│ ├── model/
│ │ ├── __init__.py
│ │ └── tf/
│ │ ├── __init__.py
│ │ ├── fc_network_tf.py
│ │ └── network_tf.py
│ ├── search/
│ │ ├── __init__.py
│ │ ├── enums.py
│ │ ├── resource_manager.py
│ │ ├── search.py
│ │ ├── trial.py
│ │ └── utils.py
│ ├── training/
│ │ ├── __init__.py
│ │ └── tf/
│ │ ├── __init__.py
│ │ └── trainer_tf.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── enums.py
│ │ ├── policy_exceptions.py
│ │ ├── train_stats_logger.py
│ │ └── utils.py
│ └── version.py
├── launch/
│ └── grasp_planning_service.launch
├── msg/
│ ├── Action.msg
│ ├── BoundingBox.msg
│ ├── GQCNNGrasp.msg
│ └── Observation.msg
├── package.xml
├── post-checkout
├── requirements/
│ ├── cpu_requirements.txt
│ ├── docs_requirements.txt
│ └── gpu_requirements.txt
├── ros_nodes/
│ └── grasp_planner_node.py
├── scripts/
│ ├── docker/
│ │ └── build-docker.sh
│ ├── policies/
│ │ ├── run_all_dex-net_2.0_examples.sh
│ │ ├── run_all_dex-net_2.1_examples.sh
│ │ ├── run_all_dex-net_3.0_examples.sh
│ │ ├── run_all_dex-net_4.0_fc_pj_examples.sh
│ │ ├── run_all_dex-net_4.0_fc_suction_examples.sh
│ │ ├── run_all_dex-net_4.0_pj_examples.sh
│ │ └── run_all_dex-net_4.0_suction_examples.sh
│ └── training/
│ ├── train_dex-net_2.0.sh
│ ├── train_dex-net_2.1.sh
│ ├── train_dex-net_3.0.sh
│ ├── train_dex-net_4.0_fc_pj.sh
│ ├── train_dex-net_4.0_fc_suction.sh
│ ├── train_dex-net_4.0_pj.sh
│ └── train_dex-net_4.0_suction.sh
├── setup.py
├── srv/
│ ├── GQCNNGraspPlanner.srv
│ ├── GQCNNGraspPlannerBoundingBox.srv
│ ├── GQCNNGraspPlannerFull.srv
│ └── GQCNNGraspPlannerSegmask.srv
└── tools/
├── analyze_gqcnn_performance.py
├── finetune.py
├── hyperparam_search.py
├── plot_training_losses.py
├── run_policy.py
└── train.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .flake8
================================================
[flake8]
ignore = C408, E121, E123, E126, E226, E24, E704, W503, W504, W605
exclude = docs/source/conf.py
inline-quotes = "
no-avoid-escape = 1
================================================
FILE: .github/ISSUE_TEMPLATE/bug-performance-issue--custom-images-.md
================================================
---
name: Bug/Performance Issue [Custom Images]
about: You are testing your own images in software.
title: 'Issue: Bug/Performance Issue [Custom Images]'
labels: ''
assignees: ''
---
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
- Python version:
- Installed using pip or ROS:
- Camera:
- GPU model (if applicable):
**Describe what you are trying to do**
**Describe current behavior**
**Describe the expected behavior**
**Describe the input images**
Provide details such as original captured resolution, pre-processing, final resolution, etc.
**Describe the physical camera setup**
Provide details such as the distance from camera to workspace, orientation, etc. Attach a picture if possible.
**Other info / logs**
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
================================================
FILE: .github/ISSUE_TEMPLATE/bug-performance-issue--physical-robot-.md
================================================
---
name: Bug/Performance Issue [Physical Robot]
about: You are running on a physical robot.
title: 'Issue: Bug/Performance Issue [Physical Robot]'
labels: ''
assignees: ''
---
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
- Python version:
- Installed using pip or ROS:
- Camera:
- Gripper:
- Robot:
- GPU model (if applicable):
**Describe what you are trying to do**
Provide details such as what angle you are attempting grasps at relative to the workspace.
**Describe current behavior**
**Describe the expected behavior**
**Describe the input images**
Provide details such as original captured resolution, pre-processing, final resolution, etc.
**Describe the physical camera setup**
Provide details such as the distance from camera to workspace, orientation, etc. Attach a picture if possible.
**Describe the physical robot setup**
Attach a picture if possible.
**Other info / logs**
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
================================================
FILE: .github/ISSUE_TEMPLATE/bug-performance-issue--replication-.md
================================================
---
name: Bug/Performance Issue [Replication]
about: You are replicating training or the policy with the provided images/datasets/models.
title: 'Issue: Bug/Performance Issue [Replication]'
labels: ''
assignees: ''
---
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
- Python version:
- Installed using pip or ROS:
- GPU model (if applicable):
**Describe the result you are trying to replicate**
If you can, provide a link to the section in the [documentation](https://berkeleyautomation.github.io/gqcnn/index.html).
**Provide the exact sequence of commands / steps that you executed to replicate this result**
**Describe the unexpected behavior**
**Other info / logs**
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
================================================
FILE: .github/ISSUE_TEMPLATE/installation-issue.md
================================================
---
name: Installation Issue
about: You are experiencing installation issues.
title: 'Issue: Installation Issue'
labels: ''
assignees: ''
---
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
- Python version:
- Installed using pip or ROS:
**Describe the problem**
**Provide the exact sequence of commands / steps that you executed before running into the problem**
**Any other info / logs**
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
================================================
FILE: .github/ISSUE_TEMPLATE/other-issue.md
================================================
---
name: Other Issue
about: Your issue doesn't fall into the above categories.
title: 'Issue: Other Issue'
labels: ''
assignees: ''
---
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04):
- Python version:
- Installed using pip or ROS:
**What is the problem**
Provide as much detail as possible and steps to replicate it if applicable.
**Other info / logs**
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
data/training/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
# Temp files
*~
.#*
\#*
================================================
FILE: .style.yapf
================================================
[style]
based_on_style=pep8
allow_split_before_dict_value=False
join_multiple_lines=False
================================================
FILE: .travis.yml
================================================
# Copyright ©2017. The Regents of the University of California (Regents).
# All Rights Reserved. Permission to use, copy, modify, and distribute this
# software and its documentation for educational, research, and not-for-profit
# purposes, without fee and without a signed licensing agreement, is hereby
# granted, provided that the above copyright notice, this paragraph and the
# following two paragraphs appear in all copies, modifications, and
# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
# otl@berkeley.edu,
# http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
# IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
# INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
# THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
# HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
language: python
matrix:
- name: "Python 3.5 on Xenial Linux"
python: 3.5
dist: xenial
- name: "Python 3.6 on Xenial Linux"
python: 3.6
dist: xenial
- name: "Python 3.7 on Xenial Linux"
python: 3.7
dist: xenial
- name: "Linter"
python: 3.7
dist: xenial
before_install: []
install: []
script:
- pip install yapf==0.27.0 # Must use specific version.
- pip install flake8 flake8-comprehensions flake8-quotes==2.0.0
- ./ci/travis/format.sh --all # Should exit with 0 for no diffs.
- flake8
before_install:
- sudo apt-get update
- sudo apt-get install -y curl g++ make
- sudo apt-get install -y python-opengl # For GLU.
- pushd ~
- curl -L http://download.osgeo.org/libspatialindex/spatialindex-src-1.8.5.tar.gz | tar xz
- cd spatialindex-src-1.8.5
- ./configure
- make
- sudo make install
- sudo ldconfig
- popd
- pip install -U setuptools # Required for easy_install to find right
# skimage version for Python 3.5.
install:
- pip install .
script:
- python -c "import gqcnn"
notifications:
email: false
================================================
FILE: CMakeLists.txt
================================================
# Copyright ©2017. The Regents of the University of California (Regents).
# All Rights Reserved. Permission to use, copy, modify, and distribute this
# software and its documentation for educational, research, and not-for-profit
# purposes, without fee and without a signed licensing agreement, is hereby
# granted, provided that the above copyright notice, this paragraph and the
# following two paragraphs appear in all copies, modifications, and
# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
# otl@berkeley.edu,
# http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
# IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
# INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
# THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
# HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
cmake_minimum_required(VERSION 2.8.3)
project(gqcnn)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
rospy
message_generation
geometry_msgs
sensor_msgs
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a run_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
add_message_files(
FILES
GQCNNGrasp.msg
BoundingBox.msg
Action.msg
Observation.msg
)
## Generate services in the 'srv' folder.
## Every .srv file under srv/ must be listed here or no client/server code
## is generated for it; GQCNNGraspPlannerFull.srv was previously missing.
add_service_files(
  FILES
  GQCNNGraspPlanner.srv
  GQCNNGraspPlannerBoundingBox.srv
  GQCNNGraspPlannerFull.srv
  GQCNNGraspPlannerSegmask.srv
)
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
generate_messages(
DEPENDENCIES
geometry_msgs
sensor_msgs
)
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a run_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES yumipy
# CATKIN_DEPENDS rospy
# DEPENDS system_lib
CATKIN_DEPENDS message_runtime
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
# include_directories(include)
include_directories(
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(yumipy
# src/${PROJECT_NAME}/yumipy.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(yumipy ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
# add_executable(yumipy_node src/yumipy_node.cpp)
## Add cmake target dependencies of the executable
## same as for the library above
#add_dependencies(gqcnn gqcnn_generate_messages_cpp)
## Specify libraries to link a library or executable target against
# target_link_libraries(yumipy_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS yumipy yumipy_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_yumipy.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)
================================================
FILE: LICENSE
================================================
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
================================================
FILE: README.md
================================================
## Note: Python 2.x support has officially been dropped.
# Berkeley AUTOLAB's GQCNN Package
<p>
<a href="https://travis-ci.org/BerkeleyAutomation/gqcnn/">
<img alt="Build Status" src="https://travis-ci.org/BerkeleyAutomation/gqcnn.svg?branch=master">
</a>
<a href="https://github.com/BerkeleyAutomation/gqcnn/releases/latest">
<img alt="Release" src="https://img.shields.io/github/release/BerkeleyAutomation/gqcnn.svg?style=flat">
</a>
<a href="https://github.com/BerkeleyAutomation/gqcnn/blob/master/LICENSE">
<img alt="Software License" src="https://img.shields.io/badge/license-REGENTS-brightgreen.svg">
</a>
<a>
<img alt="Python 3 Versions" src="https://img.shields.io/badge/python-3.5%20%7C%203.6%20%7C%203.7-yellow.svg">
</a>
</p>
## Package Overview
The gqcnn Python package is for training and analysis of Grasp Quality Convolutional Neural Networks (GQ-CNNs). It is part of the ongoing [Dexterity-Network (Dex-Net)](https://berkeleyautomation.github.io/dex-net/) project created and maintained by the [AUTOLAB](https://autolab.berkeley.edu) at UC Berkeley.
## Installation and Usage
Please see the [docs](https://berkeleyautomation.github.io/gqcnn/) for installation and usage instructions.
## Citation
If you use any part of this code in a publication, please cite [the appropriate Dex-Net publication](https://berkeleyautomation.github.io/gqcnn/index.html#academic-use).
================================================
FILE: cfg/examples/antipodal_grasp_sampling.yaml
================================================
# policy params
policy:
# general params
deterministic: 1
gripper_width: 0.05
# sampling params
sampling:
# type
type: antipodal_depth
# antipodality
friction_coef: 0.5
depth_grad_thresh: 0.0025
depth_grad_gaussian_sigma: 1.0
downsample_rate: 4
max_rejection_samples: 4000
# distance
max_dist_from_center: 1000
min_dist_from_boundary: 45
min_grasp_dist: 10.0
angle_dist_weight: 5.0
# depth sampling
depth_sampling_mode: uniform
depth_samples_per_grasp: 1
depth_sample_win_height: 1
depth_sample_win_width: 1
min_depth_offset: 0.015
max_depth_offset: 0.05
# metrics
metric:
type: zero
# visualization
vis:
grasp_sampling: 0
final_grasp: 1
vmin: 0.4
vmax: 1.0
================================================
FILE: cfg/examples/fc_gqcnn_pj.yaml
================================================
policy:
type: fully_conv_pj
sampling_method: top_k
num_depth_bins: 16
gripper_width: 0.05
gqcnn_stride: 4
gqcnn_recep_h: 96
gqcnn_recep_w: 96
# filtering params
max_grasps_to_filter: 50
filter_grasps: 0
# metrics
metric:
type: fcgqcnn
gqcnn_model: /path/to/your/FC-GQ-Image-Wise
gqcnn_backend: tf
fully_conv_gqcnn_config:
im_height: 386
im_width: 516
# visualization
policy_vis:
scale: 0.5
show_axis: 1
num_samples: 0
actions_2d: 0
actions_3d: 0
affordance_map: 0
vis:
final_grasp: 1
vmin: 0.5
vmax: 0.8
# image pre-processing before input to policy
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/fc_gqcnn_suction.yaml
================================================
policy:
type: fully_conv_suction
sampling_method: top_k
gqcnn_stride: 4
gqcnn_recep_h: 96
gqcnn_recep_w: 96
# filtering params
max_grasps_to_filter: 50
filter_grasps: 0
# metrics
metric:
type: fcgqcnn
gqcnn_model: /path/to/your/FC-GQ-Image-Wise-Suction
gqcnn_backend: tf
fully_conv_gqcnn_config:
im_height: 386
im_width: 516
# visualization
policy_vis:
scale: 0.5
show_axis: 1
num_samples: 0
actions_2d: 0
actions_3d: 0
affordance_map: 0
vis:
final_grasp: 1
vmin: 0.5
vmax: 0.8
# image pre-processing before input to policy
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/gqcnn_pj.yaml
================================================
policy:
# optimization params
num_seed_samples: 128
num_gmm_samples: 64
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
gripper_width: 0.05
# sampling params
sampling:
# type
type: antipodal_depth
# antipodality
friction_coef: 1.0
depth_grad_thresh: 0.0025
depth_grad_gaussian_sigma: 1.0
downsample_rate: 4
max_rejection_samples: 4000
# distance
max_dist_from_center: 160
min_dist_from_boundary: 45
min_grasp_dist: 2.5
angle_dist_weight: 5.0
# depth sampling
depth_sampling_mode: uniform
depth_samples_per_grasp: 3
depth_sample_win_height: 1
depth_sample_win_width: 1
min_depth_offset: 0.015
max_depth_offset: 0.05
# metrics
metric:
type: gqcnn
gqcnn_model: models/GQCNN-4.0-PJ
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
final_grasp: 1
vmin: 0.5
vmax: 0.8
k: 25
# image proc params
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/gqcnn_suction.yaml
================================================
policy:
# optimization params
num_seed_samples: 200
num_gmm_samples: 50
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
gripper_width: 0.05
crop_height: 96
crop_width: 96
# sampling params
sampling:
# type
type: suction
# params
max_suction_dir_optical_axis_angle: 30
delta_theta: 5
delta_phi: 5
sigma_depth: 0.0025
min_suction_dist: 1.0
angle_dist_weight: 5.0
depth_gaussian_sigma: 1.0
num_grasp_samples: 1000
max_dist_from_center: 260
max_num_samples: 10000
# metric params
metric:
type: gqcnn
gqcnn_model: models/GQCNN-4.0-SUCTION
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
plane: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
grasp_affordance_map: 0
final_grasp: 1
vmin: 0.5
vmax: 0.8
k: 25
# image proc params
inpaint_rescale_factor: 0.5
# detection params
detection:
type: point_cloud_box
foreground_mask_tolerance: 60
min_pt:
- 0.205
- -0.3
- 0.01
max_pt:
- 0.65
- 0.3
- 0.15
selection_policy: min
focus: 0
min_contour_area: 250.0
max_contour_area: 1000000.0
min_box_area: 250.0
max_box_area: 1000000.0
box_padding_px: 15
rescale_factor: 1.0
interpolation: bilinear
depth_grad_thresh: 10.0
contour_dist_thresh: 2.5
point_cloud_mask_only: 1
image_width: 640
image_height: 480
filter_dim: 1
================================================
FILE: cfg/examples/replication/dex-net_2.0.yaml
================================================
# policy params
policy:
# optimization params
num_seed_samples: 250
num_gmm_samples: 50
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
gripper_width: 0.05
# sampling params
sampling:
# type
type: antipodal_depth
# antipodality
friction_coef: 0.8
depth_grad_thresh: 0.0025
depth_grad_gaussian_sigma: 1.0
downsample_rate: 2
max_rejection_samples: 4000
# distance
max_dist_from_center: 160
min_dist_from_boundary: 45
min_grasp_dist: 2.5
angle_dist_weight: 5.0
# depth sampling
depth_sampling_mode: uniform
depth_samples_per_grasp: 1
depth_sample_win_height: 1
depth_sample_win_width: 1
min_depth_offset: 0.005
max_depth_offset: 0.04
# metrics
metric:
type: gqcnn
gqcnn_model: models/GQCNN-2.0
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
final_grasp: 1
vmin: 0.0
vmax: 1.0
k: 25
# image proc params
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/replication/dex-net_2.1.yaml
================================================
# policy params
policy:
# optimization params
num_seed_samples: 250
num_gmm_samples: 50
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
gripper_width: 0.05
# sampling params
sampling:
# type
type: antipodal_depth
# antipodality
friction_coef: 0.8
depth_grad_thresh: 0.0025
depth_grad_gaussian_sigma: 1.0
downsample_rate: 2
max_rejection_samples: 4000
# distance
max_dist_from_center: 160
min_dist_from_boundary: 45
min_grasp_dist: 2.5
angle_dist_weight: 5.0
# depth sampling
depth_sampling_mode: uniform
depth_samples_per_grasp: 1
depth_sample_win_height: 1
depth_sample_win_width: 1
min_depth_offset: 0.005
max_depth_offset: 0.04
# metrics
metric:
type: gqcnn
gqcnn_model: models/GQCNN-2.1
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
final_grasp: 1
vmin: 0.0
vmax: 1.0
k: 25
# image proc params
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/replication/dex-net_3.0.yaml
================================================
# policy params
policy:
# optimization params
num_seed_samples: 250
num_gmm_samples: 50
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
max_approach_angle: 80
# sampling params
sampling:
# type
type: suction
# params
max_suction_dir_optical_axis_angle: 30
delta_theta: 1
delta_phi: 1
mean_depth: 0.0025
sigma_depth: 0.000001
min_suction_dist: 1.0
angle_dist_weight: 5.0
depth_gaussian_sigma: 1.0
max_dist_from_center: 10000000000
max_num_samples: 10000
num_grasp_samples: 500
# metric params
metric:
type: gqcnn
gqcnn_model: models/GQCNN-3.0
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
plane: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
final_grasp: 1
vmin: 0.0
vmax: 1.0
k: 25
# image proc params
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/replication/dex-net_4.0_fc_pj.yaml
================================================
policy:
type: fully_conv_pj
sampling_method: top_k
num_depth_bins: 16
gripper_width: 0.05
gqcnn_stride: 4
gqcnn_recep_h: 96
gqcnn_recep_w: 96
# filtering params
max_grasps_to_filter: 50
filter_grasps: 0
# metrics
metric:
type: fcgqcnn
gqcnn_model: models/FCGQCNN-4.0-PJ
gqcnn_backend: tf
fully_conv_gqcnn_config:
im_height: 480
im_width: 640
# visualization
policy_vis:
scale: 0.5
show_axis: 1
num_samples: 0
actions_2d: 0
actions_3d: 0
affordance_map: 0
vis:
final_grasp: 1
vmin: 0.0
vmax: 1.0
# image pre-processing before input to policy
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/replication/dex-net_4.0_fc_suction.yaml
================================================
policy:
type: fully_conv_suction
sampling_method: top_k
gqcnn_stride: 4
gqcnn_recep_h: 96
gqcnn_recep_w: 96
# filtering params
max_grasps_to_filter: 50
filter_grasps: 0
# metrics
metric:
type: fcgqcnn
gqcnn_model: models/FCGQCNN-4.0-SUCTION
gqcnn_backend: tf
fully_conv_gqcnn_config:
im_height: 480
im_width: 640
# visualization
policy_vis:
scale: 0.5
show_axis: 1
num_samples: 0
actions_2d: 0
actions_3d: 0
affordance_map: 0
vis:
final_grasp: 1
vmin: 0.0
vmax: 1.0
# image pre-processing before input to policy
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/replication/dex-net_4.0_pj.yaml
================================================
policy:
# optimization params
num_seed_samples: 128
num_gmm_samples: 64
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
gripper_width: 0.05
# sampling params
sampling:
# type
type: antipodal_depth
# antipodality
friction_coef: 1.0
depth_grad_thresh: 0.0025
depth_grad_gaussian_sigma: 1.0
downsample_rate: 4
max_rejection_samples: 4000
# distance
max_dist_from_center: 160
min_dist_from_boundary: 45
min_grasp_dist: 2.5
angle_dist_weight: 5.0
# depth sampling
depth_sampling_mode: uniform
depth_samples_per_grasp: 3
depth_sample_win_height: 1
depth_sample_win_width: 1
min_depth_offset: 0.015
max_depth_offset: 0.05
# metrics
metric:
type: gqcnn
gqcnn_model: models/GQCNN-4.0-PJ
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
final_grasp: 1
vmin: 0.0
vmax: 1.0
k: 25
# image proc params
inpaint_rescale_factor: 0.5
================================================
FILE: cfg/examples/replication/dex-net_4.0_suction.yaml
================================================
policy:
# optimization params
num_seed_samples: 200
num_gmm_samples: 50
num_iters: 3
gmm_refit_p: 0.25
gmm_component_frac: 0.4
gmm_reg_covar: 0.01
# general params
deterministic: 1
gripper_width: 0.05
crop_height: 96
crop_width: 96
# sampling params
sampling:
# type
type: suction
# params
max_suction_dir_optical_axis_angle: 30
delta_theta: 5
delta_phi: 5
sigma_depth: 0.0025
min_suction_dist: 1.0
angle_dist_weight: 5.0
depth_gaussian_sigma: 1.0
num_grasp_samples: 1000
max_dist_from_center: 260
max_num_samples: 10000
# metric params
metric:
type: gqcnn
gqcnn_model: models/GQCNN-4.0-SUCTION
crop_height: 96
crop_width: 96
# visualization
vis:
grasp_sampling : 0
tf_images: 0
plane: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
grasp_affordance_map: 0
final_grasp: 1
vmin: 0.0
vmax: 1.0
k: 25
# image proc params
inpaint_rescale_factor: 0.5
# detection params
detection:
type: point_cloud_box
foreground_mask_tolerance: 60
min_pt:
- 0.205
- -0.3
- 0.01
max_pt:
- 0.65
- 0.3
- 0.15
selection_policy: min
focus: 0
min_contour_area: 250.0
max_contour_area: 1000000.0
min_box_area: 250.0
max_box_area: 1000000.0
box_padding_px: 15
rescale_factor: 1.0
interpolation: bilinear
depth_grad_thresh: 10.0
contour_dist_thresh: 2.5
point_cloud_mask_only: 1
image_width: 640
image_height: 480
filter_dim: 1
================================================
FILE: cfg/examples/ros/fc_gqcnn_pj.yaml
================================================
!include ../fc_gqcnn_pj.yaml
# ROS-specific visualization
vis:
rgbd_state: 0
cropped_rgbd_image: 0
color_image: 0
depth_image: 0
segmask: 0
================================================
FILE: cfg/examples/ros/fc_gqcnn_suction.yaml
================================================
!include ../fc_gqcnn_suction.yaml
# ROS-specific visualization
vis:
rgbd_state: 0
cropped_rgbd_image: 0
color_image: 0
depth_image: 0
segmask: 0
================================================
FILE: cfg/examples/ros/gqcnn_pj.yaml
================================================
!include ../gqcnn_pj.yaml
# ROS-specific visualization
vis:
rgbd_state: 0
cropped_rgbd_image: 0
color_image: 0
depth_image: 0
segmask: 0
================================================
FILE: cfg/examples/ros/gqcnn_suction.yaml
================================================
!include ../gqcnn_suction.yaml
# ROS-specific visualization
vis:
rgbd_state: 0
cropped_rgbd_image: 0
color_image: 0
depth_image: 0
segmask: 0
================================================
FILE: cfg/finetune.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 10 # number of epochs to train for
eval_frequency: 2.5 # how often to get validation error (in epochs)
save_frequency: 2.5 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
optimize_base_layers: 0
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
base_model:
output_layer: fc4
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 4
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/finetune_dex-net_4.0_pj.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10.0 # how often to get validation error (in epochs)
save_frequency: 10.0 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
optimize_base_layers: 0
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
base_model:
output_layer: fc4
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 128
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 128
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/finetune_dex-net_4.0_suction.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10.0 # how often to get validation error (in epochs)
save_frequency: 10.0 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
optimize_base_layers: 0
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: suction
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
base_model:
output_layer: fc3
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 128
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 128
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/finetune_example_pj.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 20 # number of epochs to train for
eval_frequency: 10.0 # how often to get validation error (in epochs)
save_frequency: 10.0 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.33 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
optimize_base_layers: 0
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
base_model:
output_layer: conv2_2
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/finetune_example_suction.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 20 # number of epochs to train for
eval_frequency: 10.0 # how often to get validation error (in epochs)
save_frequency: 10.0 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.33 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
optimize_base_layers: 0
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: suction
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
base_model:
output_layer: conv2_2
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/hyperparam_search/train_dex-net_4.0_fc_suction_hyperparam_search.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: 0
debug_num_files: 1000000000
seed: 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: 0
seed: 24098
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: suction
# method by which to integrate depth into the network
input_depth_mode: im_only
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: ['anchor_filt_dim', 6, 9, 9]
num_filt: ['anchor_num_filts', 8, 12, 16, 20]
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: ['anchor_filt_dim', 3, 5, 5]
num_filt: ['anchor_num_filts', 8, 12, 16, 20]
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: ['anchor_filt_dim', 3, 5, 3]
num_filt: ['anchor_num_filts', 8, 12, 16, 20]
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: ['anchor_filt_dim', 3, 5, 3]
num_filt: ['anchor_num_filts', 8, 12, 16, 20]
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: ['anchor_fc_out_size', 32, 64, 128]
fc4:
type: fc
out_size: ['anchor_fc_out_size', 32, 64, 128]
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/hyperparam_search/train_hyperparam_search.yaml
================================================
# Example hyper-parameter search config corresponding to cfg/train.yaml that
# grid-searches through number of training epochs, base learning rate, and
# conv1_1 filter sizes.
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: [25, 50, 75] # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: [0.001, 0.01, 0.05, 0.1]
decay_step_multiplier: 1.0 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: 0
debug_num_files: 1000000000
seed: 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: 0
seed: 24098
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: [3, 5, 9]
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 4
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/tools/analyze_gqcnn_performance.yaml
================================================
log_rate: 10
font_size: 15
line_width: 4
dpi: 100
num_bins: 100
num_vis: 10
================================================
FILE: cfg/tools/run_policy.yaml
================================================
# policy params
policy:
# optimization params
num_seed_samples: 250
num_gmm_samples: 100
num_iters: 3
gmm_refit_p: 0.125
gmm_component_frac: 0.8
gmm_reg_covar: 0.01
max_resamples_per_iteration: 10
# general params
deterministic: 0
gripper: yumi_metal_spline
gripper_width: 0.05
max_approach_angle: 60
logging_dir: logs/debug
# sampling params
sampling:
# type
type: antipodal_depth
# antipodality
friction_coef: 0.8
depth_grad_thresh: 0.002
depth_grad_gaussian_sigma: 0.5
min_num_edge_pixels: 25
downsample_rate: 2
max_rejection_samples: 4000
# distance
max_dist_from_center: 100000
min_dist_from_boundary: 45
min_grasp_dist: 1.0
angle_dist_weight: 5.0
# depth sampling
depth_samples_per_grasp: 1
depth_sample_win_height: 1
depth_sample_win_width: 1
depth_sampling_mode: uniform
min_depth_offset: 0.015
max_depth_offset: 0.04
# metric params
metric:
type: gqcnn
gqcnn_model: /mnt/data/diskstation/models/icra2019/gqcnn_mini_dexnet/
crop_height: 96
crop_width: 96
# filter params
filter_collisions: 1
filter_ik: 1
filter_unreachable: 0
max_grasps_filter: 10
collision_free_filter:
approach_dist: 0.075
rescale_factor: 0.125
ik_filter:
approach_dist: 0.075
traj_len: 0
group_name: left_jaws
ik_timeout: 0.1
reachability_filter:
unreachable_pose_dir: /mnt/data/diskstation/unreachable_poses/left/
min_rot_dist: 0.1
min_trans_dist: 0.01
# visualization
vis:
input_images: 0
grasp_sampling: 0
tf_images: 0
grasp_candidates: 0
elite_grasps: 0
grasp_ranking: 0
grasp_plan: 0
seg_point_cloud: 0
filtered_point_cloud: 0
final_grasp: 1
grasp_scale: 1
vmin: 0.6
vmax: 0.9
k: 25
================================================
FILE: cfg/train.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 1.0 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 4
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_dex-net_2.0.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 5 # how often to get validation error (in epochs)
save_frequency: 5 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.66 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: depth_ims_tf_table
pose_field_name: hand_poses
# label params
target_metric_name: robust_ferrari_canny # name of the field to use for the labels
metric_thresh: 0.002 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 1
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 1
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 32
im_width: 32
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: legacy_parallel_jaw
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 7
num_filt: 64
pool_size: 1
pool_stride: 1
pad: SAME
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 64
pool_size: 2
pool_stride: 2
pad: SAME
norm: 1
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 64
pool_size: 1
pool_stride: 1
pad: SAME
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 64
pool_size: 2
pool_stride: 2
pad: SAME
norm: 1
norm_type: local_response
fc3:
type: fc
out_size: 1024
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 1024
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_dex-net_3.0.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 25 # number of epochs to train for
eval_frequency: 5 # how often to get validation error (in epochs)
save_frequency: 5 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.66 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: depth_ims_tf_table
pose_field_name: hand_poses
# label params
target_metric_name: robust_suction_wrench_resistance # name of the field to use for the labels
metric_thresh: 0.2 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 1
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 1
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 32
im_width: 32
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: legacy_suction
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 7
num_filt: 64
pool_size: 1
pool_stride: 1
pad: SAME
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 64
pool_size: 2
pool_stride: 2
pad: SAME
norm: 1
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 64
pool_size: 1
pool_stride: 1
pad: SAME
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 64
pool_size: 2
pool_stride: 2
pad: SAME
norm: 1
norm_type: local_response
fc3:
type: fc
out_size: 1024
pose_stream:
pc1:
type: pc
out_size: 128
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 1024
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_dex-net_4.0_fc_pj.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 0
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: im_depth_sub
# used for training with multiple angular predictions
angular_bins: 16
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 128
fc4:
type: fc
out_size: 128
fc5:
type: fc
out_size: 32
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_dex-net_4.0_fc_suction.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: suction
# method by which to integrate depth into the network
input_depth_mode: im_only
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 128
fc4:
type: fc
out_size: 128
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_dex-net_4.0_pj.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 128
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 128
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_dex-net_4.0_suction.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: suction
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_example_pj.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 100 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 1.0 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.98
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 4
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_example_suction.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.9 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 1.0 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 1000 # the number of random files to compute dataset statistics in preprocessing (a lower value speeds up initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 1
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: suction
# method by which to integrate depth into the network
input_depth_mode: pose_stream
# used for training with multiple angular predictions
angular_bins: 0
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 3
num_filt: 8
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 64
pose_stream:
pc1:
type: pc
out_size: 16
pc2:
type: pc
out_size: 0
merge_stream:
fc4:
type: fc_merge
out_size: 64
fc5:
type: fc
out_size: 2
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: cfg/train_fc.yaml
================================================
# general optimization params
train_batch_size: 64
val_batch_size: &val_batch_size 64
# logging params
num_epochs: 50 # number of epochs to train for
eval_frequency: 10 # how often to get validation error (in epochs)
save_frequency: 10 # how often to save output (in epochs)
vis_frequency: 10000 # how often to visualize filters (in epochs)
log_frequency: 1 # how often to log output (in steps)
# train / val split params
train_pct: 0.8 # percentage of the data to use for training vs validation
total_pct: 1.0 # percentage of all the files to use
eval_total_train_error: 0 # whether or not to evaluate the total training error on each validation
max_files_eval: 1000 # the number of validation files to use in each eval
# optimization params
loss: sparse
optimizer: momentum
train_l2_regularizer: 0.0005
base_lr: 0.01
decay_step_multiplier: 0.5 # number of times to go through training datapoints before stepping down decay rate (in epochs)
decay_rate: 0.95
momentum_rate: 0.9
max_training_examples_per_load: 128
drop_rate: 0.0
max_global_grad_norm: 100000000000
# input params
training_mode: classification
image_field_name: tf_depth_ims
pose_field_name: grasps
# label params
target_metric_name: grasp_metrics # name of the field to use for the labels
metric_thresh: 0.5 # threshold for positive examples (label = 1 if grasp_metric > metric_thresh)
# preproc params
num_random_files: 10000 # the number of random files to compute dataset statistics in preprocessing (lower speeds initialization)
preproc_log_frequency: 100 # how often to log preprocessing (in steps)
# denoising / synthetic data params
multiplicative_denoising: 0
gamma_shape: 1000.00
symmetrize: 0
gaussian_process_denoising: 0
gaussian_process_rate: 0.5
gaussian_process_scaling_factor: 4.0
gaussian_process_sigma: 0.005
# tensorboard
tensorboard_port: 6006
# debugging params
debug: &debug 0
debug_num_files: 10 # speeds up initialization
seed: &seed 24098
### GQCNN CONFIG ###
gqcnn:
# basic data metrics
im_height: 96
im_width: 96
im_channels: 1
debug: *debug
seed: *seed
# needs to match input data mode that was used for training, determines the pose dimensions for the network
gripper_mode: parallel_jaw
# method by which to integrate depth into the network
input_depth_mode: im_depth_sub
# used for training with multiple angular predictions
angular_bins: 16
# prediction batch size, in training this will be overridden by the val_batch_size in the optimizer's config file
batch_size: *val_batch_size
# architecture
architecture:
im_stream:
conv1_1:
type: conv
filt_dim: 9
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv1_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
conv2_1:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 1
pool_stride: 1
pad: VALID
norm: 0
norm_type: local_response
conv2_2:
type: conv
filt_dim: 5
num_filt: 16
pool_size: 2
pool_stride: 2
pad: VALID
norm: 0
norm_type: local_response
fc3:
type: fc
out_size: 128
fc4:
type: fc
out_size: 128
fc5:
type: fc
out_size: 32
# architecture normalization constants
radius: 2
alpha: 2.0e-05
beta: 0.75
bias: 1.0
# leaky relu coefficient
relu_coeff: 0.0
================================================
FILE: ci/travis/format.sh
================================================
#!/bin/bash
# Copyright ©2017. The Regents of the University of California (Regents).
# All Rights Reserved. Permission to use, copy, modify, and distribute this
# software and its documentation for educational, research, and not-for-profit
# purposes, without fee and without a signed licensing agreement, is hereby
# granted, provided that the above copyright notice, this paragraph and the
# following two paragraphs appear in all copies, modifications, and
# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
# otl@berkeley.edu,
# http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
# IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
# INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
# THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
# HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Script for YAPF formatting. Adapted from https://github.com/ray-project/ray/blob/master/ci/travis/format.sh.

# Flags passed to every `yapf` invocation.
YAPF_FLAGS=(
    '--style' ".style.yapf"
    '--recursive'
    '--parallel'
)

# Paths to exclude from formatting (none currently).
YAPF_EXCLUDES=()

# Format the files given as arguments in place.
format() {
    yapf --in-place "${YAPF_FLAGS[@]}" -- "$@"
}

# Format all files under the current directory, and print the diff to `stdout`
# for Travis (a non-empty diff means unformatted code).
format_all() {
    yapf --diff "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" .
}

# This flag formats individual files. `--files` *must* be the first command line
# arg to use this option.
if [[ "$1" == '--files' ]]; then
    format "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire Python directory is formatted.
elif [[ "$1" == '--all' ]]; then
    format_all
else
    # Previously any other (or missing) first argument was silently ignored and
    # the script exited 0 without doing anything; fail loudly instead so CI
    # misconfigurations are caught.
    echo "Usage: $0 --files <file>... | --all" >&2
    exit 1
fi
================================================
FILE: data/calib/phoxi/phoxi.intr
================================================
{"_cy": 191.75, "_cx": 255.5, "_fy": 552.5, "_height": 386, "_fx": 552.5, "_width": 516, "_skew": 0.0, "_K": 0, "_frame": "phoxi"}
================================================
FILE: data/calib/phoxi/phoxi_to_world.tf
================================================
phoxi
world
0.385146 -0.121589 0.808145
0.005659 -0.999983 0.001249
-0.989208 -0.005415 0.146420
-0.146410 -0.002064 -0.989222
================================================
FILE: data/calib/primesense/primesense.intr
================================================
{"_cy": 239.5, "_cx": 319.5, "_fy": 525.0, "_height": 480, "_fx": 525.0, "_width": 640, "_skew": 0.0, "_K": 0, "_frame": "primesense_overhead"}
================================================
FILE: data/calib/primesense/primesense.tf
================================================
primesense_overhead
world
0.214280 0.004186 0.872913
0.021188 -0.975273 0.219984
-0.999765 -0.021698 0.000098
0.004678 -0.219934 -0.975504
================================================
FILE: docker/cpu/Dockerfile
================================================
FROM ubuntu:xenial

# `MAINTAINER` is deprecated; record the maintainer as a label instead.
LABEL maintainer="Vishal Satish <vsatish@berkeley.edu>"

# Args.
# Must be an absolute path.
ARG work_dir=/root/Workspace

# Install `apt-get` deps, then remove the package lists to keep the image
# small.
RUN apt-get update && apt-get install -y \
    build-essential \
    python3 \
    python3-dev \
    python3-tk \
    python-opengl \
    curl \
    libsm6 \
    libxext6 \
    libglib2.0-0 \
    libxrender1 \
    wget \
    unzip && \
    rm -rf /var/lib/apt/lists/*

# Install libspatialindex (required for latest rtree).
RUN curl -L http://download.osgeo.org/libspatialindex/spatialindex-src-1.8.5.tar.gz | tar xz && \
    cd spatialindex-src-1.8.5 && \
    ./configure && \
    make && \
    make install && \
    ldconfig && \
    cd ..

# Install pip (`apt-get install python-pip` causes trouble w/ networkx).
RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
    python3 get-pip.py && \
    rm get-pip.py

# Required for easy_install to find right skimage version for Python 3.5.
RUN pip3 install -U setuptools

# Make working directory.
WORKDIR ${work_dir}

# Copy the library (`ADD` auto-extracts the tarball into the work dir).
ADD docker/gqcnn.tar .

# This is because `python setup.py develop` skips `install_requires` (I think).
RUN python3 -m pip install -r gqcnn/requirements/cpu_requirements.txt

# Install the library in editable mode because it's more versatile (in case we want to develop or if users want to modify things)
# Keep the egg outside of the library in site-packages in case we want to mount the library (overwriting it) for development with docker
ENV PYTHONPATH ${work_dir}/gqcnn
WORKDIR /usr/local/lib/python3.5/site-packages/
RUN python3 ${work_dir}/gqcnn/setup.py develop --docker

# Move to the top-level gqcnn package dir.
WORKDIR ${work_dir}/gqcnn
================================================
FILE: docker/gpu/Dockerfile
================================================
FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu16.04

# `MAINTAINER` is deprecated; record the maintainer as a label instead.
LABEL maintainer="Vishal Satish <vsatish@berkeley.edu>"

# Args
# `work_dir` must be an absolute path.
ARG work_dir=/root/Workspace

# Install `apt-get` deps, then remove the package lists to keep the image
# small.
RUN apt-get update && apt-get install -y \
    build-essential \
    python3 \
    python3-dev \
    python3-tk \
    python-opengl \
    curl \
    libsm6 \
    libxext6 \
    libglib2.0-0 \
    libxrender1 \
    wget \
    unzip && \
    rm -rf /var/lib/apt/lists/*

# Install libspatialindex (required for latest rtree).
RUN curl -L http://download.osgeo.org/libspatialindex/spatialindex-src-1.8.5.tar.gz | tar xz && \
    cd spatialindex-src-1.8.5 && \
    ./configure && \
    make && \
    make install && \
    ldconfig && \
    cd ..

# Install pip (`apt-get install python-pip` causes trouble w/ networkx).
RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
    python3 get-pip.py && \
    rm get-pip.py

# Required for easy_install to find right skimage version for Python 3.5.
RUN pip3 install -U setuptools

# Make working directory.
WORKDIR ${work_dir}

# Copy the library (`ADD` auto-extracts the tarball into the work dir).
ADD docker/gqcnn.tar .

# This is because `python setup.py develop` skips install_requires (I think) and also because we want to explicitly use the GPU requirements.
RUN python3 -m pip install -r gqcnn/requirements/gpu_requirements.txt

# Install the library in editable mode because it's more versatile (in case we want to develop or if users want to modify things)
# Keep the egg outside of the library in site-packages in case we want to mount the library (overwriting it) for development with docker
ENV PYTHONPATH ${work_dir}/gqcnn
WORKDIR /usr/local/lib/python3.5/site-packages/
RUN python3 ${work_dir}/gqcnn/setup.py develop --docker

# Move to the base library dir
WORKDIR ${work_dir}/gqcnn
================================================
FILE: docs/Makefile
================================================
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = GQCNN
SOURCEDIR = source
BUILDDIR = build
# Source directories checked out from `master` and copied into the `gh-pages`
# branch when regenerating the published docs.
GH_PAGES_SOURCES = docs examples gqcnn tools

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Remove all build products.
.PHONY: clean
clean:
	rm -rf $(BUILDDIR)/*

# Regenerate and publish the docs on the `gh-pages` branch: check out
# `gh-pages`, wipe its contents, restore $(GH_PAGES_SOURCES) from `master`,
# build the HTML docs, move them to the repository root, commit, and refresh
# the remote branch (delete + push) before returning to `master`.
# NOTE(review): run this from a clean working tree -- `git clean -fxd` deletes
# untracked files.
.PHONY: gh-pages
gh-pages:
	git checkout gh-pages && \
	cd .. && \
	git rm -rf . && git clean -fxd && \
	git checkout master $(GH_PAGES_SOURCES) && \
	git reset HEAD && \
	cd docs && \
	make html && \
	cd .. && \
	mv -fv docs/build/html/* ./ && \
	touch .nojekyll && \
	rm -rf $(GH_PAGES_SOURCES) && \
	git add -A && \
	git commit -m "Generated gh-pages for `git log master -1 --pretty=short --abbrev-commit`" && \
	git push origin --delete gh-pages && \
	git push origin gh-pages ; \
	git checkout master

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
================================================
FILE: docs/gh_deploy.sh
================================================
#!/bin/sh
# Build and publish the documentation via the Makefile's `gh-pages` target
# (expects to be run from within the `docs/` directory).
make gh-pages
# NOTE(review): this `cd` only changes the directory of this (terminating)
# script's shell, never the caller's -- it appears to be a no-op; confirm
# before removing.
cd ../docs
================================================
FILE: docs/make.bat
================================================
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

REM Default to `sphinx-build` on PATH if SPHINXBUILD is not set.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=GQCNN

REM No build target given: show Sphinx's help.
if "%1" == "" goto help

REM Probe the sphinx-build executable; errorlevel 9009 means the command was
REM not found.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Delegate the requested target to Sphinx's "make mode".
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd
================================================
FILE: docs/source/api/analysis.rst
================================================
Analysis
========
GQCNNAnalyzer
~~~~~~~~~~~~~
A tool for analyzing trained GQ-CNNs. Calculates statistics such as training/validation errors and losses. Also plots Precision-Recall Curve and ROC, and saves sample TP/TN/FP/FN training/validation examples.
.. autoclass:: gqcnn.GQCNNAnalyzer
================================================
FILE: docs/source/api/gqcnn.rst
================================================
GQ-CNN
======
GQ-CNN and FC-GQ-CNN classes are **never accessed directly**, but through a lightweight factory function that returns the corresponding class depending on the specified backend. ::
$ from gqcnn import get_gqcnn_model
$
$ backend = 'tf'
$ my_gqcnn = get_gqcnn_model(backend)(<class initializer args>)
.. autofunction:: gqcnn.get_gqcnn_model
.. autofunction:: gqcnn.get_fc_gqcnn_model
GQCNNTF
~~~~~~~
Tensorflow implementation of GQ-CNN model.
.. autoclass:: gqcnn.model.tf.GQCNNTF
:exclude-members: init_mean_and_std,
set_base_network,
init_weights_file,
initialize_network,
set_batch_size,
set_im_mean,
get_im_mean,
set_im_std,
get_im_std,
set_pose_mean,
get_pose_mean,
set_pose_std,
get_pose_std,
set_im_depth_sub_mean,
set_im_depth_sub_std,
add_softmax_to_output,
add_sigmoid_to_output,
update_batch_size,
FCGQCNNTF
~~~~~~~~~
Tensorflow implementation of FC-GQ-CNN model.
.. autoclass:: gqcnn.model.tf.FCGQCNNTF
:exclude-members: __init__
================================================
FILE: docs/source/api/policies.rst
================================================
Policies
========
All GQ-CNN grasping policies are child classes of the base :ref:`GraspingPolicy` class that implements `__call__()`, which operates on :ref:`RgbdImageStates <RgbdImageState>` and returns a :ref:`GraspAction`. ::
$ from gqcnn import RgbdImageState, CrossEntropyRobustGraspingPolicy
$
$ im = RgbdImageState.load(<saved rgbd image dir>)
$ my_policy = CrossEntropyRobustGraspingPolicy(<policy initializer args>)
$
$ my_grasp_action = my_policy(im)
Primary Policies
~~~~~~~~~~~~~~~~
CrossEntropyRobustGraspingPolicy
--------------------------------
An implementation of the `Cross Entropy Method (CEM)`_ used in `Dex-Net 2.0`_, `Dex-Net 2.1`_, `Dex-Net 3.0`_, and `Dex-Net 4.0`_ to iteratively locate the best grasp.
.. autoclass:: gqcnn.CrossEntropyRobustGraspingPolicy
FullyConvolutionalGraspingPolicyParallelJaw
-------------------------------------------
An implementation of the `FC-GQ-CNN`_ parallel jaw policy that uses dense, parallelized fully convolutional networks.
.. autoclass:: gqcnn.FullyConvolutionalGraspingPolicyParallelJaw
FullyConvolutionalGraspingPolicySuction
---------------------------------------
An implementation of the `FC-GQ-CNN`_ suction policy that uses dense, parallelized fully convolutional networks.
.. autoclass:: gqcnn.FullyConvolutionalGraspingPolicySuction
Grasps and Image Wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
.. _RgbdImageState:
RgbdImageState
--------------
A wrapper for states containing an RGBD (RGB + Depth) image, camera intrinsics, and segmentation masks.
.. autoclass:: gqcnn.RgbdImageState
.. _GraspAction:
GraspAction
-----------
A wrapper for 2D grasp actions such as :ref:`Grasp2D` or :ref:`SuctionPoint2D`.
.. autoclass:: gqcnn.grasping.policy.policy.GraspAction
.. _Grasp2D:
Grasp2D
-------
A wrapper for 2D parallel jaw grasps.
.. autoclass:: gqcnn.grasping.grasp.Grasp2D
.. _SuctionPoint2D:
SuctionPoint2D
--------------
A wrapper for 2D suction grasps.
.. autoclass:: gqcnn.grasping.grasp.SuctionPoint2D
Miscellaneous and Parent Policies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Policy
------
.. autoclass:: gqcnn.grasping.policy.policy.Policy
.. _GraspingPolicy:
GraspingPolicy
--------------
.. autoclass:: gqcnn.grasping.policy.policy.GraspingPolicy
FullyConvolutionalGraspingPolicy
--------------------------------
.. autoclass:: gqcnn.grasping.policy.fc_policy.FullyConvolutionalGraspingPolicy
RobustGraspingPolicy
--------------------
.. autoclass:: gqcnn.RobustGraspingPolicy
UniformRandomGraspingPolicy
---------------------------
.. autoclass:: gqcnn.UniformRandomGraspingPolicy
.. _Cross Entropy Method (CEM): https://en.wikipedia.org/wiki/Cross-entropy_method
.. _Dex-Net 2.0: https://berkeleyautomation.github.io/dex-net/#dexnet_2
.. _Dex-Net 2.1: https://berkeleyautomation.github.io/dex-net/#dexnet_21
.. _Dex-Net 3.0: https://berkeleyautomation.github.io/dex-net/#dexnet_3
.. _Dex-Net 4.0: https://berkeleyautomation.github.io/dex-net/#dexnet_4
.. _FC-GQ-CNN: https://berkeleyautomation.github.io/fcgqcnn
================================================
FILE: docs/source/api/training.rst
================================================
Training
========
GQ-CNN training classes are **never accessed directly**, but through a lightweight factory function that returns the corresponding class depending on the specified backend. ::
$ from gqcnn import get_gqcnn_trainer
$
$ backend = 'tf'
$ my_trainer = get_gqcnn_trainer(backend)(<class initializer args>)
.. autofunction:: gqcnn.get_gqcnn_trainer
GQCNNTrainerTF
~~~~~~~~~~~~~~
.. autoclass:: gqcnn.training.tf.GQCNNTrainerTF
================================================
FILE: docs/source/benchmarks/benchmarks.rst
================================================
Dex-Net 2.0
~~~~~~~~~~~
Below are the highest classification accuracies achieved on the `Dex-Net 2.0`_ dataset on a randomized 80-20 train-validation split using various splitting rules:
.. image:: ../images/gqcnn_leaderboard.png
:width: 100%
The current leader is a ConvNet submitted by nomagic.ai. `GQ` is our best GQ-CNN for `Dex-Net 2.0`_.
We believe grasping performance on the physical robot can be improved if these validation error rates can be further reduced by modifications to the network architecture and optimization.
If you achieve superior numbers on a randomized validation set, please email Jeff Mahler (jmahler@berkeley.edu) with the subject "Dex-Net 2.0 Benchmark Submission" and we will consider testing on our ABB YuMi.
.. _Dex-Net 2.0: https://berkeleyautomation.github.io/dex-net/#dexnet_2
================================================
FILE: docs/source/conf.py
================================================
# -*- coding: utf-8 -*-
#
# core documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 16 14:33:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

import sphinx_rtd_theme

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it absolute.
# BUG FIX: this previously used `__name__` (the module *name*, not a path), so
# `os.path.dirname(__name__)` returned "" and the paths resolved relative to
# the current working directory instead of this file. `__file__` is the
# correct anchor.
_conf_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_conf_dir, "../../"))
sys.path.insert(0, os.path.join(_conf_dir, "../../examples"))
sys.path.insert(0, os.path.join(_conf_dir, "../../tools"))
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = "1.0"

# Sphinx extension modules, as strings.
# NOTE(review): napoleon has shipped with Sphinx itself as
# `sphinx.ext.napoleon` since Sphinx 1.3; switching would drop the extra
# `sphinxcontrib-napoleon` dependency -- confirm the installed Sphinx version
# before changing.
extensions = ["sphinx.ext.autodoc", "sphinxcontrib.napoleon"]

# Take class documentation from the class docstring only.
autoclass_content = "class"

# Document members in the order they appear in the source file.
autodoc_member_order = "bysource"

# `autodoc_default_flags` was deprecated in Sphinx 1.8 in favor of
# `autodoc_default_options`; the dict form below is equivalent to the old
# ["members", "show-inheritance"] flags.
autodoc_default_options = {"members": True, "show-inheritance": True}

# Include special members (e.g. `__call__`) and `__init__` when they carry
# docstrings.
napoleon_include_special_with_doc = True
napoleon_include_init_with_doc = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = "utf-8-sig"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = u"GQCNN"
copyright = u"2019, The Regents of the University of California"
author = u"Vishal Satish, Jeff Mahler"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"1.1"
# The full version, including alpha/beta/rc tags.
release = u"1.1.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either set `today` to some
# non-false value, or set `today_fmt` as the format for a strftime call.
# today = ""
# today_fmt = "%B %d, %Y"

# Patterns, relative to the source directory, of files and directories to
# ignore when looking for source files.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# Use the Read the Docs theme for HTML and HTML Help pages.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Output file base name for HTML help builder.
htmlhelp_basename = "GQCNNdoc"

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX settings (paper size, point size, preamble, figure alignment) are
# left at their Sphinx defaults.
latex_elements = {}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "GQCNN.tex", u"GQCNN Documentation",
     u"Jeff Mahler, Vishal Satish", "manual"),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "GQCNN", u"GQCNN Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, "GQCNN", u"GQCNN Documentation", author, "GQCNN",
     "One line description of project.", "Miscellaneous"),
]
================================================
FILE: docs/source/index.rst
================================================
.. GQCNN documentation master file, created by
sphinx-quickstart on Thu May 4 16:09:26 2017.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Berkeley AUTOLAB's GQCNN Package
================================
Overview
--------
The `gqcnn` package is a Python API for training and deploying `Grasp Quality Convolutional Neural Networks (GQ-CNNs)`_ for grasp planning using training datasets from the `Dexterity Network (Dex-Net)`_, developed by the `Berkeley AUTOLAB`_ and introduced in the `Dex-Net 2.0 paper`_.
.. note:: We're excited to announce **version 1.0**, which brings the GQ-CNN package up to date with recent research in `Dex-Net`_.
Version 1.0 introduces support for:
#. **Dex-Net 4.0:** Composite policies that decide whether to use a suction cup or parallel-jaw gripper.
#. **Fully Convolutional GQ-CNNs:** Fully convolutional architectures that efficiently evaluate millions of grasps faster than prior GQ-CNNs.
Links
-----
* `Source Code`_
* `Datasets`_
* `Pretrained Models`_
* `Dex-Net Website`_
* `UC Berkeley AUTOLAB`_
.. _Source Code: https://github.com/BerkeleyAutomation/gqcnn
.. _Datasets: http://bit.ly/2rIM7Jk
.. _Pretrained Models: http://bit.ly/2tAFMko
.. _Dex-Net Website: https://berkeleyautomation.github.io/dex-net
.. _UC Berkeley AUTOLAB: http://autolab.berkeley.edu
.. image:: images/gqcnn.png
:width: 100 %
Project Goals
-------------
The goals of this project are to facilitate:
#. **Replicability** of GQ-CNN training and deployment from:
#. The latest `Dex-Net 4.0`_ results.
#. Older results such as `Dex-Net 2.0`_, `Dex-Net 2.1`_ and `Dex-Net 3.0`_.
#. Experimental results such as `FC-GQ-CNN`_.
#. **Research extensions** on novel GQ-CNN architectures that have higher performance on `Dex-Net 4.0`_ training datasets.
.. _Dex-Net 2.0: https://berkeleyautomation.github.io/dex-net/#dexnet_2
.. _Dex-Net 2.1: https://berkeleyautomation.github.io/dex-net/#dexnet_21
.. _Dex-Net 3.0: https://berkeleyautomation.github.io/dex-net/#dexnet_3
.. _Dex-Net 4.0: https://berkeleyautomation.github.io/dex-net/#dexnet_4
.. _FC-GQ-CNN: https://berkeleyautomation.github.io/fcgqcnn
Our longer-term goal is to encourage development of robust GQ-CNNs that can be used to plan grasps on different hardware setups with different robots and cameras.
Disclaimer
----------
GQ-CNN models are sensitive to the following parameters used during dataset generation:
#. The robot gripper
#. The depth camera
#. The distance between the camera and workspace.
As a result, we cannot guarantee performance of our pre-trained models on other physical setups. If you have a specific use-case in mind, please reach out to us. It might be possible to generate a custom dataset for your particular setup. We are actively researching how to generate more robust datasets that can generalize across robots, cameras, and viewpoints.
Development
-----------
The package is currently under active development. Please raise all bugs, feature requests, and other issues under the `Github Issues`_.
For other questions or concerns, please contact Jeff Mahler (jmahler@berkeley.edu) or Vishal Satish (vsatish@berkeley.edu) with the subject line starting with "GQ-CNN Development".
Academic Use
------------
If you use the code, datasets, or models in a publication, please cite the appropriate paper:
#. **Dex-Net 2.0** `(bibtex) <https://raw.githubusercontent.com/BerkeleyAutomation/dex-net/gh-pages/docs/dexnet_rss2017.bib>`__
#. **Dex-Net 2.1** `(bibtex) <https://raw.githubusercontent.com/BerkeleyAutomation/dex-net/gh-pages/docs/dexnet_corl2017.bib>`__
#. **Dex-Net 3.0** `(bibtex) <https://raw.githubusercontent.com/BerkeleyAutomation/dex-net/gh-pages/docs/dexnet_icra2018.bib>`__
#. **Dex-Net 4.0** `(bibtex) <https://raw.githubusercontent.com/BerkeleyAutomation/dex-net/gh-pages/docs/dexnet_sciencerobotics2019.bib>`__
#. **FC-GQ-CNN** `(bibtex) <https://raw.githubusercontent.com/BerkeleyAutomation/fcgqcnn/gh-pages/docs/fcgqcnn_ral2019.bib>`__
.. _Grasp Quality Convolutional Neural Networks (GQ-CNNs): info/info.html
.. _Dexterity Network (Dex-Net): https://berkeleyautomation.github.io/dex-net
.. _Dex-Net: https://berkeleyautomation.github.io/dex-net
.. _Berkeley AUTOLAB: http://autolab.berkeley.edu/
.. _Dex-Net 2.0 paper: https://github.com/BerkeleyAutomation/dex-net/raw/gh-pages/docs/dexnet_rss2017_final.pdf
.. _Github Issues: https://github.com/BerkeleyAutomation/gqcnn/issues
.. toctree::
:maxdepth: 2
:caption: Background
info/info.rst
.. toctree::
:maxdepth: 2
:caption: Installation Guide
install/install.rst
.. toctree::
:maxdepth: 2
:caption: Getting Started
tutorials/tutorial.rst
.. toctree::
:maxdepth: 2
:caption: Replication
replication/replication.rst
.. toctree::
:maxdepth: 2
:caption: Benchmarks
benchmarks/benchmarks.rst
.. toctree::
:maxdepth: 2
:caption: API Documentation
:glob:
api/gqcnn.rst
api/training.rst
api/analysis.rst
api/policies.rst
.. toctree::
:maxdepth: 2
:caption: License
license/license.rst
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
================================================
FILE: docs/source/info/info.rst
================================================
What are GQ-CNNs?
-----------------
GQ-CNNs are neural network architectures that take as input a depth image and grasp, and output the predicted probability that the grasp will successfully hold the object while lifting, transporting, and shaking the object.
.. figure:: ../images/gqcnn.png
:width: 100%
:align: center
Original GQ-CNN architecture from `Dex-Net 2.0`_.
.. figure:: ../images/fcgqcnn_arch_diagram.png
:width: 100%
:align: center
Alternate faster GQ-CNN architecture from `FC-GQ-CNN`_.
The GQ-CNN weights are trained on datasets of synthetic point clouds, parallel jaw grasps, and grasp metrics generated from physics-based models with domain randomization for sim-to-real transfer. See the ongoing `Dexterity Network (Dex-Net)`_ project for more information.
.. _Dexterity Network (Dex-Net): https://berkeleyautomation.github.io/dex-net
.. _Dex-Net 2.0: https://berkeleyautomation.github.io/dex-net/#dexnet_2
.. _FC-GQ-CNN: https://berkeleyautomation.github.io/fcgqcnn
================================================
FILE: docs/source/install/install.rst
================================================
Prerequisites
~~~~~~~~~~~~~
Python
""""""
The `gqcnn` package has only been tested with `Python 3.5`, `Python 3.6`, and `Python 3.7`.
Ubuntu
""""""
The `gqcnn` package has only been tested with `Ubuntu 12.04`, `Ubuntu 14.04` and `Ubuntu 16.04`.
Virtualenv
""""""""""
We highly recommend using a Python environment management system, in particular `Virtualenv`, with the Pip and ROS installations. **Note: Several users have encountered problems with dependencies when using Conda.**
Pip Installation
~~~~~~~~~~~~~~~~
The pip installation is intended for users who are **only interested in 1) Training GQ-CNNs or 2) Grasp planning on saved RGBD images**, not
interfacing with a physical robot.
If you have intentions of using GQ-CNNs for grasp planning on a physical robot, we suggest you `install as a ROS package`_.
.. _install as a ROS package: https://berkeleyautomation.github.io/gqcnn/install/install.html#ros-installation
1. Clone the repository
"""""""""""""""""""""""
Clone or download the `project`_ from Github. ::
$ git clone https://github.com/BerkeleyAutomation/gqcnn.git
.. _project: https://github.com/BerkeleyAutomation/gqcnn
2. Run pip installation
"""""""""""""""""""""""
Change directories into the `gqcnn` repository and run the pip installation. ::
$ pip install .
This will install `gqcnn` in your current virtual environment.
.. _ros-install:
ROS Installation
~~~~~~~~~~~~~~~~
Installation as a ROS package is intended for users who wish to use GQ-CNNs to plan grasps on a physical robot.
1. Clone the repository
"""""""""""""""""""""""
Clone or download the `project`_ from Github. ::
$ cd <PATH_TO_YOUR_CATKIN_WORKSPACE>/src
$ git clone https://github.com/BerkeleyAutomation/gqcnn.git
2. Build the catkin package
"""""""""""""""""""""""""""
Build the catkin package. ::
$ cd <PATH_TO_YOUR_CATKIN_WORKSPACE>
$ catkin_make
Then re-source `devel/setup.bash` for the package to be available through Python.
Docker Installation
~~~~~~~~~~~~~~~~~~~
We currently do not provide pre-built Docker images, but you can build them yourself. This will require you to have installed `Docker`_ or `Nvidia-Docker`_ if you plan on using GPUs. Note that our provided build for GPUs uses CUDA 10.0 and cuDNN 7.0, so make sure that this is compatible with your GPU hardware. If you wish to use a different CUDA/cuDNN version, change the base image in `docker/gpu/Dockerfile` to the desired `CUDA/cuDNN image distribution`_. **Note that other images have not yet been tested.**
.. _Docker: https://www.docker.com/
.. _Nvidia-Docker: https://github.com/NVIDIA/nvidia-docker
.. _CUDA/cuDNN image distribution: https://hub.docker.com/r/nvidia/cuda/
1. Clone the repository
"""""""""""""""""""""""
Clone or download the `project`_ from Github. ::
$ git clone https://github.com/BerkeleyAutomation/gqcnn.git
.. _project: https://github.com/BerkeleyAutomation/gqcnn
2. Build Docker images
""""""""""""""""""""""
Change directories into the `gqcnn` repository and run the build script. ::
$ ./scripts/docker/build-docker.sh
This will build the images `gqcnn/cpu` and `gqcnn/gpu`.
3. Run Docker image
""""""""""""""""""""
To run `gqcnn/cpu`: ::
$ docker run --rm -it gqcnn/cpu
To run `gqcnn/gpu`: ::
$ nvidia-docker run --rm -it gqcnn/gpu
Note the use of `nvidia-docker` in the latter to enable the Nvidia runtime.
You will then see an interactive shell like this: ::
$ root@a96488604093:~/Workspace/gqcnn#
Now you can proceed to run the examples and tutorial!
================================================
FILE: docs/source/license/license.rst
================================================
License
~~~~~~~
The `gqcnn` library, pre-trained models, and raw datasets are licensed under The Regents of the University of California (Regents). Copyright ©2019. All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
The raw datasets are generated from 3D object models from `3DNet`_ and `the KIT Object Database`_ that may be subject to copyright.
.. _3DNet: https://repo.acin.tuwien.ac.at/tmp/permanent/3d-net.org/
.. _the KIT Object Database: https://h2t-projects.webarchiv.kit.edu/Projects/ObjectModelsWebUI/
================================================
FILE: docs/source/replication/replication.rst
================================================
Replicating Results
~~~~~~~~~~~~~~~~~~~
Numerous publications in the larger `Dex-Net`_ project utilize GQ-CNNs, particularly `Dex-Net 2.0`_, `Dex-Net 2.1`_, `Dex-Net 3.0`_, `Dex-Net 4.0`_, and `FC-GQ-CNN`_. One of the goals of the `gqcnn` library is to provide the necessary code and instructions for replicating the results of these papers. Thus, we have provided a number of replication scripts under the `scripts/` directory.
There are two ways to replicate results:
#. **Use a pre-trained model:** Download a pre-trained GQ-CNN model and run an example policy.
#. **Train from scratch:** Download the raw dataset, train a GQ-CNN model, and run an example policy using the model you just trained.
**We highly encourage method 1.** Note that method 2 is computationally expensive as training takes roughly 24 hours on a Nvidia Titan Xp GPU. Furthermore, the raw datasets are fairly large in size.
Please keep in mind that GQ-CNN models are sensitive to the following parameters used during dataset generation:
#. The robot gripper
#. The depth camera
#. The distance between the camera and workspace.
As a result, we cannot guarantee performance of our pre-trained models on other physical setups.
For more information about the pre-trained models and sample inputs for the example policy, see :ref:`pre-trained-models` and :ref:`sample-inputs`.
Using a Pre-trained Model
=========================
First download the pre-trained models. ::
$ ./scripts/downloads/models/download_models.sh
Dex-Net 2.0
"""""""""""
Evaluate the pre-trained GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_2.0_examples.sh
Dex-Net 2.1
"""""""""""
Evaluate the pre-trained GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_2.1_examples.sh
Dex-Net 3.0
"""""""""""
Evaluate the pre-trained GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_3.0_examples.sh
Dex-Net 4.0
"""""""""""
To evaluate the pre-trained **parallel jaw** GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_4.0_pj_examples.sh
To evaluate the pre-trained **suction** GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_4.0_suction_examples.sh
FC-GQ-CNN
"""""""""""
To evaluate the pre-trained **parallel jaw** FC-GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_4.0_fc_pj_examples.sh
To evaluate the pre-trained **suction** FC-GQ-CNN model. ::
$ ./scripts/policies/run_all_dex-net_4.0_fc_suction_examples.sh
Training from Scratch
=====================
Dex-Net 2.0
"""""""""""
First download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_2.0.sh
Then train a GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_2.0.sh
Finally, evaluate the trained GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_2.0_examples.sh
Dex-Net 2.1
"""""""""""
First download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_2.1.sh
Then train a GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_2.1.sh
Finally, evaluate the trained GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_2.1_examples.sh
Dex-Net 3.0
"""""""""""
First download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_3.0.sh
Then train a GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_3.0.sh
Finally, evaluate the trained GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_3.0_examples.sh
Dex-Net 4.0
"""""""""""
To replicate the `Dex-Net 4.0`_ **parallel jaw** results, first download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_4.0_pj.sh
Then train a GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_4.0_pj.sh
Finally, evaluate the trained GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_4.0_pj_examples.sh
To replicate the `Dex-Net 4.0`_ **suction** results, first download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_4.0_suction.sh
Then train a GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_4.0_suction.sh
Finally, evaluate the trained GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_4.0_suction_examples.sh
FC-GQ-CNN
"""""""""""
To replicate the `FC-GQ-CNN`_ **parallel jaw** results, first download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_4.0_fc_pj.sh
Then train a FC-GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_4.0_fc_pj.sh
Finally, evaluate the trained FC-GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_4.0_fc_pj_examples.sh
To replicate the `FC-GQ-CNN`_ **suction** results, first download the appropriate dataset. ::
$ ./scripts/downloads/datasets/download_dex-net_4.0_fc_suction.sh
Then train a FC-GQ-CNN from scratch. ::
$ ./scripts/training/train_dex-net_4.0_fc_suction.sh
Finally, evaluate the trained FC-GQ-CNN. ::
$ ./scripts/policies/run_all_dex-net_4.0_fc_suction_examples.sh
.. _Dex-Net: https://berkeleyautomation.github.io/dex-net/
.. _Dex-Net 2.0: https://berkeleyautomation.github.io/dex-net/#dexnet_2
.. _Dex-Net 2.1: https://berkeleyautomation.github.io/dex-net/#dexnet_21
.. _Dex-Net 3.0: https://berkeleyautomation.github.io/dex-net/#dexnet_3
.. _Dex-Net 4.0: https://berkeleyautomation.github.io/dex-net/#dexnet_4
.. _FC-GQ-CNN: https://berkeleyautomation.github.io/fcgqcnn
.. _gqcnn: https://github.com/BerkeleyAutomation/gqcnn
================================================
FILE: docs/source/tutorials/analysis.rst
================================================
Analysis
~~~~~~~~
It is helpful to check the training and validation loss and classification errors to ensure that the network has trained successfully. To analyze the performance of a trained GQ-CNN, run: ::
$ python tools/analyze_gqcnn_performance.py <model_name>
The args are:
#. **model_name**: Name of a trained model.
The script will store a detailed analysis in `analysis/<model_name>/`.
To analyze the networks we just trained, run: ::
$ python tools/analyze_gqcnn_performance.py gqcnn_example_pj
$ python tools/analyze_gqcnn_performance.py gqcnn_example_suction
Below is the expected output for the **parallel jaw** network. Please keep in mind that the exact performance values may change due to randomization in the training dataset and random weight initialization: ::
$ GQCNNAnalyzer INFO TRAIN
$ GQCNNAnalyzer INFO Original error: 36.812
$ GQCNNAnalyzer INFO Final error: 6.061
$ GQCNNAnalyzer INFO Orig loss: 0.763
$ GQCNNAnalyzer INFO Final loss: 0.248
$ GQCNNAnalyzer INFO VAL
$ GQCNNAnalyzer INFO Original error: 32.212
$ GQCNNAnalyzer INFO Final error: 7.509
$ GQCNNAnalyzer INFO Normalized error: 0.233
A set of plots will be saved to `analysis/gqcnn_example_pj/`. The plots `training_error_rates.png` and `precision_recall.png` should look like the following:
.. image:: ../images/plots/pj_error_rate.png
:width: 49 %
.. image:: ../images/plots/pj_roc.png
:width: 49 %
Here is the expected output for the **suction** network: ::
$ GQCNNAnalyzer INFO TRAIN
$ GQCNNAnalyzer INFO Original error: 17.844
$ GQCNNAnalyzer INFO Final error: 6.417
$ GQCNNAnalyzer INFO Orig loss: 0.476
$ GQCNNAnalyzer INFO Final loss: 0.189
$ GQCNNAnalyzer INFO VAL
$ GQCNNAnalyzer INFO Original error: 18.036
$ GQCNNAnalyzer INFO Final error: 6.907
$ GQCNNAnalyzer INFO Normalized error: 0.383
A set of plots will be saved to `analysis/gqcnn_example_suction/`. The plots `training_error_rates.png` and `precision_recall.png` should look like the following:
.. image:: ../images/plots/suction_error_rate.png
:width: 49 %
.. image:: ../images/plots/suction_roc.png
:width: 49 %
================================================
FILE: docs/source/tutorials/planning.rst
================================================
Grasp Planning
~~~~~~~~~~~~~~
Grasp planning involves searching for the grasp with the highest predicted probability of success given a point cloud.
In the `gqcnn` package this is implemented as policies that map an RGBD image to a 6-DOF grasping pose by maximizing the output of a GQ-CNN. The maximization can be implemented with iterative methods such as the `Cross Entropy Method (CEM)`_, which is used in `Dex-Net 2.0`_, `Dex-Net 2.1`_, `Dex-Net 3.0`_, `Dex-Net 4.0`_, or much faster `fully convolutional networks`, which are used in the `FC-GQ-CNN`_.
.. _Cross Entropy Method (CEM): https://en.wikipedia.org/wiki/Cross-entropy_method
.. _Dex-Net 2.0: https://berkeleyautomation.github.io/dex-net/#dexnet_2
.. _Dex-Net 2.1: https://berkeleyautomation.github.io/dex-net/#dexnet_21
.. _Dex-Net 3.0: https://berkeleyautomation.github.io/dex-net/#dexnet_3
.. _Dex-Net 4.0: https://berkeleyautomation.github.io/dex-net/#dexnet_4
.. _FC-GQ-CNN: https://berkeleyautomation.github.io/fcgqcnn
We provide example policies in `examples/`. In particular, we provide both an example Python policy and an example ROS policy. **Note that the ROS policy requires the ROS gqcnn installation**, which can be found :ref:`here <ros-install>`. We highly recommend using the Python policy unless you need to plan grasps on a physical robot using ROS.
.. _sample-inputs:
Sample Inputs
-------------
Sample inputs from our experimental setup are provided with the repo:
#. **data/examples/clutter/phoxi/dex-net_4.0**: Set of example images from a PhotoNeo PhoXi S containing objects used in `Dex-Net 4.0`_ experiments arranged in heaps.
#. **data/examples/clutter/phoxi/fcgqcnn**: Set of example images from a PhotoNeo PhoXi S containing objects used in `FC-GQ-CNN`_ experiments arranged in heaps.
#. **data/examples/single_object/primesense/**: Set of example images from a Primesense Carmine containing objects used in `Dex-Net 2.0`_ experiments in singulation.
#. **data/examples/clutter/primesense/**: Set of example images from a Primesense Carmine containing objects used in `Dex-Net 2.1`_ experiments arranged in heaps.
**\*\*Note that when trying these sample inputs, you must make sure that the GQ-CNN model you are using was trained for the corresponding camera and input type (singulation/clutter). See the following section for more details.\*\***
.. _pre-trained-models:
Pre-trained Models
------------------
Pre-trained parallel jaw and suction models for `Dex-Net 4.0`_ are automatically downloaded with the `gqcnn` package installation. If you do wish to try out models for older results (or for our experimental `FC-GQ-CNN`_), all pre-trained models can be downloaded with: ::
$ ./scripts/downloads/models/download_models.sh
The models are:
#. **GQCNN-2.0**: For `Dex-Net 2.0`_, trained on images of objects in singulation with parameters for a Primesense Carmine.
#. **GQCNN-2.1**: For `Dex-Net 2.1`_, a `Dex-Net 2.0`_ model fine-tuned on images of objects in clutter with parameters for a Primesense Carmine.
#. **GQCNN-3.0**: For `Dex-Net 3.0`_, trained on images of objects in clutter with parameters for a Primesense Carmine.
#. **GQCNN-4.0-PJ**: For `Dex-Net 4.0`_, trained on images of objects in clutter with parameters for a PhotoNeo PhoXi S.
#. **GQCNN-4.0-SUCTION**: For `Dex-Net 4.0`_, trained on images of objects in clutter with parameters for a PhotoNeo PhoXi S.
#. **FC-GQCNN-4.0-PJ**: For `FC-GQ-CNN`_, trained on images of objects in clutter with parameters for a PhotoNeo PhoXi S.
#. **FC-GQCNN-4.0-SUCTION**: For `FC-GQ-CNN`_, trained on images of objects in clutter with parameters for a PhotoNeo PhoXi S.
**\*\*Note that GQ-CNN models are sensitive to the parameters used during dataset generation, specifically 1) Gripper geometry, an ABB YuMi Parallel Jaw Gripper for all our pre-trained models 2) Camera intrinsics, either a Primesense Carmine or PhotoNeo PhoXi S for all our pre-trained models (see above for which one) 3) Distance between camera and workspace during rendering, 50-70cm for all our pre-trained models. Thus we cannot guarantee performance of our pre-trained models on other physical setups. If you have a specific use-case in mind, please reach out to us.\*\*** We are actively researching how to generate more robust datasets that can generalize across robots, cameras, and viewpoints!
Python Policy
-------------
The example Python policy can be queried on saved images using: ::
$ python examples/policy.py <model_name> --depth_image <depth_image_filename> --segmask <segmask_filename> --camera_intr <camera_intr_filename>
The args are:
#. **model_name**: Name of the GQ-CNN model to use.
#. **depth_image_filename**: Path to a depth image (float array in .npy format).
#. **segmask_filename**: Path to an object segmentation mask (binary image in .png format).
#. **camera_intr_filename**: Path to a camera intrinsics file (.intr file generated with `BerkeleyAutomation's`_ `autolab_core`_ package).
.. _BerkeleyAutomation's: https://github.com/BerkeleyAutomation
.. _autolab_core: https://github.com/BerkeleyAutomation/autolab_core
To evaluate the pre-trained `Dex-Net 4.0`_ **parallel jaw** network on sample images of objects in heaps run: ::
$ python examples/policy.py GQCNN-4.0-PJ --depth_image data/examples/clutter/phoxi/dex-net_4.0/depth_0.npy --segmask data/examples/clutter/phoxi/dex-net_4.0/segmask_0.png --camera_intr data/calib/phoxi/phoxi.intr
To evaluate the pre-trained `Dex-Net 4.0`_ **suction** network on sample images of objects in heaps run: ::
$ python examples/policy.py GQCNN-4.0-SUCTION --depth_image data/examples/clutter/phoxi/dex-net_4.0/depth_0.npy --segmask data/examples/clutter/phoxi/dex-net_4.0/segmask_0.png --camera_intr data/calib/phoxi/phoxi.intr
.. _ros-policy:
ROS Policy
----------
First start the grasp planning service: ::
$ roslaunch gqcnn grasp_planning_service.launch model_name:=<model_name>
The args are:
#. **model_name**: Name of the GQ-CNN model to use. Default is **GQCNN-4.0-PJ**.
#. **model_dir**: Path to the directory where the GQ-CNN models are located. Default is **models/**. If you are using the provided **download_models.sh** script, you shouldn't have to modify this.
To start the grasp planning service with the pre-trained `Dex-Net 4.0`_ **parallel jaw** network run: ::
$ roslaunch gqcnn grasp_planning_service.launch model_name:=GQCNN-4.0-PJ
To start the grasp planning service with the pre-trained `Dex-Net 4.0`_ **suction** network run: ::
$ roslaunch gqcnn grasp_planning_service.launch model_name:=GQCNN-4.0-SUCTION
The example ROS policy can then be queried on saved images using: ::
$ python examples/policy_ros.py --depth_image <depth_image_filename> --segmask <segmask_filename> --camera_intr <camera_intr_filename>
The args are:
#. **depth_image_filename**: Path to a depth image (float array in .npy format).
#. **segmask_filename**: Path to an object segmentation mask (binary image in .png format).
#. **camera_intr_filename**: Path to a camera intrinsics file (.intr file generated with `BerkeleyAutomation's`_ `autolab_core`_ package).
To query the policy on sample images of objects in heaps run: ::
$ python examples/policy_ros.py --depth_image data/examples/clutter/phoxi/dex-net_4.0/depth_0.npy --segmask data/examples/clutter/phoxi/dex-net_4.0/segmask_0.png --camera_intr data/calib/phoxi/phoxi.intr
Usage on a Physical Robot with ROS
----------------------------------
To run the GQ-CNN on a physical robot with ROS, you will want to implement your own ROS node to query the grasp planning service similar to what `examples/policy_ros.py` does. If you are interested in replicating this functionality on your own robot, please contact Jeff Mahler (jmahler@berkeley.edu) with the subject line: "Interested in GQ-CNN ROS Service".
FC-GQ-CNN Policy
----------------
Our most recent research result, the `FC-GQ-CNN`_, combines novel fully convolutional network architectures with our prior work on GQ-CNNs to increase policy rate and reliability. Instead of relying on the `Cross Entropy Method (CEM)`_ to iteratively search over the policy action space for the best grasp, the FC-GQ-CNN instead densely and efficiently evaluates the entire action space in parallel. It is thus able to consider 5000x more grasps in 0.625s, resulting in a MPPH (Mean Picks Per Hour) of 296, compared to the prior 250 MPPH of `Dex-Net 4.0`_.
.. figure:: ../images/fcgqcnn_arch_diagram.png
:width: 100 %
:align: center
FC-GQ-CNN architecture.
You can download the pre-trained `FC-GQ-CNN`_ parallel jaw and suction models along with the other pre-trained models: ::
$ ./scripts/downloads/models/download_models.sh
Then run the Python policy with the `\\--fully_conv` flag.
To evaluate the pre-trained `FC-GQ-CNN`_ **parallel jaw** network on sample images of objects in heaps run: ::
$ python examples/policy.py FC-GQCNN-4.0-PJ --fully_conv --depth_image data/examples/clutter/phoxi/fcgqcnn/depth_0.npy --segmask data/examples/clutter/phoxi/fcgqcnn/segmask_0.png --camera_intr data/calib/phoxi/phoxi.intr
To evaluate the pre-trained `FC-GQ-CNN`_ **suction** network on sample images of objects in heaps run: ::
$ python examples/policy.py FC-GQCNN-4.0-SUCTION --fully_conv --depth_image data/examples/clutter/phoxi/fcgqcnn/depth_0.npy --segmask data/examples/clutter/phoxi/fcgqcnn/segmask_0.png --camera_intr data/calib/phoxi/phoxi.intr
With ROS
^^^^^^^^
Review the section on using the normal ROS policy first, which can be found :ref:`here <ros-policy>`.
Add the additional arg **fully_conv:=True** when launching the grasp planning service and provide the corresponding network (**FC-GQCNN-4.0-PJ** for **parallel jaw** and **FC-GQCNN-4.0-SUCTION** for **suction**).
If you wish to test on inputs other than those provided in `data/examples/clutter/phoxi/fcgqcnn/`, you will need to edit the input height and width configuration in the appropriate `cfg/examples/<fc_gqcnn_pj.yaml or fc_gqcnn_suction.yaml>` under `["policy"]["metric"]["fully_conv_gqcnn_config"]`.
================================================
FILE: docs/source/tutorials/training.rst
================================================
Training
~~~~~~~~
The `gqcnn` package can be used to train a `Dex-Net 4.0`_ GQ-CNN model on a custom offline `Dex-Net`_ dataset. Because training from scratch can be time-consuming, the most efficient way to train a new network is to fine-tune the weights of a pre-trained `Dex-Net 4.0`_ GQ-CNN model, which has already been trained on millions of images.
.. _Dex-Net 4.0: https://berkeleyautomation.github.io/dex-net/#dexnet_4
.. _Dex-Net: https://berkeleyautomation.github.io/dex-net/
To fine-tune a GQ-CNN run: ::
$ python tools/finetune.py <training_dataset_path> <pretrained_network_name> --config_filename <config_filename> --name <model_name>
The args are:
#. **training_dataset_path**: Path to the training dataset.
#. **pretrained_network_name**: Name of pre-trained GQ-CNN.
#. **config_filename**: Name of the config file to use.
#. **model_name**: Name for the model.
To train a GQ-CNN for a **parallel jaw** gripper on a sample dataset, run the fine-tuning script: ::
$ python tools/finetune.py data/training/example_pj/ GQCNN-4.0-PJ --config_filename cfg/finetune_example_pj.yaml --name gqcnn_example_pj
To train a GQ-CNN for a **suction** gripper run: ::
$ python tools/finetune.py data/training/example_suction/ GQCNN-4.0-SUCTION --config_filename cfg/finetune_example_suction.yaml --name gqcnn_example_suction
Visualizing Training
--------------------
The `gqcnn` model contains support for visualizing training progress through Tensorboard. Tensorboard is automatically launched when the training script is run and can be accessed by navigating to **localhost:6006** in a web browser. There you will find something like the following:
.. image:: ../images/tensorboard.png
:width: 100 %
Which displays useful training statistics such as validation error, minibatch loss, and learning rate.
The Tensorflow summaries are stored in `models/<model_name>/tensorboard_summaries/`.
================================================
FILE: docs/source/tutorials/tutorial.rst
================================================
Overview
~~~~~~~~
There are two main use cases of the `gqcnn` package:
#. :ref:`training` a `Dex-Net 4.0`_ GQ-CNN model on an offline `Dex-Net`_ dataset of point clouds, grasps, and grasp success metrics, and then grasp planning on RGBD images.
#. :ref:`grasp planning` on RGBD images using a pre-trained `Dex-Net 4.0`_ GQ-CNN model.
.. _Dex-Net 4.0: https://berkeleyautomation.github.io/dex-net/#dexnet_4
.. _Dex-Net: https://berkeleyautomation.github.io/dex-net/
Click on the links or scroll down to get started!
Prerequisites
-------------
Before running the tutorials please download the example models and datasets: ::
$ cd /path/to/your/gqcnn
$ ./scripts/downloads/download_example_data.sh
$ ./scripts/downloads/models/download_models.sh
Running Python Scripts
----------------------
All `gqcnn` Python scripts are designed to be run from the top-level directory of your `gqcnn` repo by default. This is because every script takes in a YAML file specifying parameters for the script, and this YAML file is stored relative to the repository root directory.
We recommend that you run all scripts using this paradigm: ::
cd /path/to/your/gqcnn
python /path/to/script.py
.. _training:
.. include:: training.rst
.. _analysis:
.. include:: analysis.rst
.. _grasp planning:
.. include:: planning.rst
================================================
FILE: examples/antipodal_grasp_sampling.py
================================================
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Demonstrates image-based antipodal grasp candidate sampling, which is used in
the Cross Entropy Method (CEM)-based GQ-CNN grasping policy. Samples images
from a BerkeleyAutomation/perception `RgbdSensor`.
Author
------
Jeff Mahler
"""
import argparse
import os

from autolab_core import RigidTransform, YamlConfig, Logger, RgbdImage
from perception import RgbdSensorFactory
from visualization import Visualizer2D as vis

from gqcnn.grasping import AntipodalDepthImageGraspSampler

# Set up logger.
logger = Logger.get_logger("examples/antipodal_grasp_sampling.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(description=(
        "Sample antipodal grasps on a depth image from an RgbdSensor"))
    parser.add_argument("--config_filename",
                        type=str,
                        default="cfg/examples/antipodal_grasp_sampling.yaml",
                        help="path to configuration file to use")
    args = parser.parse_args()
    config_filename = args.config_filename

    # Read config.
    config = YamlConfig(config_filename)
    sensor_type = config["sensor"]["type"]
    sensor_frame = config["sensor"]["frame"]
    num_grasp_samples = config["num_grasp_samples"]
    gripper_width = config["gripper_width"]
    inpaint_rescale_factor = config["inpaint_rescale_factor"]
    visualize_sampling = config["visualize_sampling"]
    sample_config = config["sampling"]

    # Read camera calib. The transform is loaded here to validate that the
    # calibration for this sensor frame exists; sampling itself operates
    # purely in the image frame.
    tf_filename = "%s_to_world.tf" % (sensor_frame)
    T_camera_world = RigidTransform.load(
        os.path.join(config["calib_dir"], sensor_frame, tf_filename))

    # Setup sensor. Stop it again in a `finally` so the camera driver is not
    # left streaming if intrinsics lookup or frame capture raises.
    sensor = RgbdSensorFactory.sensor(sensor_type, config["sensor"])
    sensor.start()
    try:
        camera_intr = sensor.ir_intrinsics

        # Read one RGB-D frame from the sensor.
        color_im, depth_im, _ = sensor.frames()
    finally:
        sensor.stop()

    # Inpaint missing pixels before sampling.
    color_im = color_im.inpaint(rescale_factor=inpaint_rescale_factor)
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)

    # Sample antipodal grasp candidates (fixed seed for reproducibility).
    grasp_sampler = AntipodalDepthImageGraspSampler(sample_config,
                                                    gripper_width)
    grasps = grasp_sampler.sample(rgbd_im,
                                  camera_intr,
                                  num_grasp_samples,
                                  segmask=None,
                                  seed=100,
                                  visualize=visualize_sampling)

    # Visualize the sampled grasps overlaid on the depth image.
    vis.figure()
    vis.imshow(depth_im)
    for grasp in grasps:
        vis.grasp(grasp, scale=1.5, show_center=False, show_axis=True)
    vis.title("Sampled grasps")
    vis.show()
================================================
FILE: examples/policy.py
================================================
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Displays robust grasps planned using a GQ-CNN grasping policy on a set of saved
RGB-D images. The default configuration for the standard GQ-CNN policy is
`cfg/examples/gqcnn_pj.yaml`. The default configuration for the
Fully-Convolutional GQ-CNN policy is `cfg/examples/fc_gqcnn_pj.yaml`.
Author
------
Jeff Mahler & Vishal Satish
"""
import argparse
import json
import os
import time

import numpy as np

from autolab_core import (YamlConfig, Logger, BinaryImage, CameraIntrinsics,
                          ColorImage, DepthImage, RgbdImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import (RobustGraspingPolicy,
                            CrossEntropyRobustGraspingPolicy, RgbdImageState,
                            FullyConvolutionalGraspingPolicyParallelJaw,
                            FullyConvolutionalGraspingPolicySuction)
from gqcnn.utils import GripperMode

# Set up logger.
logger = Logger.get_logger("examples/policy.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description="Run a grasping policy on an example image")
    parser.add_argument("model_name",
                        type=str,
                        help="name of a trained model to run")
    parser.add_argument(
        "--depth_image",
        type=str,
        default=None,
        help="path to a test depth image stored as a .npy file")
    parser.add_argument("--segmask",
                        type=str,
                        default=None,
                        help="path to an optional segmask to use")
    parser.add_argument("--camera_intr",
                        type=str,
                        default=None,
                        help="path to the camera intrinsics")
    parser.add_argument("--model_dir",
                        type=str,
                        default=None,
                        help="path to the folder in which the model is stored")
    parser.add_argument("--config_filename",
                        type=str,
                        default=None,
                        help="path to configuration file to use")
    parser.add_argument(
        "--fully_conv",
        action="store_true",
        help=("run Fully-Convolutional GQ-CNN policy instead of standard"
              " GQ-CNN policy"))
    args = parser.parse_args()
    model_name = args.model_name
    depth_im_filename = args.depth_image
    segmask_filename = args.segmask
    camera_intr_filename = args.camera_intr
    model_dir = args.model_dir
    config_filename = args.config_filename
    fully_conv = args.fully_conv

    # The fully-convolutional policy requires a segmask when a custom depth
    # image is supplied.
    assert not (fully_conv and depth_im_filename is not None
                and segmask_filename is None
                ), "Fully-Convolutional policy expects a segmask."

    # Fall back on the bundled example images and calibration when paths are
    # not provided on the command line.
    if depth_im_filename is None:
        if fully_conv:
            depth_im_filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), "..",
                "data/examples/clutter/primesense/depth_0.npy")
        else:
            depth_im_filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), "..",
                "data/examples/single_object/primesense/depth_0.npy")
    if fully_conv and segmask_filename is None:
        segmask_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/examples/clutter/primesense/segmask_0.png")
    if camera_intr_filename is None:
        camera_intr_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/calib/primesense/primesense.intr")

    # Set model if provided.
    if model_dir is None:
        model_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 "../models")
    model_path = os.path.join(model_dir, model_name)

    # Get configs. Use a context manager so the config file handle is closed
    # even if JSON parsing fails.
    with open(os.path.join(model_path, "config.json"), "r") as f:
        model_config = json.load(f)

    # Resolve the gripper mode. Legacy models store an `input_data_mode`
    # string instead of an explicit `gripper_mode`.
    try:
        gqcnn_config = model_config["gqcnn"]
        gripper_mode = gqcnn_config["gripper_mode"]
    except KeyError:
        gqcnn_config = model_config["gqcnn_config"]
        input_data_mode = gqcnn_config["input_data_mode"]
        if input_data_mode == "tf_image":
            gripper_mode = GripperMode.LEGACY_PARALLEL_JAW
        elif input_data_mode == "tf_image_suction":
            gripper_mode = GripperMode.LEGACY_SUCTION
        elif input_data_mode == "suction":
            gripper_mode = GripperMode.SUCTION
        elif input_data_mode == "multi_suction":
            gripper_mode = GripperMode.MULTI_SUCTION
        elif input_data_mode == "parallel_jaw":
            gripper_mode = GripperMode.PARALLEL_JAW
        else:
            raise ValueError(
                "Input data mode {} not supported!".format(input_data_mode))

    # Set config, choosing the default policy config for the gripper mode.
    if config_filename is None:
        if (gripper_mode == GripperMode.LEGACY_PARALLEL_JAW
                or gripper_mode == GripperMode.PARALLEL_JAW):
            if fully_conv:
                config_filename = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), "..",
                    "cfg/examples/fc_gqcnn_pj.yaml")
            else:
                config_filename = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), "..",
                    "cfg/examples/gqcnn_pj.yaml")
        elif (gripper_mode == GripperMode.LEGACY_SUCTION
              or gripper_mode == GripperMode.SUCTION):
            if fully_conv:
                config_filename = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), "..",
                    "cfg/examples/fc_gqcnn_suction.yaml")
            else:
                config_filename = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), "..",
                    "cfg/examples/gqcnn_suction.yaml")
        else:
            # Previously this fell through with `config_filename` still None,
            # which crashed later inside `YamlConfig` with an obscure error.
            raise ValueError(
                "No default config for gripper mode {}; please specify"
                " --config_filename".format(gripper_mode))

    # Read config.
    config = YamlConfig(config_filename)
    inpaint_rescale_factor = config["inpaint_rescale_factor"]
    policy_config = config["policy"]

    # Make relative paths absolute.
    if "gqcnn_model" in policy_config["metric"]:
        policy_config["metric"]["gqcnn_model"] = model_path
        if not os.path.isabs(policy_config["metric"]["gqcnn_model"]):
            policy_config["metric"]["gqcnn_model"] = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), "..",
                policy_config["metric"]["gqcnn_model"])

    # Setup sensor.
    camera_intr = CameraIntrinsics.load(camera_intr_filename)

    # Read images. A zero color image is synthesized because the policy state
    # requires an RGB-D pair but only depth is used.
    depth_data = np.load(depth_im_filename)
    depth_im = DepthImage(depth_data, frame=camera_intr.frame)
    color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)

    # Optionally read a segmask, and always mask out invalid depth pixels.
    segmask = None
    if segmask_filename is not None:
        segmask = BinaryImage.open(segmask_filename)
    valid_px_mask = depth_im.invalid_pixel_mask().inverse()
    if segmask is None:
        segmask = valid_px_mask
    else:
        segmask = segmask.mask_binary(valid_px_mask)

    # Inpaint.
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

    # Optionally visualize the inputs.
    if "input_images" in policy_config["vis"] and policy_config["vis"][
            "input_images"]:
        vis.figure(size=(10, 10))
        num_plot = 1
        if segmask is not None:
            num_plot = 2
        vis.subplot(1, num_plot, 1)
        vis.imshow(depth_im)
        if segmask is not None:
            vis.subplot(1, num_plot, 2)
            vis.imshow(segmask)
        vis.show()

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Set input sizes for fully-convolutional policy to match this image.
    if fully_conv:
        policy_config["metric"]["fully_conv_gqcnn_config"][
            "im_height"] = depth_im.shape[0]
        policy_config["metric"]["fully_conv_gqcnn_config"][
            "im_width"] = depth_im.shape[1]

    # Init policy.
    if fully_conv:
        # TODO(vsatish): We should really be doing this in some factory policy.
        if policy_config["type"] == "fully_conv_suction":
            policy = FullyConvolutionalGraspingPolicySuction(policy_config)
        elif policy_config["type"] == "fully_conv_pj":
            policy = FullyConvolutionalGraspingPolicyParallelJaw(policy_config)
        else:
            raise ValueError(
                "Invalid fully-convolutional policy type: {}".format(
                    policy_config["type"]))
    else:
        policy_type = "cem"
        if "type" in policy_config:
            policy_type = policy_config["type"]
        if policy_type == "ranking":
            policy = RobustGraspingPolicy(policy_config)
        elif policy_type == "cem":
            policy = CrossEntropyRobustGraspingPolicy(policy_config)
        else:
            raise ValueError("Invalid policy type: {}".format(policy_type))

    # Query policy.
    policy_start = time.time()
    action = policy(state)
    logger.info("Planning took %.3f sec" % (time.time() - policy_start))

    # Vis final grasp.
    if policy_config["vis"]["final_grasp"]:
        vis.figure(size=(10, 10))
        vis.imshow(rgbd_im.depth,
                   vmin=policy_config["vis"]["vmin"],
                   vmax=policy_config["vis"]["vmax"])
        vis.grasp(action.grasp, scale=2.5, show_center=False, show_axis=True)
        vis.title("Planned grasp at depth {0:.3f}m with Q={1:.3f}".format(
            action.grasp.depth, action.q_value))
        vis.show()
================================================
FILE: examples/policy_ros.py
================================================
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Displays robust grasps planned using a GQ-CNN-based policy on a set of saved
RGB-D images. The default configuration is cfg/examples/policy.yaml.
Author
------
Jeff Mahler
"""
import argparse
import logging
import numpy as np
import os
import rosgraph.roslogging as rl
import rospy
import sys

from cv_bridge import CvBridge, CvBridgeError

from autolab_core import (Point, Logger, BinaryImage, CameraIntrinsics,
                          ColorImage, DepthImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import Grasp2D, SuctionPoint2D, GraspAction
from gqcnn.msg import GQCNNGrasp
from gqcnn.srv import GQCNNGraspPlanner, GQCNNGraspPlannerSegmask

# Set up logger.
logger = Logger.get_logger("examples/policy_ros.py")


def _str2bool(value):
    """Parse a boolean command-line flag.

    `argparse` with `type=bool` treats every non-empty string (including
    "False") as True, so explicit spellings are parsed here instead.
    """
    return str(value).lower() in ("yes", "true", "t", "1")


if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description="Run a grasping policy on an example image")
    parser.add_argument(
        "--depth_image",
        type=str,
        default=None,
        help="path to a test depth image stored as a .npy file")
    parser.add_argument("--segmask",
                        type=str,
                        default=None,
                        help="path to an optional segmask to use")
    parser.add_argument("--camera_intr",
                        type=str,
                        default=None,
                        help="path to the camera intrinsics")
    parser.add_argument("--gripper_width",
                        type=float,
                        default=0.05,
                        help="width of the gripper to plan for")
    parser.add_argument("--namespace",
                        type=str,
                        default="gqcnn",
                        help="namespace of the ROS grasp planning service")
    # Fix: `type=bool` would treat "--vis_grasp False" as True.
    parser.add_argument("--vis_grasp",
                        type=_str2bool,
                        default=True,
                        help="whether or not to visualize the grasp")
    args = parser.parse_args()
    depth_im_filename = args.depth_image
    segmask_filename = args.segmask
    camera_intr_filename = args.camera_intr
    gripper_width = args.gripper_width
    namespace = args.namespace
    vis_grasp = args.vis_grasp

    # Initialize the ROS node.
    rospy.init_node("grasp_planning_example")
    logging.getLogger().addHandler(rl.RosStreamHandler())

    # Setup filenames, falling back on bundled example data.
    if depth_im_filename is None:
        depth_im_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/examples/single_object/primesense/depth_0.npy")
    if camera_intr_filename is None:
        camera_intr_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/calib/primesense/primesense.intr")

    # Wait for grasp planning service and create service proxy.
    rospy.wait_for_service("%s/grasp_planner" % (namespace))
    rospy.wait_for_service("%s/grasp_planner_segmask" % (namespace))
    plan_grasp = rospy.ServiceProxy("%s/grasp_planner" % (namespace),
                                    GQCNNGraspPlanner)
    plan_grasp_segmask = rospy.ServiceProxy(
        "%s/grasp_planner_segmask" % (namespace), GQCNNGraspPlannerSegmask)
    cv_bridge = CvBridge()

    # Set up sensor.
    camera_intr = CameraIntrinsics.load(camera_intr_filename)

    # Read images. A zero color image is synthesized because the service
    # expects an RGB-D pair but only depth is used.
    depth_im = DepthImage.open(depth_im_filename, frame=camera_intr.frame)
    color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)

    # Read segmask and call the appropriate planning service.
    if segmask_filename is not None:
        segmask = BinaryImage.open(segmask_filename, frame=camera_intr.frame)
        grasp_resp = plan_grasp_segmask(color_im.rosmsg, depth_im.rosmsg,
                                        camera_intr.rosmsg, segmask.rosmsg)
    else:
        grasp_resp = plan_grasp(color_im.rosmsg, depth_im.rosmsg,
                                camera_intr.rosmsg)
    grasp = grasp_resp.grasp

    # Convert the ROS grasp message to a grasp action.
    grasp_type = grasp.grasp_type
    if grasp_type == GQCNNGrasp.PARALLEL_JAW:
        center = Point(np.array([grasp.center_px[0], grasp.center_px[1]]),
                       frame=camera_intr.frame)
        grasp_2d = Grasp2D(center,
                           grasp.angle,
                           grasp.depth,
                           width=gripper_width,
                           camera_intr=camera_intr)
    elif grasp_type == GQCNNGrasp.SUCTION:
        center = Point(np.array([grasp.center_px[0], grasp.center_px[1]]),
                       frame=camera_intr.frame)
        grasp_2d = SuctionPoint2D(center,
                                  np.array([0, 0, 1]),
                                  grasp.depth,
                                  camera_intr=camera_intr)
    else:
        raise ValueError("Grasp type %d not recognized!" % (grasp_type))

    # Decode the grasp thumbnail image from the response.
    try:
        thumbnail = DepthImage(cv_bridge.imgmsg_to_cv2(
            grasp.thumbnail, desired_encoding="passthrough"),
                               frame=camera_intr.frame)
    except CvBridgeError as e:
        logger.error(e)
        logger.error("Failed to convert image")
        sys.exit(1)
    action = GraspAction(grasp_2d, grasp.q_value, thumbnail)

    # Vis final grasp.
    if vis_grasp:
        vis.figure(size=(10, 10))
        vis.imshow(depth_im, vmin=0.6, vmax=0.9)
        vis.grasp(action.grasp, scale=2.5, show_center=False, show_axis=True)
        vis.title("Planned grasp on depth (Q=%.3f)" % (action.q_value))
        vis.show()
================================================
FILE: examples/policy_with_image_proc.py
================================================
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Displays robust grasps planned using a GQ-CNN-based policy on a set of saved
RGB-D images. The default configuration is cfg/examples/policy.yaml.
Author
------
Jeff Mahler
"""
import argparse
import os
import time

import numpy as np
import pcl
import skimage

from autolab_core import (PointCloud, RigidTransform, YamlConfig,
                          Logger, BinaryImage, CameraIntrinsics,
                          ColorImage, DepthImage, RgbdImage,
                          SegmentationImage)
from visualization import Visualizer2D as vis

from gqcnn import (RobustGraspingPolicy, CrossEntropyRobustGraspingPolicy,
                   RgbdImageState)

# Euclidean clustering parameters: points within CLUSTER_TOL of each other
# are grouped into one cluster; clusters outside the size bounds are
# discarded as sensor noise.
CLUSTER_TOL = 0.0015
MIN_CLUSTER_SIZE = 100
MAX_CLUSTER_SIZE = 1000000

# Set up logger.
# NOTE(review): logger name says "tools/" but this file lives under
# "examples/" — confirm which location is canonical.
logger = Logger.get_logger("tools/policy_with_image_proc.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description="Run a grasping policy on an example image")
    parser.add_argument(
        "--depth_image",
        type=str,
        default=None,
        help="path to a test depth image stored as a .npy file")
    parser.add_argument("--segmask",
                        type=str,
                        default=None,
                        help="path to an optional segmask to use")
    parser.add_argument("--camera_intrinsics",
                        type=str,
                        default=None,
                        help="path to the camera intrinsics")
    parser.add_argument("--camera_pose",
                        type=str,
                        default=None,
                        help="path to the camera pose")
    parser.add_argument("--model_dir",
                        type=str,
                        default=None,
                        help="path to a trained model to run")
    parser.add_argument("--config_filename",
                        type=str,
                        default=None,
                        help="path to configuration file to use")
    args = parser.parse_args()
    depth_im_filename = args.depth_image
    segmask_filename = args.segmask
    camera_intr_filename = args.camera_intrinsics
    camera_pose_filename = args.camera_pose
    model_dir = args.model_dir
    config_filename = args.config_filename

    # Fall back on bundled example data and default config when paths are
    # not supplied on the command line.
    if depth_im_filename is None:
        depth_im_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/examples/single_object/depth_0.npy")
    if camera_intr_filename is None:
        camera_intr_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/calib/primesense.intr")
    if camera_pose_filename is None:
        camera_pose_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "data/calib/primesense.tf")
    if config_filename is None:
        config_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            "cfg/examples/replication/dex-net_2.0.yaml")

    # Read config.
    config = YamlConfig(config_filename)
    inpaint_rescale_factor = config["inpaint_rescale_factor"]
    policy_config = config["policy"]

    # Make relative paths absolute.
    if model_dir is not None:
        policy_config["metric"]["gqcnn_model"] = model_dir
    if "gqcnn_model" in policy_config["metric"] and not os.path.isabs(
            policy_config["metric"]["gqcnn_model"]):
        policy_config["metric"]["gqcnn_model"] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..",
            policy_config["metric"]["gqcnn_model"])

    # Setup sensor.
    camera_intr = CameraIntrinsics.load(camera_intr_filename)
    T_camera_world = RigidTransform.load(camera_pose_filename)

    # Read images. The divide by 1000 suggests the stored depth is in
    # millimeters and is converted to meters here — TODO confirm.
    depth_data = np.load(depth_im_filename)
    depth_data = depth_data.astype(np.float32) / 1000.0
    depth_im = DepthImage(depth_data, frame=camera_intr.frame)
    # Zero color image: the policy state needs an RGB-D pair but only depth
    # is used.
    color_im = ColorImage(np.zeros([depth_im.height, depth_im.width,
                                    3]).astype(np.uint8),
                          frame=camera_intr.frame)

    # Optionally read a segmask. The default mask is a hard-coded polygon
    # (pixel coordinates below), presumably cropping the workspace for this
    # particular camera setup — verify for other rigs.
    mask = np.zeros((camera_intr.height, camera_intr.width, 1), dtype=np.uint8)
    c = np.array([165, 460, 500, 135])
    r = np.array([165, 165, 370, 370])
    rr, cc = skimage.draw.polygon(r, c, shape=mask.shape)
    mask[rr, cc, 0] = 255
    segmask = BinaryImage(mask)
    if segmask_filename is not None:
        segmask = BinaryImage.open(segmask_filename)
    # NOTE(review): `segmask` is always set above, so the None branch below
    # is unreachable here; kept for parity with the other example scripts.
    valid_px_mask = depth_im.invalid_pixel_mask().inverse()
    if segmask is None:
        segmask = valid_px_mask
    else:
        segmask = segmask.mask_binary(valid_px_mask)

    # Create new cloud by deprojecting the depth image through the camera
    # intrinsics, dropping zero-depth (invalid) points.
    point_cloud = camera_intr.deproject(depth_im)
    point_cloud.remove_zero_points()
    pcl_cloud = pcl.PointCloud(point_cloud.data.T.astype(np.float32))
    tree = pcl_cloud.make_kdtree()

    # Find large clusters (likely to be real objects instead of noise).
    ec = pcl_cloud.make_EuclideanClusterExtraction()
    ec.set_ClusterTolerance(CLUSTER_TOL)
    ec.set_MinClusterSize(MIN_CLUSTER_SIZE)
    ec.set_MaxClusterSize(MAX_CLUSTER_SIZE)
    ec.set_SearchMethod(tree)
    cluster_indices = ec.Extract()
    num_clusters = len(cluster_indices)  # NOTE(review): currently unused.

    # Per-pixel object labels: 0 = background, j+1 = cluster j.
    obj_segmask_data = np.zeros(depth_im.shape)

    # Read out all points in large clusters and project each cluster back
    # into the image to label its pixels.
    cur_i = 0  # NOTE(review): unused accumulator; kept as-is.
    for j, indices in enumerate(cluster_indices):
        num_points = len(indices)
        points = np.zeros([3, num_points])
        for i, index in enumerate(indices):
            points[0, i] = pcl_cloud[index][0]
            points[1, i] = pcl_cloud[index][1]
            points[2, i] = pcl_cloud[index][2]
        segment = PointCloud(points, frame=camera_intr.frame)
        depth_segment = camera_intr.project_to_image(segment)
        obj_segmask_data[depth_segment.data > 0] = j + 1
    obj_segmask = SegmentationImage(obj_segmask_data.astype(np.uint8))
    # Restrict object labels to the workspace segmask.
    obj_segmask = obj_segmask.mask_binary(segmask)

    # Inpaint missing depth values.
    depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

    # Optionally visualize the inputs in 2D and the deprojected cloud in 3D.
    if "input_images" in policy_config["vis"] and policy_config["vis"][
            "input_images"]:
        vis.figure(size=(10, 10))
        num_plot = 3
        vis.subplot(1, num_plot, 1)
        vis.imshow(depth_im)
        vis.subplot(1, num_plot, 2)
        vis.imshow(segmask)
        vis.subplot(1, num_plot, 3)
        vis.imshow(obj_segmask)
        vis.show()

        # Deferred import: the 3D visualizer is only needed for this debug
        # view.
        from visualization import Visualizer3D as vis3d
        point_cloud = camera_intr.deproject(depth_im)
        vis3d.figure()
        vis3d.points(point_cloud,
                     subsample=3,
                     random=True,
                     color=(0, 0, 1),
                     scale=0.001)
        # Show the world origin and the camera pose for reference.
        vis3d.pose(RigidTransform())
        vis3d.pose(T_camera_world.inverse())
        vis3d.show()

    # Create state.
    rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
    state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask)

    # Init policy ("cem" cross-entropy method unless overridden in config).
    policy_type = "cem"
    if "type" in policy_config:
        policy_type = policy_config["type"]
    if policy_type == "ranking":
        policy = RobustGraspingPolicy(policy_config)
    else:
        policy = CrossEntropyRobustGraspingPolicy(policy_config)

    # Query policy and time the planning call.
    policy_start = time.time()
    action = policy(state)
    logger.info("Planning took %.3f sec" % (time.time() - policy_start))

    # Vis final grasp.
    if policy_config["vis"]["final_grasp"]:
        vis.figure(size=(10, 10))
        vis.imshow(rgbd_im.depth,
                   vmin=policy_config["vis"]["vmin"],
                   vmax=policy_config["vis"]["vmax"])
        vis.grasp(action.grasp, scale=2.5, show_center=False, show_axis=True)
        vis.title("Planned grasp on depth (Q=%.3f)" % (action.q_value))
        vis.show()

    # Get grasp pose in the camera frame, approaching along the world -z axis.
    T_grasp_camera = action.grasp.pose(
        grasp_approach_dir=-T_camera_world.inverse().z_axis)
    grasp_pose_msg = T_grasp_camera.pose_msg
    # TODO: Control to reach the grasp pose.
================================================
FILE: gqcnn/__init__.py
================================================
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
# Public API of the `gqcnn` package: model/trainer factories, grasping
# policies, analysis and hyperparameter-search entry points, and the
# exceptions raised by grasp sampling.
from .model import get_gqcnn_model, get_fc_gqcnn_model
from .training import get_gqcnn_trainer
from .grasping import (RobustGraspingPolicy, UniformRandomGraspingPolicy,
                       CrossEntropyRobustGraspingPolicy, RgbdImageState,
                       FullyConvolutionalGraspingPolicyParallelJaw,
                       FullyConvolutionalGraspingPolicySuction)
from .analysis import GQCNNAnalyzer
from .search import GQCNNSearch
from .utils import NoValidGraspsException, NoAntipodalPairsFoundException

# Names exported by `from gqcnn import *`.
__all__ = [
    "get_gqcnn_model", "get_fc_gqcnn_model", "get_gqcnn_trainer",
    "RobustGraspingPolicy", "UniformRandomGraspingPolicy",
    "CrossEntropyRobustGraspingPolicy", "RgbdImageState",
    "FullyConvolutionalGraspingPolicyParallelJaw",
    "FullyConvolutionalGraspingPolicySuction", "GQCNNAnalyzer", "GQCNNSearch",
    "NoValidGraspsException", "NoAntipodalPairsFoundException"
]
================================================
FILE: gqcnn/analysis/__init__.py
================================================
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents).
All Rights Reserved. Permission to use, copy, modify, and distribute this
software and its documentation for educational, research, and not-for-profit
purposes, without fee and without a signed licensing agreement, is hereby
granted, provided that the above copyright notice, this paragraph and the
following two paragraphs appear in all copies, modifications, and
distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
otl@berkeley.edu,
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHAN
gitextract_i8u5tpyn/
├── .flake8
├── .github/
│ └── ISSUE_TEMPLATE/
│ ├── bug-performance-issue--custom-images-.md
│ ├── bug-performance-issue--physical-robot-.md
│ ├── bug-performance-issue--replication-.md
│ ├── installation-issue.md
│ └── other-issue.md
├── .gitignore
├── .style.yapf
├── .travis.yml
├── CMakeLists.txt
├── LICENSE
├── README.md
├── cfg/
│ ├── examples/
│ │ ├── antipodal_grasp_sampling.yaml
│ │ ├── fc_gqcnn_pj.yaml
│ │ ├── fc_gqcnn_suction.yaml
│ │ ├── gqcnn_pj.yaml
│ │ ├── gqcnn_suction.yaml
│ │ ├── replication/
│ │ │ ├── dex-net_2.0.yaml
│ │ │ ├── dex-net_2.1.yaml
│ │ │ ├── dex-net_3.0.yaml
│ │ │ ├── dex-net_4.0_fc_pj.yaml
│ │ │ ├── dex-net_4.0_fc_suction.yaml
│ │ │ ├── dex-net_4.0_pj.yaml
│ │ │ └── dex-net_4.0_suction.yaml
│ │ └── ros/
│ │ ├── fc_gqcnn_pj.yaml
│ │ ├── fc_gqcnn_suction.yaml
│ │ ├── gqcnn_pj.yaml
│ │ └── gqcnn_suction.yaml
│ ├── finetune.yaml
│ ├── finetune_dex-net_4.0_pj.yaml
│ ├── finetune_dex-net_4.0_suction.yaml
│ ├── finetune_example_pj.yaml
│ ├── finetune_example_suction.yaml
│ ├── hyperparam_search/
│ │ ├── train_dex-net_4.0_fc_suction_hyperparam_search.yaml
│ │ └── train_hyperparam_search.yaml
│ ├── tools/
│ │ ├── analyze_gqcnn_performance.yaml
│ │ └── run_policy.yaml
│ ├── train.yaml
│ ├── train_dex-net_2.0.yaml
│ ├── train_dex-net_3.0.yaml
│ ├── train_dex-net_4.0_fc_pj.yaml
│ ├── train_dex-net_4.0_fc_suction.yaml
│ ├── train_dex-net_4.0_pj.yaml
│ ├── train_dex-net_4.0_suction.yaml
│ ├── train_example_pj.yaml
│ ├── train_example_suction.yaml
│ └── train_fc.yaml
├── ci/
│ └── travis/
│ └── format.sh
├── data/
│ ├── calib/
│ │ ├── phoxi/
│ │ │ ├── phoxi.intr
│ │ │ └── phoxi_to_world.tf
│ │ └── primesense/
│ │ ├── primesense.intr
│ │ └── primesense.tf
│ └── examples/
│ ├── clutter/
│ │ ├── phoxi/
│ │ │ ├── dex-net_4.0/
│ │ │ │ ├── depth_0.npy
│ │ │ │ ├── depth_1.npy
│ │ │ │ ├── depth_2.npy
│ │ │ │ ├── depth_3.npy
│ │ │ │ └── depth_4.npy
│ │ │ └── fcgqcnn/
│ │ │ ├── depth_0.npy
│ │ │ ├── depth_1.npy
│ │ │ ├── depth_2.npy
│ │ │ ├── depth_3.npy
│ │ │ └── depth_4.npy
│ │ └── primesense/
│ │ ├── depth_0.npy
│ │ ├── depth_1.npy
│ │ ├── depth_2.npy
│ │ ├── depth_3.npy
│ │ └── depth_4.npy
│ └── single_object/
│ └── primesense/
│ ├── depth_0.npy
│ ├── depth_1.npy
│ ├── depth_2.npy
│ ├── depth_3.npy
│ ├── depth_4.npy
│ ├── depth_5.npy
│ ├── depth_6.npy
│ ├── depth_7.npy
│ ├── depth_8.npy
│ └── depth_9.npy
├── docker/
│ ├── cpu/
│ │ └── Dockerfile
│ └── gpu/
│ └── Dockerfile
├── docs/
│ ├── Makefile
│ ├── gh_deploy.sh
│ ├── make.bat
│ └── source/
│ ├── api/
│ │ ├── analysis.rst
│ │ ├── gqcnn.rst
│ │ ├── policies.rst
│ │ └── training.rst
│ ├── benchmarks/
│ │ └── benchmarks.rst
│ ├── conf.py
│ ├── index.rst
│ ├── info/
│ │ └── info.rst
│ ├── install/
│ │ └── install.rst
│ ├── license/
│ │ └── license.rst
│ ├── replication/
│ │ └── replication.rst
│ └── tutorials/
│ ├── analysis.rst
│ ├── planning.rst
│ ├── training.rst
│ └── tutorial.rst
├── examples/
│ ├── antipodal_grasp_sampling.py
│ ├── policy.py
│ ├── policy_ros.py
│ └── policy_with_image_proc.py
├── gqcnn/
│ ├── __init__.py
│ ├── analysis/
│ │ ├── __init__.py
│ │ └── analyzer.py
│ ├── grasping/
│ │ ├── __init__.py
│ │ ├── actions.py
│ │ ├── constraint_fn.py
│ │ ├── grasp.py
│ │ ├── grasp_quality_function.py
│ │ ├── image_grasp_sampler.py
│ │ └── policy/
│ │ ├── __init__.py
│ │ ├── enums.py
│ │ ├── fc_policy.py
│ │ └── policy.py
│ ├── model/
│ │ ├── __init__.py
│ │ └── tf/
│ │ ├── __init__.py
│ │ ├── fc_network_tf.py
│ │ └── network_tf.py
│ ├── search/
│ │ ├── __init__.py
│ │ ├── enums.py
│ │ ├── resource_manager.py
│ │ ├── search.py
│ │ ├── trial.py
│ │ └── utils.py
│ ├── training/
│ │ ├── __init__.py
│ │ └── tf/
│ │ ├── __init__.py
│ │ └── trainer_tf.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── enums.py
│ │ ├── policy_exceptions.py
│ │ ├── train_stats_logger.py
│ │ └── utils.py
│ └── version.py
├── launch/
│ └── grasp_planning_service.launch
├── msg/
│ ├── Action.msg
│ ├── BoundingBox.msg
│ ├── GQCNNGrasp.msg
│ └── Observation.msg
├── package.xml
├── post-checkout
├── requirements/
│ ├── cpu_requirements.txt
│ ├── docs_requirements.txt
│ └── gpu_requirements.txt
├── ros_nodes/
│ └── grasp_planner_node.py
├── scripts/
│ ├── docker/
│ │ └── build-docker.sh
│ ├── policies/
│ │ ├── run_all_dex-net_2.0_examples.sh
│ │ ├── run_all_dex-net_2.1_examples.sh
│ │ ├── run_all_dex-net_3.0_examples.sh
│ │ ├── run_all_dex-net_4.0_fc_pj_examples.sh
│ │ ├── run_all_dex-net_4.0_fc_suction_examples.sh
│ │ ├── run_all_dex-net_4.0_pj_examples.sh
│ │ └── run_all_dex-net_4.0_suction_examples.sh
│ └── training/
│ ├── train_dex-net_2.0.sh
│ ├── train_dex-net_2.1.sh
│ ├── train_dex-net_3.0.sh
│ ├── train_dex-net_4.0_fc_pj.sh
│ ├── train_dex-net_4.0_fc_suction.sh
│ ├── train_dex-net_4.0_pj.sh
│ └── train_dex-net_4.0_suction.sh
├── setup.py
├── srv/
│ ├── GQCNNGraspPlanner.srv
│ ├── GQCNNGraspPlannerBoundingBox.srv
│ ├── GQCNNGraspPlannerFull.srv
│ └── GQCNNGraspPlannerSegmask.srv
└── tools/
├── analyze_gqcnn_performance.py
├── finetune.py
├── hyperparam_search.py
├── plot_training_losses.py
├── run_policy.py
└── train.py
SYMBOL INDEX (426 symbols across 25 files)
FILE: gqcnn/analysis/analyzer.py
class GQCNNAnalyzer (line 53) | class GQCNNAnalyzer(object):
method __init__ (line 56) | def __init__(self, config, verbose=True, plot_backend="pdf"):
method _parse_config (line 78) | def _parse_config(self):
method analyze (line 88) | def analyze(self, model_dir, output_dir, dataset_config=None):
method _plot_grasp (line 142) | def _plot_grasp(self,
method _run_prediction_single_model (line 189) | def _run_prediction_single_model(self, model_dir, model_output_dir,
method _plot (line 576) | def _plot(self, model_dir, model_output_dir, train_result, val_result):
FILE: gqcnn/grasping/actions.py
class Action (line 40) | class Action(object):
method __init__ (line 53) | def __init__(self, q_value=0.0, id=-1, metadata={}):
method q_value (line 59) | def q_value(self):
method id (line 63) | def id(self):
method metadata (line 67) | def metadata(self):
class NoAction (line 71) | class NoAction(Action):
class GraspAction3D (line 76) | class GraspAction3D(ABC, Action):
method __init__ (line 85) | def __init__(self, T_grasp_world, q_value=0.0, id=-1, metadata={}):
method project (line 90) | def project(self, camera_intr, T_camera_world):
class ParallelJawGrasp3D (line 94) | class ParallelJawGrasp3D(GraspAction3D):
method project (line 103) | def project(self, camera_intr, T_camera_world, gripper_width=0.05):
class SuctionGrasp3D (line 131) | class SuctionGrasp3D(GraspAction3D):
method project (line 140) | def project(self, camera_intr, T_camera_world):
class MultiSuctionGrasp3D (line 156) | class MultiSuctionGrasp3D(GraspAction3D):
method project (line 165) | def project(self, camera_intr, T_camera_world):
FILE: gqcnn/grasping/constraint_fn.py
class GraspConstraintFn (line 36) | class GraspConstraintFn(ABC):
method __init__ (line 39) | def __init__(self, config):
method __call__ (line 43) | def __call__(self, grasp):
method satisfies_constraints (line 59) | def satisfies_constraints(self, grasp):
class DiscreteApproachGraspConstraintFn (line 75) | class DiscreteApproachGraspConstraintFn(GraspConstraintFn):
method __init__ (line 79) | def __init__(self, config):
method satisfies_constraints (line 88) | def satisfies_constraints(self, grasp):
class GraspConstraintFnFactory (line 119) | class GraspConstraintFnFactory(object):
method constraint_fn (line 122) | def constraint_fn(fn_type, config):
FILE: gqcnn/grasping/grasp.py
class Grasp2D (line 36) | class Grasp2D(object):
method __init__ (line 57) | def __init__(self,
method axis (line 90) | def axis(self):
method approach_axis (line 95) | def approach_axis(self):
method approach_angle (line 99) | def approach_angle(self):
method frame (line 105) | def frame(self):
method width_px (line 113) | def width_px(self):
method endpoints (line 129) | def endpoints(self):
method feature_vec (line 136) | def feature_vec(self):
method from_feature_vec (line 146) | def from_feature_vec(v, width=0.0, camera_intr=None):
method pose (line 180) | def pose(self, grasp_approach_dir=None):
method image_dist (line 236) | def image_dist(g1, g2, alpha=1.0):
class SuctionPoint2D (line 264) | class SuctionPoint2D(object):
method __init__ (line 279) | def __init__(self, center, axis=None, depth=1.0, camera_intr=None):
method frame (line 310) | def frame(self):
method angle (line 317) | def angle(self):
method approach_angle (line 331) | def approach_angle(self):
method approach_axis (line 338) | def approach_axis(self):
method feature_vec (line 342) | def feature_vec(self):
method from_feature_vec (line 352) | def from_feature_vec(v, camera_intr=None, depth=None, axis=None):
method pose (line 390) | def pose(self):
method image_dist (line 431) | def image_dist(g1, g2, alpha=1.0):
class MultiSuctionPoint2D (line 460) | class MultiSuctionPoint2D(object):
method __init__ (line 473) | def __init__(self, pose, camera_intr=None):
method pose (line 493) | def pose(self):
method frame (line 497) | def frame(self):
method center (line 504) | def center(self):
method axis (line 511) | def axis(self):
method approach_axis (line 515) | def approach_axis(self):
method approach_angle (line 519) | def approach_angle(self):
method angle (line 526) | def angle(self):
method depth (line 535) | def depth(self):
method orientation (line 539) | def orientation(self):
method feature_vec (line 554) | def feature_vec(self):
method from_feature_vec (line 566) | def from_feature_vec(v,
method image_dist (line 625) | def image_dist(g1, g2, alpha=1.0):
FILE: gqcnn/grasping/grasp_quality_function.py
class GraspQualityFunction (line 47) | class GraspQualityFunction(ABC):
method __init__ (line 50) | def __init__(self):
method __call__ (line 54) | def __call__(self, state, actions, params=None):
method quality (line 59) | def quality(self, state, actions, params=None):
class ZeroGraspQualityFunction (line 80) | class ZeroGraspQualityFunction(object):
method quality (line 83) | def quality(self, state, actions, params=None):
class ParallelJawQualityFunction (line 104) | class ParallelJawQualityFunction(GraspQualityFunction):
method __init__ (line 108) | def __init__(self, config):
method friction_cone_angle (line 115) | def friction_cone_angle(self, action):
method force_closure (line 131) | def force_closure(self, action):
class ComForceClosureParallelJawQualityFunction (line 137) | class ComForceClosureParallelJawQualityFunction(ParallelJawQualityFuncti...
method __init__ (line 141) | def __init__(self, config):
method quality (line 146) | def quality(self, state, actions, params=None):
class SuctionQualityFunction (line 209) | class SuctionQualityFunction(GraspQualityFunction):
method __init__ (line 213) | def __init__(self, config):
method _points_in_window (line 220) | def _points_in_window(self, point_cloud_image, action, segmask=None):
method _points_to_matrices (line 239) | def _points_to_matrices(self, points):
method _best_fit_plane (line 247) | def _best_fit_plane(self, A, b):
method _sum_of_squared_residuals (line 256) | def _sum_of_squared_residuals(self, w, A, z):
class BestFitPlanaritySuctionQualityFunction (line 261) | class BestFitPlanaritySuctionQualityFunction(SuctionQualityFunction):
method __init__ (line 264) | def __init__(self, config):
method quality (line 267) | def quality(self, state, actions, params=None):
class ApproachPlanaritySuctionQualityFunction (line 349) | class ApproachPlanaritySuctionQualityFunction(SuctionQualityFunction):
method __init__ (line 352) | def __init__(self, config):
method _action_to_plane (line 356) | def _action_to_plane(self, point_cloud_image, action):
method quality (line 366) | def quality(self, state, actions, params=None):
class DiscApproachPlanaritySuctionQualityFunction (line 455) | class DiscApproachPlanaritySuctionQualityFunction(SuctionQualityFunction):
method __init__ (line 458) | def __init__(self, config):
method _action_to_plane (line 463) | def _action_to_plane(self, point_cloud_image, action):
method _points_in_window (line 473) | def _points_in_window(self, point_cloud_image, action, segmask=None):
method quality (line 509) | def quality(self, state, actions, params=None):
class ComApproachPlanaritySuctionQualityFunction (line 599) | class ComApproachPlanaritySuctionQualityFunction(
method __init__ (line 604) | def __init__(self, config):
method quality (line 610) | def quality(self, state, actions, params=None):
class ComDiscApproachPlanaritySuctionQualityFunction (line 663) | class ComDiscApproachPlanaritySuctionQualityFunction(
method __init__ (line 670) | def __init__(self, config):
method quality (line 680) | def quality(self, state, actions, params=None):
class GaussianCurvatureSuctionQualityFunction (line 744) | class GaussianCurvatureSuctionQualityFunction(SuctionQualityFunction):
method __init__ (line 747) | def __init__(self, config):
method _points_to_matrices (line 751) | def _points_to_matrices(self, points):
method quality (line 761) | def quality(self, state, actions, params=None):
class DiscCurvatureSuctionQualityFunction (line 817) | class DiscCurvatureSuctionQualityFunction(
method __init__ (line 820) | def __init__(self, config):
method _points_in_window (line 825) | def _points_in_window(self, point_cloud_image, action, segmask=None):
class ComDiscCurvatureSuctionQualityFunction (line 850) | class ComDiscCurvatureSuctionQualityFunction(
method __init__ (line 853) | def __init__(self, config):
method quality (line 859) | def quality(self, state, actions, params=None):
class GQCnnQualityFunction (line 915) | class GQCnnQualityFunction(GraspQualityFunction):
method __init__ (line 917) | def __init__(self, config):
method __del__ (line 933) | def __del__(self):
method gqcnn (line 941) | def gqcnn(self):
method gqcnn_recep_height (line 946) | def gqcnn_recep_height(self):
method gqcnn_recep_width (line 950) | def gqcnn_recep_width(self):
method gqcnn_stride (line 954) | def gqcnn_stride(self):
method config (line 958) | def config(self):
method grasps_to_tensors (line 962) | def grasps_to_tensors(self, grasps, state):
method quality (line 1025) | def quality(self, state, actions, params):
class NoMagicQualityFunction (line 1072) | class NoMagicQualityFunction(GraspQualityFunction):
method __init__ (line 1074) | def __init__(self, config):
method gqcnn (line 1105) | def gqcnn(self):
method config (line 1110) | def config(self):
method grasps_to_tensors (line 1114) | def grasps_to_tensors(self, grasps, state):
method quality (line 1181) | def quality(self, state, actions, params):
class FCGQCnnQualityFunction (line 1236) | class FCGQCnnQualityFunction(GraspQualityFunction):
method __init__ (line 1238) | def __init__(self, config):
method __del__ (line 1255) | def __del__(self):
method gqcnn (line 1263) | def gqcnn(self):
method config (line 1268) | def config(self):
method quality (line 1272) | def quality(self, images, depths, params=None):
class GraspQualityFunctionFactory (line 1276) | class GraspQualityFunctionFactory(object):
method quality_function (line 1280) | def quality_function(metric_type, config):
FILE: gqcnn/grasping/image_grasp_sampler.py
function force_closure (line 49) | def force_closure(p1, p2, n1, n2, mu):
class DepthSamplingMode (line 65) | class DepthSamplingMode(object):
class ImageGraspSampler (line 72) | class ImageGraspSampler(ABC):
method __init__ (line 82) | def __init__(self, config):
method sample (line 89) | def sample(self,
method _sample (line 142) | def _sample(self,
class AntipodalDepthImageGraspSampler (line 176) | class AntipodalDepthImageGraspSampler(ImageGraspSampler):
method __init__ (line 222) | def __init__(self, config, gripper_width=np.inf):
method _surface_normals (line 264) | def _surface_normals(self, depth_im, edge_pixels):
method _sample_depth (line 282) | def _sample_depth(self, min_depth, max_depth):
method _sample (line 292) | def _sample(self,
method _sample_antipodal_grasps (line 338) | def _sample_antipodal_grasps(self,
class DepthImageSuctionPointSampler (line 597) | class DepthImageSuctionPointSampler(ImageGraspSampler):
method __init__ (line 628) | def __init__(self, config):
method _sample (line 659) | def _sample(self,
method _sample_suction_points (line 703) | def _sample_suction_points(self,
class DepthImageMultiSuctionPointSampler (line 819) | class DepthImageMultiSuctionPointSampler(ImageGraspSampler):
method __init__ (line 850) | def __init__(self, config):
method _sample (line 881) | def _sample(self,
method _sample_suction_points (line 927) | def _sample_suction_points(self,
class ImageGraspSamplerFactory (line 1059) | class ImageGraspSamplerFactory(object):
method sampler (line 1063) | def sampler(sampler_type, config):
FILE: gqcnn/grasping/policy/enums.py
class SamplingMethod (line 33) | class SamplingMethod(object):
FILE: gqcnn/grasping/policy/fc_policy.py
class FullyConvolutionalGraspingPolicy (line 47) | class FullyConvolutionalGraspingPolicy(GraspingPolicy):
method __init__ (line 51) | def __init__(self, cfg, filters=None):
method _unpack_state (line 96) | def _unpack_state(self, state):
method _mask_predictions (line 102) | def _mask_predictions(self, preds, raw_segmask):
method _sample_predictions (line 126) | def _sample_predictions(self, preds, num_actions):
method _sample_predictions_flat (line 147) | def _sample_predictions_flat(self, preds_flat, num_samples):
method _get_actions (line 175) | def _get_actions(self, preds, ind, images, depths, camera_intr,
method _visualize_3d (line 181) | def _visualize_3d(self, actions, wrapped_depth_im, camera_intr,
method _visualize_affordance_map (line 187) | def _visualize_affordance_map(self,
method _visualize_2d (line 197) | def _visualize_2d(self,
method _filter (line 222) | def _filter(self, actions):
method _gen_images_and_depths (line 238) | def _gen_images_and_depths(self, depth, segmask):
method _action (line 242) | def _action(self, state, num_actions=1):
method action_set (line 327) | def action_set(self, state, num_actions):
class FullyConvolutionalGraspingPolicyParallelJaw (line 348) | class FullyConvolutionalGraspingPolicyParallelJaw(
method __init__ (line 352) | def __init__(self, cfg, filters=None):
method _sample_depths (line 371) | def _sample_depths(self, raw_depth_im, raw_seg):
method _get_actions (line 390) | def _get_actions(self, preds, ind, images, depths, camera_intr,
method _gen_images_and_depths (line 420) | def _gen_images_and_depths(self, depth, segmask):
method _visualize_3d (line 426) | def _visualize_3d(self, actions, wrapped_depth_im, camera_intr,
method _visualize_affordance_map (line 431) | def _visualize_affordance_map(self, preds, depth_im):
class FullyConvolutionalGraspingPolicySuction (line 437) | class FullyConvolutionalGraspingPolicySuction(FullyConvolutionalGrasping...
method _get_actions (line 441) | def _get_actions(self, preds, ind, images, depths, camera_intr,
method _visualize_affordance_map (line 473) | def _visualize_affordance_map(self,
method _gen_images_and_depths (line 510) | def _gen_images_and_depths(self, depth, segmask):
method _visualize_3d (line 515) | def _visualize_3d(self, actions, wrapped_depth_im, camera_intr,
FILE: gqcnn/grasping/policy/policy.py
class RgbdImageState (line 58) | class RgbdImageState(object):
method __init__ (line 61) | def __init__(self,
method save (line 87) | def save(self, save_dir):
method load (line 114) | def load(save_dir):
class GraspAction (line 151) | class GraspAction(object):
method __init__ (line 155) | def __init__(self, grasp, q_value, image=None, policy_name=None):
method save (line 173) | def save(self, save_dir):
method load (line 192) | def load(save_dir):
class Policy (line 218) | class Policy(ABC):
method __call__ (line 221) | def __call__(self, state):
method action (line 226) | def action(self, state):
class GraspingPolicy (line 231) | class GraspingPolicy(Policy):
method __init__ (line 235) | def __init__(self, config, init_sampler=True):
method config (line 308) | def config(self):
method grasp_sampler (line 319) | def grasp_sampler(self):
method grasp_quality_fn (line 330) | def grasp_quality_fn(self):
method grasp_constraint_fn (line 341) | def grasp_constraint_fn(self):
method gqcnn (line 352) | def gqcnn(self):
method set_constraint_fn (line 362) | def set_constraint_fn(self, constraint_fn):
method action (line 372) | def action(self, state):
method _action (line 409) | def _action(self, state):
method show (line 414) | def show(self, filename=None, dpi=100):
class UniformRandomGraspingPolicy (line 431) | class UniformRandomGraspingPolicy(GraspingPolicy):
method __init__ (line 434) | def __init__(self, config):
method _action (line 450) | def _action(self, state):
class RobustGraspingPolicy (line 500) | class RobustGraspingPolicy(GraspingPolicy):
method __init__ (line 505) | def __init__(self, config, filters=None):
method _parse_config (line 532) | def _parse_config(self):
method select (line 542) | def select(self, grasps, q_value):
method _action (line 590) | def _action(self, state):
class CrossEntropyRobustGraspingPolicy (line 673) | class CrossEntropyRobustGraspingPolicy(GraspingPolicy):
method __init__ (line 686) | def __init__(self, config, filters=None):
method _parse_config (line 727) | def _parse_config(self):
method select (line 771) | def select(self, grasps, q_values):
method _mask_predictions (line 822) | def _mask_predictions(self, pred_map, segmask):
method _gen_grasp_affordance_map (line 833) | def _gen_grasp_affordance_map(self, state, stride=1):
method _plot_grasp_affordance_map (line 888) | def _plot_grasp_affordance_map(self,
method action_set (line 936) | def action_set(self, state):
method _action (line 1269) | def _action(self, state):
class QFunctionRobustGraspingPolicy (line 1326) | class QFunctionRobustGraspingPolicy(CrossEntropyRobustGraspingPolicy):
method __init__ (line 1365) | def __init__(self, config):
method _parse_config (line 1370) | def _parse_config(self):
method _setup_gqcnn (line 1377) | def _setup_gqcnn(self):
class EpsilonGreedyQFunctionRobustGraspingPolicy (line 1396) | class EpsilonGreedyQFunctionRobustGraspingPolicy(QFunctionRobustGrasping...
method __init__ (line 1412) | def __init__(self, config):
method _parse_config (line 1416) | def _parse_config(self):
method epsilon (line 1421) | def epsilon(self):
method epsilon (line 1425) | def epsilon(self, val):
method greedy_action (line 1428) | def greedy_action(self, state):
method _action (line 1444) | def _action(self, state):
class CompositeGraspingPolicy (line 1522) | class CompositeGraspingPolicy(Policy):
method __init__ (line 1531) | def __init__(self, policies):
method policies (line 1538) | def policies(self):
method subpolicy (line 1541) | def subpolicy(self, name):
method set_constraint_fn (line 1544) | def set_constraint_fn(self, constraint_fn):
class PriorityCompositeGraspingPolicy (line 1549) | class PriorityCompositeGraspingPolicy(CompositeGraspingPolicy):
method __init__ (line 1551) | def __init__(self, policies, priority_list):
method priority_list (line 1562) | def priority_list(self):
method action (line 1565) | def action(self, state, policy_subset=None, min_q_value=-1.0):
method action_set (line 1590) | def action_set(self, state, policy_subset=None, min_q_value=-1.0):
class GreedyCompositeGraspingPolicy (line 1618) | class GreedyCompositeGraspingPolicy(CompositeGraspingPolicy):
method __init__ (line 1620) | def __init__(self, policies):
method action (line 1623) | def action(self, state, policy_subset=None, min_q_value=-1.0):
method action_set (line 1644) | def action_set(self, state, policy_subset=None, min_q_value=-1.0):
FILE: gqcnn/model/__init__.py
function get_gqcnn_model (line 33) | def get_gqcnn_model(backend="tf", verbose=True):
function get_fc_gqcnn_model (line 63) | def get_fc_gqcnn_model(backend="tf", verbose=True):
FILE: gqcnn/model/tf/fc_network_tf.py
class FCGQCNNTF (line 41) | class FCGQCNNTF(GQCNNTF):
method __init__ (line 50) | def __init__(self, gqcnn_config, fc_config, verbose=True, log_file=None):
method load (line 82) | def load(model_dir, fc_config, log_file=None):
method _parse_config (line 119) | def _parse_config(self, cfg):
method _pack (line 124) | def _pack(self, dim_h, dim_w, data, vector=False):
method _build_fully_conv_layer (line 144) | def _build_fully_conv_layer(self,
method _build_fully_conv_merge_layer (line 193) | def _build_fully_conv_merge_layer(self, input_node_im, input_node_pose,
method _build_im_stream (line 249) | def _build_im_stream(self,
method _build_merge_stream (line 326) | def _build_merge_stream(self, input_stream_1, input_stream_2, fan_in_1,
FILE: gqcnn/model/tf/network_tf.py
class GQCNNWeights (line 50) | class GQCNNWeights(object):
method __init__ (line 53) | def __init__(self):
class GQCNNTF (line 57) | class GQCNNTF(object):
method __init__ (line 60) | def __init__(self, gqcnn_config, verbose=True, log_file=None):
method load (line 84) | def load(model_dir, verbose=True, log_file=None):
method init_mean_and_std (line 225) | def init_mean_and_std(self, model_dir):
method set_base_network (line 284) | def set_base_network(self, model_dir):
method init_weights_file (line 360) | def init_weights_file(self, ckpt_file):
method _parse_config (line 388) | def _parse_config(self, gqcnn_config):
method initialize_network (line 494) | def initialize_network(self,
method open_session (line 556) | def open_session(self):
method close_session (line 572) | def close_session(self):
method __del__ (line 582) | def __del__(self):
method input_depth_mode (line 589) | def input_depth_mode(self):
method batch_size (line 593) | def batch_size(self):
method im_height (line 597) | def im_height(self):
method im_width (line 601) | def im_width(self):
method num_channels (line 605) | def num_channels(self):
method pose_dim (line 609) | def pose_dim(self):
method gripper_mode (line 613) | def gripper_mode(self):
method input_im_node (line 617) | def input_im_node(self):
method input_pose_node (line 621) | def input_pose_node(self):
method input_drop_rate_node (line 625) | def input_drop_rate_node(self):
method output (line 629) | def output(self):
method weights (line 633) | def weights(self):
method tf_graph (line 637) | def tf_graph(self):
method sess (line 641) | def sess(self):
method angular_bins (line 645) | def angular_bins(self):
method max_angle (line 649) | def max_angle(self):
method stride (line 653) | def stride(self):
method filters (line 661) | def filters(self):
method set_batch_size (line 687) | def set_batch_size(self, batch_size):
method set_im_mean (line 697) | def set_im_mean(self, im_mean):
method get_im_mean (line 707) | def get_im_mean(self):
method set_im_std (line 717) | def set_im_std(self, im_std):
method get_im_std (line 728) | def get_im_std(self):
method set_pose_mean (line 739) | def set_pose_mean(self, pose_mean):
method get_pose_mean (line 749) | def get_pose_mean(self):
method set_pose_std (line 760) | def set_pose_std(self, pose_std):
method get_pose_std (line 771) | def get_pose_std(self):
method set_im_depth_sub_mean (line 782) | def set_im_depth_sub_mean(self, im_depth_sub_mean):
method set_im_depth_sub_std (line 793) | def set_im_depth_sub_std(self, im_depth_sub_std):
method add_softmax_to_output (line 804) | def add_softmax_to_output(self):
method add_sigmoid_to_output (line 820) | def add_sigmoid_to_output(self):
method update_batch_size (line 826) | def update_batch_size(self, batch_size):
method _predict (line 836) | def _predict(self, image_arr, pose_arr, verbose=False):
method predict (line 919) | def predict(self, image_arr, pose_arr, verbose=False):
method featurize (line 935) | def featurize(self,
method _leaky_relu (line 1023) | def _leaky_relu(self, x, alpha=.1):
method _build_conv_layer (line 1026) | def _build_conv_layer(self,
method _build_fc_layer (line 1100) | def _build_fc_layer(self,
method _build_pc_layer (line 1153) | def _build_pc_layer(self, input_node, fan_in, out_size, name):
method _build_fc_merge (line 1186) | def _build_fc_merge(self, input_fc_node_1, input_fc_node_2, fan_in_1,
method _build_im_stream (line 1227) | def _build_im_stream(self,
method _build_pose_stream (line 1305) | def _build_pose_stream(self, input_node, fan_in, layers):
method _build_merge_stream (line 1328) | def _build_merge_stream(self, input_stream_1, input_stream_2, fan_in_1,
method _build_network (line 1377) | def _build_network(self, input_im_node, input_pose_node,
FILE: gqcnn/search/enums.py
class TrialConstants (line 33) | class TrialConstants(object):
class SearchConstants (line 44) | class SearchConstants(object):
FILE: gqcnn/search/resource_manager.py
class ResourceManager (line 50) | class ResourceManager(object):
method __init__ (line 52) | def __init__(self,
method cpu_cores (line 93) | def cpu_cores(self):
method _get_cpu_load (line 96) | def _get_cpu_load(self):
method _get_gpu_stats (line 105) | def _get_gpu_stats(self):
method _build_gpu_list (line 130) | def _build_gpu_list(self, max_possible_trials_per_device):
method num_trials_to_schedule (line 141) | def num_trials_to_schedule(self, num_pending_trials):
FILE: gqcnn/search/search.py
class GQCNNSearch (line 52) | class GQCNNSearch(object):
method __init__ (line 54) | def __init__(self,
method search (line 139) | def search(self):
FILE: gqcnn/search/trial.py
class TrialStatus (line 45) | class TrialStatus:
class GQCNNTrialWithAnalysis (line 52) | class GQCNNTrialWithAnalysis(ABC):
method __init__ (line 54) | def __init__(self, analysis_cfg, train_cfg, dataset_dir, split_name,
method _build_train_progress_dict (line 70) | def _build_train_progress_dict(self):
method _build_trial_progress_dict (line 77) | def _build_trial_progress_dict(self):
method _run (line 82) | def _run(self, trainer):
method _run_trial (line 85) | def _run_trial(self,
method finished (line 150) | def finished(self):
method errored_out (line 154) | def errored_out(self):
method error_msg (line 158) | def error_msg(self):
method training_status (line 162) | def training_status(self):
method begin (line 165) | def begin(self, gpu_avail="", cpu_cores_avail=[]):
method __str__ (line 175) | def __str__(self):
class GQCNNTrainingAndAnalysisTrial (line 201) | class GQCNNTrainingAndAnalysisTrial(GQCNNTrialWithAnalysis):
method _run (line 203) | def _run(self, trainer):
class GQCNNFineTuningAndAnalysisTrial (line 207) | class GQCNNFineTuningAndAnalysisTrial(GQCNNTrialWithAnalysis):
method __init__ (line 209) | def __init__(self, analysis_cfg, train_cfg, dataset_dir, base_model_dir,
method _run (line 217) | def _run(self, trainer):
FILE: gqcnn/search/utils.py
function get_fields_to_search_over (line 37) | def get_fields_to_search_over(train_config, prev_keys=[]):
function update_dict (line 60) | def update_dict(dict1, dict2):
function get_nested_key (line 68) | def get_nested_key(cfg, key):
function set_nested_key (line 75) | def set_nested_key(cfg, key, val):
function gen_config_summary_dict (line 82) | def gen_config_summary_dict(hyperparam_combination):
function parse_master_train_config (line 89) | def parse_master_train_config(train_config):
function gen_timestamp (line 154) | def gen_timestamp():
function gen_trial_params_train (line 158) | def gen_trial_params_train(master_train_configs, datasets, split_names):
function gen_trial_params_finetune (line 172) | def gen_trial_params_finetune(master_train_configs, datasets, base_models,
function gen_trial_params (line 188) | def gen_trial_params(master_train_configs,
function log_trial_status (line 200) | def log_trial_status(trials):
FILE: gqcnn/training/__init__.py
function get_gqcnn_trainer (line 32) | def get_gqcnn_trainer(backend="tf"):
FILE: gqcnn/training/tf/trainer_tf.py
class GQCNNTrainerTF (line 64) | class GQCNNTrainerTF(object):
method __init__ (line 67) | def __init__(self,
method _create_loss (line 126) | def _create_loss(self):
method _create_optimizer (line 164) | def _create_optimizer(self, loss, batch, var_list, learning_rate):
method _launch_tensorboard (line 210) | def _launch_tensorboard(self):
method _close_tensorboard (line 223) | def _close_tensorboard(self):
method train (line 228) | def train(self):
method _train (line 233) | def _train(self):
method finetune (line 250) | def finetune(self, base_model_dir):
method _finetune (line 261) | def _finetune(self, base_model_dir):
method _optimize_weights (line 286) | def _optimize_weights(self, finetune=False):
method _compute_data_metrics (line 618) | def _compute_data_metrics(self):
method _compute_split_indices (line 968) | def _compute_split_indices(self):
method _setup_output_dirs (line 1005) | def _setup_output_dirs(self):
method _save_configs (line 1027) | def _save_configs(self):
method _read_training_params (line 1054) | def _read_training_params(self):
method _setup_denoising_and_synthetic (line 1157) | def _setup_denoising_and_synthetic(self):
method _open_dataset (line 1174) | def _open_dataset(self):
method _compute_data_params (line 1194) | def _compute_data_params(self):
method _setup_tensorflow (line 1236) | def _setup_tensorflow(self):
method _setup_summaries (line 1279) | def _setup_summaries(self):
method _cleanup (line 1313) | def _cleanup(self):
method _flush_prefetch_queue (line 1335) | def _flush_prefetch_queue(self):
method _setup (line 1341) | def _setup(self):
method _load_and_enqueue (line 1374) | def _load_and_enqueue(self, seed):
method _distort (line 1557) | def _distort(self, image_arr, pose_arr):
method _error_rate_in_batches (line 1619) | def _error_rate_in_batches(self, num_files_eval=None, validation_set=T...
FILE: gqcnn/utils/enums.py
class GeneralConstants (line 37) | class GeneralConstants(object):
class ImageMode (line 49) | class ImageMode(object):
class TrainingMode (line 61) | class TrainingMode(object):
class GripperMode (line 67) | class GripperMode(object):
class InputDepthMode (line 76) | class InputDepthMode(object):
class GQCNNTrainingStatus (line 83) | class GQCNNTrainingStatus(object):
class GQCNNFilenames (line 90) | class GQCNNFilenames(object):
FILE: gqcnn/utils/policy_exceptions.py
class NoValidGraspsException (line 33) | class NoValidGraspsException(Exception):
method __init__ (line 37) | def __init__(self,
class NoAntipodalPairsFoundException (line 47) | class NoAntipodalPairsFoundException(Exception):
FILE: gqcnn/utils/train_stats_logger.py
class TrainStatsLogger (line 38) | class TrainStatsLogger(object):
method __init__ (line 41) | def __init__(self, experiment_dir):
method log (line 60) | def log(self):
method update (line 86) | def update(self, **stats):
FILE: gqcnn/utils/utils.py
function is_py2 (line 45) | def is_py2():
function set_cuda_visible_devices (line 49) | def set_cuda_visible_devices(gpu_list):
function pose_dim (line 74) | def pose_dim(gripper_mode):
function read_pose_data (line 104) | def read_pose_data(pose_arr, gripper_mode):
function reduce_shape (line 150) | def reduce_shape(shape):
function weight_name_to_layer_name (line 157) | def weight_name_to_layer_name(weight_name):
function imresize (line 175) | def imresize(image, size, interp="nearest"):
FILE: ros_nodes/grasp_planner_node.py
class GraspPlanner (line 58) | class GraspPlanner(object):
method __init__ (line 60) | def __init__(self, cfg, cv_bridge, grasping_policy, grasp_pose_publish...
method read_images (line 100) | def read_images(self, req):
method plan_grasp (line 156) | def plan_grasp(self, req):
method plan_grasp_bb (line 167) | def plan_grasp_bb(self, req):
method plan_grasp_segmask (line 181) | def plan_grasp_segmask(self, req):
method _plan_grasp (line 211) | def _plan_grasp(self,
method execute_policy (line 306) | def execute_policy(self, rgbd_image_state, grasping_policy,
FILE: setup.py
function get_tf_dep (line 47) | def get_tf_dep():
class DevelopCmd (line 69) | class DevelopCmd(develop):
method initialize_options (line 75) | def initialize_options(self):
method finalize_options (line 81) | def finalize_options(self):
method run (line 84) | def run(self):
class InstallCmd (line 104) | class InstallCmd(install, object):
method initialize_options (line 110) | def initialize_options(self):
method finalize_options (line 116) | def finalize_options(self):
method run (line 119) | def run(self):
Condensed preview — 170 files, each showing its path, character count, and a content snippet. Download the .json file or copy it to your clipboard to get the full structured content (809K chars).
[
{
"path": ".flake8",
"chars": 144,
"preview": "[flake8]\nignore = C408, E121, E123, E126, E226, E24, E704, W503, W504, W605\nexclude = docs/source/conf.py\ninline-quotes "
},
{
"path": ".github/ISSUE_TEMPLATE/bug-performance-issue--custom-images-.md",
"chars": 941,
"preview": "---\nname: Bug/Performance Issue [Custom Images]\nabout: You are testing your own images in software.\ntitle: 'Issue: Bug/P"
},
{
"path": ".github/ISSUE_TEMPLATE/bug-performance-issue--physical-robot-.md",
"chars": 1116,
"preview": "---\nname: Bug/Performance Issue [Physical Robot]\nabout: You are running on a physical robot.\ntitle: 'Issue: Bug/Performa"
},
{
"path": ".github/ISSUE_TEMPLATE/bug-performance-issue--replication-.md",
"chars": 891,
"preview": "---\nname: Bug/Performance Issue [Replication]\nabout: You are replicating training or the policy with the provided images"
},
{
"path": ".github/ISSUE_TEMPLATE/installation-issue.md",
"chars": 609,
"preview": "---\nname: Installation Issue\nabout: You are experiencing installation issues.\ntitle: 'Issue: Installation Issue'\nlabels:"
},
{
"path": ".github/ISSUE_TEMPLATE/other-issue.md",
"chars": 573,
"preview": "---\nname: Other Issue\nabout: Your issue doesn't fall into the above categories.\ntitle: 'Issue: Other Issue'\nlabels: ''\na"
},
{
"path": ".gitignore",
"chars": 1046,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
},
{
"path": ".style.yapf",
"chars": 90,
"preview": "[style]\nbased_on_style=pep8\nallow_split_before_dict_value=False\njoin_multiple_lines=False\n"
},
{
"path": ".travis.yml",
"chars": 2532,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "CMakeLists.txt",
"chars": 7557,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "LICENSE",
"chars": 1308,
"preview": "Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.\nPermission to use, copy, mo"
},
{
"path": "README.md",
"chars": 1439,
"preview": "## Note: Python 2.x support has officially been dropped.\n\n# Berkeley AUTOLAB's GQCNN Package\n<p>\n <a href=\"https://tra"
},
{
"path": "cfg/examples/antipodal_grasp_sampling.yaml",
"chars": 782,
"preview": "# policy params\npolicy:\n # general params\n deterministic: 1\n gripper_width: 0.05\n\n # sampling params\n sampling:\n "
},
{
"path": "cfg/examples/fc_gqcnn_pj.yaml",
"chars": 678,
"preview": "policy:\n type: fully_conv_pj\n\n sampling_method: top_k\n num_depth_bins: 16\n gripper_width: 0.05\n gqcnn_stride: 4\n g"
},
{
"path": "cfg/examples/fc_gqcnn_suction.yaml",
"chars": 648,
"preview": "policy:\n type: fully_conv_suction\n\n sampling_method: top_k\n gqcnn_stride: 4\n gqcnn_recep_h: 96\n gqcnn_recep_w: 96\n\n"
},
{
"path": "cfg/examples/gqcnn_pj.yaml",
"chars": 1158,
"preview": "policy:\n # optimization params\n num_seed_samples: 128\n num_gmm_samples: 64\n num_iters: 3\n gmm_refit_p: 0.25\n gmm_c"
},
{
"path": "cfg/examples/gqcnn_suction.yaml",
"chars": 1554,
"preview": "policy:\n # optimization params\n num_seed_samples: 200\n num_gmm_samples: 50\n num_iters: 3\n gmm_refit_p: 0.25\n gmm_c"
},
{
"path": "cfg/examples/replication/dex-net_2.0.yaml",
"chars": 1171,
"preview": "# policy params\npolicy:\n # optimization params\n num_seed_samples: 250\n num_gmm_samples: 50\n num_iters: 3\n gmm_refit"
},
{
"path": "cfg/examples/replication/dex-net_2.1.yaml",
"chars": 1171,
"preview": "# policy params\npolicy:\n # optimization params\n num_seed_samples: 250\n num_gmm_samples: 50\n num_iters: 3\n gmm_refit"
},
{
"path": "cfg/examples/replication/dex-net_3.0.yaml",
"chars": 1011,
"preview": "# policy params\npolicy:\n # optimization params\n num_seed_samples: 250\n num_gmm_samples: 50\n num_iters: 3\n gmm_refit"
},
{
"path": "cfg/examples/replication/dex-net_4.0_fc_pj.yaml",
"chars": 669,
"preview": "policy:\n type: fully_conv_pj\n\n sampling_method: top_k\n num_depth_bins: 16\n gripper_width: 0.05\n gqcnn_stride: 4\n g"
},
{
"path": "cfg/examples/replication/dex-net_4.0_fc_suction.yaml",
"chars": 636,
"preview": "policy:\n type: fully_conv_suction\n\n sampling_method: top_k\n gqcnn_stride: 4\n gqcnn_recep_h: 96\n gqcnn_recep_w: 96\n\n"
},
{
"path": "cfg/examples/replication/dex-net_4.0_pj.yaml",
"chars": 1158,
"preview": "policy:\n # optimization params\n num_seed_samples: 128\n num_gmm_samples: 64\n num_iters: 3\n gmm_refit_p: 0.25\n gmm_c"
},
{
"path": "cfg/examples/replication/dex-net_4.0_suction.yaml",
"chars": 1554,
"preview": "policy:\n # optimization params\n num_seed_samples: 200\n num_gmm_samples: 50\n num_iters: 3\n gmm_refit_p: 0.25\n gmm_c"
},
{
"path": "cfg/examples/ros/fc_gqcnn_pj.yaml",
"chars": 152,
"preview": "!include ../fc_gqcnn_pj.yaml\n\n# ROS-specific visualization\nvis:\n rgbd_state: 0\n cropped_rgbd_image: 0\n color_image: 0"
},
{
"path": "cfg/examples/ros/fc_gqcnn_suction.yaml",
"chars": 157,
"preview": "!include ../fc_gqcnn_suction.yaml\n\n# ROS-specific visualization\nvis:\n rgbd_state: 0\n cropped_rgbd_image: 0\n color_ima"
},
{
"path": "cfg/examples/ros/gqcnn_pj.yaml",
"chars": 149,
"preview": "!include ../gqcnn_pj.yaml\n\n# ROS-specific visualization\nvis:\n rgbd_state: 0\n cropped_rgbd_image: 0\n color_image: 0\n "
},
{
"path": "cfg/examples/ros/gqcnn_suction.yaml",
"chars": 154,
"preview": "!include ../gqcnn_suction.yaml\n\n# ROS-specific visualization\nvis:\n rgbd_state: 0\n cropped_rgbd_image: 0\n color_image:"
},
{
"path": "cfg/finetune.yaml",
"chars": 3724,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 10 "
},
{
"path": "cfg/finetune_dex-net_4.0_pj.yaml",
"chars": 3897,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/finetune_dex-net_4.0_suction.yaml",
"chars": 3892,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/finetune_example_pj.yaml",
"chars": 3897,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 20 "
},
{
"path": "cfg/finetune_example_suction.yaml",
"chars": 3892,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 20 "
},
{
"path": "cfg/hyperparam_search/train_dex-net_4.0_fc_suction_hyperparam_search.yaml",
"chars": 3951,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/hyperparam_search/train_hyperparam_search.yaml",
"chars": 4000,
"preview": "# Example hyper-parameter search config corresponding to cfg/train.yaml that\n# grid-searches through number of training "
},
{
"path": "cfg/tools/analyze_gqcnn_performance.yaml",
"chars": 76,
"preview": "log_rate: 10\nfont_size: 15\nline_width: 4\ndpi: 100\nnum_bins: 100\nnum_vis: 10\n"
},
{
"path": "cfg/tools/run_policy.yaml",
"chars": 1983,
"preview": "# policy params\npolicy:\n # optimization params\n num_seed_samples: 250\n num_gmm_samples: 100\n num_iters: 3\n "
},
{
"path": "cfg/train.yaml",
"chars": 3822,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_dex-net_2.0.yaml",
"chars": 3687,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_dex-net_3.0.yaml",
"chars": 3694,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 25 "
},
{
"path": "cfg/train_dex-net_4.0_fc_pj.yaml",
"chars": 3695,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_dex-net_4.0_fc_suction.yaml",
"chars": 3683,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_dex-net_4.0_pj.yaml",
"chars": 3828,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_dex-net_4.0_suction.yaml",
"chars": 3817,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_example_pj.yaml",
"chars": 3824,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 100 "
},
{
"path": "cfg/train_example_suction.yaml",
"chars": 3816,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "cfg/train_fc.yaml",
"chars": 3695,
"preview": "# general optimization params\ntrain_batch_size: 64\nval_batch_size: &val_batch_size 64\n\n# logging params\nnum_epochs: 50 "
},
{
"path": "ci/travis/format.sh",
"chars": 2124,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "data/calib/phoxi/phoxi.intr",
"chars": 130,
"preview": "{\"_cy\": 191.75, \"_cx\": 255.5, \"_fy\": 552.5, \"_height\": 386, \"_fx\": 552.5, \"_width\": 516, \"_skew\": 0.0, \"_K\": 0, \"_frame\""
},
{
"path": "data/calib/phoxi/phoxi_to_world.tf",
"chars": 127,
"preview": "phoxi\nworld\n0.385146 -0.121589 0.808145\n0.005659 -0.999983 0.001249\n-0.989208 -0.005415 0.146420\n-0.146410 -0.002064 -0."
},
{
"path": "data/calib/primesense/primesense.intr",
"chars": 143,
"preview": "{\"_cy\": 239.5, \"_cx\": 319.5, \"_fy\": 525.0, \"_height\": 480, \"_fx\": 525.0, \"_width\": 640, \"_skew\": 0.0, \"_K\": 0, \"_frame\":"
},
{
"path": "data/calib/primesense/primesense.tf",
"chars": 139,
"preview": "primesense_overhead\nworld\n0.214280 0.004186 0.872913\n0.021188 -0.975273 0.219984\n-0.999765 -0.021698 0.000098\n0.004678 -"
},
{
"path": "docker/cpu/Dockerfile",
"chars": 1705,
"preview": "FROM ubuntu:xenial\n\nMAINTAINER Vishal Satish <vsatish@berkeley.edu>\n\n# Args.\n# Must be an absolute path.\nARG work_dir=/r"
},
{
"path": "docker/gpu/Dockerfile",
"chars": 1793,
"preview": "FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu16.04\n\nMAINTAINER Vishal Satish <vsatish@berkeley.edu>\n\n# Args\n# `work_dir` mus"
},
{
"path": "docs/Makefile",
"chars": 1203,
"preview": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHI"
},
{
"path": "docs/gh_deploy.sh",
"chars": 35,
"preview": "#!/bin/sh\nmake gh-pages\ncd ../docs\n"
},
{
"path": "docs/make.bat",
"chars": 813,
"preview": "@ECHO OFF\r\n\r\npushd %~dp0\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sp"
},
{
"path": "docs/source/api/analysis.rst",
"chars": 293,
"preview": "Analysis\n========\n\nGQCNNAnalyzer\n~~~~~~~~~~~~~\nA tool for analyzing trained GQ-CNNs. Calculates statistics such as train"
},
{
"path": "docs/source/api/gqcnn.rst",
"chars": 1385,
"preview": "GQ-CNN\n======\n\nGQ-CNN and FC-GQ-CNN classes are **never accessed directly**, but through a lightweight factory function "
},
{
"path": "docs/source/api/policies.rst",
"chars": 3045,
"preview": "Policies\n========\n\nAll GQ-CNN grasping policies are child classes of the base :ref:`GraspingPolicy` class that implement"
},
{
"path": "docs/source/api/training.rst",
"chars": 461,
"preview": "Training\n========\n\nGQ-CNN training classes are **never accessed directly**, but through a lightweight factory function t"
},
{
"path": "docs/source/benchmarks/benchmarks.rst",
"chars": 826,
"preview": "Dex-Net 2.0\n~~~~~~~~~~~\nBelow are the highest classification accuracies achieved on the `Dex-Net 2.0`_ dataset on a rand"
},
{
"path": "docs/source/conf.py",
"chars": 9797,
"preview": "# -*- coding: utf-8 -*-\n#\n# core documentation build configuration file, created by\n# sphinx-quickstart on Sun Oct 16 14"
},
{
"path": "docs/source/index.rst",
"chars": 5251,
"preview": ".. GQCNN documentation master file, created by\n sphinx-quickstart on Thu May 4 16:09:26 2017.\n You can adapt this f"
},
{
"path": "docs/source/info/info.rst",
"chars": 1010,
"preview": "What are GQ-CNNs?\n-----------------\nGQ-CNNs are neural network architectures that take as input a depth image and grasp,"
},
{
"path": "docs/source/install/install.rst",
"chars": 3546,
"preview": "Prerequisites\n~~~~~~~~~~~~~\n\nPython\n\"\"\"\"\"\"\n\nThe `gqcnn` package has only been tested with `Python 3.5`, `Python 3.6`, an"
},
{
"path": "docs/source/license/license.rst",
"chars": 1700,
"preview": "License\n~~~~~~~\nThe `gqcnn` library, pre-trained models, and raw datasets are licensed under The Regents of the Universi"
},
{
"path": "docs/source/replication/replication.rst",
"chars": 5419,
"preview": "Replicating Results\n~~~~~~~~~~~~~~~~~~~\nNumerous publications in the larger `Dex-Net`_ project utilize GQ-CNNs, particul"
},
{
"path": "docs/source/tutorials/analysis.rst",
"chars": 2259,
"preview": "Analysis\n~~~~~~~~\nIt is helpful to check the training and validation loss and classification errors to ensure that the n"
},
{
"path": "docs/source/tutorials/planning.rst",
"chars": 10093,
"preview": "Grasp Planning\n~~~~~~~~~~~~~~\nGrasp planning involves searching for the grasp with the highest predicted probability of "
},
{
"path": "docs/source/tutorials/training.rst",
"chars": 1922,
"preview": "Training\n~~~~~~~~\nThe `gqcnn` package can be used to train a `Dex-Net 4.0`_ GQ-CNN model on a custom offline `Dex-Net`_ "
},
{
"path": "docs/source/tutorials/tutorial.rst",
"chars": 1328,
"preview": "Overview\n~~~~~~~~\nThere are two main use cases of the `gqcnn` package:\n\n#. :ref:`training` a `Dex-Net 4.0`_ GQ-CNN model"
},
{
"path": "examples/antipodal_grasp_sampling.py",
"chars": 4035,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "examples/policy.py",
"chars": 11075,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "examples/policy_ros.py",
"chars": 6939,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "examples/policy_with_image_proc.py",
"chars": 9404,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/__init__.py",
"chars": 2246,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/analysis/__init__.py",
"chars": 1404,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/analysis/analyzer.py",
"chars": 36036,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/__init__.py",
"chars": 2635,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/actions.py",
"chars": 5587,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/constraint_fn.py",
"chars": 4397,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/grasp.py",
"chars": 22760,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/grasp_quality_function.py",
"chars": 50608,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/image_grasp_sampler.py",
"chars": 43278,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/policy/__init__.py",
"chars": 1873,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/policy/enums.py",
"chars": 1472,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/policy/fc_policy.py",
"chars": 21964,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/grasping/policy/policy.py",
"chars": 63403,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/model/__init__.py",
"chars": 3176,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/model/tf/__init__.py",
"chars": 1444,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/model/tf/fc_network_tf.py",
"chars": 15423,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/model/tf/network_tf.py",
"chars": 57949,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/search/__init__.py",
"chars": 1398,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/search/enums.py",
"chars": 2091,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/search/resource_manager.py",
"chars": 8064,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/search/search.py",
"chars": 10586,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/search/trial.py",
"chars": 8877,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/search/utils.py",
"chars": 7970,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/training/__init__.py",
"chars": 2104,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/training/tf/__init__.py",
"chars": 1408,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/training/tf/trainer_tf.py",
"chars": 76151,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/utils/__init__.py",
"chars": 2170,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/utils/enums.py",
"chars": 3641,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/utils/policy_exceptions.py",
"chars": 2048,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/utils/train_stats_logger.py",
"chars": 4850,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/utils/utils.py",
"chars": 7636,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "gqcnn/version.py",
"chars": 22,
"preview": "__version__ = \"1.3.0\"\n"
},
{
"path": "launch/grasp_planning_service.launch",
"chars": 1956,
"preview": "<!--\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved. Permission to use, cop"
},
{
"path": "msg/Action.msg",
"chars": 1451,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "msg/BoundingBox.msg",
"chars": 1398,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "msg/GQCNNGrasp.msg",
"chars": 1519,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "msg/Observation.msg",
"chars": 1394,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "package.xml",
"chars": 3436,
"preview": "<?xml version=\"1.0\"?>\n<!--\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved. "
},
{
"path": "post-checkout",
"chars": 199,
"preview": "#!/bin/sh\n#\n# An example hook script to prepare a packed repository for use over\n# dumb transports.\n#\n# To enable this h"
},
{
"path": "requirements/cpu_requirements.txt",
"chars": 142,
"preview": "autolab-core\nautolab-perception\nvisualization\nnumpy\nopencv-python\nscipy\nmatplotlib\ntensorflow<=1.15.0\nscikit-learn\nsciki"
},
{
"path": "requirements/docs_requirements.txt",
"chars": 48,
"preview": "sphinx\nsphinxcontrib-napoleon\nsphinx_rtd_theme\n\n"
},
{
"path": "requirements/gpu_requirements.txt",
"chars": 146,
"preview": "autolab-core\nautolab-perception\nvisualization\nnumpy\nopencv-python\nscipy\nmatplotlib\ntensorflow-gpu<=1.15.0\nscikit-learn\ns"
},
{
"path": "ros_nodes/grasp_planner_node.py",
"chars": 18671,
"preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents)"
},
{
"path": "scripts/docker/build-docker.sh",
"chars": 1615,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_2.0_examples.sh",
"chars": 3868,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_2.1_examples.sh",
"chars": 2557,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_3.0_examples.sh",
"chars": 3868,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_4.0_fc_pj_examples.sh",
"chars": 2921,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_4.0_fc_suction_examples.sh",
"chars": 2971,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_4.0_pj_examples.sh",
"chars": 2866,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/policies/run_all_dex-net_4.0_suction_examples.sh",
"chars": 2916,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_2.0.sh",
"chars": 1469,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_2.1.sh",
"chars": 1502,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_3.0.sh",
"chars": 1469,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_4.0_fc_pj.sh",
"chars": 1487,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_4.0_fc_suction.sh",
"chars": 1502,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_4.0_pj.sh",
"chars": 1475,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "scripts/training/train_dex-net_4.0_suction.sh",
"chars": 1490,
"preview": "#!/bin/bash\n\n# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission"
},
{
"path": "setup.py",
"chars": 6393,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "srv/GQCNNGraspPlanner.srv",
"chars": 1497,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "srv/GQCNNGraspPlannerBoundingBox.srv",
"chars": 1522,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "srv/GQCNNGraspPlannerFull.srv",
"chars": 1548,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "srv/GQCNNGraspPlannerSegmask.srv",
"chars": 1523,
"preview": "# Copyright ©2017. The Regents of the University of California (Regents).\n# All Rights Reserved. Permission to use, copy"
},
{
"path": "tools/analyze_gqcnn_performance.py",
"chars": 4789,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "tools/finetune.py",
"chars": 6768,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "tools/hyperparam_search.py",
"chars": 5266,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "tools/plot_training_losses.py",
"chars": 6688,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "tools/run_policy.py",
"chars": 6089,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
},
{
"path": "tools/train.py",
"chars": 5889,
"preview": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright ©2017. The Regents of the University of California (Regents).\nAll Rights Reserved."
}
]
// ... and 25 more files (download for full content)
About this extraction
This page provides the source code of the BerkeleyAutomation/gqcnn GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). Note that this view displays only a condensed preview; download the file or copy it to your clipboard to obtain the complete content. The extraction includes 170 files (754.8 KB), approximately 181.3k tokens, and a symbol index with 426 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.