Repository: Seeed-Projects/jetson-examples
Branch: main
Commit: 0a79b9978d5e
Files: 177
Total size: 348.5 KB
Directory structure:
gitextract_qsc773b7/
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── build.sh
├── docs/
│ ├── develop.md
│ ├── examples.md
│ ├── install.md
│ └── publish.md
├── install.sh
├── pyproject.toml
├── reComputer/
│ ├── __init__.py
│ ├── main.py
│ └── scripts/
│ ├── MoveNet-Lightning/
│ │ ├── clean.sh
│ │ ├── getVersion.sh
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── MoveNet-Thunder/
│ │ ├── clean.sh
│ │ ├── getVersion.sh
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── MoveNetJS/
│ │ ├── clean.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── Sheared-LLaMA-2.7B-ShareGPT/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── audiocraft/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── check.sh
│ ├── clean.sh
│ ├── comfyui/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── deep-live-cam/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── depth-anything/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── depth-anything-v2/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── depth-anything-v3/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── gpt-oss/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── live-llava/
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llama-factory/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llama3/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llama3.2/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llava/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llava-v1.5-7b/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llava-v1.6-vicuna-7b/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── nanodb/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── nanoowl/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── nvblox/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config/
│ │ │ ├── orbbec_stereo_capability_probe.yaml
│ │ │ └── orbbec_vslam_mobile.yaml
│ │ ├── config.yaml
│ │ ├── docker/
│ │ │ ├── Dockerfile.nvblox_orbbec
│ │ │ ├── launch_nvblox.sh
│ │ │ └── prepare_container_workspace.sh
│ │ ├── host/
│ │ │ └── orbbec_mobile_host.launch.py
│ │ ├── init.sh
│ │ ├── lib/
│ │ │ └── common.sh
│ │ ├── onedrive_downloader.py
│ │ ├── run.sh
│ │ ├── scripts/
│ │ │ ├── debug_runtime_connectivity.sh
│ │ │ ├── preflight.sh
│ │ │ ├── prepare_container.sh
│ │ │ ├── prepare_host.sh
│ │ │ └── run_demo.sh
│ │ └── start_nvblox_demo.sh
│ ├── ollama/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── parler-tts/
│ │ ├── clean.sh
│ │ ├── getVersion.sh
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── qwen3.5-4b/
│ │ ├── Dockerfile.jetson
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── ros1-jp6/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── run.sh
│ ├── stable-diffusion-webui/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── text-generation-webui/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── ultralytics-yolo/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── update.sh
│ ├── utils.sh
│ ├── whisper/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── yolov10/
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ └── yolov8-rail-inspection/
│ ├── config.yaml
│ ├── init.sh
│ ├── readme.md
│ └── run.sh
└── setup.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
.github/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
!reComputer/scripts/nvblox/lib/
!reComputer/scripts/nvblox/lib/**
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2024 luozhixin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: MANIFEST.in
================================================
recursive-include reComputer/scripts *
global-exclude __pycache__
global-exclude *.pyc *.pyo
global-exclude *.png *.jpg *.jpeg *.gif *.bmp
================================================
FILE: README.md
================================================
# jetson-examples

[](https://discord.gg/5BQCkty7vN)
This repository provides examples for running AI models and applications on [NVIDIA Jetson devices](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) with a single command.
This repo builds upon the work of the [jetson-containers](https://github.com/dusty-nv/jetson-containers), [ultralytics](https://github.com/ultralytics/ultralytics) and other excellent projects.
## Features
- 🚀 **Easy Deployment:** Deploy state-of-the-art AI models on Jetson devices in one line.
- 🔄 **Versatile Examples:** Supports text generation, image generation, computer vision and so on.
- ⚡ **Optimized for Jetson:** Leverages Nvidia Jetson hardware for efficient performance.
## Install
To install the package, run:
```sh
pip3 install jetson-examples
```
> Notes:
> - Check [here](./docs/install.md) for more installation methods
> - To upgrade to the latest version, use: `pip3 install jetson-examples --upgrade`.
## Quickstart
To run and chat with [LLaVA](https://www.jetson-ai-lab.com/tutorial_llava.html), execute:
```sh
reComputer run llava
```
## Example list
Here are some examples that can be run:
| Example | Type | Model/Data Size | Docker Image Size | Command | Supported JetPack |
| ------------------------------------------------ | ------------------------ | --------------- | ---------- | --------------------------------------- | ------------------------------------------------ |
| 🆕 [Ultralytics-yolo](/reComputer/scripts/ultralytics-yolo/README.md) | Computer Vision | | 15.4GB | `reComputer run ultralytics-yolo` | 4.6, 5.1.1, 5.1.2, 5.1.3, 6.0, 6.1, 6.2 |
| 🆕 [Deep-Live-Cam](/reComputer/scripts/deep-live-cam/README.md) | Face-swapping | 0.5GB | 20GB | `reComputer run deep-live-cam` | 6.0 |
| 🆕 llama-factory | Finetune LLM | | 13.5GB | `reComputer run llama-factory` | 5.1.1, 5.1.2, 5.1.3 |
| 🆕 [ComfyUI](/reComputer/scripts/comfyui/README.md) |Computer Vision | | 20GB | `reComputer run comfyui` | 5.1.1, 5.1.2, 5.1.3 |
| [Depth-Anything-V2](/reComputer/scripts/depth-anything-v2/README.md) |Computer Vision | | 15GB | `reComputer run depth-anything-v2` | 5.1.1, 5.1.2, 5.1.3 |
| [Depth-Anything-V3](/reComputer/scripts/depth-anything-v3/README.md) |Computer Vision | | 7.6GB | `reComputer run depth-anything-v3` | 6.1, 6.2, 6.2.1 |
| 🆕 [Qwen3.5-4B](/reComputer/scripts/qwen3.5-4b/README.md) | Text (LLM) | 2.5GB | 0.2GB | `reComputer run qwen3.5-4b` | 6.1, 6.2, 6.2.1 |
| [Depth-Anything](/reComputer/scripts/depth-anything/README.md) |Computer Vision | | 12.9GB | `reComputer run depth-anything` | 5.1.1, 5.1.2, 5.1.3 |
| [Yolov10](/reComputer/scripts/yolov10/README.md) | Computer Vision | 7.2M | 5.74 GB | `reComputer run yolov10` | 5.1.1, 5.1.2, 5.1.3, 6.0 |
| Llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` | 5.1.1, 5.1.2, 5.1.3, 6.0 |
| [gpt-oss](/reComputer/scripts/gpt-oss/README.md) | Text (LLM) | 39GB | 31.28GB | `reComputer run gpt-oss` | 6.1, 6.2, 6.2.1 |
| [ros1-jp6](/reComputer/scripts/ros1-jp6/README.md) | Robotics / ROS 1 | * | 1.27GB | `reComputer run ros1-jp6` | 6.1, 6.2, 6.2.1 |
| [nvblox](/reComputer/scripts/nvblox/README.md) | Robotics / Mapping | * | 20.5GB+ | `reComputer run nvblox` | 6.x |
> Note: You should have enough disk space to run an example; `LLaVA`, for instance, requires at least `27.4GB` in total.
More examples can be found in [examples.md](./docs/examples.md)
## Calling Contributors Join Us!
### How to work with us?
Want to add your own example? Check out the [development guide](./docs/develop.md).
We welcome contributions to improve jetson-examples! If you have an example you'd like to share, please submit a pull request. Thank you to all of our contributors! 🙏
This open call is listed in our [Contributor Project](https://github.com/orgs/Seeed-Studio/projects/6/views/1?filterQuery=jetson&pane=issue&itemId=64891723). If this is your first time joining us, [click here](https://github.com/orgs/Seeed-Studio/projects/6/views/1?pane=issue&itemId=30957479) to learn how the project works. We follow the steps with:
- Assignments: We offer a variety of assignments to enhance wiki content, each with a detailed description.
- Submission: Contributors can submit their content via a Pull Request after completing the assignments.
- Review: Maintainers will merge the submission and record the contributions.
**Contributors receive a $250 cash bonus as a token of appreciation.**
For any questions or further information, feel free to reach out via the GitHub issues page or contact edgeai@seeed.cc
## TODO List
- [ ] detect host environment and install what we need
- [ ] all type jetson support checking list
- [ ] try jetpack 6.0
- [ ] check disk space enough or not before run
- [ ] allow to setting some configs, such as `BASE_PATH`
- [ ] support jetson-containers update
- [ ] better table to show example's difference
### 👥 Contributors
## License
This project is licensed under the MIT License.
## Resources
- https://github.com/dusty-nv/jetson-containers
- https://www.jetson-ai-lab.com/
- https://www.ultralytics.com/
================================================
FILE: build.sh
================================================
#!/bin/bash
# Build helper for jetson-examples: clean, reinstall, then optionally build a
# wheel and publish it to Test PyPI / PyPI (each step gated by a prompt).

# remove any previously installed version and stale build artifacts
pip uninstall jetson-examples -y
rm -rf build/

# install the current checkout
pip install .

# optionally build a wheel
read -p "build whl ? (y/n): " choice
case "$choice" in
    y|Y)
        python3 -m pip install --upgrade build
        echo "building..."
        rm -rf dist/
        python3 -m build
        echo "build done."
        ;;
    *)
        echo "skip build."
        ;;
esac

# optionally publish to Test PyPI
read -p "publish to test PyPI ? (y/n): " choice
case "$choice" in
    y|Y)
        python3 -m pip install --upgrade twine
        keyring --disable # https://github.com/pypa/twine/issues/847
        echo "publishing to Test PyPI..."
        python3 -m twine upload --repository testpypi dist/*
        ;;
    *)
        echo "skip publish."
        ;;
esac

# optionally publish to the real PyPI; requires typing "confirm" as a safeguard
read -p "[Danger!!] publish to PyPI ? (confirm/*): " choice
case "$choice" in
    confirm|CONFIRM)
        python3 -m pip install --upgrade twine
        keyring --disable # https://twine.readthedocs.io/en/stable/#disabling-keyring
        echo "publishing to Prod PyPI..."
        python3 -m twine upload --repository pypi dist/*
        ;;
    *)
        echo "skip publish."
        ;;
esac

echo 'clean & build & publish ok.'
================================================
FILE: docs/develop.md
================================================
# Develop
This section provides guidance on how to contribute to the `jetson-examples` repository. It is highly recommended to develop and run your project on a Jetson device for the best experience.
## 0. Preparation
Follow these steps to get started:
```sh
# Clone the repository
git clone https://github.com/Seeed-Projects/jetson-examples.git
# Navigate to the repository
cd jetson-examples
# Install in 'develop mode'
pip install .
# Test the installed module
reComputer check
# If everything is okay, you should see the following output:
# Docker version...
# Python 3...
# ...
```
## 1. Project Structure
The project is structured as follows:
- `docs/`: This directory contains the project's documents.
- `assets/`: This directory contains document assets, such as images.
- `reComputer/`: This is the main directory of the Python module.
- `__init__.py`: This file is the initialization file for the Python module.
- `main.py`: This file contains the main logic code for the Python module.
- `scripts/`: This directory is used to store examples.
- `xxxxx/`: This is an example directory. Everything inside this directory will be installed into the system. You can save files of any type, such as images, Python scripts, executable files, etc.
- `init.sh`: **(optional)** This is the example init script. To initialize the project's initial data and environment.
- `run.sh`: **(MOST IMPORTANT)** This is the example startup script. It is the only entry point for your project.
- `readme.md`: **(optional)** This file provides an introduction to the example.
- `check.sh`: This is the checking script **(Not Finished yet)**.
- `run.sh`: This is the common startup script for examples.
- `install.sh`: This script uses `curl` and `github` to install `jetson-examples`.
- `pyproject.toml`: This file contains information on how to build and install `jetson-examples`.
## 2. Create Your Project

Follow these steps to create an `example` in this project:
```sh
# 1 Declare your project name as an environment variable
my_project=hello-world
# 2 Create a directory for your project
mkdir -p reComputer/scripts/$my_project
# 3 [required] Create the run.sh file
echo "echo 'hello world'" > reComputer/scripts/$my_project/run.sh
# 4 [option] Create the readme.md file
echo -e "# hello-world\n\n- Print \`hello-world\` to show how to add your project to this package" > reComputer/scripts/$my_project/readme.md
# 5 [option] Create the init.sh file
echo "echo 'init env'" > reComputer/scripts/$my_project/init.sh
# 6 [option] Create the clean.sh file
echo "echo 'clean data'" > reComputer/scripts/$my_project/clean.sh
```
After completing these steps, you should see the file changes as shown in the image below:

If you are familiar with creating and editing directories or files, you can use your preferred method.
## 3. Edit `$my_project/run.sh` to Customize Your Project
Use your preferred IDE (e.g., Vim, VS Code) to edit `reComputer/scripts/$my_project/run.sh` and add the desired functionality:
```sh
# Inside reComputer/scripts/$my_project/run.sh
echo 'hello world'
# TODO: Add code to achieve your desired functionality
# ...
```
## 4. Test Your Project
To test your project, follow these steps:
```sh
# Reinstall to make your new project work with `reComputer`
pip install .
# Run your new project with a one-line command
reComputer run hello-world
# INFO: Machine [Jetson AGX Orin] confirmed...
# Running example: hello-world
# ---- Example initialization ----
# jetson-ai-lab existed.
# ---- Example start ----
# hello world
# ---- Example done ----
```
## 5. (Optional) Add a `readme.md` File
If you want to provide additional information about your project, you can add a `readme.md` file. Use your preferred IDE to edit `reComputer/scripts/$my_project/readme.md`:
```sh
# hello-world
- Print hello-world to show how to add your project to this package
```
## 6. (Optional) Submit a New Pull Request
If you wish to contribute your project to the `jetson-examples` repository, you can follow these steps:
- 6.1 Fork this project.
- 6.2 Create a new branch in your project.
- 6.3 Commit the changes you made.
- 6.4 Push the changes to your project.
- 6.5 Create a pull request (`origin-git-repo/main <- your-git-repo/newbranch`) at [https://github.com/Seeed-Projects/jetson-examples/pulls](https://github.com/Seeed-Projects/jetson-examples/pulls).
- 6.6 Wait for a code review.
- 6.7 Once your code passes the review, it will be merged.
- 6.8 Thank you for your contribution!
================================================
FILE: docs/examples.md
================================================
# Example list
All examples that can be run:
| Example | Type | Model Size | Image Size | Command | Device |
| ------------------------------------------------ | ------------------------ | ---------- | ---------- | -------------------------------------------- | -------- |
| whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` | USB-CAM* |
| [yolov8-rail-inspection](/reComputer/scripts/yolov8-rail-inspection/readme.md) |Computer Vision(CV) | 6M | 13.8GB | `reComputer run yolov8-rail-inspection` | |
| [ultralytics-yolo](/reComputer/scripts/ultralytics-yolo/README.md) |Computer Vision(CV) | * | 15.4GB | `reComputer run ultralytics-yolo` | |
| [depth-anything](/reComputer/scripts/depth-anything/README.md) |Computer Vision(CV) | * | 12.9GB | `reComputer run depth-anything` | |
| [depth-anything-v3](/reComputer/scripts/depth-anything-v3/README.md) |Computer Vision(CV) | * | 7.6GB | `reComputer run depth-anything-v3` | |
| [qwen3.5-4b](/reComputer/scripts/qwen3.5-4b/README.md) | Text (LLM) | 2.5GB | * | `reComputer run qwen3.5-4b` | |
| [yolov10](/reComputer/scripts/yolov10/README.md) | Computer Vision(CV) | 7.2M | 5.74 GB | `reComputer run yolov10` | |
| text-generation-webui | Text (LLM) | 3.9GB | 14.8GB | `reComputer run text-generation-webui` | |
| llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` | |
| [gpt-oss](/reComputer/scripts/gpt-oss/README.md) | Text (LLM) | * | 31.28GB | `reComputer run gpt-oss` | |
| [ros1-jp6](/reComputer/scripts/ros1-jp6/README.md) | Robotics / ROS 1 | * | 1.27GB | `reComputer run ros1-jp6` | |
| [nvblox](/reComputer/scripts/nvblox/README.md) | Robotics / Mapping | * | 20.5GB+ | `reComputer run nvblox` | Gemini2 |
| LLaMA | Text (LLM) | 1.5GB | 10.5GB | `reComputer run Sheared-LLaMA-2.7B-ShareGPT` | |
| llava-v1.5 | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava-v1.5-7b` | |
| llava-v1.6 | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run llava-v1.6-vicuna-7b` | |
| LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` | |
| Live LLaVA | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run live-llava` | USB-CAM* |
| stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` | |
| nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` | USB-CAM* |
| [nanodb](../reComputer/scripts/nanodb/readme.md) | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` | |
| [ollama](https://github.com/ollama/ollama) | Inference Server | * | 10.5GB | `reComputer run ollama` | |
| [TensorFlow MoveNet Thunder](/reComputer/scripts/MoveNet-Thunder/readme.md) |Computer Vision | | 7.7GB | `reComputer run MoveNet-Thunder` | USB-CAM*
| [TensorFlow MoveNet Lightning](/reComputer/scripts/MoveNet-Lightning/readme.md) |Computer Vision | | 7.48GB | `reComputer run MoveNet-Lightning` | USB-CAM*
| [TensorFlow MoveNet JS](/reComputer/scripts/MoveNetJS/readme.md) |Computer Vision | | 56.21MB | `reComputer run MoveNetJS` | USB-CAM*
| [Parler-TTS mini: expresso](/reComputer/scripts/parler-tts/readme.md) |Audio | | 6.9GB | `reComputer run parler-tts` |
================================================
FILE: docs/install.md
================================================
# Install
- use the way you like to install
## PyPI(recommend)
```sh
pip install jetson-examples
```
## Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
## Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
================================================
FILE: docs/publish.md
================================================
# publish
## pypi.org
```sh
# tools update
python3 -m pip install --upgrade build
python3 -m pip install --upgrade twine
```
### Test
```sh
# 1 build
python3 -m build
# 2 publish
python3 -m twine upload --repository testpypi dist/*
### WARNING: do not share your API token !!
# 3 test
pip install -i https://test.pypi.org/simple/ jetson-examples
### make sure version number right
```
### Prod
```sh
# 1 build
python3 -m build
# 2 publish
python3 -m twine upload --repository pypi dist/*
### WARNING: do not share your API token !!
# 3 test
pip install jetson-examples --upgrade
### make sure version number right
```
================================================
FILE: install.sh
================================================
#!/bin/bash
# Bootstrap installer: clone the repo into /tmp and pip-install it.
# Each step only runs if the previous one succeeded (&& chain).
# TODO: make sure python3 in host is OK
cd /tmp \
    && git clone https://github.com/Seeed-Projects/jetson-examples \
    && cd jetson-examples \
    && pip install . \
    && echo "reComputer installed. try 'reComputer run whisper' to enjoy!"
================================================
FILE: pyproject.toml
================================================
[build-system]
requires = ["setuptools>=61.0.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "jetson-examples"
version = "0.2.5"
authors = [{ name = "luozhixin", email = "zhixin.luo@seeed.cc" }]
description = "Running Gen AI models and applications on NVIDIA Jetson devices with one-line command"
readme = "README.md"
requires-python = ">=3.8"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
keywords = [
"llama",
"llava",
"gpt",
"llm",
"nvidia",
"jetson",
"multimodal",
"jetson orin",
]
[project.scripts]
reComputer = "reComputer.main:run_script"
[project.urls]
Homepage = "https://github.com/Seeed-Projects/jetson-examples"
Issues = "https://github.com/Seeed-Projects/jetson-examples/issues"
# Tools settings -------------------------------------------------------------------------------------------------------
[tool.setuptools.packages.find]
where = ["."]
include = ["reComputer"]
[tool.setuptools]
include-package-data = true
[tool.setuptools.package-data]
"reComputer" = ["scripts/**/*"]
================================================
FILE: reComputer/__init__.py
================================================
__version__ = "0.1.3"
================================================
FILE: reComputer/main.py
================================================
import os
import subprocess
import sys
from pathlib import Path
def scripts_roots():
    """Return candidate ``scripts`` directories, highest priority first, deduplicated.

    Order: current working tree, then (if set) the tree named by the
    JETSON_EXAMPLES_SOURCE environment variable, then the installed package.
    """
    package_dir = Path(__file__).resolve().parent
    search = [
        Path.cwd() / "reComputer" / "scripts",
        package_dir / "scripts",
    ]
    hint = os.environ.get("JETSON_EXAMPLES_SOURCE")
    if hint:
        search.insert(1, Path(hint).expanduser().resolve() / "reComputer" / "scripts")
    # A dict keyed on the string form dedups while preserving first-seen order
    # (insertion order is guaranteed on the supported Python versions).
    unique = {str(path): path for path in search}
    return list(unique.values())
def scripts_root():
    """Return the first candidate scripts directory that exists on disk.

    Falls back to the highest-priority candidate path even if it does not
    exist, so callers always get a usable default.
    """
    existing = next((root for root in scripts_roots() if root.is_dir()), None)
    if existing is not None:
        return str(existing)
    # fallback to package default path
    return str(scripts_roots()[0])
def path_of_script(name):
    """Resolve *name* against the candidate roots; first existing match wins.

    If the script exists in none of the roots, return the path it would have
    under the highest-priority root (callers surface the error at run time).
    """
    roots = scripts_roots()
    for root in roots:
        candidate = root / name
        if candidate.exists():
            return str(candidate)
    return str(roots[0] / name)
def list_all_examples(folder_path):
    """Return the names of the immediate subdirectories of *folder_path*.

    Each subdirectory under the scripts root is one runnable example.
    Order follows ``os.listdir`` (platform-dependent), as before.
    """
    # Comprehension replaces the manual append loop (same behavior, idiomatic).
    return [
        entry
        for entry in os.listdir(folder_path)
        if os.path.isdir(os.path.join(folder_path, entry))
    ]
def run_script():
    """CLI entry point: dispatch ``reComputer <command> [example]``.

    Two-argument commands (``run``/``clean``) delegate to the shared shell
    scripts with the example name appended; one-argument commands handle
    ``check``/``update``/``list`` or print the help text.
    """
    argv = sys.argv
    if len(argv) == 3:
        command, example_name = argv[1], argv[2]
        if command == "run":
            # TODO: maybe use python instead of shell is better
            subprocess.run(["bash", path_of_script("run.sh"), example_name])
        elif command == "clean":
            subprocess.run(["bash", path_of_script("clean.sh"), example_name])
        else:
            print("Only Support `run` or `clean` for now. try `reComputer run llava` .")
    elif len(argv) == 2:
        command = argv[1]
        if command == "check":
            subprocess.run(["bash", path_of_script("check.sh")])
        elif command == "update":
            subprocess.run(["bash", path_of_script("update.sh")])
        elif command == "list":
            examples = list_all_examples(scripts_root())
            print("example list:")
            for index, directory in enumerate(examples, start=1):
                print("{:03d}".format(index), "|", directory)
            print("-end-")
        else:
            print("reComputer help:")
            print("---")
            print("`reComputer check` | check system.")
            print("`reComputer update` | update jetson-ai-lab.")
            print("`reComputer list` | list all examples.")
            print("`reComputer run xxx` | run an example.")
            print("`reComputer clean xxx` | clean an example's data.")
            print("---")
    else:
        print("Error Usage! try `reComputer help`.")
# Intentionally a no-op: the CLI is exposed through the `reComputer`
# console-script entry point (pyproject.toml -> reComputer.main:run_script),
# not by executing this module directly.
# NOTE(review): consider calling run_script() here so `python -m reComputer.main`
# also works — confirm that is desired before changing it.
if __name__ == "__main__":
    pass
================================================
FILE: reComputer/scripts/MoveNet-Lightning/clean.sh
================================================
#!/bin/bash
# Remove the MoveNet Lightning docker image for the detected L4T version.
# getVersion.sh exports IMAGE_TAG for this device's L4T release.
source ./getVersion.sh
# FIX: run.sh pulls feiticeir0/movenet-lightning, so clean must remove that
# same image (previously this targeted feiticeir0/movenet, which run.sh
# never pulls, leaving the real image behind).
sudo docker rmi feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNet-Lightning/getVersion.sh
================================================
#!/bin/bash
# Detect the L4T (Jetson Linux) version and export IMAGE_TAG for docker images.
# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh
# and llama-factory init script

# we only have images for these - 36.2.0 works on 36.3.0
# FIX: bash array elements are whitespace-separated; the previous commas became
# part of the elements ("35.3.1," etc.), so the membership check below failed
# for every version except the last one in the list.
L4T_VERSIONS=("35.3.1" "35.4.1" "36.2.0" "36.3.0")

ARCH=$(uname -i)
if [ "$ARCH" = "aarch64" ]; then
    L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
    if [ -z "$L4T_VERSION_STRING" ]; then
        # /etc/nv_tegra_release missing or empty: fall back to dpkg metadata
        L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
        L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
        L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
        L4T_REVISION=${L4T_VERSION_ARRAY[1]}
    else
        # parse "# R36 (release), REVISION: 3.0, ..." style header
        L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
        L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
    fi
    L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
    L4T_REVISION_MINOR=${L4T_REVISION:2:1}
    L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
    IMAGE_TAG=$L4T_VERSION
elif [ "$ARCH" != "x86_64" ]; then
    echo "unsupported architecture: $ARCH"
    exit 1
fi

if [[ ! " ${L4T_VERSIONS[@]} " =~ " ${L4T_VERSION} " ]]; then
    echo "L4T_VERSION is not in the allowed versions list. Exiting."
    exit 1
fi

# all supported 36.x releases use the 36.2.0 image tag
if [ "${L4T_RELEASE}" -eq "36" ]; then
    IMAGE_TAG="36.2.0"
fi
================================================
FILE: reComputer/scripts/MoveNet-Lightning/init.sh
================================================
#!/bin/bash
# Allow local docker containers to connect to the host X server so the
# containerized demo can open a GUI window on this display.
xhost +local:docker
================================================
FILE: reComputer/scripts/MoveNet-Lightning/readme.md
================================================
# MoveNet
MoveNet is an ultra-fast and accurate pose detection model.
We're demonstrating here using reComputer J402 and with MoveNet Lightning version

You can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Webcam connected to reComputer
* Graphical desktop
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
1. Type the following command in a terminal
```bash
reComputer run MoveNet-Lightning
```
2. Start moving in front of the camera
================================================
FILE: reComputer/scripts/MoveNet-Lightning/run.sh
================================================
#!/bin/bash
# Run the MoveNet Lightning pose-detection demo in docker.
# getVersion.sh exports IMAGE_TAG for this device's L4T version.
source ./getVersion.sh
# FIX: removed the stray trailing double quote after ${IMAGE_TAG} on both the
# pull and run commands — it left an unterminated string that swallowed the
# rest of the script and broke both commands.
docker pull feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}
# Run with GPU runtime, the first webcam, and X11 passthrough for the GUI.
docker run \
    -e DISPLAY=$DISPLAY \
    --runtime=nvidia \
    --rm \
    --device /dev/video0 \
    -v /tmp/.X11-unix:/tmp/.X11-unix \
    feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNet-Thunder/clean.sh
================================================
#!/bin/bash
# Remove the MoveNet Thunder docker image for the detected L4T version.
# getVersion.sh exports IMAGE_TAG for this device's L4T release.
source ./getVersion.sh
# FIX: getVersion.sh sets IMAGE_TAG; ${TAG_IMAGE} was never defined, so the
# rmi ran with an empty tag and failed to remove the image.
# NOTE(review): confirm the image name matches the one this example's run.sh
# actually pulls (the Lightning variant uses feiticeir0/movenet-lightning).
sudo docker rmi feiticeir0/movenet:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNet-Thunder/getVersion.sh
================================================
#!/bin/bash
# Detect the L4T (Jetson Linux) version and export IMAGE_TAG for docker images.
# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh
# and llama-factory init script

# we only have images for these - 36.2.0 works on 36.3.0
# FIX: bash array elements are whitespace-separated; the previous commas became
# part of the elements ("35.3.1," etc.), so the membership check below failed
# for every version except the last one in the list.
L4T_VERSIONS=("35.3.1" "35.4.1" "36.2.0" "36.3.0")

ARCH=$(uname -i)
if [ "$ARCH" = "aarch64" ]; then
    L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
    if [ -z "$L4T_VERSION_STRING" ]; then
        # /etc/nv_tegra_release missing or empty: fall back to dpkg metadata
        L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
        L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
        L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
        L4T_REVISION=${L4T_VERSION_ARRAY[1]}
    else
        # parse "# R36 (release), REVISION: 3.0, ..." style header
        L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
        L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
    fi
    L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
    L4T_REVISION_MINOR=${L4T_REVISION:2:1}
    L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
    IMAGE_TAG=$L4T_VERSION
elif [ "$ARCH" != "x86_64" ]; then
    echo "unsupported architecture: $ARCH"
    exit 1
fi

if [[ ! " ${L4T_VERSIONS[@]} " =~ " ${L4T_VERSION} " ]]; then
    echo "L4T_VERSION is not in the allowed versions list. Exiting."
    exit 1
fi

# all supported 36.x releases use the 36.2.0 image tag
if [ "${L4T_RELEASE}" -eq "36" ]; then
    IMAGE_TAG="36.2.0"
fi
================================================
FILE: reComputer/scripts/MoveNet-Thunder/init.sh
================================================
#!/bin/bash
# Let's allow connections
# Grant local Docker containers access to the host X server so the demo
# container can open a display window on this machine's desktop.
xhost +local:docker
================================================
FILE: reComputer/scripts/MoveNet-Thunder/readme.md
================================================
# MoveNet
MoveNet is an ultra-fast and accurate pose detection model.
We're demonstrating here using reComputer J402 and with MoveNet Thunder version

You can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Webcam connected to reComputer
* Graphical desktop
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
1. Type the following command in a terminal
```bash
reComputer run MoveNet-Thunder
```
2. Start moving in front of the camera
================================================
FILE: reComputer/scripts/MoveNet-Thunder/run.sh
================================================
#!/bin/bash
# Launch the MoveNet Thunder pose-detection demo in Docker.
# getVersion.sh exports IMAGE_TAG for the host's L4T release.
source ./getVersion.sh
# Fetch the matching image first.
docker pull feiticeir0/movenet-thunder:tf2-${IMAGE_TAG}
# Run with the NVIDIA runtime, the webcam device, and X11 forwarding
# so the pose overlay window appears on the host desktop.
docker run \
    --rm \
    --runtime=nvidia \
    --device /dev/video0 \
    -e DISPLAY=$DISPLAY \
    -v /tmp/.X11-unix:/tmp/.X11-unix \
    feiticeir0/movenet-thunder:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNetJS/clean.sh
================================================
#!/bin/bash
# remove docker image
# Deletes the MoveNetJS demo image previously pulled by run.sh.
sudo docker rmi feiticeir0/movenetjs:latest
================================================
FILE: reComputer/scripts/MoveNetJS/readme.md
================================================
# MoveNet
MoveNet is an ultra-fast and accurate pose detection model.
We're demonstrating here using reComputer J402

You can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Webcam connected (one or the other)
* to the reComputer
* the computer you're using (remotely connected to the reComputer)
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
### Method 1
##### If you're running inside your reComputer
1. Type the following command in a terminal
```bash
reComputer run MoveNetJS
```
2. Open a web browser and go to [http://localhost:5000](http://localhost:5000)
3. Give permission to access webcam and wait a few seconds:
1. First will appear the webcam feed
2. Next will appear the lines estimating the pose
4. Start dancing
### Method 2
##### If you want to connect remotely with ssh to the reComputer
1. Connect using SSH but redirecting the 5000 port
```bash
ssh -L 5000:localhost:5000 <username>@<reComputer-IP>
```
2. Type the following command in a terminal
```bash
reComputer run MoveNetJS
```
2. Open a web browser (on your machine) and go to [http://localhost:5000](http://localhost:5000)
3. Give permission to access webcam and wait a few seconds:
1. First will appear the webcam feed
2. Next will appear the lines estimating the pose
4. Start dancing
**note** Firefox may fail showing webcam feed or pose estimation
================================================
FILE: reComputer/scripts/MoveNetJS/run.sh
================================================
#!/bin/bash
# Run the MoveNetJS browser demo; the UI is served on http://localhost:5000
# pull docker image (fixed: this was "docker push", which uploads the image
# instead of downloading it and fails without registry credentials)
docker pull feiticeir0/movenetjs:latest
docker run \
    --rm \
    -p 5000:5000 \
    feiticeir0/movenetjs:latest
================================================
FILE: reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/init.sh
================================================
#!/bin/bash
# Prepare the Sheared-LLaMA demo environment: validate the runtime against
# config.yaml, then clone and install dusty-nv/jetson-containers.
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab is not installed. start init..."
    # Guard the cd calls: a failed cd must not let the clone/install run
    # in the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT"
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh
================================================
#!/bin/bash
# Chat with princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT using the local_llm
# container from dusty-nv/jetson-containers (MLC inference backend).
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# run.sh and autotag are relative paths inside the jetson-containers
# checkout created by init.sh, so the cd must happen first.
cd $JETSON_REPO_PATH
./run.sh $(./autotag local_llm) \
python3 -m local_llm.chat --api=mlc \
--model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT
================================================
FILE: reComputer/scripts/audiocraft/README.md
================================================
# AudioCraft Deployment on Jetson in One Line
## Hello
💡 In this demo, we refer to jetson-container to deploy audiocraft on Jetson devices. And generate music using a reference example.
🔥 Hightlights:
- **Audiocraft** is a tool designed for creating and manipulating audio content. 🎶
- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨
- **Jetson** is powerful AI hardware platform for edge computing.💻
Get your Jetson device ready and customize sounds with me.🚀
## Getting Started
- install **jetson-examples** by pip:
```sh
pip3 install jetson-examples
```
- restart reComputer
```sh
sudo reboot
```
- run audiocraft on jetson in one line:
```sh
reComputer run audiocraft
```
## Reference
- https://github.com/dusty-nv/jetson-containers/tree/master/packages/audio/audiocraft
- https://github.com/facebookresearch/audiocraft
================================================
FILE: reComputer/scripts/audiocraft/clean.sh
================================================
#!/bin/bash
# TODO: clean old container
# Remove the audiocraft image; the tag is resolved by the autotag helper
# inside the jetson-containers checkout created by init.sh.
docker rmi $(/home/$USER/reComputer/jetson-containers/autotag audiocraft)
================================================
FILE: reComputer/scripts/audiocraft/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/audiocraft/init.sh
================================================
#!/bin/bash
# Prepare the audiocraft demo environment: validate the runtime against
# config.yaml, then clone and install dusty-nv/jetson-containers.
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab is not installed. start init..."
    # Guard the cd calls: a failed cd must not let the clone/install run
    # in the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT"
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/audiocraft/run.sh
================================================
#!/bin/bash
# Start the audiocraft container via the jetson-containers tooling
# installed by init.sh.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Guard the cd: if the checkout is missing, do not run from the wrong dir.
cd "$JETSON_REPO_PATH" || exit 1
jetson-containers run $(autotag audiocraft)
================================================
FILE: reComputer/scripts/check.sh
================================================
# Sanity-check required tooling: verify docker, python3 and python are
# installed (each prints its version), then report this script's directory.
script_dir=$(dirname "$0")
docker --version && \
python3 -V && \
python -V && \
echo "now we can use more shell in $script_dir"
================================================
FILE: reComputer/scripts/clean.sh
================================================
#!/bin/bash
# Dispatch to an example's clean.sh after confirming we run on a Jetson.

# Abort unless /proc/device-tree/model identifies a Jetson-family board.
check_is_jetson_or_not() {
    model_file="/proc/device-tree/model"
    if [ -f "/proc/device-tree/model" ]; then
        model=$(tr -d '\0' < /proc/device-tree/model | tr '[:upper:]' '[:lower:]')
        if [[ $model =~ jetson|orin|nv|agx ]]; then
            echo "INFO: machine[$model] confirmed..."
        else
            echo "WARNING: machine[$model] maybe not support..."
            exit 1
        fi
    else
        echo "ERROR: machine[$model] not support this..."
        exit 1
    fi
}

check_is_jetson_or_not

echo "clean example:$1"
BASE_PATH=/home/$USER/reComputer
# TODO: add a second confirmation prompt before cleaning
echo "----clean example start----"
# (fixed: a stray `cd $JETSON_REPO_PATH` used an undefined variable here,
# silently cd'ing to $HOME and breaking the relative $0 lookup below when
# the script is invoked by a relative path)
script_dir=$(dirname "$0")
start_script=$script_dir/$1/clean.sh
if [ -f "$start_script" ]; then
    bash "$start_script"
else
    echo "ERROR: Example[$1]/clean.sh Not Found."
fi
echo "----clean example done----"
================================================
FILE: reComputer/scripts/comfyui/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/comfyui/README.md
================================================
# Jetson-Example: Run ComfyUI (Stable Diffusion GUI) on NVIDIA Jetson Orin 🚀
## One-Click Quick Deployment of Plug-and-Play Stable Diffusion GUI
## **Introduction** 📘
[ComfyUI](https://github.com/comfyanonymous/ComfyUI) will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface.
In this project, you can quickly deploy ComfyUI on Nvidia Jetson Orin devices with one click.
## **Key Features**:
- **One-click installation and configuration support for Nvidia Jetson Orin devices.**
- **GPU acceleration to optimize the performance of stable diffusion pipelines.**
- Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/), [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/), [SD3](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) and [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
- [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
- Asynchronous Queue system
- Many optimizations: Only re-executes the parts of the workflow that changes between executions.
- Smart memory management: can automatically run models on GPUs with as low as 1GB vram.
For other features, please refer to the original project [ComfyUI](https://github.com/comfyanonymous/ComfyUI).
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
### Get a Jetson Orin Device 🛒
| Device Model | Description | Link |
|--------------|-------------|------|
| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |
| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
## **Quickstart** ⚡
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### **Installation via PyPI (Recommended)** 🐍
1. Install the package:
```sh
pip install jetson-examples
```
2. Restart your reComputer:
```sh
sudo reboot
```
3. Run ComfyUI with one command:
```sh
reComputer run comfyui
```
- **Input Dir**: Mount the input directory in Docker to the host directory `~/ComfyUI/input`.
- **Output Dir**: Mount the output directory in Docker to the host directory `~/ComfyUI/output`.
- **Models Dir**: Mount the models directory in Docker to the host directory `~/ComfyUI/models`.
## **For more tutorials** 🔧
- [ComfyUI Basic Tutorial VN](https://comfyanonymous.github.io/ComfyUI_tutorial_vn/)
- [ComfyUI](https://github.com/comfyanonymous/ComfyUI)
- [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/)
- [Comfy Org](https://www.comfy.org/)
## **Shortcuts**
| Keybind | Explanation |
|------------------------------------|--------------------------------------------------------------------------------------------------------------------|
| Ctrl + Enter | Queue up current graph for generation |
| Ctrl + Shift + Enter | Queue up current graph as first for generation |
| Ctrl + Z/Ctrl + Y | Undo/Redo |
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
| Alt + C | Collapse/uncollapse selected nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Ctrl + B | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) |
| Delete/Backspace | Delete selected nodes |
| Ctrl + Backspace | Delete the current graph |
| Space | Move the canvas around when held and moving the cursor |
| Ctrl/Shift + Click | Add clicked node to selection |
| Ctrl + C/Ctrl + V | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) |
| Ctrl + C/Ctrl + Shift + V | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |
| Shift + Drag | Move multiple selected nodes at the same time |
| Ctrl + D | Load default graph |
| Alt + `+` | Canvas Zoom in |
| Alt + `-` | Canvas Zoom out |
| Ctrl + Shift + LMB + Vertical drag | Canvas Zoom in/out |
| Q | Toggle visibility of the queue |
| H | Toggle visibility of history |
| R | Refresh graph |
| Double-Click LMB | Open node quick search palette |
## License
This project is licensed under the GNU General Public License v3.0
================================================
FILE: reComputer/scripts/comfyui/clean.sh
================================================
#!/bin/bash
# Remove the ComfyUI container, its image, and the host-side ComfyUI
# checkout created by run.sh.
CONTAINER_NAME="comfyui"
IMAGE_NAME="yaohui1998/comfyui"
sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
sudo docker rmi $IMAGE_NAME
sudo rm -r /home/$USER/reComputer/ComfyUI
================================================
FILE: reComputer/scripts/comfyui/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 30 # in GB
REQUIRED_MEM_SPACE: 15
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/comfyui/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# create folder.
# Working directory used by run.sh for the ComfyUI checkout and mounts.
BASE_PATH=/home/$USER/reComputer
mkdir -p $BASE_PATH/
================================================
FILE: reComputer/scripts/comfyui/run.sh
================================================
#!/bin/bash
# Run the ComfyUI container, reusing a running container when present.
CONTAINER_NAME="comfyui"
IMAGE_NAME="yaohui1998/comfyui"

# Pull the latest image
docker pull $IMAGE_NAME

cd /home/$USER/reComputer/ || exit 1
# Clone ComfyUI only on first run (fixed: the unconditional clone printed
# an error on every rerun once the directory already existed).
if [ ! -d ComfyUI ]; then
    git clone https://github.com/comfyanonymous/ComfyUI.git
fi

# Check if the container with the specified name already exists
# NOTE(review): the run branch uses --rm, so a stopped container does not
# linger; this branch mainly catches a still-running instance.
if [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    docker start $CONTAINER_NAME
    docker exec -it $CONTAINER_NAME /bin/bash
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    # NOTE(review): `-v /dev/*:/dev/*` relies on the glob not expanding
    # (no file matches a name containing ':'), so Docker receives the
    # literal pattern — confirm this mounts what was intended.
    docker run -it --rm \
        --name $CONTAINER_NAME \
        --privileged \
        --network host \
        -v /home/$USER/reComputer/ComfyUI:/usr/src/ComfyUI-Seeed \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev/*:/dev/* \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        $IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/deep-live-cam/Dockerfile
================================================
# Deep-Live-Cam runtime image: extends the prebuilt base image and starts
# the app with the CUDA execution provider for GPU inference.
FROM yaohui1998/deep-live-cam:0.1
WORKDIR /usr/src/Deep-Live-Cam
CMD ["python3", "run.py", "--execution-provider", "cuda"]
================================================
FILE: reComputer/scripts/deep-live-cam/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/deep-live-cam/README.md
================================================
# Jetson-Example: Run Deep Live Cam on Seeed Studio NVIDIA AGX Orin Developer Kit 🚀
This project provides a one-click deployment of the Deep Live Cam AI face-swapping project on the [Seeed Studio Jetson AGX Orin Developer Kit](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html), retaining all the features of the [original project](https://github.com/hacksider/Deep-Live-Cam) and supporting functionalities such as image-to-image, image-to-video, and image-to-webcam.
All models and inference engine implemented in this project are from the official [Deep-Live-Cam](https://github.com/hacksider/Deep-Live-Cam).
## Get a Jetson Orin Device 🛒
| Device Model | Link |
|--------------|------|
| Jetson AGX Orin Dev Kit 32G | [Buy Here](https://www.seeedstudio.com/NVIDIA-Jetson-AGX-Orin-Developer-Kit-p-5314.html) |
| Jetson AGX Orin Dev Kit 64G | [Buy Here](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html) |
## New Features 🔥
### Resizable Preview Window
Dynamically improve the performance by using the --resizable parameter

### Face Mapping
Track faces and change it on the fly

source video

Tick this switch

Map the faces

And see the magic!
> The images in the "New Features" section are sourced from the [github community](https://github.com/hacksider/Deep-Live-Cam).
## 🥳Getting Started
### 📜Prerequisites
- AGX Orin Developer Kit [(🛒Buy Here)](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html)
- Jetpack 6.0
- USB Camera (optional)
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### 🚀Installation
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
### 📋Usage
1. Run code:
```sh
reComputer run deep-live-cam
```
2. An `image` folder will be created in the user's home directory, where templates and the face images or videos that need to be swapped can be placed.
3. Click `Select a face` to choose an image of a face.
4. Click the `Select a target` button to choose a target face image.
5. Click `Preview` to display the transformed result, and click `Start` to save the result to the specified directory without displaying it.
6. Click `Preview` to display the transformed result, and click `Start` to save the result to the specified directory without displaying it.
7. You can choose the `Face enhancer` to enhance facial details and features.
8. Click `Live` to open the webcam for real-time conversion. Please connect a USB camera before starting the program.
> ⚠️ **Note**: The first time you convert an image, it may take approximately two minutes.
## 🙏🏻Thanks
[Deep-Live-Cam](https://github.com/hacksider/Deep-Live-Cam)
## 💨Contributing
We welcome contributions from the community. Please fork the repository and create a pull request with your changes.
## 🙅Disclaimer
This software is meant to be a productive contribution to the rapidly growing AI-generated media industry. It will help artists with tasks such as animating a custom character or using the character as a model for clothing etc.
The developers of this software are aware of its possible unethical applications and are committed to take preventative measures against them. It has a built-in check which prevents the program from working on inappropriate media including but not limited to nudity, graphic content, sensitive material such as war footage etc. We will continue to develop this project in the positive direction while adhering to law and ethics. This project may be shut down or include watermarks on the output if requested by law.
Users of this software are expected to use this software responsibly while abiding by local laws. If the face of a real person is being used, users are required to get consent from the concerned person and clearly mention that it is a deepfake when posting content online. Developers of this software will not be responsible for actions of end-users.
## ✅License
This project is licensed under the AGPL-3.0 License.
================================================
FILE: reComputer/scripts/deep-live-cam/clean.sh
================================================
#!/bin/bash
# Remove the Deep-Live-Cam container, its image, and the host-side
# ~/images folder created by run.sh.
CONTAINER_NAME="deep-live-cam"
IMAGE_NAME="yaohui1998/deep-live-cam:1.0"
sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
# fixed: "$IMAGE_NAMEs" referenced an undefined variable, so the image
# was never actually removed
sudo docker rmi $IMAGE_NAME
sudo rm -r ~/images
================================================
FILE: reComputer/scripts/deep-live-cam/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 36.3.0
REQUIRED_DISK_SPACE: 40 # in GB
REQUIRED_MEM_SPACE: 20
PACKAGES:
- nvidia-jetpack
- x11-xserver-utils
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/deep-live-cam/init.sh
================================================
#!/bin/bash
# check the runtime environment.
# utils.sh provides check_base_env, which validates the host against the
# requirements declared in config.yaml before the demo is run.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/deep-live-cam/run.sh
================================================
#!/bin/bash
# Run the Deep-Live-Cam container with X11 access and a shared ~/images dir.
CONTAINER_NAME="deep-live-cam"
IMAGE_NAME="yaohui1998/deep-live-cam:1.0"

# Pull the latest image
docker pull $IMAGE_NAME

# Allow local containers to use the X server and target display :0.
xhost +local:docker
export DISPLAY=:0

# Shared folder for source faces / swap targets (fixed: a plain mkdir
# errored on every rerun once the directory already existed).
mkdir -p ~/images
echo $DISPLAY

# Check if the container with the specified name already exists
# NOTE(review): the run branch uses --rm, so a stopped container does not
# linger; this branch mainly catches a still-running instance.
if [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    docker start $CONTAINER_NAME
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    docker run -it --rm \
        --name $CONTAINER_NAME \
        --privileged \
        --network host \
        -v ~/images:/usr/src/Deep-Live-Cam/images \
        -e DISPLAY=$DISPLAY \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev/*:/dev/* \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        $IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/depth-anything/Dockerfile
================================================
# Build the Depth Anything WebUI demo on top of the L4T PyTorch base image.
FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3
RUN mkdir /usr/src/DepthAnything-on-Jetson-Orin
WORKDIR /usr/src/DepthAnything-on-Jetson-Orin
COPY . /usr/src/DepthAnything-on-Jetson-Orin
# Fix: "flask" was listed twice in the original install line.
RUN pip install flask onnx flask_socketio huggingface_hub
CMD ["python3", "app.py"]
================================================
FILE: reComputer/scripts/depth-anything/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/depth-anything/README.md
================================================
# Jetson-Example: Run Depth Anything on NVIDIA Jetson Orin 🚀
This project provides a one-click deployment of the Depth Anything monocular depth estimation model developed by Hong Kong University and ByteDance. The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.
All models and inference engine implemented in this project are from the official [Depth Anything](https://depth-anything.github.io/).
## 🔥Features
- One-click deployment for Depth Anything models.
- WebUI for model conversion and depth estimation.
- Support for uploading videos/images or using the local camera
- Supports S, B, L models of Depth Anything with input sizes of 308, 384, 406, and 518.
### 🗝️WebUI Features
- **Choose model**: Select from depth_anything_vits14 models. (S, B, L)
- **Choose input size**: Select the desired input size.(308, 384, 406, 518)
- **Grayscale option**: Option to use grayscale.
- **Choose source**: Select the input source (Video, Image, Camera).
- **Export Model**: Automatically download and convert the model from PyTorch (.pth) to TensorRT format.
- **Start Estimation**: Begin depth estimation using the selected model and input source.
- **Stop Estimation**: Stop the ongoing depth estimation process.
## 🥳Getting Started
### 📜Prerequisites
- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
- Docker installed on reComputer
- USB Camera (optional)
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### 🚀Installation
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
### 📋Usage
1. Run code:
```sh
reComputer run depth-anything
```
2. Open a web browser and input **http://{reComputer ip}:5000**. Use the WebUI to select the model, input size, and source.
3. Click on **Export Model** to download and convert the model.
4. Click on **Start Estimation** to begin the depth estimation process.
5. View the real-time depth estimation results on the WebUI.
## ⛏️Applications
- **Security**: Enhance surveillance systems with depth perception.
- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.
- **Underwater Scenes**: Apply depth estimation in underwater exploration.
- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.
## Further Development 🔧
- [Depth Anything Official](https://depth-anything.github.io/)
- [Depth Anything TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)
- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)
- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)
- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)
## 🙏🏻Contributing
We welcome contributions from the community. Please fork the repository and create a pull request with your changes.
## ✅License
This project is licensed under the MIT License.
## 🏷️Acknowledgements
- Depth Anything [project](https://depth-anything.github.io/) by Hong Kong University and ByteDance.
- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).
================================================
FILE: reComputer/scripts/depth-anything/clean.sh
================================================
#!/bin/bash
# Remove the depth-anything container and its image.
CONTAINER_NAME="depth-anything"
IMAGE_NAME="yaohui1998/depthanything-on-jetson-orin:latest"
sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
# Fix: was "$IMAGE_NAMEs" (undefined variable), so the image was never removed.
sudo docker rmi $IMAGE_NAME
================================================
FILE: reComputer/scripts/depth-anything/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 20 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/depth-anything/init.sh
================================================
#!/bin/bash
# Check the host environment against this demo's config.yaml
# (tested L4T versions, disk/memory requirements, required packages).
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
source "$SCRIPT_DIR/../utils.sh"
check_base_env "$SCRIPT_DIR/config.yaml"
================================================
FILE: reComputer/scripts/depth-anything/run.sh
================================================
CONTAINER_NAME="depth-anything"
IMAGE_NAME="yaohui1998/depthanything-on-jetson-orin:latest"
# Pull the latest image
docker pull $IMAGE_NAME
# Check if the container with the specified name already exists
if [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then
echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
docker start $CONTAINER_NAME
else
echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
# Fix: mount /dev as a whole. The previous "-v /dev/*:/dev/*" relied on an
# unquoted glob that matches no path, so docker bind-mounted a literal
# "/dev/*" directory instead of the real device nodes (USB camera, etc.).
docker run -it \
--name $CONTAINER_NAME \
--privileged \
--network host \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
--runtime nvidia \
$IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/depth-anything-v2/Dockerfile
================================================
# Build the Depth Anything V2 WebUI demo on top of the L4T PyTorch base image.
FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3
RUN mkdir /usr/src/DepthAnything-on-Jetson-Orin
WORKDIR /usr/src/DepthAnything-on-Jetson-Orin
COPY . /usr/src/DepthAnything-on-Jetson-Orin
# Fix: "flask" was listed twice in the original install line.
RUN pip install flask onnx flask_socketio huggingface_hub
CMD ["python3", "app.py"]
================================================
FILE: reComputer/scripts/depth-anything-v2/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/depth-anything-v2/README.md
================================================
# Jetson-Example: Run Depth Anything V2 on NVIDIA Jetson Orin 🚀
This project provides a one-click deployment of the Depth Anything V2 monocular depth estimation model developed by Hong Kong University and ByteDance. The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.
All models and inference engine implemented in this project are from the official [Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2).
## 🔥Features
- One-click deployment for Depth Anything V2 models.
- WebUI for model conversion and depth estimation.
- Support for uploading videos/images or using the local camera
- Supports S, B, L models of Depth Anything V2 with input sizes 518.
### 🗝️WebUI Features
- **Choose model**: Select from Depth Anything V2 models. (S, B, L)
- **Grayscale option**: Option to use grayscale.
- **Choose source**: Select the input source (Video, Image, Camera).
- **Export Model**: Automatically download and convert the model from ONNX to TensorRT format.
- **Start Estimation**: Begin depth estimation using the selected model and input source.
- **Stop Estimation**: Stop the ongoing depth estimation process.
## 🥳Getting Started
### 📜Prerequisites
- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
- Docker installed on reComputer
- USB Camera (optional)
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### 🚀Installation
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
### 📋Usage
1. Run code:
```sh
reComputer run depth-anything-v2
```
2. Open a web browser and input **http://{reComputer ip}:5000**. Use the WebUI to select the model and source.
3. Click on **Export Model** to download and convert the model.
4. Click on **Start Estimation** to begin the depth estimation process.
5. View the real-time depth estimation results on the WebUI.
## ⛏️Applications
- **Security**: Enhance surveillance systems with depth perception.
- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.
- **Underwater Scenes**: Apply depth estimation in underwater exploration.
- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.
## Further Development 🔧
- [Depth Anything V2 Official](https://github.com/DepthAnything/Depth-Anything-V2)
- [Depth Anything V2 TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)
- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)
- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)
- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)
## 🙏🏻Contributing
We welcome contributions from the community. Please fork the repository and create a pull request with your changes.
## ✅License
This project is licensed under the MIT License.
## 🏷️Acknowledgements
- Depth Anything V2 Official [project](https://github.com/DepthAnything/Depth-Anything-V2) by Hong Kong University and ByteDance.
- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).
================================================
FILE: reComputer/scripts/depth-anything-v2/clean.sh
================================================
#!/bin/bash
# Remove the depth-anything-v2 container and its image.
CONTAINER_NAME="depth-anything-v2"
IMAGE_NAME="yaohui1998/depthanything-v2-on-jetson-orin:latest"
sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
# Fix: was "$IMAGE_NAMEs" (undefined variable), so the image was never removed.
sudo docker rmi $IMAGE_NAME
================================================
FILE: reComputer/scripts/depth-anything-v2/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/depth-anything-v2/init.sh
================================================
#!/bin/bash
# Run the shared environment checks for this demo using its config.yaml.
HERE="$(dirname "$(realpath "$0")")"
source "$HERE/../utils.sh"
check_base_env "$HERE/config.yaml"
================================================
FILE: reComputer/scripts/depth-anything-v2/run.sh
================================================
CONTAINER_NAME="depth-anything-v2"
IMAGE_NAME="yaohui1998/depthanything-v2-on-jetson-orin:latest"
# Pull the latest image
docker pull $IMAGE_NAME
# Check if the container with the specified name already exists
if [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then
echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
docker start $CONTAINER_NAME
else
echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
# Fix: mount /dev as a whole. The previous "-v /dev/*:/dev/*" relied on an
# unquoted glob that matches no path, so docker bind-mounted a literal
# "/dev/*" directory instead of the real device nodes (USB camera, etc.).
docker run -it \
--name $CONTAINER_NAME \
--privileged \
--network host \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
--runtime nvidia \
$IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/depth-anything-v3/Dockerfile
================================================
# This demo uses a prebuilt Docker image from Docker Hub.
# NOTE(review): run.sh and the README pull "chenduola6/depth-anything-v3:jp6.2"
# (hyphens) while this FROM line uses underscores — confirm which repository
# name actually exists on Docker Hub and make the three references agree.
FROM chenduola6/depth_anything_v3:jp6.2
================================================
FILE: reComputer/scripts/depth-anything-v3/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/depth-anything-v3/README.md
================================================
# Jetson-Example: Run Depth Anything V3 on NVIDIA Jetson
This project provides one-click deployment for **Depth Anything V3** on NVIDIA Jetson devices.
It uses the prebuilt Docker image:
```sh
chenduola6/depth-anything-v3:jp6.2
```
Image size: **7.6 GB**
Supported JetPack/L4T versions:
- JetPack 6.1 -> L4T 36.4.0
- JetPack 6.2 -> L4T 36.4.3
- JetPack 6.2.1 -> L4T 36.4.4
## Getting Started
### Prerequisites
- NVIDIA Jetson device with a supported L4T version
- Docker installed and available
- USB camera (for camera inference)
### Installation
PyPI (recommended):
```sh
pip install jetson-examples
```
GitHub (developer):
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Usage
1. Start the demo container with `reComputer`:
```sh
reComputer run depth-anything-v3
```
2. Enter the running container:
```bash
xhost +local:docker
docker run -it --rm \
--gpus all \
--network host \
--ipc host \
--privileged \
-e DISPLAY=$DISPLAY \
-e QT_X11_NO_MITSHM=1 \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev:/dev \
chenduola6/depth-anything-v3
```
3. Run USB camera inference inside the container:
```sh
cd workspace/ros2-depth-anything-v3-trt
#build the engine file
source install/setup.bash
ros2 run depth_anything_v3 generate_engines onnx
```
> **Note**: If the Jetson swap space is insufficient, it may cause the engine export process to fail.
>
> ```bash
> #add swap space
> sudo mkdir -p /mnt/nvme
> sudo fallocate -l 16G /mnt/nvme/swapfile
> sudo chmod 600 /mnt/nvme/swapfile
> sudo mkswap /mnt/nvme/swapfile
> sudo swapon /mnt/nvme/swapfile
> ```
```bash
#Run a USB camera demo
USB_SIMPLE=1 ./run_camera_depth.sh
```
## Cleanup
Only remove the container (keep image cache):
```sh
reComputer clean depth-anything-v3
```
## References
- [Depth Anything v3 project](https://github.com/ByteDance-Seed/Depth-Anything-3)
- [ros2-depth-anything-v3-trt](https://github.com/ika-rwth-aachen/ros2-depth-anything-v3-trt)
- [Seeed jetson-examples](https://github.com/Seeed-Projects/jetson-examples)
================================================
FILE: reComputer/scripts/depth-anything-v3/clean.sh
================================================
#!/bin/bash
# Fix: run.sh creates the container as "depth-anything-v3" (hyphens); the old
# value "depth_anything_v3" (underscores) never matched, so this cleanup
# script was a no-op.
CONTAINER_NAME="depth-anything-v3"
# Prefer plain docker, fallback to sudo docker when user has no docker group permission
if docker info >/dev/null 2>&1; then
DOCKER_CMD=(docker)
else
DOCKER_CMD=(sudo docker)
fi
# Stop the container only if it is currently running.
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" stop $CONTAINER_NAME
fi
# Remove the container if it exists (running or stopped); the image is kept.
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" rm $CONTAINER_NAME
echo "Container $CONTAINER_NAME removed."
else
echo "Container $CONTAINER_NAME does not exist."
fi
================================================
FILE: reComputer/scripts/depth-anything-v3/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 12 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
- x11-xserver-utils
DOCKER:
ENABLE: false
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/depth-anything-v3/init.sh
================================================
#!/bin/bash
# Sanity-check the host (L4T version, disk, memory, packages) via the
# shared helper before running this demo.
BASE_DIR="$(dirname "$(realpath "$0")")"
source "$BASE_DIR/../utils.sh"
check_base_env "$BASE_DIR/config.yaml"
================================================
FILE: reComputer/scripts/depth-anything-v3/run.sh
================================================
#!/bin/bash
# Launch the Depth Anything V3 demo container with GUI (X11) and camera access.
CONTAINER_NAME="depth-anything-v3"
IMAGE_NAME="chenduola6/depth-anything-v3:jp6.2"
# Prefer plain docker, fallback to sudo docker when user has no docker group permission
if docker info >/dev/null 2>&1; then
DOCKER_CMD=(docker)
else
echo "Current user has no docker permission."
echo "Please enter sudo password once for this run."
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
# Keep sudo timestamp alive during long pulls/runs to avoid repeated prompts.
# The background loop refreshes the cached credentials every 60s and exits on
# its own once the parent script (PID $$) is gone.
while true; do
sudo -n true
sleep 60
kill -0 "$$" || exit
done 2>/dev/null &
SUDO_KEEPALIVE_PID=$!
# Ensure the keepalive loop is cleaned up however the script exits.
trap 'kill $SUDO_KEEPALIVE_PID >/dev/null 2>&1 || true' EXIT
DOCKER_CMD=(sudo docker)
fi
# Pull the latest image
"${DOCKER_CMD[@]}" pull $IMAGE_NAME
# Enable local X11 access for docker GUI apps
xhost +local:docker
# Use default display when DISPLAY is not set
if [ -z "$DISPLAY" ]; then
export DISPLAY=:0
fi
# Check if the container with the specified name already exists
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
echo "Container $CONTAINER_NAME already exists. Starting..."
"${DOCKER_CMD[@]}" start $CONTAINER_NAME
else
echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
# --privileged plus the /dev bind mount give the container direct access to
# USB cameras and other device nodes.
"${DOCKER_CMD[@]}" run -it \
--name $CONTAINER_NAME \
--gpus all \
--network host \
--ipc host \
--privileged \
-e DISPLAY=$DISPLAY \
-e QT_X11_NO_MITSHM=1 \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
$IMAGE_NAME
fi
echo "To run USB camera inference inside container:"
echo "1) ${DOCKER_CMD[*]} exec -it $CONTAINER_NAME /bin/bash"
echo "2) cd workspace/ros2-depth-anything-v3-trt"
echo "3) USB_SIMPLE=1 ./run_camera_depth.sh"
================================================
FILE: reComputer/scripts/gpt-oss/Dockerfile
================================================
# Prebuilt llama.cpp + GPT-OSS 20B image from Docker Hub. The "got-oss"
# spelling matches the repository name used by run.sh and the README,
# so it appears to be the actual published name rather than a typo here.
FROM chenduola6/got-oss-20b:jp6
================================================
FILE: reComputer/scripts/gpt-oss/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/gpt-oss/README.md
================================================
# Jetson-Example: Run GPT-OSS 20B on NVIDIA Jetson
This project provides one-click deployment for **GPT-OSS 20B** on NVIDIA Jetson devices.
It uses the prebuilt Docker image:
```sh
chenduola6/got-oss-20b:jp6
```
Docker image size: **31.28 GB**
## Hardware Requirements
- NVIDIA Jetson device with at least **16GB VRAM**
- At least **50GB** available disk space
Supported JetPack/L4T versions:
- JetPack 6.1 -> L4T 36.4.0
- JetPack 6.2 -> L4T 36.4.3
- JetPack 6.2.1 -> L4T 36.4.4
## Getting Started
### Installation
PyPI (recommended):
```sh
pip install jetson-examples
```
GitHub (developer):
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Usage
### One-line deployment
```sh
reComputer run gpt-oss
```
This command pulls the image and starts `llama-server` in a detached container.
The script waits for `/v1/models` to become ready before exiting.
> **Note**: The script auto-detects the available GPU run mode on your Jetson (`--runtime nvidia` or `--gpus all`).
>
> **Note**: If prompted by the script, allow adding your user to the `docker` group so future runs do not require `sudo docker`. After adding the group, log out and log back in once.
>
> **Note**: If `curl /v1/models` returns `503 {"message":"Loading model"}`, the model is still loading. First startup can take several minutes.
>
> **Note**: If startup fails because of memory pressure, add swap space and try again:
>
> ```sh
> sudo fallocate -l 16G /swapfile
> sudo chmod 600 /swapfile
> sudo mkswap /swapfile
> sudo swapon /swapfile
> ```
You can lower memory usage when launching:
```sh
LLAMA_CTX=512 LLAMA_NGL=16 reComputer run gpt-oss
```
### Verify service
```sh
curl http://127.0.0.1:8080/v1/models
```
### Check logs
```sh
docker logs -f gpt-oss
```
## Manual Deployment (inside Docker)
```sh
docker pull chenduola6/got-oss-20b:jp6
docker run -it --rm \
--runtime nvidia \
--network host \
--ipc=host \
chenduola6/got-oss-20b:jp6
# inside the container
cd /root/gpt-oss/llama.cpp
./build/bin/llama-server \
-m /root/gpt-oss/gguf/gpt-oss-20b-Q4_K.gguf \
-ngl 20 -c 1024 \
--host 0.0.0.0 --port 8080
```
## Cleanup
Only remove the container (keep image cache):
```sh
reComputer clean gpt-oss
```
## References
- [llama.cpp](https://github.com/ggml-org/llama.cpp)
- [Seeed jetson-examples](https://github.com/Seeed-Projects/jetson-examples)
- [Setup step by step](https://wiki.seeedstudio.com/deploy_gptoss_on_jetson/)
================================================
FILE: reComputer/scripts/gpt-oss/clean.sh
================================================
#!/bin/bash
# Stop and remove the gpt-oss llama-server container while keeping the
# (large) image cached locally for a faster next startup.
CONTAINER_NAME="gpt-oss"
# Verify the docker CLI exists and is usable by the current user; if the user
# lacks permission, offer to add them to the docker group (requires re-login).
# Always exits the script unless docker is immediately usable.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
if docker info >/dev/null 2>&1; then
return 0
fi
# Already in the docker group but the daemon is unreachable: a daemon
# problem, not a permissions problem.
if id -nG "$USER" | grep -qw docker; then
echo "Current user is already in docker group, but docker is still unavailable."
echo "Please make sure Docker daemon is running, for example:"
echo "sudo systemctl enable --now docker"
exit 1
fi
echo "Current user has no docker permission."
read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
case "$choice" in
y|Y)
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
if ! getent group docker >/dev/null 2>&1; then
sudo groupadd docker
fi
sudo usermod -aG docker "$USER"
echo "Added $USER to docker group."
# Group membership only takes effect in a new login session.
echo "Please log out and log back in (or reboot), then rerun:"
echo "reComputer clean gpt-oss"
exit 1
;;
*)
echo "Skipped docker group setup."
echo "You can run this manually:"
echo "sudo usermod -aG docker $USER"
exit 1
;;
esac
}
ensure_docker_access
DOCKER_CMD=(docker)
# Stop the container only if it is currently running.
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" stop "$CONTAINER_NAME"
fi
# Remove the container if it exists (running or stopped).
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" rm "$CONTAINER_NAME"
echo "Container $CONTAINER_NAME removed."
else
echo "Container $CONTAINER_NAME does not exist."
fi
echo "Image is kept locally for faster next startup."
================================================
FILE: reComputer/scripts/gpt-oss/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 50 # in GB
REQUIRED_MEM_SPACE: 14
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: false
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/gpt-oss/init.sh
================================================
#!/bin/bash
# Pre-flight check: validate L4T version, disk, memory and packages
# declared in this demo's config.yaml via the shared utils helper.
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
source "$SCRIPT_DIR/../utils.sh"
check_base_env "$SCRIPT_DIR/config.yaml"
================================================
FILE: reComputer/scripts/gpt-oss/run.sh
================================================
#!/bin/bash
# Deploy GPT-OSS 20B via llama-server in a detached Docker container.
CONTAINER_NAME="gpt-oss"
IMAGE_NAME="chenduola6/got-oss-20b:jp6"
# Path of the GGUF model baked into the container image.
MODEL_PATH="/root/gpt-oss/gguf/gpt-oss-20b-Q4_K.gguf"
HOST="0.0.0.0"
# All tunables are overridable via LLAMA_* environment variables.
PORT="${LLAMA_PORT:-8080}"
NGL="${LLAMA_NGL:-20}"
CTX="${LLAMA_CTX:-1024}"
STARTUP_TIMEOUT="${LLAMA_STARTUP_TIMEOUT:-600}"
# Command executed inside the container to start the API server.
SERVER_CMD="cd /root/gpt-oss/llama.cpp && ./build/bin/llama-server -m ${MODEL_PATH} -ngl ${NGL} -c ${CTX} --host ${HOST} --port ${PORT}"
# Filled in later by probe_gpu_mode (--runtime nvidia or --gpus all).
GPU_FLAGS=()
# Verify the docker CLI exists and is usable by the current user; if the user
# lacks permission, offer to add them to the docker group (requires re-login).
# Returns 0 only when docker is immediately usable; otherwise exits the script.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
if docker info >/dev/null 2>&1; then
return 0
fi
# Already in the docker group but the daemon is unreachable: a daemon
# problem, not a permissions problem.
if id -nG "$USER" | grep -qw docker; then
echo "Current user is already in docker group, but docker is still unavailable."
echo "Please make sure Docker daemon is running, for example:"
echo "sudo systemctl enable --now docker"
exit 1
fi
echo "Current user has no docker permission."
read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
case "$choice" in
y|Y)
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
if ! getent group docker >/dev/null 2>&1; then
sudo groupadd docker
fi
sudo usermod -aG docker "$USER"
echo "Added $USER to docker group."
# Group membership only takes effect in a new login session.
echo "Please log out and log back in (or reboot), then rerun:"
echo "reComputer run gpt-oss"
exit 1
;;
*)
echo "Skipped docker group setup."
echo "You can run this manually:"
echo "sudo usermod -aG docker $USER"
exit 1
;;
esac
}
# From here on docker is guaranteed to work for the current user without sudo.
ensure_docker_access
DOCKER_CMD=(docker)
# Refresh the image from Docker Hub; fall back to a local cached copy when
# the pull fails (e.g. offline), and abort only if no cache exists either.
ensure_image() {
"${DOCKER_CMD[@]}" pull "$IMAGE_NAME" && return 0
echo "Warning: failed to pull image from Docker Hub."
if ! "${DOCKER_CMD[@]}" image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
echo "No local image cache found. Please check network and retry."
exit 1
fi
echo "Found local image cache: $IMAGE_NAME"
echo "Continue with local image."
return 0
}
create_container() {
    # Start the server container detached on the host network. GPU_FLAGS must
    # already be populated by probe_gpu_mode; --ipc=host matches the probe runs.
    "${DOCKER_CMD[@]}" run -d \
        --name "$CONTAINER_NAME" \
        "${GPU_FLAGS[@]}" \
        --network host \
        --ipc=host \
        "$IMAGE_NAME" \
        /bin/bash -lc "$SERVER_CMD"
}
probe_gpu_mode() {
    # Detect which Docker GPU flag works on this device by launching a
    # short-lived throwaway container with each candidate flag in turn.
    # Sets GPU_FLAGS on success; exits 1 when no candidate works.
    local mode
    for mode in "--runtime nvidia" "--gpus all"; do
        # word splitting of $mode into separate docker arguments is intentional
        if "${DOCKER_CMD[@]}" run --rm $mode --network host --ipc=host "$IMAGE_NAME" /bin/sh -lc "exit 0" >/dev/null 2>&1; then
            GPU_FLAGS=($mode)
            echo "Using GPU mode: $mode"
            return 0
        fi
    done
    echo "Failed to detect a working Docker GPU mode."
    echo "Tried: --runtime nvidia and --gpus all"
    echo "Please check Docker + NVIDIA Container Runtime on this device."
    exit 1
}
ensure_image
probe_gpu_mode
# Check if the container with the specified name already exists
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container $CONTAINER_NAME is already running."
elif [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    # A stopped container may have been created with different GPU flags,
    # so remove it and recreate with the freshly probed settings.
    echo "Container $CONTAINER_NAME already exists but is not running."
    echo "Recreating with current runtime settings..."
    "${DOCKER_CMD[@]}" rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true
    if ! create_container >/dev/null; then
        echo "Failed to create container."
        exit 1
    fi
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    if ! create_container >/dev/null; then
        echo "Failed to create container."
        exit 1
    fi
fi
# Confirm the container actually reached the running state before probing HTTP.
if [ -z "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container failed to reach running state."
    echo "Inspect logs with: ${DOCKER_CMD[*]} logs $CONTAINER_NAME"
    exit 1
fi
wait_for_server_ready() {
    # Poll the OpenAI-compatible /v1/models endpoint until the server returns
    # a model list, the container exits, or STARTUP_TIMEOUT seconds elapse.
    # Returns 0 when ready, 1 on failure (with diagnostics printed).
    local endpoint="http://127.0.0.1:${PORT}/v1/models"
    local elapsed=0
    local interval=5
    local raw_response=""
    local response_body=""
    local http_code="000"
    local last_code="000"
    local last_body=""
    if ! command -v curl >/dev/null 2>&1; then
        echo "curl not found, skip readiness probing."
        return 0
    fi
    echo "Waiting for GPT-OSS to be ready at ${endpoint} (timeout: ${STARTUP_TIMEOUT}s)..."
    while [ "$elapsed" -lt "$STARTUP_TIMEOUT" ]; do
        # Bail out early if the container itself stopped running.
        if [ -z "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
            echo "Container exited before model became ready."
            echo "Recent logs:"
            "${DOCKER_CMD[@]}" logs --tail 80 "$CONTAINER_NAME"
            return 1
        fi
        # curl -w appends the HTTP status code on its own line; split it from
        # the body with tail/sed below.
        raw_response="$(curl -s --max-time 3 -w "\n%{http_code}" "$endpoint" 2>/dev/null || true)"
        http_code="$(printf '%s' "$raw_response" | tail -n 1)"
        response_body="$(printf '%s' "$raw_response" | sed '$d')"
        last_code="$http_code"
        last_body="$response_body"
        # Ready when endpoint returns model list payload.
        if [ "$http_code" = "200" ] && echo "$response_body" | grep -q "\"data\""; then
            return 0
        fi
        # Typical warm-up response from llama-server while loading weights.
        if [ "$http_code" = "503" ] && echo "$response_body" | grep -q "Loading model"; then
            # Print progress roughly every 30s (interval divides 30).
            if [ $((elapsed % 30)) -eq 0 ]; then
                echo "Model is still loading... (${elapsed}s)"
            fi
            sleep "$interval"
            elapsed=$((elapsed + interval))
            continue
        fi
        if [ $((elapsed % 30)) -eq 0 ]; then
            echo "Waiting model readiness... (${elapsed}s, http=${http_code})"
        fi
        sleep "$interval"
        elapsed=$((elapsed + interval))
    done
    echo "Model is still not ready after ${STARTUP_TIMEOUT}s."
    echo "Last endpoint status: ${last_code}"
    if [ -n "$last_body" ]; then
        echo "Last endpoint response: $last_body"
    fi
    echo "Recent logs:"
    "${DOCKER_CMD[@]}" logs --tail 80 "$CONTAINER_NAME"
    echo "You can try lower memory settings:"
    echo "LLAMA_CTX=512 LLAMA_NGL=16 reComputer run gpt-oss"
    return 1
}
# Block until the server answers, then print usage hints for the user.
if ! wait_for_server_ready; then
    exit 1
fi
echo "GPT-OSS server is ready at: http://127.0.0.1:${PORT}"
echo "Check models:"
echo "curl http://127.0.0.1:${PORT}/v1/models"
echo "Follow server logs:"
echo "${DOCKER_CMD[*]} logs -f $CONTAINER_NAME"
================================================
FILE: reComputer/scripts/live-llava/init.sh
================================================
#!/bin/bash
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/live-llava/run.sh
================================================
#!/bin/bash
# Launch the live-llava demo: check the L4T version, ensure Chromium and a
# self-signed TLS key exist, patch local_llm sources, then run the container.
SUPPORT_L4T_LIST="35.3.1"   # space-separated list of supported L4T versions
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
get_l4t_version() {
    # Determine the L4T (Jetson Linux) version, preferring /etc/nv_tegra_release
    # and falling back to dpkg metadata. Sets the globals L4T_RELEASE,
    # L4T_REVISION and L4T_VERSION ("<release>.<revision>"). Exits 1 on
    # unsupported architectures.
    ARCH=$(uname -i)
    echo "ARCH: $ARCH"
    if [ $ARCH = "aarch64" ]; then
        L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
        if [ -z "$L4T_VERSION_STRING" ]; then
            echo "reading L4T version from \"dpkg-query --show nvidia-l4t-core\""
            L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
            L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
            L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
            L4T_REVISION=${L4T_VERSION_ARRAY[1]}
        else
            echo "reading L4T version from /etc/nv_tegra_release"
            L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
            L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
        fi
        # NOTE(review): substring extraction assumes a "x.y" revision layout
        # (single-digit major) -- confirm for future L4T releases.
        L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
        L4T_REVISION_MINOR=${L4T_REVISION:2:1}
        L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
        echo "L4T_VERSION: $L4T_VERSION"
    elif [ $ARCH != "x86_64" ]; then
        echo "unsupported architecture: $ARCH" # show in red color
        exit 1
    fi
}
# 1. Check L4T version against the supported list; abort on mismatch.
get_l4t_version
CHECK_L4T_VERSION=0
for item in $SUPPORT_L4T_LIST; do
    if [ "$item" = "$L4T_VERSION" ]; then
        CHECK_L4T_VERSION=1
        break
    fi
done
if [ $CHECK_L4T_VERSION -eq 1 ]; then
    echo "pass the version check"
else
    echo "currently supported versions of jetpack are $SUPPORT_L4T_LIST" # show in red color
    exit 1
fi
# 2. Check Google Chrome (the demo UI is opened in Chromium later on)
if dpkg -s chromium-browser &>/dev/null; then
    echo "Chrome is installed."
else
    echo "install Google Chrome ..." # show in red color
    sudo apt install chromium-browser
    echo "Google Chrome installed successfully" # show in red color
fi
# 3. Generate Google browser key (self-signed TLS cert/key for the WebRTC page)
FILE_NAME="key.pem"
FILE_PATH="$JETSON_REPO_PATH/data"
if [ -f "$FILE_PATH/$FILE_NAME" ]; then
    echo "key file '$FILE_PATH/$FILE_NAME' exists."
else
    cd $FILE_PATH
    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes -subj '/CN=localhost'
    cd ..
fi
# 4. edit source code: overwrite video_query.py with the patched version below
cat >"$JETSON_REPO_PATH/packages/llm/local_llm/agents/video_query.py" <<'EOF'
#!/usr/bin/env python3
import time
import logging
import threading
from local_llm import Agent
from local_llm.plugins import (
VideoSource,
VideoOutput,
ChatQuery,
PrintStream,
ProcessProxy,
)
from local_llm.utils import ArgParser, print_table
from termcolor import cprint
from jetson_utils import cudaFont, cudaMemcpy, cudaToNumpy, cudaDeviceSynchronize
from flask import Flask, request
class VideoQuery(Agent):
    """
    Perpetual always-on closed-loop visual agent that applies prompts to a video stream.
    """

    def __init__(self, model="liuhaotian/llava-v1.5-7b", **kwargs):
        super().__init__()
        self.lock = threading.Lock()  # guards prompt updates from the Flask thread
        # load model in another process for smooth streaming
        # self.llm = ProcessProxy((lambda **kwargs: ChatQuery(model, drop_inputs=True, **kwargs)), **kwargs)
        self.llm = ChatQuery(model, drop_inputs=True, **kwargs)
        self.llm.add(PrintStream(color="green", relay=True).add(self.on_text))
        self.llm.start()
        # test / warm-up query
        self.warmup = True
        self.text = ""
        self.eos = False
        self.llm("What is 2+2?")
        while self.warmup:
            time.sleep(0.25)
        # create video streams
        self.video_source = VideoSource(**kwargs)
        self.video_output = VideoOutput(**kwargs)
        self.video_source.add(self.on_video, threaded=False)
        self.video_output.start()
        self.font = cudaFont()
        # setup prompts
        self.prompt = "Describe the image concisely and briefly."
        # entry node
        self.pipeline = [self.video_source]

    def on_video(self, image):
        # Called for every frame: queue an inference on the frame and overlay
        # the most recently completed response text onto the output video.
        np_image = cudaToNumpy(image)
        cudaDeviceSynchronize()
        self.llm(
            [
                "reset",
                np_image,
                self.prompt,
            ]
        )
        # NOTE(review): replace("", "") is a no-op -- this looks like a lost
        # special token (e.g. an EOS marker); confirm against upstream source.
        text = self.text.replace("\n", "").replace("", "").strip()
        if text:
            # wrap the response at 10 words per overlay line
            worlds = text.split()
            line_counter = len(worlds) // 10
            if len(worlds) % 10 != 0:
                line_counter += 1
            for l in range(line_counter):
                line_text = " ".join(worlds[l * 10 : (l + 1) * 10])
                self.font.OverlayText(
                    image,
                    text=line_text,
                    x=5,
                    y=int(79 + l * 37),
                    color=self.font.White,
                    background=self.font.Gray40,
                )
        # always show the active prompt at the top of the frame
        self.font.OverlayText(
            image,
            text="Prompt: " + self.prompt,
            x=5,
            y=42,
            color=(120, 215, 21),
            background=self.font.Gray40,
        )
        self.video_output(image)

    def on_text(self, text):
        # Accumulate streamed tokens into self.text; detect end-of-sequence
        # markers to finish a response and re-arm for the next query.
        if self.eos:
            self.text = text  # new query response
            self.eos = False
        elif not self.warmup:  # don't view warmup response
            self.text = self.text + text
        # NOTE(review): endswith("") is always True, so this branch fires on
        # every chunk -- the empty string is likely a lost special token
        # (e.g. "</s>"); confirm against the upstream local_llm source.
        if text.endswith("") or text.endswith("###") or text.endswith("<|im_end|>"):
            self.print_stats()
            self.warmup = False
            self.eos = True

    def update_switch(self, on_off):
        # Enable/disable the video source (pause or resume inference).
        self.video_source.switch(on_off)

    def update_prompts(self, new_prompt):
        # Thread-safe prompt replacement (called from the Flask thread).
        with self.lock:
            if new_prompt:
                self.prompt = new_prompt

    def print_stats(self):
        # print_table(self.llm.model.stats)
        # Log the effective refresh rate between completed responses.
        curr_time = time.perf_counter()
        if not hasattr(self, "start_time"):
            self.start_time = curr_time
        else:
            frame_time = curr_time - self.start_time
            self.start_time = curr_time
            logging.info(
                f"refresh rate: {1.0 / frame_time:.2f} FPS ({frame_time*1000:.1f} ms)"
            )
if __name__ == "__main__":
    parser = ArgParser(extras=ArgParser.Defaults + ["video_input", "video_output"])
    args = parser.parse_args()

    # Run the video-query agent in its own thread.
    agent = VideoQuery(**vars(args))

    def run_video_query():
        agent.run()

    video_query_thread = threading.Thread(target=run_video_query)
    video_query_thread.start()

    # Start the web service used to control the agent at runtime.
    app = Flask(__name__)

    @app.route("/update_prompt", methods=["POST"])
    def update_prompts():
        # Replace the active prompt applied to each frame.
        prompt = request.json.get("prompt")
        if prompt:
            agent.update_prompts(prompt)
            return "Prompts updated successfully."
        else:
            return "Invalid prompts data."

    @app.route("/update_switch", methods=["POST"])
    def update_switch():
        # Toggle inference: {"switch": "on"} enables, anything else disables.
        infer_or_not = True if request.json.get("switch") == "on" else False
        agent.update_switch(infer_or_not)
        return "stop" if not infer_or_not else "start"

    @app.route("/update_params", methods=["POST"])
    def update_params():
        # Update generation parameters; missing fields fall back to defaults.
        try:
            agent.llm.max_new_tokens = request.json.get("max_new_tokens") or 128
            agent.llm.min_new_tokens = request.json.get("min_new_tokens") or -1
            agent.llm.do_sample = request.json.get("do_sample") or False
            agent.llm.repetition_penalty = request.json.get("repetition_penalty") or 1.0
            agent.llm.temperature = request.json.get("temperature") or 0.7
            agent.llm.top_p = request.json.get("top_p") or 0.95
            if request.json.get("system_prompt"):
                agent.llm.chat_history.template["system_prompt"] = request.json.get(
                    "system_prompt"
                )
            return "params updated."
        except Exception as e:
            print(e)
            return "update failure"

    app.run(host="0.0.0.0", port=5555)
EOF
# Patch local_llm so optional dependencies (siglip, audio plugins, nanodb,
# onnxruntime) are not imported inside the container.
sed -i 's/from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, SiglipImageProcessor, SiglipVisionModel/from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection # , SiglipImageProcessor, SiglipVisionModel/' "$JETSON_REPO_PATH/packages/llm/local_llm/vision/clip_hf.py"
sed -i "s/'siglip': dict(preprocessor=SiglipImageProcessor, model=SiglipVisionModel),/# 'siglip': dict(preprocessor=SiglipImageProcessor, model=SiglipVisionModel),/" "$JETSON_REPO_PATH/packages/llm/local_llm/vision/clip_hf.py"
sed -i 's/from .audio import */# from .audio import */' "$JETSON_REPO_PATH/packages/llm/local_llm/plugins/__init__.py"
sed -i 's/from .nanodb import NanoDB/# from .nanodb import NanoDB/' "$JETSON_REPO_PATH/packages/llm/local_llm/plugins/__init__.py"
sed -i 's/import onnxruntime as ort/# import onnxruntime as ort/' "$JETSON_REPO_PATH/packages/llm/local_llm/utils/model.py"
echo "The script has been modified."
# Open the WebRTC viewer in Chromium (self-signed cert, served on :8554).
gnome-terminal -- /bin/bash -c "chromium-browser --disable-features=WebRtcHideLocalIpsWithMdns https://localhost:8554/; exec /bin/bash"
cd $JETSON_REPO_PATH
# Run the demo container with cameras, display, and the patched local_llm mounted in.
sudo docker run --runtime nvidia -it --rm --network host --volume /tmp/argus_socket:/tmp/argus_socket --volume /etc/enctune.conf:/etc/enctune.conf --volume /etc/nv_tegra_release:/etc/nv_tegra_release --volume /proc/device-tree/model:/tmp/nv_jetson_model --volume /var/run/dbus:/var/run/dbus --volume /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket --volume /var/run/docker.sock:/var/run/docker.sock --volume $JETSON_REPO_PATH/data:/data --device /dev/snd --device /dev/bus/usb -e DISPLAY=:0 -v /tmp/.X11-unix/:/tmp/.X11-unix -v /tmp/.docker.xauth:/tmp/.docker.xauth -e XAUTHORITY=/tmp/.docker.xauth --device /dev/video0 --device /dev/video1 -v $JETSON_REPO_PATH/packages/llm/local_llm:/opt/local_llm/local_llm -e SSL_KEY=/data/key.pem -e SSL_CERT=/data/cert.pem dustynv/local_llm:r35.3.1 python3 -m local_llm.agents.video_query --api=mlc --verbose --model liuhaotian/llava-v1.5-7b --max-new-tokens 32 --video-input /dev/video0 --video-output webrtc://@:8554/output
================================================
FILE: reComputer/scripts/llama-factory/README.md
================================================
# Finetune LLM by Llama-Factory on Jetson
## Hello
Now you can tailor a custom private local LLM to meet your requirements.
💡 Here's an example of quickly deploying [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) on Jetson device.
🔥 Highlights:
- **Llama-Factory** is an efficient tool to unify efficient Fine-Tuning of 100+ LLMs. 🚀🔍
- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨
- **Jetson** is powerful AI hardware platform for edge computing.💻
🛠️ Follow the tutorial below to quickly experience the performance of Llama-Factory on edge computing devices.
## Get a Jetson Orin Device 🛒
| Device Model | Description | Link |
|--------------|-------------|------|
| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
| NVIDIA® Jetson AGX Orin™ 64GB Developer Kit | Smallest and most powerful AI edge computer | [Buy Here](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html) |
## Getting Started
- install **jetson-examples** by pip:
```sh
pip3 install jetson-examples
```
- restart reComputer
```sh
sudo reboot
```
- run Llama-Factory webui on jetson in one line:
```sh
reComputer run llama-factory
```
- Please visit http://127.0.0.1:7860
## Run Training Script
> **Note:** Some models and datasets require confirmation before using them, so we recommend logging in with your Hugging Face account by:
> `sudo docker exec -it llama-factory huggingface-cli login`
There are many parameters to choose from in the webui; refer to the LLaMA-Factory documentation for more information.
For demonstration purposes, set `Model name: Phi-1.5-1.3B`, `Dataset: alpaca_zh`, leave the other parameters unchanged, and then click the `Start` button
## Build Docker Image
We highly recommend that you use `jetson-containers` to compile the docker container, as you can see [here](https://github.com/dusty-nv/jetson-containers/pull/566).
## Reference
- https://github.com/hiyouga/LLaMA-Factory
- https://github.com/dusty-nv/jetson-containers
================================================
FILE: reComputer/scripts/llama-factory/clean.sh
================================================
#!/bin/bash
# Remove the llama-factory Docker image and wipe its working files.
sudo docker rmi youjiang9977/llama-factory:r35.4.1
sudo rm -rf /home/$USER/reComputer/jetson-containers/LLaMA-Factory/*
================================================
FILE: reComputer/scripts/llama-factory/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llama-factory/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llama-factory/run.sh
================================================
#!/bin/bash
# Start the LLaMA-Factory webui container with jetson-containers' data dir
# mounted at /data (UI served on http://127.0.0.1:7860).
DATA_PATH="/home/$USER/reComputer/jetson-containers/data"
sudo docker run -it --rm --network host --runtime nvidia \
    --volume $DATA_PATH:/data \
    --name llama-factory \
    youjiang9977/llama-factory:r35.4.1
================================================
FILE: reComputer/scripts/llama3/clean.sh
================================================
#!/bin/bash
# Remove the locally cached ollama Docker image and (optionally) wipe its
# downloaded model data.
BASE_PATH="/home/$USER/reComputer"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"

# Resolve the locally cached image tag via jetson-containers' autotag.
img_tag=$("$JETSON_REPO_PATH"/autotag -p local ollama)
# Capture the exit status immediately: $? is overwritten by the next command,
# so the original code always reported "error code 1" from the [ ] test.
rc=$?
if [ $rc -eq 0 ]; then
    echo "Found Image successfully."
    sudo docker rmi "$img_tag"
else
    echo "[warn] Found Image failed with error code $rc. skip delete Image."
fi

# Optionally delete all downloaded ollama model data.
read -p "Delete all data for ollama? (y/n): " choice
if [[ $choice == "y" || $choice == "Y" ]]; then
    echo "Delete=> $JETSON_REPO_PATH/data/models/ollama/"
    sudo rm -rf "$JETSON_REPO_PATH/data/models/ollama/"
    echo "Clean Data Done."
else
    echo "[warn] Skip Clean Data."
fi
================================================
FILE: reComputer/scripts/llama3/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llama3/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llama3/run.sh
================================================
#!/bin/bash
# Run the llama3 model via ollama inside a jetson-containers container.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
# try stop old server
docker rm -f ollama
# start new server
./run.sh -d --name ollama $(./autotag ollama)
# run a client (interactive chat; returns when the user quits)
./run.sh $(./autotag ollama) /bin/ollama run llama3
# clean new server
docker rm -f ollama
================================================
FILE: reComputer/scripts/llama3.2/clean.sh
================================================
#!/bin/bash
get_l4t_version() {
    # Parse "R<major> (release), REVISION: <x.y>" from the first line of
    # /etc/nv_tegra_release and print it as "<major>.<x.y>"; prints an empty
    # string when the header does not match the expected format.
    local version=""
    local header
    header=$(head -n 1 /etc/nv_tegra_release)
    if [[ $header =~ R([0-9]+)\ *\(release\),\ REVISION:\ ([0-9]+\.[0-9]+) ]]; then
        version="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
    fi
    echo "$version"
}
L4T_VERSION=$(get_l4t_version)
echo "Detected L4T version: $L4T_VERSION"
# Determine the Docker image based on L4T version
if [[ "$L4T_VERSION" == "35.3.1" || "$L4T_VERSION" == "35.4.1" || "$L4T_VERSION" == "35.5.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r35.3.1"
elif [[ "$L4T_VERSION" == "36.3.0" || "$L4T_VERSION" == "36.4.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r36.3.0"
else
    echo "Error: L4T version $L4T_VERSION is not supported."
    exit 1
fi
# Delete the image only when it is actually present locally.
if [ "$(docker images -q "$IMAGE_NAME")" ]; then
    echo "Deleting $IMAGE_NAME..."
    docker rmi "$IMAGE_NAME"
    echo "Image $IMAGE_NAME has been successfully deleted."
else
    echo "No image named $IMAGE_NAME was found."
fi
================================================
FILE: reComputer/scripts/llama3.2/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
- 36.4.0
REQUIRED_DISK_SPACE: 15
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llama3.2/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llama3.2/run.sh
================================================
#!/bin/bash
# Run the llama3.2 model with ollama, picking the image that matches the
# device's L4T (JetPack) version.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
get_l4t_version() {
    # Parse "R<major> (release), REVISION: <x.y>" from the first line of
    # /etc/nv_tegra_release and print it as "<major>.<x.y>"; prints an empty
    # string when the header does not match the expected format.
    local version=""
    local header
    header=$(head -n 1 /etc/nv_tegra_release)
    if [[ $header =~ R([0-9]+)\ *\(release\),\ REVISION:\ ([0-9]+\.[0-9]+) ]]; then
        version="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
    fi
    echo "$version"
}
L4T_VERSION=$(get_l4t_version)
echo "Detected L4T version: $L4T_VERSION"
# Determine the Docker image based on L4T version
if [[ "$L4T_VERSION" == "35.3.1" || "$L4T_VERSION" == "35.4.1" || "$L4T_VERSION" == "35.5.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r35.3.1"
elif [[ "$L4T_VERSION" == "36.3.0" || "$L4T_VERSION" == "36.4.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r36.3.0"
else
    echo "Error: L4T version $L4T_VERSION is not supported."
    exit 1
fi
# Remove any stale server container, start a fresh one, attach an interactive
# client, then clean up the server when the client exits.
docker rm -f ollama
./run.sh -d --name ollama $IMAGE_NAME
./run.sh $IMAGE_NAME /bin/ollama run llama3.2
docker rm -f ollama
================================================
FILE: reComputer/scripts/llava/clean.sh
================================================
#!/bin/bash
# Remove the locally cached llava image resolved via jetson-containers' autotag.
docker rmi $(/home/$USER/reComputer/jetson-containers/autotag llava)
================================================
FILE: reComputer/scripts/llava/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llava/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llava/run.sh
================================================
#!/bin/bash
# Run the llava CLI demo against a sample image bundled with jetson-containers.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
./run.sh $(./autotag llava) \
    python3 -m llava.serve.cli \
    --model-path liuhaotian/llava-v1.5-7b \
    --image-file /data/images/hoover.jpg
================================================
FILE: reComputer/scripts/llava-v1.5-7b/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llava-v1.5-7b/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llava-v1.5-7b/run.sh
================================================
#!/bin/bash
# Run the llava-v1.5-7b CLI demo against a sample image from jetson-containers.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
./run.sh $(./autotag llava) \
    python3 -m llava.serve.cli \
    --model-path liuhaotian/llava-v1.5-7b \
    --image-file /data/images/hoover.jpg
================================================
FILE: reComputer/scripts/llava-v1.6-vicuna-7b/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llava-v1.6-vicuna-7b/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llava-v1.6-vicuna-7b/run.sh
================================================
#!/bin/bash
# Run llava-v1.6-vicuna-7b through the local_llm chat runtime (MLC backend).
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
./run.sh $(./autotag local_llm) \
    python3 -m local_llm --api=mlc \
    --model liuhaotian/llava-v1.6-vicuna-7b \
    --max-context-len 768 \
    --max-new-tokens 128
================================================
FILE: reComputer/scripts/nanodb/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 80 # in GB
REQUIRED_MEM_SPACE: 15
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/nanodb/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Ensure the dusty-nv/jetson-containers repo is cloned and initialized under
# /home/$USER/reComputer; do nothing when it is already present.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-containers already exists."
else
    echo "jetson-containers is not installed. Starting init..."
    # Fail fast: without these guards a failed cd/clone would run install.sh
    # from the wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/nanodb/readme.md
================================================
# NanoDB
## ref
- https://github.com/dusty-nv/jetson-containers
## access
- To use on the Jetson itself, open `http://127.0.0.1:7860` in a browser.
- To use from another PC, find the Jetson's IP address and open `http://<jetson-ip>:7860` in a browser.
================================================
FILE: reComputer/scripts/nanodb/run.sh
================================================
#!/bin/bash
# Download the COCO 2017 dataset and a prebuilt nanodb index (when missing),
# then start the nanodb web server from jetson-containers.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
check_disk_space() {
    # Abort the whole script unless the filesystem holding directory $1 has
    # at least $2 GB free; echoes the device, KB free and GB free on the way.
    local target_dir="$1"
    local need_gb="$2"
    local dev kb_free gb_free
    # Resolve the device backing the directory, then measure its free space.
    dev=$(df -P "$target_dir" | awk 'NR==2 {print $1}')
    echo $dev
    kb_free=$(df -P "$dev" | awk 'NR==2 {print $4}')
    echo $kb_free
    gb_free=$(echo "scale=2; $kb_free / 1024 / 1024" | bc)
    echo $gb_free
    if (( $(echo "$gb_free >= $need_gb" | bc -l) )); then
        echo "disk space ($1) enough, keep going."
    else
        echo "disk space ($1) not enough!! we need $2 GB!!"
        exit 1
    fi
}
# check data files TODO: support params to force download
DATA_PATH="$JETSON_REPO_PATH/data/datasets/coco/2017"
if [ ! -d $DATA_PATH ]; then
    mkdir -p $DATA_PATH
fi
cd $DATA_PATH
# check val2017.zip: download and extract the validation set if missing
if [ ! -d "$DATA_PATH/val2017" ]; then
    if [ ! -f "val2017.zip" ]; then
        check_disk_space $DATA_PATH 1
        wget http://images.cocodataset.org/zips/val2017.zip
    else
        echo "val2017.zip existed."
    fi
    check_disk_space $DATA_PATH 19
    unzip val2017.zip && rm val2017.zip
else
    echo "val2017/ existed."
fi
# check train2017.zip: download and extract the training set if missing
if [ ! -d "$DATA_PATH/train2017" ]; then
    if [ ! -f "train2017.zip" ]; then
        check_disk_space $DATA_PATH 19
        wget http://images.cocodataset.org/zips/train2017.zip
    else
        echo "train2017.zip existed."
    fi
    check_disk_space $DATA_PATH 19
    unzip train2017.zip && rm train2017.zip
else
    echo "train2017/ existed."
fi
if [ ! -d "$DATA_PATH/unlabeled2017" ]; then
    # check unlabeled2017.zip: download and extract the unlabeled set if missing
    if [ ! -f "unlabeled2017.zip" ]; then
        check_disk_space $DATA_PATH 19
        wget http://images.cocodataset.org/zips/unlabeled2017.zip
    else
        echo "unlabeled2017.zip existed."
    fi
    check_disk_space $DATA_PATH 19
    unzip unlabeled2017.zip && rm unlabeled2017.zip
else
    echo "unlabeled2017/ existed."
fi
# check index files: fetch the prebuilt nanodb index for COCO 2017 if missing
INDEX_PATH="$JETSON_REPO_PATH/data/nanodb/coco/2017"
if [ ! -d $INDEX_PATH ]; then
    cd $JETSON_REPO_PATH/data/
    check_disk_space $JETSON_REPO_PATH 1
    wget https://nvidia.box.com/shared/static/icw8qhgioyj4qsk832r4nj2p9olsxoci.gz -O nanodb_coco_2017.tar.gz
    tar -xzvf nanodb_coco_2017.tar.gz
fi
# RUN: serve nanodb over the COCO index on port 7860
cd $JETSON_REPO_PATH
./run.sh $(./autotag nanodb) \
    python3 -m nanodb \
    --path /data/nanodb/coco/2017 \
    --server --port=7860
================================================
FILE: reComputer/scripts/nanoowl/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/nanoowl/init.sh
================================================
#!/bin/bash
# Initialize the nanoowl example: verify the runtime environment, then
# make sure dusty-nv/jetson-containers is cloned and installed.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

# Workspace layout: everything for this example lives under ~/reComputer.
BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"

# Clone and install jetson-containers once; later runs reuse it. Variables
# are quoted and cd/clone failures abort instead of continuing blindly.
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab is not installed. start init..."
    cd "$BASE_PATH" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/nanoowl/run.sh
================================================
#!/bin/bash
# Run the nanoowl tree_demo inside the auto-tagged container. The cd is
# now quoted and guarded so the container is never started from the
# wrong directory.
BASE_PATH="/home/$USER/reComputer"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd "$JETSON_REPO_PATH" || exit 1
./run.sh $(./autotag nanoowl) bash -c "ls /dev/video* && cd examples/tree_demo && python3 tree_demo.py ../../data/owl_image_encoder_patch32.engine"
================================================
FILE: reComputer/scripts/nvblox/README.md
================================================
# Jetson Example: Run NVBlox Mapping on NVIDIA Jetson

[Isaac ROS NVBlox](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox) is a high-performance GPU-accelerated 3D mapping framework developed by NVIDIA for real-time robotic perception. Unlike monocular depth estimation models, NVBlox consumes true depth input from RGB-D cameras or stereo cameras to construct accurate 3D scene representations. This example lets you **quickly deploy the environment needed to run NVBlox on your reComputer with just one click.**
Detailed instructions for environment configuration can be found at: [Deploy NVBlox with Orbbec Camera](https://wiki.seeedstudio.com/deploy_nvblox_jetson_agx_orin/)
Running the main flow will:
1. Download `nvblox_images.tar` from the built-in OneDrive share link into `~/.cache/jetson-examples/nvblox`
2. Run `docker load -i` on that archive
3. Build the derived image and prepared host/container workspaces
4. Launch the static Gemini2 NVBlox demo
## Requirements
- NVIDIA Jetson Orin
- Ubuntu 22.04
- JetPack 6.x
- Docker with NVIDIA Container Runtime
- Orbbec Gemini2 or another Orbbec camera that provides `/camera/color/*` and `/camera/depth/*`
- Roughly 60GB free disk space for the cached archive, derived image, and managed workspace
## Usage
Run the full prepare + demo flow:
```sh
cd jetson-examples/
pip install .
reComputer run nvblox
```
**Prepare only:**
```bash
NVBLOX_MODE=prepare reComputer run nvblox
```
Run only after preparation:
```sh
NVBLOX_MODE=run reComputer run nvblox
```
Force a rebuild of the prepared host/container workspaces:
```sh
NVBLOX_FORCE_REBUILD=1 reComputer run nvblox
```
Run headless:
```sh
NVBLOX_HEADLESS=1 reComputer run nvblox
```
Override the managed workspace root:
```sh
MANAGED_ROOT=/path/to/nvblox_demo reComputer run nvblox
```
Override the built-in OneDrive archive settings:
```sh
NVBLOX_IMAGE_SHARE_URL='https://...'
NVBLOX_IMAGE_ARCHIVE_NAME='nvblox_images.tar'
NVBLOX_IMAGE_CACHE_DIR="$HOME/.cache/jetson-examples/nvblox"
reComputer run nvblox
```
## Cleanup
```sh
reComputer clean nvblox
```
This removes the managed workspace, logs, partial downloads, the derived image `local/isaac_ros_nvblox_orbbec:jp6-humble`, and the running demo container if it exists.
It keeps:
- the cached base archive in `~/.cache/jetson-examples/nvblox`
- the loaded base image imported from `nvblox_images.tar`
## Troubleshooting
- The default path checks ordinary Gemini2 color/depth readiness, not stereo IR capability.
- Host readiness now requires only:
- `/camera/color/camera_info`
- `/camera/depth/camera_info`
- `/camera/color/image_raw`
- `/camera/depth/image_raw`
- Container readiness now checks host camera discovery through `/camera/color/camera_info` and `/camera/depth/camera_info`.
- The runtime success criterion is static map output from `/nvblox_node/static_esdf_pointcloud` or `/nvblox_node/static_map_slice`.
- `usb speed: 5000 Mbps` is not treated as proof that the full demo is healthy. The final authority is whether host color/depth, container visibility, static TF, and static map output all succeed.
- If the host driver exits and Gemini2 falls back to `usb_present_no_video`, the run path still attempts automatic recovery with udev refresh and USB rebind so you can usually retry without unplugging the camera.
- If the run still fails, use the built-in connectivity debugger:
```sh
bash reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh
```
That debug path follows the same stages as the default runtime:
1. Gemini2 device state
2. Host ROS discovery environment
3. Container ROS discovery environment
4. Host color/depth readiness
5. Container camera visibility
6. Managed static TF availability
7. Static NVBlox output
## Notes
- This example does not use `docker pull` for the base image path.
- The OneDrive downloader resolves the anonymous `download.aspx?...tempauth=...` URL from the preview page before downloading.
- `NVBLOX_MODE=run` expects an already prepared `MANAGED_ROOT`.
- The host camera is launched with `ros2 launch orbbec_camera gemini2.launch.py publish_tf:=false tf_publish_rate:=0.0`.
- The container workspace now centers on `nvblox_examples_bringup` static Orbbec launches and removes the old default dependence on Visual SLAM.
- The managed static TF chain is generated inside the prepared container workspace rather than relying on device-published TF.
- Headless mode switches the default launch file to `orbbec_debug.launch.py`, while GUI mode uses `orbbec_example.launch.py`.
================================================
FILE: reComputer/scripts/nvblox/clean.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
# Resolve this script's own directory so helpers can be sourced by absolute path.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/lib/common.sh"
# Managed workspace root and download cache; defaults come from common.sh
# and can be overridden through the environment.
MANAGED_ROOT="${MANAGED_ROOT:-${MANAGED_ROOT_DEFAULT}}"
CACHE_DIR="$(resolve_nvblox_image_cache_dir)"
maybe_enable_docker_access() {
    # Work out how the Docker daemon must be reached and record the
    # answer in the global DOCKER_PREFIX array: empty for direct access,
    # (sudo) when only root can talk to the daemon. Returns non-zero
    # when Docker is unusable so callers can skip container cleanup.
    command -v docker >/dev/null 2>&1 || {
        warn "docker command not found. Skipping container and image cleanup."
        return 1
    }
    if docker info >/dev/null 2>&1; then
        DOCKER_PREFIX=()
        return 0
    elif sudo docker info >/dev/null 2>&1; then
        DOCKER_PREFIX=(sudo)
        return 0
    fi
    warn "Cannot access the Docker daemon. Skipping container and image cleanup."
    return 1
}
remove_managed_root() {
    # Delete MANAGED_ROOT, but only when the sentinel file proves the
    # directory was created by this example; refuse to touch anything else.
    local sentinel="${MANAGED_ROOT}/${MANAGED_SENTINEL_NAME}"
    if [[ ! -e "${MANAGED_ROOT}" ]]; then
        info "Managed root ${MANAGED_ROOT} does not exist."
        return 0
    fi
    [[ -f "${sentinel}" ]] || die "Managed root ${MANAGED_ROOT} exists but is not owned by the NVBlox example. Refusing to remove it."
    run_sudo rm -rf "${MANAGED_ROOT}"
    info "Removed managed root ${MANAGED_ROOT}"
}
# Refuse unsupported user contexts and, when launched through sudo,
# re-enter this script as the original setup user.
ensure_supported_user_context
if should_reexec_as_setup_user; then
printf '[reComputer][nvblox] Re-entering as %s.\n' "${SETUP_USER_NAME}" >&2
reexec_as_setup_user "${SCRIPT_DIR}/clean.sh"
fi
# Best-effort: stop any leftover Gemini2 camera processes first.
cleanup_residual_gemini2_processes "nvblox clean" || true
# Remove the demo container and the derived image when Docker is reachable.
if maybe_enable_docker_access; then
if docker_cmd ps -a --format '{{.Names}}' | grep -Fxq "${CONTAINER_NAME_DEFAULT}"; then
info "Removing container ${CONTAINER_NAME_DEFAULT}"
docker_cmd rm -f "${CONTAINER_NAME_DEFAULT}" >/dev/null
else
info "Container ${CONTAINER_NAME_DEFAULT} does not exist."
fi
if docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1; then
info "Removing derived image ${DERIVED_IMAGE_TAG}"
docker_cmd image rm -f "${DERIVED_IMAGE_TAG}" >/dev/null
else
info "Derived image ${DERIVED_IMAGE_TAG} does not exist."
fi
fi
# Remove the managed workspace and partial downloads; the cached base
# archive is deliberately kept so future prepares skip the download.
remove_managed_root
cleanup_nvblox_partial_downloads "${CACHE_DIR}"
info "NVBlox clean complete. Cached base archive is kept in ${CACHE_DIR}"
================================================
FILE: reComputer/scripts/nvblox/config/orbbec_stereo_capability_probe.yaml
================================================
depth_registration: false
enable_point_cloud: false
enable_colored_point_cloud: false
device_preset: "High Accuracy"
laser_on_off_mode: 1
time_domain: "device"
enable_sync_host_time: true
align_mode: "SW"
camera_name: "camera"
enable_3d_reconstruction_mode: false
enable_color: false
color_width: 640
color_height: 480
color_fps: 5
color_format: "RGB"
color_qos: "SENSOR_DATA"
depth_width: 640
depth_height: 400
depth_fps: 15
depth_format: "Y16"
depth_qos: "SENSOR_DATA"
point_cloud_qos: "SENSOR_DATA"
enable_ir_auto_exposure: false
ir_exposure: 5000
ir_gain: 40
enable_left_ir: true
left_ir_width: 640
left_ir_height: 400
left_ir_fps: 15
left_ir_format: "Y8"
left_ir_qos: "SENSOR_DATA"
enable_right_ir: true
right_ir_width: 640
right_ir_height: 400
right_ir_fps: 15
right_ir_format: "Y8"
right_ir_qos: "SENSOR_DATA"
enable_sync_output_accel_gyro: false
enable_accel: false
accel_rate: "200hz"
accel_range: "4g"
enable_gyro: false
gyro_rate: "200hz"
gyro_range: "1000dps"
liner_accel_cov: "0.01"
angular_vel_cov: "0.01"
================================================
FILE: reComputer/scripts/nvblox/config/orbbec_vslam_mobile.yaml
================================================
depth_registration: true
enable_point_cloud: true
enable_colored_point_cloud: true
device_preset: "High Accuracy"
laser_on_off_mode: 1
time_domain: "device"
enable_sync_host_time: true
align_mode: "SW"
camera_name: "camera"
enable_3d_reconstruction_mode: true
enable_color: true
color_width: 640
color_height: 480
color_fps: 30
color_format: "RGB"
enable_color_auto_exposure: false
color_exposure: 50
color_gain: -1
color_qos: "SENSOR_DATA"
depth_width: 640
depth_height: 480
depth_fps: 30
depth_format: "Y16"
depth_qos: "SENSOR_DATA"
point_cloud_qos: "SENSOR_DATA"
enable_ir_auto_exposure: false
ir_exposure: 5000
ir_gain: 40
enable_left_ir: true
left_ir_width: 640
left_ir_height: 480
left_ir_fps: 30
left_ir_format: "Y8"
left_ir_qos: "SENSOR_DATA"
enable_right_ir: true
right_ir_width: 640
right_ir_height: 480
right_ir_fps: 30
right_ir_format: "Y8"
right_ir_qos: "SENSOR_DATA"
enable_sync_output_accel_gyro: false
enable_accel: false
accel_rate: "200hz"
accel_range: "4g"
enable_gyro: false
gyro_rate: "200hz"
gyro_range: "1000dps"
liner_accel_cov: "0.01"
angular_vel_cov: "0.01"
================================================
FILE: reComputer/scripts/nvblox/config.yaml
================================================
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 60
REQUIRED_MEM_SPACE: 14
PACKAGES:
- nvidia-jetpack
- x11-xserver-utils
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/nvblox/docker/Dockerfile.nvblox_orbbec
================================================
# Derived NVBlox + Orbbec image: extends the Isaac ROS base image with the
# build tooling and ROS packages needed to compile the Orbbec/NVBlox
# workspace, plus the prepare/launch helper scripts.
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
ARG ROS_DISTRO=humble
ENV DEBIAN_FRONTEND=noninteractive
ENV ROS_DISTRO=${ROS_DISTRO}
# Login bash so RUN steps may use [[ ... ]] and pick up profile environment.
SHELL ["/bin/bash", "-lc"]
# Build tooling plus the ROS packages the Orbbec driver and nvblox need.
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git \
curl \
ca-certificates \
python3-rosdep \
python3-vcstool \
python3-colcon-common-extensions \
build-essential \
libgflags-dev \
nlohmann-json3-dev \
libdw-dev \
libssl-dev \
mesa-utils \
libgl1 \
libgoogle-glog-dev \
ros-${ROS_DISTRO}-image-transport \
ros-${ROS_DISTRO}-image-transport-plugins \
ros-${ROS_DISTRO}-compressed-image-transport \
ros-${ROS_DISTRO}-image-publisher \
ros-${ROS_DISTRO}-camera-info-manager \
ros-${ROS_DISTRO}-diagnostic-updater \
ros-${ROS_DISTRO}-diagnostic-msgs \
ros-${ROS_DISTRO}-statistics-msgs \
ros-${ROS_DISTRO}-xacro \
ros-${ROS_DISTRO}-backward-ros \
ros-${ROS_DISTRO}-magic-enum \
ros-${ROS_DISTRO}-foxglove-msgs && \
rm -rf /var/lib/apt/lists/*
# Expose the ROS-packaged magic_enum and foxglove_msgs headers at the
# include paths the workspace build expects.
RUN if [[ -f /opt/ros/${ROS_DISTRO}/include/magic_enum.hpp ]]; then \
ln -sf /opt/ros/${ROS_DISTRO}/include/magic_enum.hpp /usr/include/magic_enum.hpp; \
fi && \
if [[ -d /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/foxglove_msgs/msg ]]; then \
mkdir -p /opt/ros/${ROS_DISTRO}/include/foxglove_msgs && \
ln -sfn /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/foxglove_msgs/msg /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/msg; \
fi
# Install the helper scripts used to prepare the workspace and start the demo.
COPY docker/prepare_container_workspace.sh /opt/nvblox/bin/prepare_container_workspace.sh
COPY docker/launch_nvblox.sh /opt/nvblox/bin/launch_nvblox.sh
RUN chmod +x /opt/nvblox/bin/prepare_container_workspace.sh /opt/nvblox/bin/launch_nvblox.sh
WORKDIR /workspaces/isaac_ros-dev
================================================
FILE: reComputer/scripts/nvblox/docker/launch_nvblox.sh
================================================
#!/usr/bin/env bash
# Container-side launcher: validate the prepared workspace, then start the
# static NVBlox demo launch file alongside a runtime output probe.
set -euo pipefail
# Tunables (all overridable through the environment).
ROS_DISTRO="${ROS_DISTRO:-humble}"
NVBLOX_LAUNCH_FILE="${NVBLOX_LAUNCH_FILE:-orbbec_example.launch.py}"
EXPECTED_WORKSPACE_SPEC_VERSION="${EXPECTED_WORKSPACE_SPEC_VERSION:-}"
NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC="${NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC:-45}"
ISAAC_WS="/workspaces/isaac_ros-dev"
# Stamp written by prepare_container_workspace.sh; proves the workspace
# was prepared and records which spec version built it.
STAMP_PATH="${ISAAC_WS}/.setup-nvbox/container_workspace.env"
# PIDs of the background launch and probe processes (set later).
LAUNCH_PID=""
OUTPUT_PROBE_PID=""
# Discovery-related environment variables reported for debugging.
ROS_DISCOVERY_ENV_VARS=(
"ROS_DOMAIN_ID"
"ROS_LOCALHOST_ONLY"
"RMW_IMPLEMENTATION"
"ROS_AUTOMATIC_DISCOVERY_RANGE"
"ROS_STATIC_PEERS"
"CYCLONEDDS_URI"
"CYCLONEDDS_HOME"
"FASTDDS_DEFAULT_PROFILES_FILE"
"FASTRTPS_DEFAULT_PROFILES_FILE"
)
# Fail fast when any required setup artifact is missing.
[[ -f "/opt/ros/${ROS_DISTRO}/setup.bash" ]] || {
printf '[container][ERROR] Missing ROS setup at /opt/ros/%s/setup.bash\n' "${ROS_DISTRO}" >&2
exit 1
}
[[ -f "${ISAAC_WS}/install/setup.bash" ]] || {
printf '[container][ERROR] Missing workspace setup at %s/install/setup.bash\n' "${ISAAC_WS}" >&2
exit 1
}
[[ -f "${STAMP_PATH}" ]] || {
printf '[container][ERROR] Missing workspace stamp at %s\n' "${STAMP_PATH}" >&2
exit 1
}
# The ROS setup scripts are not nounset-clean; relax `set -u` while sourcing.
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
# shellcheck disable=SC1091
source "/opt/ros/${ROS_DISTRO}/setup.bash"
# shellcheck disable=SC1090
source "${ISAAC_WS}/install/setup.bash"
# shellcheck disable=SC1090
source "${STAMP_PATH}"
if (( restore_nounset )); then
set -u
fi
# Refuse to launch against a workspace built from a different spec version.
if [[ -n "${EXPECTED_WORKSPACE_SPEC_VERSION}" ]] && \
[[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" != "${EXPECTED_WORKSPACE_SPEC_VERSION}" ]]; then
printf '[container][ERROR] Workspace spec mismatch. Expected %s, found %s\n' \
"${EXPECTED_WORKSPACE_SPEC_VERSION}" "${STAMP_WORKSPACE_SPEC_VERSION:-unknown}" >&2
exit 1
fi
# Verify the bringup package and the requested launch file actually exist.
PACKAGE_PREFIX="$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)"
[[ -n "${PACKAGE_PREFIX}" ]] || {
printf '[container][ERROR] Cannot resolve nvblox_examples_bringup in the prepared workspace.\n' >&2
exit 1
}
LAUNCH_PATH="${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}"
[[ -f "${LAUNCH_PATH}" ]] || {
printf '[container][ERROR] Prepared launch file is missing: %s\n' "${LAUNCH_PATH}" >&2
exit 1
}
format_ros_discovery_env() {
    # Render the discovery-related environment variables as one
    # comma-separated "NAME=value" line; unset or empty variables show
    # as "NAME=". (The original joined with "${parts[*]}" under
    # IFS=', ', which joins on the first IFS character only — a comma.)
    local summary=""
    local name
    for name in "${ROS_DISCOVERY_ENV_VARS[@]}"; do
        summary+="${name}=${!name-},"
    done
    printf '%s\n' "${summary%,}"
}
# Announce the prepared-workspace state and the launch configuration.
printf '[container][INFO] Workspace spec: %s\n' "${STAMP_WORKSPACE_SPEC_VERSION:-unknown}"
printf '[container][INFO] Workspace stamped at: %s\n' "${STAMPED_AT:-unknown}"
printf '[container][INFO] Launching static demo file: %s\n' "${NVBLOX_LAUNCH_FILE}"
printf '[container][INFO] Managed static TF chain: odom -> base_link -> camera_link -> camera_color_optical_frame\n'
printf '[container][INFO] Expected camera info frame_id: camera_color_optical_frame\n'
printf '[container][INFO] Container ROS discovery env: %s\n' "$(format_ros_discovery_env)"
# Run an embedded rclpy probe that waits (up to the configured timeout)
# for the first message on either NVBlox static-map output topic. Returns
# 0 when output is seen, 1 on timeout. The Python program is passed via a
# quoted heredoc and is left untouched here.
probe_nvblox_runtime_output() {
python3 - "${NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC}" <<'PY'
import sys
import time
import rclpy
from nav_msgs.msg import OccupancyGrid
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import PointCloud2
timeout_seconds = float(sys.argv[1])
class NvbloxOutputProbe(Node):
    def __init__(self):
        super().__init__('nvblox_runtime_output_probe')
        self.result = None
        self.create_subscription(
            PointCloud2,
            '/nvblox_node/static_esdf_pointcloud',
            self._pointcloud_callback,
            qos_profile_sensor_data)
        self.create_subscription(
            OccupancyGrid,
            '/nvblox_node/static_map_slice',
            self._map_slice_callback,
            10)
    def _pointcloud_callback(self, msg: PointCloud2):
        self.result = (
            '/nvblox_node/static_esdf_pointcloud',
            f'frame_id={msg.header.frame_id or ""} width={msg.width} height={msg.height}')
    def _map_slice_callback(self, msg: OccupancyGrid):
        self.result = (
            '/nvblox_node/static_map_slice',
            f'frame_id={msg.header.frame_id or ""} width={msg.info.width} '
            f'height={msg.info.height} resolution={msg.info.resolution:.3f}')
def main() -> int:
    print(
        '[container][INFO] Starting runtime output probe for '
        '/nvblox_node/static_esdf_pointcloud and /nvblox_node/static_map_slice '
        f'({timeout_seconds:.0f}s timeout)',
        flush=True)
    rclpy.init(args=None)
    node = NvbloxOutputProbe()
    executor = SingleThreadedExecutor()
    executor.add_node(node)
    deadline = time.monotonic() + timeout_seconds
    try:
        while time.monotonic() < deadline and node.result is None:
            executor.spin_once(timeout_sec=0.2)
        if node.result is None:
            print(
                '[container][WARN] Runtime output probe timed out waiting for '
                '/nvblox_node/static_esdf_pointcloud or /nvblox_node/static_map_slice. '
                'Readiness probes passed, but no runtime map output was observed yet.',
                flush=True)
            return 1
        topic_name, details = node.result
        print(f'[container][INFO] Runtime output probe received {topic_name}: {details}', flush=True)
        return 0
    finally:
        executor.remove_node(node)
        node.destroy_node()
        rclpy.shutdown()
sys.exit(main())
PY
}
# Relay INT/TERM to both background children so Ctrl-C stops everything.
forward_signal() {
local signal="$1"
[[ -n "${LAUNCH_PID}" ]] && kill "-${signal}" "${LAUNCH_PID}" 2>/dev/null || true
[[ -n "${OUTPUT_PROBE_PID}" ]] && kill "-${signal}" "${OUTPUT_PROBE_PID}" 2>/dev/null || true
}
trap 'forward_signal INT' INT
trap 'forward_signal TERM' TERM
# Start the demo launch and the output probe concurrently.
ros2 launch nvblox_examples_bringup "${NVBLOX_LAUNCH_FILE}" &
LAUNCH_PID=$!
probe_nvblox_runtime_output &
OUTPUT_PROBE_PID=$!
# Wait for the launch to finish; capture its status without tripping errexit.
set +e
wait "${LAUNCH_PID}"
launch_status=$?
set -e
# The probe is advisory: stop it if still running, then mirror the launch status.
if [[ -n "${OUTPUT_PROBE_PID}" ]] && kill -0 "${OUTPUT_PROBE_PID}" 2>/dev/null; then
kill -TERM "${OUTPUT_PROBE_PID}" 2>/dev/null || true
fi
wait "${OUTPUT_PROBE_PID}" 2>/dev/null || true
exit "${launch_status}"
================================================
FILE: reComputer/scripts/nvblox/docker/prepare_container_workspace.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
ROS_DISTRO="${ROS_DISTRO:-humble}"
FORCE_REBUILD="${FORCE_REBUILD:-0}"
SETUP_IMAGE_ID="${SETUP_IMAGE_ID:-}"
SETUP_IMAGE_CONTEXT_HASH="${SETUP_IMAGE_CONTEXT_HASH:-}"
COMMUNITY_REPO_URL="${COMMUNITY_REPO_URL:-https://github.com/jjjadand/isaac-NVblox-Orbbec.git}"
COMMUNITY_REPO_BRANCH="${COMMUNITY_REPO_BRANCH:-main}"
OFFICIAL_NVBLOX_REPO_URL="${OFFICIAL_NVBLOX_REPO_URL:-https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox.git}"
OFFICIAL_NVBLOX_REPO_BRANCH="${OFFICIAL_NVBLOX_REPO_BRANCH:-release-3.2}"
WORKSPACE_SPEC_VERSION="${EXPECTED_WORKSPACE_SPEC_VERSION:-static-demo-final-v3}"
ISAAC_WS="/workspaces/isaac_ros-dev"
SRC_DIR="${ISAAC_WS}/src"
SETUP_DIR="${ISAAC_WS}/.setup-nvbox"
STAMP_PATH="${SETUP_DIR}/container_workspace.env"
COMMUNITY_REPO_PATH="${SETUP_DIR}/isaac-NVblox-Orbbec"
OFFICIAL_NVBLOX_REPO_PATH="${SETUP_DIR}/isaac_ros_nvblox"
COMMUNITY_COMMON_ROOT="${COMMUNITY_REPO_PATH}/src/isaac_ros_common"
COMMUNITY_NITROS_ROOT="${COMMUNITY_REPO_PATH}/src/isaac_ros_nitros"
COMMUNITY_NVBLOX_ROOT="${COMMUNITY_REPO_PATH}/src/isaac_ros_nvblox"
OFFICIAL_NVBLOX_ROOT="${OFFICIAL_NVBLOX_REPO_PATH}"
COMMUNITY_COMMON_PACKAGE_PATHS=(
"isaac_common"
"isaac_ros_common"
"isaac_ros_launch_utils"
"isaac_ros_tensor_list_interfaces"
)
COMMUNITY_NITROS_PACKAGE_PATHS=(
"isaac_ros_gxf"
"isaac_ros_nitros"
"isaac_ros_managed_nitros"
"isaac_ros_nitros_type/isaac_ros_nitros_camera_info_type"
"isaac_ros_nitros_type/isaac_ros_nitros_image_type"
"isaac_ros_nitros_type/isaac_ros_nitros_tensor_list_type"
"isaac_ros_gxf_extensions/gxf_isaac_message_compositor"
"isaac_ros_gxf_extensions/gxf_isaac_optimizer"
"isaac_ros_gxf_extensions/gxf_isaac_gxf_helpers"
"isaac_ros_gxf_extensions/gxf_isaac_sight"
"isaac_ros_gxf_extensions/gxf_isaac_atlas"
"isaac_ros_gxf_extensions/gxf_isaac_gems"
)
OFFICIAL_NVBLOX_PACKAGE_PATHS=(
"nvblox_msgs"
"nvblox_ros_common"
"nvblox_ros_python_utils"
"nvblox_ros"
"nvblox_rviz_plugin"
"nvblox_examples/nvblox_examples_bringup"
)
STATIC_DEMO_OVERLAY_FILE_PATHS=(
"nvblox_examples/nvblox_examples_bringup/config/visualization/orbbec_example.rviz"
)
GENERATED_LAUNCH_FILE_PATHS=(
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py"
)
GENERATED_CONFIG_FILE_PATHS=(
"nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
REQUIRED_SRC_PATHS=(
"isaac_common"
"isaac_ros_common"
"isaac_ros_launch_utils"
"isaac_ros_tensor_list_interfaces"
"isaac_ros_gxf"
"isaac_ros_nitros"
"isaac_ros_managed_nitros"
"isaac_ros_nitros_type/isaac_ros_nitros_camera_info_type"
"isaac_ros_nitros_type/isaac_ros_nitros_image_type"
"isaac_ros_nitros_type/isaac_ros_nitros_tensor_list_type"
"isaac_ros_gxf_extensions/gxf_isaac_message_compositor"
"isaac_ros_gxf_extensions/gxf_isaac_optimizer"
"isaac_ros_gxf_extensions/gxf_isaac_gxf_helpers"
"isaac_ros_gxf_extensions/gxf_isaac_sight"
"isaac_ros_gxf_extensions/gxf_isaac_atlas"
"isaac_ros_gxf_extensions/gxf_isaac_gems"
"nvblox_msgs"
"nvblox_ros_common"
"nvblox_ros_python_utils"
"nvblox_ros"
"nvblox_rviz_plugin"
"nvblox_examples/nvblox_examples_bringup"
)
REQUIRED_SRC_FILE_PATHS=(
"nvblox_ros/CMakeLists.txt"
"nvblox_ros/nvblox_core/CMakeLists.txt"
"nvblox_ros/nvblox_core/cmake/cuda/setup_compute_capability.cmake"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py"
"nvblox_examples/nvblox_examples_bringup/config/visualization/orbbec_example.rviz"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py"
"nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py"
"nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
EXCLUDED_SRC_PATHS=(
"isaac_ros_pynitros"
"isaac_ros_managed_nitros_examples"
"isaac_ros_nitros_bridge"
"isaac_ros_nitros_topic_tools"
"isaac_ros_visual_slam"
"isaac_ros_visual_slam_interfaces"
"nvblox_nav2"
"nvblox_examples/nvblox_image_padding"
"nvblox_examples/semantic_label_conversion"
)
STATIC_DEMO_REMOVED_DEPENDENCIES=(
"nova_carter_navigation"
"isaac_ros_visual_slam"
"isaac_ros_visual_slam_interfaces"
"isaac_ros_peoplenet_models_install"
"isaac_ros_detectnet"
"isaac_ros_peoplesemseg_models_install"
"isaac_ros_dnn_image_encoder"
"isaac_ros_triton"
"isaac_ros_unet"
"semantic_label_conversion"
"nvblox_image_padding"
)
ROSDEP_SKIP_KEYS=(
"isaac_ros_peoplenet_models_install"
"isaac_ros_detectnet"
"isaac_ros_image_proc"
)
COLCON_TARGETS=(
"nvblox_examples_bringup"
)
RUNTIME_REQUIRED_PACKAGES=(
"nvblox_examples_bringup"
"nvblox_ros"
)
INSTALL_REQUIRED_FILE_PATHS=(
"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_transforms.launch.py"
"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_example.launch.py"
"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_debug.launch.py"
"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py"
"install/nvblox_examples_bringup/share/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
log() {
    # Print an informational message with a timestamp prefix.
    local stamp
    stamp="$(date '+%Y-%m-%d %H:%M:%S')"
    printf '[container][%s] %s\n' "${stamp}" "$*"
}
die() {
    # Report a fatal error on stderr, then stop with a failing status.
    local message="$*"
    printf '[container][ERROR] %s\n' "${message}" >&2
    exit 1
}
source_ros() {
    # Source the ROS distro environment plus the workspace overlay (when
    # it has been built). The setup scripts are not nounset-clean, so
    # `set -u` is relaxed for the duration and restored afterwards.
    local had_nounset=0
    if [[ $- == *u* ]]; then
        had_nounset=1
        set +u
    fi
    # shellcheck disable=SC1091
    source "/opt/ros/${ROS_DISTRO}/setup.bash"
    if [[ -f "${ISAAC_WS}/install/setup.bash" ]]; then
        # shellcheck disable=SC1090
        source "${ISAAC_WS}/install/setup.bash"
    fi
    if [[ "${had_nounset}" -eq 1 ]]; then
        set -u
    fi
}
ensure_rosdep_ready() {
    # Initialize rosdep on first use (best-effort), then refresh its index.
    local default_list="/etc/ros/rosdep/sources.list.d/20-default.list"
    if [[ ! -f "${default_list}" ]]; then
        log "Initializing rosdep."
        rosdep init || true
    fi
    log "Updating rosdep."
    rosdep update
}
ensure_git_safe_directory() {
    # Register a path in Git's global safe.directory list, skipping empty
    # or nonexistent paths and entries that are already present.
    local path="$1"
    if [[ -z "${path}" || ! -e "${path}" ]]; then
        return 0
    fi
    if ! git config --global --get-all safe.directory 2>/dev/null | grep -Fqx "${path}"; then
        git config --global --add safe.directory "${path}"
    fi
}
# Resolve the real gitdir of a checkout: a plain repo has a .git directory,
# while a worktree/submodule has a .git *file* containing a "gitdir: <path>"
# pointer (possibly relative to the repo). Prints the resolved path on
# stdout; returns 1 when it cannot be determined.
resolve_gitdir_path() {
local repo_path="$1"
local dot_git_path="${repo_path}/.git"
local gitdir_value=""
if [[ -d "${dot_git_path}" ]]; then
printf '%s\n' "${dot_git_path}"
return 0
fi
if [[ -f "${dot_git_path}" ]]; then
# Take the first "gitdir: " line from the pointer file.
gitdir_value="$(sed -n 's/^gitdir: //p' "${dot_git_path}" | head -n 1)"
[[ -n "${gitdir_value}" ]] || return 1
if [[ "${gitdir_value}" = /* ]]; then
printf '%s\n' "${gitdir_value}"
else
# Relative pointer: normalize it against the repo path.
printf '%s\n' "$(cd "${repo_path}" && cd "${gitdir_value}" && pwd)"
fi
return 0
fi
return 1
}
ensure_repo_safe_directories() {
    # Mark both a repo's work tree and its resolved gitdir as Git-safe.
    local repo="$1"
    local gitdir=""
    ensure_git_safe_directory "${repo}"
    if gitdir="$(resolve_gitdir_path "${repo}" 2>/dev/null)"; then
        ensure_git_safe_directory "${gitdir}"
    fi
}
# Pull the unique repository paths out of Git's "detected dubious ownership
# in repository at '<path>'" error lines captured in the given log file.
extract_dubious_ownership_paths() {
local log_path="$1"
sed -n "s/.*detected dubious ownership in repository at '\(.*\)'/\1/p" "${log_path}" | sort -u
}
# For every repo path mentioned in a dubious-ownership error log, register
# both the work tree and its gitdir as Git-safe so a retry can succeed.
ensure_paths_from_ownership_log() {
local log_path="$1"
local repo_path=""
while IFS= read -r repo_path; do
[[ -n "${repo_path}" ]] || continue
ensure_repo_safe_directories "${repo_path}"
done < <(extract_dubious_ownership_paths "${log_path}")
}
assert_git_repo_metadata() {
    # A missing cache path is acceptable; an existing one must carry .git
    # metadata, otherwise the cache is corrupt and the script aborts.
    local repo_path="$1"
    local label="$2"
    if [[ -e "${repo_path}" && ! -e "${repo_path}/.git" ]]; then
        die "Managed ${label} cache at ${repo_path} is missing Git metadata. Delete ${repo_path} and rerun prepare."
    fi
}
# Verify a cached repo is a usable Git work tree. On a dubious-ownership
# failure, register the offending paths as safe and retry once; any other
# failure (or a second failure) aborts with instructions to delete the cache.
assert_git_repo_accessible() {
local repo_path="$1"
local label="$2"
local git_log
[[ -e "${repo_path}" ]] || return 0
assert_git_repo_metadata "${repo_path}" "${label}"
ensure_repo_safe_directories "${repo_path}"
git_log="$(mktemp)"
if git -C "${repo_path}" rev-parse --is-inside-work-tree >/dev/null 2>"${git_log}"; then
rm -f "${git_log}"
return 0
fi
# First attempt failed: recover only from dubious-ownership errors.
if grep -q "detected dubious ownership" "${git_log}"; then
ensure_paths_from_ownership_log "${git_log}"
if git -C "${repo_path}" rev-parse --is-inside-work-tree >/dev/null 2>"${git_log}"; then
rm -f "${git_log}"
return 0
fi
fi
cat "${git_log}" >&2 || true
rm -f "${git_log}"
die "Managed ${label} cache at ${repo_path} is not usable. Delete ${repo_path} and rerun prepare."
}
initialize_managed_git_access() {
    # Make sure a writable global gitconfig exists (best-effort), then
    # pre-register every managed checkout as a Git-safe directory.
    mkdir -p "${HOME}" >/dev/null 2>&1 || true
    touch "${HOME}/.gitconfig" >/dev/null 2>&1 || true
    local repo
    for repo in \
        "${COMMUNITY_REPO_PATH}" \
        "${OFFICIAL_NVBLOX_REPO_PATH}" \
        "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core"; do
        ensure_repo_safe_directories "${repo}"
    done
}
# Sanity-check every managed Git cache (community repo, official nvblox
# repo, and its nvblox_core submodule) before any clone/update work.
verify_managed_git_cache_state() {
assert_git_repo_accessible "${COMMUNITY_REPO_PATH}" "community repo"
assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}" "official Isaac ROS Nvblox repo"
assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core" "official Isaac ROS Nvblox submodule"
}
# Clone a repo shallowly on first use, or refresh an existing clean clone
# to the tip of the requested branch. Refuses to touch a clone with local
# modifications. Args: URL, branch, destination path, display name.
clone_or_update_repo() {
local repo_url="$1"
local repo_branch="$2"
local repo_path="$3"
local repo_name="$4"
mkdir -p "${SRC_DIR}" "${SETUP_DIR}"
if [[ ! -d "${repo_path}/.git" ]]; then
log "Cloning ${repo_name} from ${repo_url}."
git clone --branch "${repo_branch}" --depth 1 "${repo_url}" "${repo_path}"
ensure_repo_safe_directories "${repo_path}"
return 0
fi
assert_git_repo_accessible "${repo_path}" "${repo_name}"
# Never overwrite local edits in a managed cache.
if [[ -n "$(git -C "${repo_path}" status --porcelain)" ]]; then
die "Managed repo has local changes at ${repo_path}."
fi
log "Refreshing ${repo_name}."
# Shallow-fetch the branch tip and hard-switch the local branch to it.
git -C "${repo_path}" fetch --depth 1 origin "${repo_branch}"
git -C "${repo_path}" checkout -B "${repo_branch}" "origin/${repo_branch}"
}
# Initialize/update one submodule of a managed repo. A dubious-ownership
# failure triggers one retry after registering the offending paths as
# Git-safe; any other failure (or a second failure) aborts.
# Args: repo path, submodule path (relative), display label.
sync_git_submodule() {
local repo_path="$1"
local submodule_path="$2"
local label="$3"
local submodule_repo_path="${repo_path}/${submodule_path}"
local git_log=""
assert_git_repo_accessible "${repo_path}" "${label}"
ensure_repo_safe_directories "${submodule_repo_path}"
log "Syncing ${label} submodule ${submodule_path}."
git -C "${repo_path}" submodule sync -- "${submodule_path}"
git_log="$(mktemp)"
if ! git -C "${repo_path}" submodule update --init --depth 1 -- "${submodule_path}" 2>"${git_log}"; then
if grep -q "detected dubious ownership" "${git_log}"; then
# Recoverable: whitelist the reported paths and retry once.
ensure_paths_from_ownership_log "${git_log}"
ensure_repo_safe_directories "${repo_path}"
ensure_repo_safe_directories "${submodule_repo_path}"
: > "${git_log}"
if ! git -C "${repo_path}" submodule update --init --depth 1 -- "${submodule_path}" 2>"${git_log}"; then
cat "${git_log}" >&2 || true
rm -f "${git_log}"
die "Failed to sync ${label} submodule ${submodule_path} after refreshing Git safe.directory entries."
fi
else
cat "${git_log}" >&2 || true
rm -f "${git_log}"
die "Failed to sync ${label} submodule ${submodule_path}."
fi
fi
rm -f "${git_log}"
assert_git_repo_accessible "${submodule_repo_path}" "${label} submodule"
}
# Check that the colcon install tree is complete: the overlay setup exists,
# every required runtime package resolves, and every required installed
# file is present. Returns non-zero on the first missing piece.
verify_workspace_install() {
local package_name=""
local file_path=""
[[ -f "${ISAAC_WS}/install/setup.bash" ]] || return 1
source_ros
for package_name in "${RUNTIME_REQUIRED_PACKAGES[@]}"; do
ros2 pkg prefix "${package_name}" >/dev/null 2>&1 || return 1
done
for file_path in "${INSTALL_REQUIRED_FILE_PATHS[@]}"; do
[[ -f "${ISAAC_WS}/${file_path}" ]] || return 1
done
}
# Decide whether the existing workspace stamp matches the current inputs
# (image, context hash, pinned commits, spec version) AND the workspace
# still verifies. Any mismatch returns non-zero, forcing a rebuild.
stamp_current() {
[[ -f "${STAMP_PATH}" ]] || return 1
# shellcheck disable=SC1090
source "${STAMP_PATH}"
[[ "${STAMP_IMAGE_ID:-}" == "${SETUP_IMAGE_ID}" ]] || return 1
[[ "${STAMP_IMAGE_CONTEXT_HASH:-}" == "${SETUP_IMAGE_CONTEXT_HASH}" ]] || return 1
[[ "${STAMP_COMMUNITY_COMMIT:-}" == "${COMMUNITY_COMMIT}" ]] || return 1
[[ "${STAMP_OFFICIAL_NVBLOX_COMMIT:-}" == "${OFFICIAL_NVBLOX_COMMIT}" ]] || return 1
[[ "${STAMP_OFFICIAL_NVBLOX_CORE_COMMIT:-}" == "${OFFICIAL_NVBLOX_CORE_COMMIT}" ]] || return 1
[[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" == "${WORKSPACE_SPEC_VERSION}" ]] || return 1
verify_synced_workspace_layout
verify_workspace_install
}
# Record the inputs of this build (image, context hash, commits, spec
# version, timestamp) as shell assignments; stamp_current sources this
# file later to decide whether a rebuild is needed. %q keeps values safe.
write_stamp() {
{
printf 'STAMP_IMAGE_ID=%q\n' "${SETUP_IMAGE_ID}"
printf 'STAMP_IMAGE_CONTEXT_HASH=%q\n' "${SETUP_IMAGE_CONTEXT_HASH}"
printf 'STAMP_COMMUNITY_COMMIT=%q\n' "${COMMUNITY_COMMIT}"
printf 'STAMP_OFFICIAL_NVBLOX_COMMIT=%q\n' "${OFFICIAL_NVBLOX_COMMIT}"
printf 'STAMP_OFFICIAL_NVBLOX_CORE_COMMIT=%q\n' "${OFFICIAL_NVBLOX_CORE_COMMIT}"
printf 'STAMP_WORKSPACE_SPEC_VERSION=%q\n' "${WORKSPACE_SPEC_VERSION}"
printf 'STAMPED_AT=%q\n' "$(date -Is 2>/dev/null || date)"
} > "${STAMP_PATH}"
}
# Empty SRC_DIR in place: remove every top-level entry (including dotfiles)
# while keeping the directory itself.
clear_managed_src_dir() {
mkdir -p "${SRC_DIR}"
find "${SRC_DIR}" -mindepth 1 -maxdepth 1 -exec rm -rf {} +
}
copy_package_path() {
    # Copy one package subtree from a source root into SRC_DIR, replacing
    # any previous copy at the same relative destination.
    local root="$1"
    local rel="$2"
    local from="${root}/${rel}"
    local to="${SRC_DIR}/${rel}"
    if [[ ! -d "${from}" ]]; then
        die "Expected package path ${rel} is missing from ${root}."
    fi
    mkdir -p "$(dirname "${to}")"
    rm -rf "${to}"
    cp -a "${from}" "${to}"
}
copy_package_root() {
    # Copy a repository whose root is itself a ROS package (has a
    # top-level package.xml) into SRC_DIR/<package_name>, excluding .git.
    local source_root="$1"
    local package_name="$2"
    local dest_path="${SRC_DIR}/${package_name}"
    [[ -f "${source_root}/package.xml" ]] || die "Expected root package.xml is missing from ${source_root}."
    # Start from a clean destination. (The original also ran `mkdir -p`
    # immediately before this `rm -rf` — redundant, removed.)
    rm -rf "${dest_path}"
    mkdir -p "${dest_path}"
    find "${source_root}" -mindepth 1 -maxdepth 1 ! -name '.git' -exec cp -a {} "${dest_path}/" \;
}
sync_package_group() {
    # Copy every listed package path from one source root into SRC_DIR.
    local root="$1"
    shift
    local rel
    for rel in "$@"; do
        copy_package_path "${root}" "${rel}"
    done
}
apply_overlay_files() {
    # Overlay individual files from a source root onto the workspace
    # sources, creating destination directories as needed.
    local root="$1"
    shift
    local rel from to
    for rel in "$@"; do
        from="${root}/${rel}"
        to="${SRC_DIR}/${rel}"
        [[ -f "${from}" ]] || die "Expected overlay file ${rel} is missing from ${root}."
        mkdir -p "$(dirname "${to}")"
        cp -a "${from}" "${to}"
    done
}
# Generate the static-TF launch file into the workspace sources. The
# emitted ROS 2 launch file publishes the fixed chain
# odom -> base_link -> camera_link -> camera_color_optical_frame (plus a
# camera0_link alias and a depth optical frame). The heredoc content is
# the generated program itself and is written verbatim.
write_orbbec_transforms_launch() {
cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu
def static_tf(parent: str, child: str, xyz: tuple[float, float, float], rpy: tuple[float, float, float]) -> Node:
    return Node(
        package='tf2_ros',
        executable='static_transform_publisher',
        arguments=[
            '--x', str(xyz[0]),
            '--y', str(xyz[1]),
            '--z', str(xyz[2]),
            '--roll', str(rpy[0]),
            '--pitch', str(rpy[1]),
            '--yaw', str(rpy[2]),
            '--frame-id', parent,
            '--child-frame-id', child,
        ],
        output='screen')
def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    actions = args.get_launch_actions()
    actions.append(static_tf('odom', 'base_link', (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))
    actions.append(static_tf('base_link', 'camera_link', (0.1, 0.0, 0.2), (0.0, 0.0, 0.0)))
    actions.append(static_tf('camera_link', 'camera0_link', (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))
    actions.append(static_tf(
        'camera_link',
        'camera_color_optical_frame',
        (0.0, 0.0, 0.0),
        (-1.57079632679, 0.0, -1.57079632679)))
    actions.append(static_tf(
        'camera_color_optical_frame',
        'camera_depth_optical_frame',
        (0.0, 0.0, 0.0),
        (0.0, 0.0, 0.0)))
    return LaunchDescription(actions)
EOF
}
write_orbbec_static_config() {
    # Emit the nvblox specialization for a statically mounted Orbbec camera:
    # no lidar, SENSOR_DATA QoS, and a thin ESDF slice around the base frame.
    cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml" <<'EOF'
/**:
  ros__parameters:
    use_lidar: false
    input_qos: "SENSOR_DATA"
    map_clearing_frame_id: "base_link"
    esdf_slice_bounds_visualization_attachment_frame_id: "base_link"
    static_mapper:
      esdf_slice_height: 0.0
      esdf_slice_min_height: -0.1
      esdf_slice_max_height: 0.3
EOF
}
write_orbbec_example_launch() {
    # Generate the full demo launch file: transforms, nvblox node remapped to
    # the Orbbec camera topics, and an RViz window. Payload is written verbatim.
    cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu

from nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME


def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    args.add_arg('log_level', 'info', choices=['debug', 'info', 'warn'], cli=True)
    actions = args.get_launch_actions()
    actions.append(
        lu.include(
            'nvblox_examples_bringup',
            'launch/orbbec_transforms.launch.py'))
    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))
    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')
    realsense_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_realsense.yaml')
    orbbec_static_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_orbbec_static.yaml')
    nvblox_node = ComposableNode(
        name='nvblox_node',
        package='nvblox_ros',
        plugin='nvblox::NvbloxNode',
        remappings=[
            ('camera_0/depth/image', '/camera/depth/image_raw'),
            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),
            ('camera_0/color/image', '/camera/color/image_raw'),
            ('camera_0/color/camera_info', '/camera/color/camera_info'),
        ],
        parameters=[
            base_config,
            realsense_config,
            orbbec_static_config,
            {'num_cameras': 1},
            {'use_lidar': False},
        ],
    )
    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))
    rviz_config_path = lu.get_path(
        'nvblox_examples_bringup',
        'config/visualization/orbbec_example.rviz')
    actions.append(
        Node(
            package='rviz2',
            executable='rviz2',
            arguments=['-d', str(rviz_config_path)],
            output='screen'))
    return LaunchDescription(actions)
EOF
}
write_orbbec_debug_launch() {
    # Generate the debug variant of the demo launch: same nvblox pipeline as
    # orbbec_example.launch.py but with debug log level and no RViz.
    cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu

from nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME


def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    args.add_arg('log_level', 'debug', choices=['debug', 'info', 'warn'], cli=True)
    actions = args.get_launch_actions()
    actions.append(
        lu.include(
            'nvblox_examples_bringup',
            'launch/orbbec_transforms.launch.py'))
    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))
    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')
    realsense_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_realsense.yaml')
    orbbec_static_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_orbbec_static.yaml')
    nvblox_node = ComposableNode(
        name='nvblox_node',
        package='nvblox_ros',
        plugin='nvblox::NvbloxNode',
        remappings=[
            ('camera_0/depth/image', '/camera/depth/image_raw'),
            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),
            ('camera_0/color/image', '/camera/color/image_raw'),
            ('camera_0/color/camera_info', '/camera/color/camera_info'),
        ],
        parameters=[
            base_config,
            realsense_config,
            orbbec_static_config,
            {'num_cameras': 1},
            {'use_lidar': False},
        ],
    )
    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))
    return LaunchDescription(actions)
EOF
}
write_orbbec_standalone_launch() {
    # Generate the headless (no RViz) standalone launch file; identical nvblox
    # wiring to the example launch but intended to run without visualization.
    cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu

from nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME


def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    args.add_arg('log_level', 'info', choices=['debug', 'info', 'warn'], cli=True)
    actions = args.get_launch_actions()
    actions.append(
        lu.include(
            'nvblox_examples_bringup',
            'launch/orbbec_transforms.launch.py'))
    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))
    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')
    realsense_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_realsense.yaml')
    orbbec_static_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_orbbec_static.yaml')
    nvblox_node = ComposableNode(
        name='nvblox_node',
        package='nvblox_ros',
        plugin='nvblox::NvbloxNode',
        remappings=[
            ('camera_0/depth/image', '/camera/depth/image_raw'),
            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),
            ('camera_0/color/image', '/camera/color/image_raw'),
            ('camera_0/color/camera_info', '/camera/color/camera_info'),
        ],
        parameters=[
            base_config,
            realsense_config,
            orbbec_static_config,
            {'num_cameras': 1},
            {'use_lidar': False},
        ],
    )
    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))
    return LaunchDescription(actions)
EOF
}
generate_static_demo_launches() {
    # Regenerate every managed static-demo launch/config file from scratch.
    log "Generating managed static demo launch files."
    local bringup_root="${SRC_DIR}/nvblox_examples/nvblox_examples_bringup"
    mkdir -p \
        "${bringup_root}/launch" \
        "${bringup_root}/config/nvblox/specializations"
    write_orbbec_transforms_launch
    write_orbbec_static_config
    write_orbbec_example_launch
    write_orbbec_debug_launch
    write_orbbec_standalone_launch
}
patch_manifest_remove_dependencies() {
    # Remove the given dependency declarations from a package manifest and
    # verify each one is really gone afterwards.
    # NOTE(review): the original body was syntactically broken (a sed call
    # merged into an if with no condition, and it referenced an undefined
    # ${bringup_manifest}); reconstructed as delete-then-verify per dependency.
    local manifest_path="$1"
    shift
    local dependency_name=""
    [[ -f "${manifest_path}" ]] || die "Expected manifest does not exist: ${manifest_path}"
    for dependency_name in "$@"; do
        # Drop any dependency tag whose element text is exactly this name,
        # e.g. <depend>name</depend> / <exec_depend>name</exec_depend>.
        sed -i "/>${dependency_name}</d" "${manifest_path}"
        if grep -q ">${dependency_name}<" "${manifest_path}"; then
            die "Static demo manifest still declares excluded dependency ${dependency_name}."
        fi
    done
}
sync_static_demo_workspace() {
    # Rebuild the managed src/ tree from the pinned checkouts: wipe the tree,
    # copy the package whitelist, apply overlay files, regenerate launch
    # files, patch manifests, then verify the resulting layout.
    log "Syncing package whitelist into the managed workspace."
    clear_managed_src_dir
    sync_package_group "${COMMUNITY_COMMON_ROOT}" "${COMMUNITY_COMMON_PACKAGE_PATHS[@]}"
    sync_package_group "${COMMUNITY_NITROS_ROOT}" "${COMMUNITY_NITROS_PACKAGE_PATHS[@]}"
    sync_package_group "${OFFICIAL_NVBLOX_ROOT}" "${OFFICIAL_NVBLOX_PACKAGE_PATHS[@]}"
    apply_overlay_files "${COMMUNITY_NVBLOX_ROOT}" "${STATIC_DEMO_OVERLAY_FILE_PATHS[@]}"
    generate_static_demo_launches
    patch_static_demo_manifests
    verify_synced_workspace_layout
}
rebuild_workspace() {
    # Resolve dependencies with rosdep, then colcon-build the managed workspace.
    # If the first build fails, apply the known CUDA 12.6 stdgpu compatibility
    # patch once and retry exactly once.
    local rosdep_dependency_args=(
        --dependency-types buildtool
        --dependency-types buildtool_export
        --dependency-types build
        --dependency-types build_export
        --dependency-types exec
    )
    local rosdep_skip_args=()
    local skip_key=""
    source_ros
    ensure_rosdep_ready
    for skip_key in "${ROSDEP_SKIP_KEYS[@]}"; do
        rosdep_skip_args+=(--skip-keys "${skip_key}")
    done
    log "Installing workspace dependencies with rosdep."
    (
        cd "${ISAAC_WS}"
        rosdep install \
            --from-paths src \
            --ignore-src \
            -r \
            -y \
            --rosdistro "${ROS_DISTRO}" \
            "${rosdep_dependency_args[@]}" \
            "${rosdep_skip_args[@]}"
    )
    # Build all whitelisted targets; subshell keeps the cwd change local.
    run_colcon_build() {
        (
            cd "${ISAAC_WS}"
            colcon build \
                --packages-up-to "${COLCON_TARGETS[@]}" \
                --symlink-install \
                --event-handlers console_direct+ \
                --cmake-args -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=OFF
        )
    }
    # Patch the stdgpu sources fetched into build/_deps so they compile under
    # CUDA 12.6 (qualify construct_at/destroy_at/forward/to_address with the
    # stdgpu:: namespace). Returns 0 only if at least one file was patched.
    # NOTE(review): the replacement strings below look like they may have lost
    # template angle brackets during extraction — verify against the upstream
    # stdgpu sources before relying on them.
    patch_ext_stdgpu_cuda_compat() {
        local ext_stdgpu_root=""
        local memory_detail_path=""
        local unordered_base_path=""
        local patched_any=1
        while IFS= read -r ext_stdgpu_root; do
            memory_detail_path="${ext_stdgpu_root}/src/stdgpu/impl/memory_detail.h"
            unordered_base_path="${ext_stdgpu_root}/src/stdgpu/impl/unordered_base_detail.cuh"
            if [[ -f "${memory_detail_path}" ]]; then
                if python3 - "${memory_detail_path}" <<'PY'
from pathlib import Path
import sys

path = Path(sys.argv[1])
text = path.read_text()
replacements = {
    "construct_at(p, forward(args)...);": "stdgpu::construct_at(p, stdgpu::forward(args)...);",
    "destroy_at(p);": "stdgpu::destroy_at(p);",
    "return to_address(pointer_traits::to_address(p));": "return stdgpu::to_address(pointer_traits::to_address(p));",
}
changed = False
for old, new in replacements.items():
    if old in text:
        text = text.replace(old, new)
        changed = True
if changed:
    path.write_text(text)
sys.exit(0 if changed else 1)
PY
                then
                    log "Applied CUDA 12.6 stdgpu compatibility patch to ${memory_detail_path}."
                    patched_any=0
                fi
            fi
            if [[ -f "${unordered_base_path}" ]]; then
                if python3 - "${unordered_base_path}" <<'PY'
from pathlib import Path
import sys

path = Path(sys.argv[1])
text = path.read_text()
replacements = {
    "_base.insert(*to_address(_begin + i));": "_base.insert(*stdgpu::to_address(_begin + i));",
}
changed = False
for old, new in replacements.items():
    if old in text:
        text = text.replace(old, new)
        changed = True
if changed:
    path.write_text(text)
sys.exit(0 if changed else 1)
PY
                then
                    log "Applied CUDA 12.6 stdgpu compatibility patch to ${unordered_base_path}."
                    patched_any=0
                fi
            fi
        done < <(find "${ISAAC_WS}/build" -type d -path '*/_deps/ext_stdgpu-src' 2>/dev/null | sort)
        return "${patched_any}"
    }
    log "Building container workspace."
    rm -rf "${ISAAC_WS}/build" "${ISAAC_WS}/install" "${ISAAC_WS}/log"
    if run_colcon_build; then
        return 0
    fi
    if patch_ext_stdgpu_cuda_compat; then
        log "Retrying container workspace build after applying stdgpu CUDA compatibility patches."
        # Bug fix: the retry result was previously unchecked, so a failed
        # retry still fell through to success.
        run_colcon_build || die "Container workspace build failed even after applying stdgpu CUDA compatibility patches."
        return 0
    fi
    die "Container workspace build failed before the compatibility patch could be applied."
}
# --- Main flow: fetch pinned sources, then (re)build the container workspace ---
initialize_managed_git_access
verify_managed_git_cache_state
clone_or_update_repo "${COMMUNITY_REPO_URL}" "${COMMUNITY_REPO_BRANCH}" "${COMMUNITY_REPO_PATH}" "community repo"
clone_or_update_repo "${OFFICIAL_NVBLOX_REPO_URL}" "${OFFICIAL_NVBLOX_REPO_BRANCH}" "${OFFICIAL_NVBLOX_REPO_PATH}" "official Isaac ROS Nvblox repo"
sync_git_submodule "${OFFICIAL_NVBLOX_REPO_PATH}" "nvblox_ros/nvblox_core" "official Isaac ROS Nvblox"
# Fail fast if any of the three checkouts is unusable before reading commits.
assert_git_repo_accessible "${COMMUNITY_REPO_PATH}" "community repo"
assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}" "official Isaac ROS Nvblox repo"
assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core" "official Isaac ROS Nvblox submodule"
# Record the exact commits that will be stamped after a successful build.
COMMUNITY_COMMIT="$(git -C "${COMMUNITY_REPO_PATH}" rev-parse HEAD)"
OFFICIAL_NVBLOX_COMMIT="$(git -C "${OFFICIAL_NVBLOX_REPO_PATH}" rev-parse HEAD)"
OFFICIAL_NVBLOX_CORE_COMMIT="$(git -C "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core" rev-parse HEAD)"
# Skip the expensive rebuild when the stamp is current, unless forced.
if [[ "${FORCE_REBUILD}" != "1" ]] && stamp_current; then
    log "Container workspace is already current. Skipping rebuild."
    exit 0
fi
sync_static_demo_workspace
rebuild_workspace
verify_synced_workspace_layout
verify_workspace_install || die "Container workspace verification failed."
write_stamp
log "Container workspace preparation complete."
================================================
FILE: reComputer/scripts/nvblox/host/orbbec_mobile_host.launch.py
================================================
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import LoadComposableNodes, Node
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
    """Run the Orbbec camera driver as a composable node in a host-side container."""
    camera_config = LaunchConfiguration('config_file_path')
    container_id = LaunchConfiguration('component_container_name', default='orbbec_host_container')

    # Driver node description; remaps raw camera topics onto the isaac_ros-style
    # ~/output/* namespace consumed downstream.
    orbbec_node = ComposableNode(
        namespace='camera',
        name='orbbec_camera_node',
        package='orbbec_camera',
        plugin='orbbec_camera::OBCameraNodeDriver',
        parameters=[camera_config],
        remappings=[
            ('/camera/left_ir/image_raw', '~/output/infra_1'),
            ('/camera/right_ir/image_raw', '~/output/infra_2'),
            ('/camera/depth/image_raw', '~/output/depth'),
            ('/camera/depth_registered/points', '~/output/pointcloud'),
        ],
    )

    # Multithreaded component container that hosts the driver.
    host_container = Node(
        name=container_id,
        package='rclcpp_components',
        executable='component_container_mt',
        output='screen')

    loader = LoadComposableNodes(
        target_container=container_id,
        composable_node_descriptions=[orbbec_node])

    return LaunchDescription([
        DeclareLaunchArgument('config_file_path'),
        DeclareLaunchArgument('component_container_name', default_value='orbbec_host_container'),
        host_container,
        loader,
    ])
================================================
FILE: reComputer/scripts/nvblox/init.sh
================================================
#!/bin/bash
# Placeholder init step: all real work happens in the run stage.
printf '%s\n' "NVBlox preflight, image download, docker load, and demo setup are handled by 'reComputer run nvblox'."
================================================
FILE: reComputer/scripts/nvblox/lib/common.sh
================================================
#!/usr/bin/env bash
# Shared helper library for the NVBlox setup scripts.
# Source guard: bail out if already loaded into the current shell.
if [[ "${SETUP_NVBOX_COMMON_SH:-0}" == "1" ]]; then
    return 0
fi
readonly SETUP_NVBOX_COMMON_SH=1
# Absolute project root: the parent directory of this lib/ directory.
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
readonly PROJECT_ROOT
common_fatal() {
    # Emit a fatal error on stderr and terminate. Used during library
    # bootstrap, before the log()/die() helpers exist.
    local message="$*"
    printf '[setup-nvbox][ERROR] %s\n' "${message}" >&2
    exit 1
}
resolve_setup_user_name() {
    # Prefer the invoking non-root sudo user; otherwise the current user.
    case "${SUDO_USER:-}" in
        ''|root)
            id -un
            ;;
        *)
            printf '%s\n' "${SUDO_USER}"
            ;;
    esac
}
lookup_user_passwd_entry() {
    # Print the first passwd entry for the given user (empty when absent).
    local name="$1"
    getent passwd "${name}" 2>/dev/null | head -n 1
}
resolve_user_home() {
    # Resolve a user's home directory (passwd field 6); die when unresolvable.
    local user_name="$1"
    local entry
    entry="$(lookup_user_passwd_entry "${user_name}")"
    [[ -n "${entry}" ]] || common_fatal "Cannot resolve passwd entry for user ${user_name}."
    cut -d: -f6 <<<"${entry}"
}
# Identity of the real (non-root) user the setup acts on behalf of.
readonly SETUP_USER_NAME="$(resolve_setup_user_name)"
readonly SETUP_USER_HOME="$(resolve_user_home "${SETUP_USER_NAME}")"
readonly SETUP_USER_UID="$(id -u "${SETUP_USER_NAME}")"
readonly SETUP_USER_GID="$(id -g "${SETUP_USER_NAME}")"
# Managed demo tree and the sentinel file that marks it as ours.
readonly MANAGED_ROOT_DEFAULT="${SETUP_USER_HOME}/nvblox_demo"
readonly MANAGED_SENTINEL_NAME=".managed-by-setup-nvbox"
readonly ROS_DISTRO_DEFAULT="humble"
# Orbbec SDK checkout and the USB IDs of the Gemini2 camera.
readonly ORBBEC_VERSION="v2.3.4"
readonly ORBBEC_REPO_URL="https://github.com/orbbec/OrbbecSDK_ROS2.git"
readonly GEMINI2_USB_VENDOR_ID="2bc5"
readonly GEMINI2_USB_PRODUCT_ID="0670"
# Timeouts (seconds) for device readiness and graceful process shutdown.
readonly GEMINI2_READY_TIMEOUT_SECONDS=15
readonly GEMINI2_SIGNAL_TIMEOUT_SECONDS=5
readonly HOST_CAMERA_LOG_TAIL_LINES=40
# Source repo and docker image identities used by the demo.
readonly COMMUNITY_REPO_URL_DEFAULT="https://github.com/jjjadand/isaac-NVblox-Orbbec.git"
readonly COMMUNITY_REPO_BRANCH_DEFAULT="main"
readonly BASE_IMAGE_PREFERRED="isaac_ros_dev-aarch64:latest"
readonly DERIVED_IMAGE_TAG="local/isaac_ros_nvblox_orbbec:jp6-humble"
readonly CONTAINER_NAME_DEFAULT="isaac_ros_nvblox_orbbec"
readonly CONTAINER_WORKSPACE_SPEC_VERSION="static-demo-final-v3"
# Prebuilt image archive download location and local cache.
readonly NVBLOX_IMAGE_SHARE_URL_DEFAULT="https://seeedstudio88-my.sharepoint.com/:u:/g/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/IQCCDToomY6WSaRZdfsTs9vXAengb-SCEvNfSUgq0cipP6w?e=z9axor"
readonly NVBLOX_IMAGE_ARCHIVE_NAME_DEFAULT="nvblox_images.tar"
readonly NVBLOX_IMAGE_CACHE_DIR_DEFAULT="${SETUP_USER_HOME}/.cache/jetson-examples/nvblox"
# Fast DDS runtime profile location (relative to the managed root).
readonly FASTDDS_RUNTIME_DIR_RELATIVE=".runtime/fastdds"
readonly FASTDDS_UDP_ONLY_PROFILE_FILENAME="udp_only.xml"
# Environment variables that influence ROS 2 discovery; mirrored into the container.
readonly ROS_DISCOVERY_ENV_VARS=(
    "ROS_DOMAIN_ID"
    "ROS_LOCALHOST_ONLY"
    "RMW_IMPLEMENTATION"
    "ROS_AUTOMATIC_DISCOVERY_RANGE"
    "ROS_STATIC_PEERS"
    "CYCLONEDDS_URI"
    "CYCLONEDDS_HOME"
    "FASTDDS_DEFAULT_PROFILES_FILE"
    "FASTRTPS_DEFAULT_PROFILES_FILE"
)
# Subset of the above whose values are filesystem paths (need bind mounts).
readonly ROS_DISCOVERY_PATH_ENV_VARS=(
    "CYCLONEDDS_URI"
    "CYCLONEDDS_HOME"
    "FASTDDS_DEFAULT_PROFILES_FILE"
    "FASTRTPS_DEFAULT_PROFILES_FILE"
)
# Set to (sudo) by ensure_docker_access when the daemon needs elevation.
DOCKER_PREFIX=()
# Timestamped, leveled logging helpers used throughout the setup scripts.
timestamp() {
    date '+%Y-%m-%d %H:%M:%S'
}
log() {
    # Print "[timestamp] [LEVEL] message".
    local level="$1"
    shift
    printf '[%s] [%s] %s\n' "$(timestamp)" "${level}" "$*"
}
info() {
    log INFO "$@"
}
warn() {
    log WARN "$@"
}
error() {
    # Like the others but routed to stderr.
    log ERROR "$@" >&2
}
die() {
    # Log an error and abort the script.
    error "$@"
    exit 1
}
# Each resolver honors an environment override and falls back to the default.
resolve_nvblox_image_share_url() {
    printf '%s\n' "${NVBLOX_IMAGE_SHARE_URL:-${NVBLOX_IMAGE_SHARE_URL_DEFAULT}}"
}
resolve_nvblox_image_archive_name() {
    printf '%s\n' "${NVBLOX_IMAGE_ARCHIVE_NAME:-${NVBLOX_IMAGE_ARCHIVE_NAME_DEFAULT}}"
}
resolve_nvblox_image_cache_dir() {
    printf '%s\n' "${NVBLOX_IMAGE_CACHE_DIR:-${NVBLOX_IMAGE_CACHE_DIR_DEFAULT}}"
}
resolve_nvblox_image_archive_path() {
    # Join cache dir and archive name; both may be passed explicitly.
    local cache_dir="${1:-$(resolve_nvblox_image_cache_dir)}"
    local archive_name="${2:-$(resolve_nvblox_image_archive_name)}"
    printf '%s/%s\n' "${cache_dir%/}" "${archive_name}"
}
cleanup_nvblox_partial_downloads() {
    # Delete any interrupted (*.part) downloads in the image cache directory.
    local cache_dir="${1:-$(resolve_nvblox_image_cache_dir)}"
    local partial_file=""
    [[ -d "${cache_dir}" ]] || return 0
    while IFS= read -r partial_file; do
        [[ -n "${partial_file}" ]] || continue
        rm -f "${partial_file}"
        info "Removed partial NVBlox download ${partial_file}"
    done < <(find "${cache_dir}" -maxdepth 1 -type f -name '*.part' 2>/dev/null | sort)
}
ensure_supported_user_context() {
    # Reject a plain root login shell (no SUDO_USER) and sudo-from-root,
    # since the setup needs a real non-root user to own the managed files.
    if [[ "${EUID}" -eq 0 && -z "${SUDO_USER:-}" ]]; then
        die "Running from a root login shell is not supported. Use your normal user account, or invoke this script with sudo from that account."
    fi
    if [[ "${EUID}" -eq 0 && "${SETUP_USER_NAME}" == "root" ]]; then
        die "Cannot determine a non-root setup user from sudo context."
    fi
}
should_reexec_as_setup_user() {
    # True when running as root via sudo and we have not re-execed yet.
    [[ "${EUID}" -eq 0 ]] || return 1
    [[ -n "${SUDO_USER:-}" && "${SUDO_USER}" != "root" ]] || return 1
    [[ "${SETUP_NVBOX_REEXECED:-0}" != "1" ]]
}
reexec_as_setup_user() {
    # Replace this process with the same script run as the setup user,
    # forwarding MANAGED_ROOT and marking the re-exec to prevent loops.
    local script_path="$1"
    shift
    local env_args=("SETUP_NVBOX_REEXECED=1")
    if [[ -n "${MANAGED_ROOT:-}" ]]; then
        env_args+=("MANAGED_ROOT=${MANAGED_ROOT}")
    fi
    exec sudo -H -u "${SETUP_USER_NAME}" env "${env_args[@]}" bash "${script_path}" "$@"
}
run_sudo() {
    # Run a command with root privileges; skip sudo when already root.
    if (( EUID == 0 )); then
        "$@"
    else
        sudo "$@"
    fi
}
run_sudo_noninteractive() {
    # Run a command with root privileges without ever prompting for a
    # password (sudo -n).
    # Bug fix: the previous `"$@"; return 0` masked the command's exit status
    # when already running as root, so callers such as gemini2_refresh_udev
    # could not detect failures. Propagate the real status instead.
    if [[ "${EUID}" -eq 0 ]]; then
        "$@"
        return
    fi
    sudo -n "$@"
}
guard_managed_root_path() {
    # Refuse to touch an existing directory that lacks our sentinel file.
    local root="$1"
    local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
    if [[ -e "${root}" && ! -e "${sentinel}" ]]; then
        die "Managed root ${root} exists but is not owned by this project. Refusing to continue."
    fi
}
bootstrap_managed_root() {
    # Create the managed tree (logs/, .stamps/) and write the sentinel with
    # provenance metadata if it does not exist yet.
    local root="$1"
    local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
    guard_managed_root_path "${root}"
    mkdir -p "${root}/logs" "${root}/.stamps"
    if [[ ! -f "${sentinel}" ]]; then
        {
            printf 'managed_root=%s\n' "${root}"
            printf 'created_at=%s\n' "$(date -Is 2>/dev/null || date)"
            printf 'project_root=%s\n' "${PROJECT_ROOT}"
        } > "${sentinel}"
    fi
}
repair_managed_root_ownership() {
    # chown the whole tree back to the setup user if any entry has drifted
    # (e.g. files created while running under sudo).
    local root="$1"
    local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
    [[ -d "${root}" ]] || return 0
    [[ -f "${sentinel}" ]] || return 0
    if find "${root}" \( ! -uid "${SETUP_USER_UID}" -o ! -gid "${SETUP_USER_GID}" \) -print -quit 2>/dev/null | grep -q .; then
        info "Repairing managed root ownership under ${root}."
        run_sudo chown -R "${SETUP_USER_UID}:${SETUP_USER_GID}" "${root}"
    fi
}
require_bootstrapped_managed_root() {
    # Die unless bootstrap_managed_root has been run for this root.
    local root="$1"
    local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
    if [[ ! -f "${sentinel}" ]]; then
        die "Managed root ${root} is not prepared. Run with --prepare-only or the default mode first."
    fi
}
package_installed() {
    # True when dpkg reports the package as fully installed.
    local package_name="$1"
    dpkg-query -W -f='${Status}' "${package_name}" 2>/dev/null | grep -q 'install ok installed'
}
install_packages_if_missing() {
    # apt-install only the packages from the argument list that are absent;
    # skips the apt-get update entirely when nothing is missing.
    local missing=()
    local package_name
    for package_name in "$@"; do
        if ! package_installed "${package_name}"; then
            missing+=("${package_name}")
        fi
    done
    if ((${#missing[@]} == 0)); then
        return 0
    fi
    info "Installing apt packages: ${missing[*]}"
    run_sudo apt-get update
    run_sudo apt-get install -y --no-install-recommends "${missing[@]}"
}
assert_command() {
    # Abort unless the named command is resolvable on PATH.
    local name="$1"
    if ! command -v "${name}" >/dev/null 2>&1; then
        die "Required command not found: ${name}"
    fi
}
read_file_lower_trimmed() {
    # Print a file's contents with all whitespace removed and lowercased.
    local path="$1"
    tr -d '[:space:]' < "${path}" | tr '[:upper:]' '[:lower:]'
}
find_usb_device_with_ids() {
    # Walk up from a sysfs node until a parent directory exposes idVendor /
    # idProduct matching the Gemini2 camera; print that directory, or fail.
    local start_path="$1"
    local current_path=""
    current_path="$(readlink -f "${start_path}" 2>/dev/null || true)"
    [[ -n "${current_path}" ]] || return 1
    while [[ "${current_path}" != "/" ]]; do
        if [[ -f "${current_path}/idVendor" && -f "${current_path}/idProduct" ]]; then
            if [[ "$(read_file_lower_trimmed "${current_path}/idVendor")" == "${GEMINI2_USB_VENDOR_ID}" ]] && \
               [[ "$(read_file_lower_trimmed "${current_path}/idProduct")" == "${GEMINI2_USB_PRODUCT_ID}" ]]; then
                printf '%s\n' "${current_path}"
                return 0
            fi
        fi
        current_path="$(dirname "${current_path}")"
    done
    return 1
}
gemini2_usb_device_dirs() {
    # List every sysfs USB device directory whose vendor/product IDs match
    # the Gemini2 camera.
    local device_dir
    for device_dir in /sys/bus/usb/devices/*; do
        [[ -f "${device_dir}/idVendor" && -f "${device_dir}/idProduct" ]] || continue
        if [[ "$(read_file_lower_trimmed "${device_dir}/idVendor")" == "${GEMINI2_USB_VENDOR_ID}" ]] && \
           [[ "$(read_file_lower_trimmed "${device_dir}/idProduct")" == "${GEMINI2_USB_PRODUCT_ID}" ]]; then
            printf '%s\n' "${device_dir}"
        fi
    done
}
gemini2_usb_present() {
    # True when at least one Gemini2 USB device is enumerated.
    local usb_device=""
    usb_device="$(gemini2_usb_device_dirs | head -n 1 || true)"
    [[ -n "${usb_device}" ]]
}
gemini2_usb_link_speed_mbps() {
    # Print the negotiated USB link speed (Mbps) of the first Gemini2 device,
    # or print nothing (still returning 0) when it cannot be determined.
    local device_dir=""
    local speed_path=""
    local speed_value=""
    device_dir="$(gemini2_usb_device_dirs | head -n 1 || true)"
    [[ -n "${device_dir}" ]] || return 0
    speed_path="${device_dir}/speed"
    [[ -f "${speed_path}" ]] || return 0
    speed_value="$(tr -d '[:space:]' < "${speed_path}" 2>/dev/null || true)"
    # Only emit strictly numeric values; sysfs may briefly be empty/garbled.
    [[ "${speed_value}" =~ ^[0-9]+$ ]] || return 0
    printf '%s\n' "${speed_value}"
}
gemini2_video_nodes() {
    # Print the sorted, de-duplicated /dev/videoN nodes that belong to the
    # Gemini2 camera (matched by walking each node's sysfs parent chain).
    local video_sysfs_path=""
    local video_name=""
    for video_sysfs_path in /sys/class/video4linux/video*; do
        [[ -e "${video_sysfs_path}" ]] || continue
        if find_usb_device_with_ids "${video_sysfs_path}/device" >/dev/null 2>&1; then
            video_name="$(basename "${video_sysfs_path}")"
            # Skip sysfs entries whose /dev node has not been created yet.
            [[ -e "/dev/${video_name}" ]] || continue
            printf '/dev/%s\n' "${video_name}"
        fi
    done | sort -u
}
gemini2_video_nodes_joined() {
    # Same as gemini2_video_nodes but space-joined on a single line;
    # prints nothing when no node exists.
    local video_nodes=()
    mapfile -t video_nodes < <(gemini2_video_nodes)
    if ((${#video_nodes[@]} == 0)); then
        return 0
    fi
    printf '%s\n' "${video_nodes[*]}"
}
log_gemini2_video_nodes_snapshot() {
    # Log the current set of Gemini2-backed /dev/video nodes.
    # Bug fix: the no-nodes branch previously logged an empty message
    # (`warn "${prefix}: "`); make the absence explicit.
    local prefix="${1:-Gemini2 /dev/video snapshot}"
    local video_nodes=""
    video_nodes="$(gemini2_video_nodes_joined)"
    if [[ -n "${video_nodes}" ]]; then
        info "${prefix}: ${video_nodes}"
    else
        warn "${prefix}: none"
    fi
}
gemini2_device_state() {
    # Classify the camera state as one of: usb_missing, ready (USB device and
    # at least one /dev/video node), or usb_present_no_video.
    if ! gemini2_usb_present; then
        printf 'usb_missing\n'
        return 0
    fi
    if [[ -n "$(gemini2_video_nodes_joined)" ]]; then
        printf 'ready\n'
    else
        printf 'usb_present_no_video\n'
    fi
}
log_gemini2_device_state() {
    # Log the classified device state, plus video nodes and USB link speed
    # when those are available.
    local prefix="${1:-Gemini2 device state}"
    local state=""
    local video_nodes=""
    local speed_mbps=""
    state="$(gemini2_device_state)"
    video_nodes="$(gemini2_video_nodes_joined)"
    speed_mbps="$(gemini2_usb_link_speed_mbps)"
    if [[ -n "${video_nodes}" ]]; then
        if [[ -n "${speed_mbps}" ]]; then
            info "${prefix}: ${state} (video nodes: ${video_nodes}; usb speed: ${speed_mbps} Mbps)"
        else
            info "${prefix}: ${state} (video nodes: ${video_nodes})"
        fi
    else
        if [[ -n "${speed_mbps}" ]]; then
            info "${prefix}: ${state} (usb speed: ${speed_mbps} Mbps)"
        else
            info "${prefix}: ${state}"
        fi
    fi
}
gemini2_detected() {
    # True when the camera is fully usable (USB present and video nodes up).
    [[ "$(gemini2_device_state)" == "ready" ]]
}
wait_for_gemini2_ready() {
    # Poll (1s interval) until the camera reports "ready" or the timeout
    # elapses; returns non-zero on timeout. Uses bash's SECONDS counter.
    local timeout_seconds="${1:-${GEMINI2_READY_TIMEOUT_SECONDS}}"
    local deadline=$((SECONDS + timeout_seconds))
    while ((SECONDS < deadline)); do
        if [[ "$(gemini2_device_state)" == "ready" ]]; then
            return 0
        fi
        sleep 1
    done
    return 1
}
collect_live_pids() {
    # Echo only those of the given PIDs that still refer to live processes
    # (probed with the null signal, kill -0).
    local candidate=""
    for candidate in "$@"; do
        if kill -0 "${candidate}" 2>/dev/null; then
            printf '%s\n' "${candidate}"
        fi
    done
}
cleanup_residual_gemini2_processes() {
    # Find leftover host-side camera processes by command-line pattern and
    # terminate them with escalating signals (INT -> TERM -> KILL), waiting
    # up to GEMINI2_SIGNAL_TIMEOUT_SECONDS after each signal.
    # Returns 1 only if processes survive even SIGKILL.
    local context="${1:-Gemini2 cleanup}"
    local patterns=(
        'ros2 launch orbbec_camera gemini2.launch.py'
        'ros2 launch orbbec_camera gemini_330_series.launch.py'
        'orbbec_mobile_host.launch.py'
        'camera_container'
        'orbbec_host_container'
        'orbbec_camera_node'
    )
    local pattern=""
    local pid=""
    local signal=""
    local deadline=0
    local pids=()
    local live_pids=()
    declare -A seen_pids=()
    # Best effort only: without pgrep we silently skip cleanup.
    command -v pgrep >/dev/null 2>&1 || return 0
    # Collect matching PIDs across all patterns, de-duplicated via seen_pids.
    for pattern in "${patterns[@]}"; do
        while IFS= read -r pid; do
            [[ -n "${pid}" ]] || continue
            [[ -n "${seen_pids[${pid}]:-}" ]] && continue
            seen_pids["${pid}"]=1
            pids+=("${pid}")
        done < <(pgrep -f -- "${pattern}" || true)
    done
    if ((${#pids[@]} == 0)); then
        return 0
    fi
    for pid in "${pids[@]}"; do
        info "${context}: found residual Gemini2 host process ${pid}: $(ps -p "${pid}" -o args= 2>/dev/null | sed 's/^[[:space:]]*//' || true)"
    done
    for signal in INT TERM KILL; do
        live_pids=()
        mapfile -t live_pids < <(collect_live_pids "${pids[@]}")
        ((${#live_pids[@]} == 0)) && return 0
        info "${context}: sending SIG${signal} to Gemini2 host processes: ${live_pids[*]}"
        kill "-${signal}" "${live_pids[@]}" 2>/dev/null || true
        # Give the processes time to exit before escalating.
        deadline=$((SECONDS + GEMINI2_SIGNAL_TIMEOUT_SECONDS))
        while ((SECONDS < deadline)); do
            mapfile -t live_pids < <(collect_live_pids "${pids[@]}")
            ((${#live_pids[@]} == 0)) && return 0
            sleep 1
        done
    done
    mapfile -t live_pids < <(collect_live_pids "${pids[@]}")
    if ((${#live_pids[@]} != 0)); then
        warn "${context}: Gemini2 host processes are still alive after SIGKILL: ${live_pids[*]}"
        return 1
    fi
    return 0
}
gemini2_refresh_udev() {
    # Reload and re-trigger udev rules. With interactive_sudo=0 we only use
    # non-prompting sudo and degrade gracefully when that is unavailable.
    local interactive_sudo="${1:-1}"
    if ! command -v udevadm >/dev/null 2>&1; then
        warn "udevadm is not available; skipping Gemini2 udev refresh."
        return 1
    fi
    info "Refreshing udev rules for Gemini2."
    if (( interactive_sudo )); then
        run_sudo udevadm control --reload-rules
        run_sudo udevadm trigger
    else
        if ! run_sudo_noninteractive udevadm control --reload-rules; then
            warn "Skipping Gemini2 udev refresh because passwordless sudo is not available."
            return 1
        fi
        run_sudo_noninteractive udevadm trigger || return 1
    fi
    return 0
}
write_sysfs_value_with_sudo() {
    # Write a value into a sysfs file via a root shell (redirection itself
    # must happen as root). With interactive_sudo=0, fall back to sudo -n
    # and skip with a warning when passwordless sudo is unavailable.
    # NOTE(review): value/file_path are interpolated into a shell string, so
    # callers must pass trusted values without single quotes (all current
    # callers pass sysfs paths and device names).
    local file_path="$1"
    local value="$2"
    local interactive_sudo="${3:-1}"
    if (( interactive_sudo )); then
        run_sudo bash -lc "printf '%s' '${value}' > '${file_path}'"
    else
        if ! run_sudo_noninteractive bash -lc "printf '%s' '${value}' > '${file_path}'"; then
            warn "Skipping Gemini2 sysfs write to ${file_path} because passwordless sudo is not available."
            return 1
        fi
    fi
}
rebind_gemini2_usb_devices() {
    # Force a USB-level reset of every Gemini2 device by unbinding and
    # rebinding it on the generic usb driver. Returns 1 when no device was
    # found or any sysfs write failed.
    local interactive_sudo="${1:-1}"
    local device_dir=""
    local device_name=""
    local found_device=0
    while IFS= read -r device_dir; do
        [[ -n "${device_dir}" ]] || continue
        found_device=1
        device_name="$(basename "${device_dir}")"
        info "Rebinding Gemini2 USB device ${device_name}."
        write_sysfs_value_with_sudo "/sys/bus/usb/drivers/usb/unbind" "${device_name}" "${interactive_sudo}" || return 1
        # Brief pause so the kernel finishes tearing down before rebind.
        sleep 1
        write_sysfs_value_with_sudo "/sys/bus/usb/drivers/usb/bind" "${device_name}" "${interactive_sudo}" || return 1
    done < <(gemini2_usb_device_dirs)
    (( found_device )) || return 1
    return 0
}
recover_gemini2_device() {
    # Try to bring the camera back to "ready": optionally kill residual host
    # processes, refresh udev, and as a last resort rebind the USB device.
    # Returns 0 once the camera is ready; 1 when it stays unavailable or the
    # USB device is missing entirely (nothing to recover).
    local context="${1:-Gemini2 recovery}"
    local cleanup_processes="${2:-1}"
    local allow_usb_rebind="${3:-1}"
    local interactive_sudo="${4:-1}"
    log_gemini2_device_state "Gemini2 device state before ${context}"
    if [[ "$(gemini2_device_state)" == "ready" ]]; then
        return 0
    fi
    if [[ "$(gemini2_device_state)" == "usb_missing" ]]; then
        return 1
    fi
    if (( cleanup_processes )); then
        cleanup_residual_gemini2_processes "${context}" || true
    fi
    if gemini2_refresh_udev "${interactive_sudo}"; then
        if wait_for_gemini2_ready "${GEMINI2_READY_TIMEOUT_SECONDS}"; then
            info "Gemini2 recovery succeeded after udev refresh (${context})."
            log_gemini2_device_state "Gemini2 device state after ${context}"
            return 0
        fi
    fi
    if (( allow_usb_rebind )) && gemini2_usb_present; then
        if rebind_gemini2_usb_devices "${interactive_sudo}"; then
            if wait_for_gemini2_ready "${GEMINI2_READY_TIMEOUT_SECONDS}"; then
                info "Gemini2 recovery succeeded after USB rebind (${context})."
                log_gemini2_device_state "Gemini2 device state after ${context}"
                return 0
            fi
        fi
    fi
    log_gemini2_device_state "Gemini2 device state after ${context}"
    return 1
}
recover_gemini2_after_host_camera_failure() {
    # One-shot full recovery, attempted only for the specific regression where
    # a camera that was "ready" lost its /dev/video nodes during the failure.
    local context="${1:-host camera failure}"
    local initial_state="${2:-}"
    local current_state=""
    current_state="$(gemini2_device_state)"
    if [[ "${initial_state}" != "ready" || "${current_state}" != "usb_present_no_video" ]]; then
        return 1
    fi
    warn "Gemini2 lost its /dev/video nodes during ${context}. Attempting one full recovery."
    # Args: no process cleanup (0), allow USB rebind (1), interactive sudo (1).
    if recover_gemini2_device "${context}" 0 1 1; then
        info "Gemini2 full recovery succeeded after ${context}."
        return 0
    fi
    warn "Gemini2 full recovery did not restore /dev/video nodes after ${context}."
    return 1
}
log_host_camera_failure_diagnostics() {
    # Dump everything useful after a host camera failure: device state, USB
    # speed, /dev/video snapshot, the readiness probe output, and the tail of
    # the host camera log file.
    local log_path="$1"
    local readiness_output="${2:-}"
    local context="${3:-Host camera failure}"
    local line=""
    local speed_mbps=""
    warn "${context}: Gemini2 device state is $(gemini2_device_state)."
    speed_mbps="$(gemini2_usb_link_speed_mbps)"
    if [[ -n "${speed_mbps}" ]]; then
        warn "${context}: Gemini2 USB link speed is ${speed_mbps} Mbps."
    fi
    log_gemini2_video_nodes_snapshot "${context} /dev/video snapshot"
    if [[ -n "${readiness_output}" ]]; then
        while IFS= read -r line; do
            [[ -n "${line}" ]] || continue
            warn "${context}: ${line}"
        done <<< "${readiness_output}"
    else
        warn "${context}: readiness probe produced no additional output."
    fi
    if [[ -f "${log_path}" ]]; then
        warn "${context}: host camera log tail (${log_path})"
        while IFS= read -r line; do
            [[ -n "${line}" ]] || continue
            warn "[host-camera-log] ${line}"
        done < <(tail -n "${HOST_CAMERA_LOG_TAIL_LINES}" "${log_path}" 2>/dev/null || true)
    else
        warn "${context}: host camera log is missing at ${log_path}."
    fi
}
assert_supported_platform() {
    # Die unless running on a Jetson Orin (arm64) with Ubuntu 22.04 and
    # JetPack 6.x; logs a summary line on success.
    local arch=""
    local model=""
    local jetpack_version=""
    local jetpack_major=""
    arch="$(dpkg --print-architecture 2>/dev/null || uname -m)"
    if [[ "${arch}" != "arm64" && "${arch}" != "aarch64" ]]; then
        die "Unsupported architecture: ${arch}. This script only supports Jetson Orin arm64."
    fi
    [[ -f /etc/os-release ]] || die "Cannot detect OS version because /etc/os-release is missing."
    # shellcheck disable=SC1091
    source /etc/os-release
    [[ "${ID:-}" == "ubuntu" ]] || die "Unsupported OS: ${ID:-unknown}. Ubuntu 22.04 is required."
    [[ "${VERSION_ID:-}" == "22.04" ]] || die "Unsupported Ubuntu version: ${VERSION_ID:-unknown}. Ubuntu 22.04 is required."
    [[ -f /proc/device-tree/model ]] || die "Cannot detect Jetson model from /proc/device-tree/model."
    # The device-tree model string is NUL-terminated; strip the NULs.
    model="$(tr -d '\0' < /proc/device-tree/model)"
    [[ "${model}" == *"Jetson"* ]] || die "Unsupported Jetson model: ${model}. A Jetson Orin device is required."
    [[ "${model}" == *"Orin"* ]] || die "Unsupported Jetson model: ${model}. A Jetson Orin device is required."
    jetpack_version="$(dpkg-query -W -f='${Version}' nvidia-jetpack 2>/dev/null || true)"
    [[ -n "${jetpack_version}" ]] || die "nvidia-jetpack is not installed. JetPack 6.x is required."
    if [[ "${jetpack_version}" =~ ^([0-9]+) ]]; then
        jetpack_major="${BASH_REMATCH[1]}"
    else
        die "Unable to parse nvidia-jetpack version: ${jetpack_version}"
    fi
    [[ "${jetpack_major}" == "6" ]] || die "Unsupported JetPack version: ${jetpack_version}. JetPack 6.x is required."
    info "Platform OK: ${model}, Ubuntu ${VERSION_ID}, JetPack ${jetpack_version}"
}
check_apt_locks() {
    # Die if another process holds the dpkg lock files; skip the check (with
    # a warning) when fuser is unavailable.
    local lock_path
    local pids
    if ! command -v fuser >/dev/null 2>&1; then
        warn "fuser is not available; skipping apt lock inspection."
        return 0
    fi
    for lock_path in /var/lib/dpkg/lock-frontend /var/lib/dpkg/lock; do
        pids="$(fuser "${lock_path}" 2>/dev/null || true)"
        if [[ -n "${pids}" ]]; then
            die "apt/dpkg lock detected on ${lock_path} (pids: ${pids}). Resolve it before continuing."
        fi
    done
}
check_network_endpoints() {
    # Hard requirement: die when any endpoint fails a 10s HEAD probe.
    local endpoint
    assert_command curl
    for endpoint in "$@"; do
        if ! curl -fsSI --max-time 10 "${endpoint}" >/dev/null 2>&1; then
            die "Cannot reach ${endpoint}. Network access is required for prepare mode."
        fi
    done
}
warn_on_unreachable_endpoints() {
    # Soft variant: log a warning per unreachable endpoint and keep going.
    local endpoint
    assert_command curl
    for endpoint in "$@"; do
        if curl -fsSI --max-time 10 "${endpoint}" >/dev/null 2>&1; then
            info "Network probe OK: ${endpoint}"
        else
            warn "Network probe failed for ${endpoint}. Continuing; the real install steps will fail later if access is actually required."
        fi
    done
}
ensure_docker_access() {
    # Probe docker daemon access, recording in DOCKER_PREFIX whether plain
    # docker works or sudo is required; die when neither does.
    if docker info >/dev/null 2>&1; then
        DOCKER_PREFIX=()
        return 0
    fi
    if sudo docker info >/dev/null 2>&1; then
        DOCKER_PREFIX=(sudo)
        return 0
    fi
    die "Cannot access the Docker daemon with docker or sudo docker."
}
docker_cmd() {
    # Run docker with the prefix discovered by ensure_docker_access.
    if ((${#DOCKER_PREFIX[@]})); then
        "${DOCKER_PREFIX[@]}" docker "$@"
    else
        docker "$@"
    fi
}
append_jetson_container_args() {
    # Append the standard Jetson docker run arguments (GPU runtime, host
    # namespaces, Tegra library mounts) to the caller's array via nameref.
    local -n jetson_docker_args_ref="$1"
    jetson_docker_args_ref+=(
        --runtime=nvidia
        --privileged
        --network host
        --ipc host
        --pid host
        --ulimit memlock=-1
        --ulimit stack=67108864
        -e "NVIDIA_VISIBLE_DEVICES=nvidia.com/gpu=all,nvidia.com/pva=all"
        -e "NVIDIA_DRIVER_CAPABILITIES=all"
        -e "ISAAC_ROS_WS=/workspaces/isaac_ros-dev"
        -v /etc/localtime:/etc/localtime:ro
        -v /tmp:/tmp
    )
    # Host integrations are mounted only when they exist on this device.
    if [[ -f /usr/bin/tegrastats ]]; then
        jetson_docker_args_ref+=(-v /usr/bin/tegrastats:/usr/bin/tegrastats)
    fi
    if [[ -d /usr/lib/aarch64-linux-gnu/tegra ]]; then
        jetson_docker_args_ref+=(-v /usr/lib/aarch64-linux-gnu/tegra:/usr/lib/aarch64-linux-gnu/tegra)
    fi
    if [[ -d /usr/src/jetson_multimedia_api ]]; then
        jetson_docker_args_ref+=(-v /usr/src/jetson_multimedia_api:/usr/src/jetson_multimedia_api)
    fi
    if [[ -d /usr/share/vpi3 ]]; then
        jetson_docker_args_ref+=(-v /usr/share/vpi3:/usr/share/vpi3)
    fi
    if [[ -d /dev/input ]]; then
        jetson_docker_args_ref+=(-v /dev/input:/dev/input)
    fi
    # jtop socket is shared read-only when the jtop service is present.
    if getent group jtop >/dev/null 2>&1 && [[ -S /run/jtop.sock ]]; then
        jetson_docker_args_ref+=(-v /run/jtop.sock:/run/jtop.sock:ro)
    fi
}
resolve_ros_discovery_env_value() {
    # Print the effective value for one ROS discovery env var. Only
    # RMW_IMPLEMENTATION has a default (rmw_fastrtps_cpp); everything else
    # passes through the current environment via indirect expansion.
    local var_name="$1"
    local value=""
    case "${var_name}" in
        RMW_IMPLEMENTATION)
            value="${RMW_IMPLEMENTATION:-}"
            if [[ -z "${value}" ]]; then
                value="rmw_fastrtps_cpp"
            fi
            ;;
        *)
            # ${!var_name-} expands the variable named by var_name, empty if unset.
            value="${!var_name-}"
            ;;
    esac
    printf '%s\n' "${value}"
}
export_effective_ros_discovery_env() {
    # Materialize the resolved discovery values into this shell's environment:
    # export non-empty values, unset the rest so stale settings cannot leak.
    local var_name=""
    local value=""
    for var_name in "${ROS_DISCOVERY_ENV_VARS[@]}"; do
        value="$(resolve_ros_discovery_env_value "${var_name}")"
        if [[ -n "${value}" ]]; then
            export "${var_name}=${value}"
        else
            unset "${var_name}" || true
        fi
    done
}
ros_discovery_env_summary() {
    # Print a single-line "VAR=value" summary of every discovery variable
    # (empty values included) for logging purposes.
    local parts=()
    local var_name=""
    local value=""
    local old_ifs="${IFS}"
    for var_name in "${ROS_DISCOVERY_ENV_VARS[@]}"; do
        value="$(resolve_ros_discovery_env_value "${var_name}")"
        if [[ -n "${value}" ]]; then
            parts+=("${var_name}=${value}")
        else
            parts+=("${var_name}=")
        fi
    done
    # NOTE: "${parts[*]}" joins on the FIRST character of IFS only, so the
    # effective separator is "," — the space in IFS=', ' is never used.
    IFS=', '
    printf '%s\n' "${parts[*]}"
    IFS="${old_ifs}"
}
log_ros_discovery_env() {
    # Log the discovery-env summary, optionally under a caller-chosen label.
    local label="${1:-ROS discovery env}"
    info "${label}: $(ros_discovery_env_summary)"
}
emit_ros_discovery_env_shell_exports() {
    # Emit shell statements (export/unset) that reproduce the effective
    # ROS discovery environment, suitable for eval in another shell.
    local name="" value=""
    for name in "${ROS_DISCOVERY_ENV_VARS[@]}"; do
        value="$(resolve_ros_discovery_env_value "${name}")"
        if [[ -z "${value}" ]]; then
            printf 'unset %s\n' "${name}"
        else
            printf 'export %s=%q\n' "${name}" "${value}"
        fi
    done
}
managed_fastdds_profile_path() {
    # Compute where the managed Fast DDS UDP-only profile lives under the
    # given managed root.
    local root="$1"
    printf '%s/%s/%s\n' "${root}" "${FASTDDS_RUNTIME_DIR_RELATIVE}" "${FASTDDS_UDP_ONLY_PROFILE_FILENAME}"
}
write_managed_fastdds_udp_profile() {
    # Write a Fast DDS XML profile that disables the builtin transports and
    # registers a single UDPv4 transport (no shared memory), then print the
    # profile path so callers can export it.
    # NOTE(review): the original heredoc lost its XML markup during
    # extraction; this body is reconstructed from the surviving values
    # (udp_transport / UDPv4 / false / udp_transport) — confirm against
    # git history before relying on the exact profile names.
    local managed_root="$1"
    local profile_path=""
    local profile_dir=""
    profile_path="$(managed_fastdds_profile_path "${managed_root}")"
    profile_dir="$(dirname "${profile_path}")"
    mkdir -p "${profile_dir}"
    cat > "${profile_path}" <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<profiles xmlns="http://www.eprosima.com/XMLSchemas/fastRTPS_Profiles">
    <transport_descriptors>
        <transport_descriptor>
            <transport_id>udp_transport</transport_id>
            <type>UDPv4</type>
        </transport_descriptor>
    </transport_descriptors>
    <participant profile_name="udp_only_participant" is_default_profile="true">
        <rtps>
            <useBuiltinTransports>false</useBuiltinTransports>
            <userTransports>
                <transport_id>udp_transport</transport_id>
            </userTransports>
        </rtps>
    </participant>
</profiles>
EOF
    printf '%s\n' "${profile_path}"
}
enable_managed_fastdds_udp_runtime() {
    # Write the UDP-only profile and point both Fast DDS env-var spellings
    # (new and legacy) at it for the current process.
    local root="$1"
    local profile=""
    profile="$(write_managed_fastdds_udp_profile "${root}")"
    export FASTDDS_DEFAULT_PROFILES_FILE="${profile}"
    export FASTRTPS_DEFAULT_PROFILES_FILE="${profile}"
    info "Managed Fast DDS UDP-only profile: ${profile}"
}
append_ros_discovery_env_args() {
    # Append (by nameref) a "-e VAR=value" docker flag for every discovery
    # variable that resolves to a non-empty value.
    local -n ros_discovery_env_args_ref="$1"
    local name="" value=""
    for name in "${ROS_DISCOVERY_ENV_VARS[@]}"; do
        value="$(resolve_ros_discovery_env_value "${name}")"
        [[ -n "${value}" ]] || continue
        ros_discovery_env_args_ref+=(-e "${name}=${value}")
    done
}
resolve_ros_discovery_mount_source() {
    # Resolve a discovery env var to a host path worth bind-mounting.
    # Returns 1 (prints nothing) when the value is empty, relative, or
    # points at a non-existent path. CYCLONEDDS_URI may carry a file://
    # prefix, which is stripped before the checks.
    local var_name="$1"
    local value=""
    value="$(resolve_ros_discovery_env_value "${var_name}")"
    [[ -n "${value}" ]] || return 1
    case "${var_name}" in
        CYCLONEDDS_URI)
            if [[ "${value}" == file://* ]]; then
                value="${value#file://}"
            fi
            ;;
    esac
    [[ "${value}" = /* ]] || return 1
    [[ -e "${value}" ]] || return 1
    printf '%s\n' "${value}"
}
append_ros_discovery_mount_args() {
    # Bind-mount (by nameref) every path-valued discovery variable into the
    # container at the same host location, de-duplicating repeated paths.
    # CYCLONEDDS_HOME directories are mounted read-write; everything else
    # read-only.
    local -n ros_discovery_mount_args_ref="$1"
    local var_name=""
    local mount_source=""
    local mount_mode="ro"
    declare -A seen_mounts=()
    for var_name in "${ROS_DISCOVERY_PATH_ENV_VARS[@]}"; do
        mount_source="$(resolve_ros_discovery_mount_source "${var_name}" || true)"
        [[ -n "${mount_source}" ]] || continue
        [[ -z "${seen_mounts[${mount_source}]:-}" ]] || continue
        seen_mounts["${mount_source}"]=1
        if [[ -d "${mount_source}" && "${var_name}" == "CYCLONEDDS_HOME" ]]; then
            mount_mode="rw"
        else
            mount_mode="ro"
        fi
        ros_discovery_mount_args_ref+=(-v "${mount_source}:${mount_source}:${mount_mode}")
    done
}
append_ros_discovery_container_args() {
    # Convenience wrapper: add both the env flags and the path mounts that
    # make container-side ROS discovery match the host.
    local target_array_name="$1"
    append_ros_discovery_env_args "${target_array_name}"
    append_ros_discovery_mount_args "${target_array_name}"
}
validate_package_install_artifacts() {
    # Verify, inside the derived container image, that a colcon-installed
    # package exposes every required share/ artifact.
    # Args: host workspace root, package name, then zero or more paths
    # relative to the package's install share directory. The embedded
    # script exits 10 (and prints the path to stderr) on the first missing
    # artifact.
    local workspace_root="$1"
    local package_name="$2"
    shift 2
    local required_paths=("$@")
    local required_artifact_list=""
    local validate_cmd=""
    local validate_args=(
        run
        --rm
        -e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
        -e "PACKAGE_NAME=${package_name}"
        -v "${workspace_root}:/workspaces/isaac_ros-dev"
    )
    # Nothing to check: succeed without starting a container.
    ((${#required_paths[@]} != 0)) || return 0
    # One artifact path per line, consumed via REQUIRED_ARTIFACT_LIST below.
    required_artifact_list="$(printf '%s\n' "${required_paths[@]}")"
    append_jetson_container_args validate_args
    append_ros_discovery_container_args validate_args
    validate_args+=(-e "REQUIRED_ARTIFACT_LIST=${required_artifact_list}")
    validate_cmd=$(
        cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
if (( restore_nounset )); then
set -u
fi
PACKAGE_PREFIX="$(ros2 pkg prefix "${PACKAGE_NAME}" 2>/dev/null || true)"
[[ -n "${PACKAGE_PREFIX}" ]]
INSTALL_ROOT="${PACKAGE_PREFIX}/share/${PACKAGE_NAME}"
while IFS= read -r relative_path; do
[[ -n "${relative_path}" ]] || continue
[[ -f "${INSTALL_ROOT}/${relative_path}" ]] || {
printf '%s\n' "${INSTALL_ROOT}/${relative_path}" >&2
exit 10
}
done <<< "${REQUIRED_ARTIFACT_LIST}"
EOF
    )
    docker_cmd "${validate_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "${validate_cmd}"
}
select_base_image() {
    # Prefer the pinned base image; otherwise fall back to any locally
    # available Isaac ROS Humble aarch64 image. Prints the chosen ref, or
    # returns 1 when nothing acceptable is present.
    if docker_cmd image inspect "${BASE_IMAGE_PREFERRED}" >/dev/null 2>&1; then
        printf '%s\n' "${BASE_IMAGE_PREFERRED}"
        return 0
    fi
    local fallback=""
    fallback="$(docker_cmd image ls --format '{{.Repository}}:{{.Tag}}' \
        | grep -E '^nvcr\.io/nvidia/isaac/ros:.*aarch64-ros2_humble' \
        | head -n 1 || true)"
    [[ -n "${fallback}" ]] || return 1
    printf '%s\n' "${fallback}"
}
acceptable_base_image_hint() {
    # Human-readable description of the images select_base_image accepts.
    printf '%s\n' "${BASE_IMAGE_PREFERRED} or nvcr.io/nvidia/isaac/ros:*aarch64-ros2_humble*"
}
docker_image_id() {
    # Print the content-addressed ID of a local image reference.
    local ref="$1"
    docker_cmd image inspect --format '{{.Id}}' "${ref}"
}
compute_tree_hash() {
    # Hash a set of files into one sha256: concatenates each file's
    # "checksum  path" line (so both content and path changes alter the
    # result) and hashes the concatenation. Dies when a file is missing.
    local combined=""
    local file_path
    assert_command sha256sum
    for file_path in "$@"; do
        [[ -f "${file_path}" ]] || die "Cannot hash missing file: ${file_path}"
        combined+=$(sha256sum "${file_path}")
    done
    printf '%s' "${combined}" | sha256sum | awk '{print $1}'
}
container_image_context_hash() {
    # Fingerprint of the docker build-context inputs for the derived image;
    # used elsewhere to detect when a prepared image is stale vs. the repo.
    compute_tree_hash \
        "${PROJECT_ROOT}/docker/Dockerfile.nvblox_orbbec" \
        "${PROJECT_ROOT}/docker/prepare_container_workspace.sh" \
        "${PROJECT_ROOT}/docker/launch_nvblox.sh"
}
source_ros_setup() {
    # Source the ROS distro setup, then (if present) the workspace overlay.
    # ROS setup scripts read unset variables, so nounset is suspended for
    # the duration and restored only if it was active on entry.
    local workspace_root="${1:-}"
    local restore_nounset=0
    if [[ $- == *u* ]]; then
        restore_nounset=1
        set +u
    fi
    # shellcheck disable=SC1091
    source "/opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash"
    if [[ -n "${workspace_root}" && -f "${workspace_root}/install/setup.bash" ]]; then
        # shellcheck disable=SC1090
        source "${workspace_root}/install/setup.bash"
    fi
    if (( restore_nounset )); then
        set -u
    fi
}
================================================
FILE: reComputer/scripts/nvblox/onedrive_downloader.py
================================================
#!/usr/bin/env python3
"""Download public OneDrive/SharePoint share links with resume support."""
from __future__ import annotations
import argparse
import re
import sys
from pathlib import Path
from urllib.parse import parse_qsl, unquote, urlencode, urlparse, urlunparse
import requests
from tqdm import tqdm
CHUNK_SIZE = 65536
MIN_VALID_SIZE = 1024 * 1024
PROBE_CHUNK_SIZE = 4096
REQUEST_TIMEOUT = (15, 600)
DEFAULT_SHARE_URL = (
"https://seeedstudio88-my.sharepoint.com/:u:/g/personal/"
"youjiang_yu_seeedstudio88_onmicrosoft_com/"
"IQCCDToomY6WSaRZdfsTs9vXAengb-SCEvNfSUgq0cipP6w?e=z9axor"
)
DEFAULT_FILENAME = "nvblox_images.tar"
DEFAULT_OUTPUT_DIR = Path.home() / ".cache" / "jetson-examples" / "nvblox"
SUPPORTED_DOMAINS = ("sharepoint.com", "sharepoint.cn")
SHARE_LINK_RE = re.compile(r"^/:[a-z]:/", re.IGNORECASE)
TEXT_ERROR_MARKERS = (
"forbidden",
"access denied",
"sign in",
"login",
"not found",
"permission",
)
class DownloadError(Exception):
    """Raised when the download cannot proceed safely.

    Covers URL validation failures, landing-page detection, and
    interrupted or implausibly small transfers.
    """
def parse_args() -> argparse.Namespace:
    """Build and evaluate the CLI for the OneDrive/SharePoint downloader."""
    cli = argparse.ArgumentParser(
        description="Download a public Microsoft 365 OneDrive/SharePoint share link."
    )
    cli.add_argument(
        "share_url",
        nargs="?",
        default=DEFAULT_SHARE_URL,
        help="Public sharepoint.com/sharepoint.cn share link",
    )
    cli.add_argument(
        "legacy_filename",
        nargs="?",
        help="Legacy positional filename override",
    )
    cli.add_argument(
        "--output-dir",
        "--download-dir",
        dest="output_dir",
        type=Path,
        default=DEFAULT_OUTPUT_DIR,
        help=f"Directory to save the file (default: {DEFAULT_OUTPUT_DIR})",
    )
    cli.add_argument(
        "--filename",
        help="Override the detected filename. Only the final path component is used.",
    )
    cli.add_argument(
        "--force",
        action="store_true",
        help="Redownload even if the target file already exists.",
    )
    cli.add_argument(
        "--aria2c",
        action="store_true",
        help="Print an aria2c command for the resolved direct download URL",
    )
    return cli.parse_args()
def is_supported_host(hostname: str) -> bool:
    """Return True when *hostname* is one of SUPPORTED_DOMAINS or a subdomain."""
    lowered = hostname.lower()
    for domain in SUPPORTED_DOMAINS:
        if lowered == domain or lowered.endswith(f".{domain}"):
            return True
    return False
def sanitize_filename(value: str | None) -> str | None:
    """Reduce *value* to a safe bare filename, or None if nothing usable remains.

    Strips surrounding whitespace and quote characters, normalizes
    backslashes to forward slashes, and keeps only the final path
    component. Rejects '', '.' and '..'.
    """
    if not value:
        return None
    trimmed = value.strip().strip("\"'")
    if not trimmed:
        return None
    leaf = Path(trimmed.replace("\\", "/")).name
    return None if leaf in {"", ".", ".."} else leaf
def validate_source_url(raw_url: str) -> str:
    """Validate a share URL and return it stripped of surrounding whitespace.

    Raises DownloadError for empty input, non-HTTP(S) schemes, unsupported
    hosts, page-style OneDrive URLs, and URLs with an empty path.
    """
    url = raw_url.strip()
    if not url:
        raise DownloadError("share_url is required.")
    parts = urlparse(url)
    if parts.scheme not in {"http", "https"}:
        raise DownloadError("URL must start with http:// or https://.")
    if not is_supported_host(parts.hostname or ""):
        raise DownloadError(
            "Only public sharepoint.com/sharepoint.cn links are supported in v1."
        )
    if "/_layouts/15/onedrive.aspx" in (parts.path or "").lower():
        raise DownloadError(
            "Unsupported page-style OneDrive URL. Use a public share link instead of "
            "a /_layouts/15/onedrive.aspx page or a login-protected page."
        )
    if not parts.path:
        raise DownloadError("URL path is empty.")
    return url
def needs_download_flag(parsed_url) -> bool:
    """Return True when the URL path is a /:x:/-style share link."""
    return SHARE_LINK_RE.match(parsed_url.path or "") is not None
def with_download_flag(url: str) -> str:
    """Return *url* with download=1 appended for share-style links.

    Any pre-existing download query parameter is dropped first; URLs that
    are not share links are returned untouched.
    """
    parsed = urlparse(url)
    if not needs_download_flag(parsed):
        return url
    kept = [
        (name, val)
        for name, val in parse_qsl(parsed.query, keep_blank_values=True)
        if name.lower() != "download"
    ]
    kept.append(("download", "1"))
    return urlunparse(parsed._replace(query=urlencode(kept, doseq=True)))
def looks_like_landing_page(content_type: str, first_chunk: bytes) -> bool:
    """Heuristically decide whether a response is an HTML/text page rather
    than downloadable file content.

    NOTE(review): the middle of this function was garbled during extraction
    (text between angle brackets was stripped); the HTML-prefix and
    error-marker checks below are a reconstruction — confirm against git
    history.
    """
    content_type = (content_type or "").lower()
    first = (first_chunk or b"").lstrip()
    first_lower = first.lower()
    if "text/html" in content_type or "application/xhtml" in content_type:
        return True
    # Sniff an HTML document even when the server mislabels the content type.
    if first_lower.startswith(b"<!doctype") or first_lower.startswith(b"<html"):
        return True
    if content_type.startswith("text/"):
        # Plain-text responses that read like an auth/error page.
        text = first_lower.decode("utf-8", "ignore")
        return any(marker in text for marker in TEXT_ERROR_MARKERS)
    return False


def filename_from_content_disposition(header_value: str | None) -> str | None:
    """Extract a filename from a Content-Disposition header value.

    Precedence: RFC 5987 ``filename*=`` form, quoted ``filename="..."``,
    then bare ``filename=...``. Returns a sanitized bare filename or None.
    """
    if not header_value:
        return None
    match = re.search(
        r"filename\*\s*=\s*(?:[A-Za-z0-9!#$&+\-.^_`|~]+'[^']*')?([^;]+)",
        header_value,
        flags=re.IGNORECASE,
    )
    if match:
        return sanitize_filename(unquote(match.group(1).strip().strip("\"'")))
    match = re.search(r'filename\s*=\s*"([^"]+)"', header_value, flags=re.IGNORECASE)
    if match:
        return sanitize_filename(match.group(1))
    match = re.search(r"filename\s*=\s*([^;]+)", header_value, flags=re.IGNORECASE)
    if match:
        return sanitize_filename(match.group(1))
    return None
def filename_from_url(url: str) -> str | None:
    """Derive a sanitized filename from the final URL path component."""
    path = urlparse(url).path or ""
    return sanitize_filename(unquote(Path(path).name))
def probe_remote_target(url: str, filename_override: str | None) -> tuple[str, str]:
    """Resolve the final download URL and target filename without downloading.

    Issues a 1-byte ranged GET (following redirects) so the share link can
    be inspected cheaply. Rejects responses that look like HTML landing
    pages. Filename precedence: explicit override, Content-Disposition
    header, then the final URL path.

    Returns (final URL after redirects, filename). Raises DownloadError on
    network errors, HTTP errors, landing pages, or when no filename can be
    inferred.
    """
    headers = {"Range": "bytes=0-0"}
    try:
        response = requests.get(
            url,
            stream=True,
            timeout=REQUEST_TIMEOUT,
            allow_redirects=True,
            headers=headers,
        )
    except requests.RequestException as exc:
        raise DownloadError(f"Failed to resolve the download target: {exc}") from exc
    try:
        response.raise_for_status()
        # Peek at the leading bytes only; stream=True avoids pulling the body.
        first_chunk = next(response.iter_content(chunk_size=PROBE_CHUNK_SIZE), b"")
        if looks_like_landing_page(response.headers.get("content-type", ""), first_chunk):
            raise DownloadError(
                "The link resolved to an HTML/text page instead of a downloadable file."
            )
        filename = (
            sanitize_filename(filename_override)
            or filename_from_content_disposition(
                response.headers.get("content-disposition")
            )
            or filename_from_url(response.url)
        )
        if not filename:
            raise DownloadError(
                "Could not infer a filename from the response. Pass --filename."
            )
        return response.url, filename
    except requests.RequestException as exc:
        raise DownloadError(f"Failed to resolve the download target: {exc}") from exc
    finally:
        response.close()
def prepare_target_paths(
    output_dir: Path, filename: str, force: bool
) -> tuple[Path, Path, bool]:
    """Return (final_path, partial_path, already_done) for a download target.

    Creates *output_dir* if needed. With *force*, any cached or partial
    file is removed. Without it, a cached file larger than MIN_VALID_SIZE
    is reused (last slot True); an implausibly small cached file is
    deleted and redownloaded.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    target = output_dir / filename
    partial = target.with_suffix(target.suffix + ".part")
    if force:
        if target.exists():
            print(f"Removing cached file: {target}")
            target.unlink()
        if partial.exists():
            print(f"Removing partial download: {partial}")
            partial.unlink()
        return target, partial, False
    if target.exists():
        size = target.stat().st_size
        if size > MIN_VALID_SIZE:
            print(f"File already exists: {target}")
            return target, partial, True
        print(
            f"Existing file is too small ({size} bytes), redownloading: {target}"
        )
        target.unlink()
    if partial.exists():
        partial.unlink()
    return target, partial, False
def progress_stream():
    """Return a line-buffered stream on the controlling TTY, or stdout.

    Writing progress to /dev/tty keeps the bar visible even when stdout is
    redirected; when no TTY is available, stdout is used instead.
    """
    try:
        tty = open("/dev/tty", "w", encoding="utf-8", buffering=1)
    except OSError:
        return sys.stdout
    return tty
def download_file(url: str, filepath: Path, filename: str) -> None:
    """Stream *url* into *filepath* with resume support and a progress bar.

    Downloads into a sibling ``.part`` file and atomically renames it into
    place on success. An existing partial file is resumed with a Range
    request; when the server ignores the Range header (plain 200 instead
    of 206), the partial file is discarded and the transfer restarts from
    byte 0 — hence the retry loop.

    Raises DownloadError on network/protocol errors, empty or
    landing-page responses, or a result smaller than MIN_VALID_SIZE.
    """
    tmp_path = filepath.with_suffix(filepath.suffix + ".part")
    while True:
        resume_pos = tmp_path.stat().st_size if tmp_path.exists() else 0
        headers = {}
        if resume_pos > 0:
            headers["Range"] = f"bytes={resume_pos}-"
            print(f"Resuming download from byte {resume_pos}")
        try:
            response = requests.get(
                url,
                stream=True,
                timeout=REQUEST_TIMEOUT,
                allow_redirects=True,
                headers=headers,
            )
        except requests.RequestException as exc:
            raise DownloadError(f"Failed to start download: {exc}") from exc
        try:
            # A 200 (rather than 206) reply means the Range header was ignored.
            if resume_pos > 0 and response.status_code == 200:
                print("Server ignored the resume request, restarting from byte 0.")
                response.close()
                tmp_path.unlink(missing_ok=True)
                continue
            response.raise_for_status()
            total_size = int(response.headers.get("content-length", 0) or 0)
            if total_size and resume_pos:
                # On a 206, content-length covers only the remainder.
                total_size += resume_pos
            chunks = response.iter_content(chunk_size=CHUNK_SIZE)
            first_chunk = next((chunk for chunk in chunks if chunk), b"")
            if not first_chunk:
                raise DownloadError("Downloaded content is empty.")
            # Landing-page sniffing is only meaningful on the leading bytes.
            if resume_pos == 0 and looks_like_landing_page(
                response.headers.get("content-type", ""), first_chunk
            ):
                raise DownloadError(
                    "The link resolved to an HTML/text page instead of a downloadable file."
                )
            written = resume_pos + len(first_chunk)
            mode = "ab" if resume_pos > 0 else "wb"
            progress_file = progress_stream()
            progress_bar = tqdm(
                desc=filename,
                initial=resume_pos,
                total=total_size if total_size > 0 else None,
                unit="B",
                unit_scale=True,
                unit_divisor=1024,
                file=progress_file,
                dynamic_ncols=True,
                ascii=True,
                leave=False,
                mininterval=0.2,
                smoothing=0.1,
            )
            try:
                with open(tmp_path, mode) as handle:
                    handle.write(first_chunk)
                    progress_bar.update(len(first_chunk))
                    for chunk in chunks:
                        if not chunk:
                            continue
                        handle.write(chunk)
                        written += len(chunk)
                        progress_bar.update(len(chunk))
            finally:
                progress_bar.close()
                # Only close the stream we opened ourselves (/dev/tty).
                if progress_file not in (sys.stdout, sys.stderr):
                    progress_file.write("\n")
                    progress_file.close()
            if written < MIN_VALID_SIZE:
                tmp_path.unlink(missing_ok=True)
                raise DownloadError(
                    f"Downloaded file is unexpectedly small: {written} bytes."
                )
            tmp_path.replace(filepath)
            return
        except requests.RequestException as exc:
            raise DownloadError(
                f"Download interrupted by a network/protocol error: {exc}"
            ) from exc
        finally:
            response.close()
def main() -> int:
    """CLI entry point: resolve, cache-check, and download the share link.

    Returns 0 on success (including cache hit and --aria2c mode), 1 on a
    handled DownloadError/OSError.
    """
    args = parse_args()
    try:
        validated_url = validate_source_url(args.share_url)
        normalized_url = with_download_flag(validated_url)
        output_dir = args.output_dir.expanduser()
        filename_override = sanitize_filename(args.filename or args.legacy_filename)
        if (args.filename or args.legacy_filename) and not filename_override:
            raise DownloadError("Invalid filename value.")
        print(f"Resolving download target: {normalized_url}")
        resolved_url, detected_filename = probe_remote_target(
            normalized_url, filename_override
        )
        filename = filename_override or detected_filename or DEFAULT_FILENAME
        filepath, _tmp_path, already_exists = prepare_target_paths(
            output_dir, filename, args.force
        )
        if already_exists:
            return 0
        if resolved_url != normalized_url:
            print(f"Resolved file URL: {resolved_url}")
        print(f"Download URL: {normalized_url}")
        print(f"Saving to: {filepath}")
        if args.aria2c:
            # Fix: emit the actual target filename; the previous code
            # printed a literal placeholder instead of the resolved name.
            print(f"aria2c '{resolved_url}' -d '{output_dir}' -o '{filename}'")
            return 0
        download_file(normalized_url, filepath, filename)
        print(f"Download complete: {filepath}")
        return 0
    except DownloadError as exc:
        print(f"Error: {exc}")
        return 1
    except OSError as exc:
        print(f"Error: {exc}")
        return 1
if __name__ == "__main__":
    # Propagate main()'s integer result as the process exit status.
    raise SystemExit(main())
================================================
FILE: reComputer/scripts/nvblox/run.sh
================================================
#!/usr/bin/env bash
set -euo pipefail

# Thin entry point: translate NVBLOX_* environment toggles into
# start_nvblox_demo.sh flags and delegate to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
MODE="${NVBLOX_MODE:-all}"
START_ARGS=()

# True for the common affirmative spellings of a boolean env toggle.
is_truthy() {
    case "${1:-}" in
        1|true|TRUE|True|yes|YES|Yes|on|ON|On) return 0 ;;
        *) return 1 ;;
    esac
}

case "${MODE}" in
    ""|all) ;;
    prepare|prepare-only) START_ARGS+=(--prepare-only) ;;
    run|run-only) START_ARGS+=(--run-only) ;;
    *)
        echo "Invalid NVBLOX_MODE='${MODE}'. Use all, prepare, or run." >&2
        exit 1
        ;;
esac

if is_truthy "${NVBLOX_FORCE_REBUILD:-0}"; then
    START_ARGS+=(--force-rebuild)
fi
if is_truthy "${NVBLOX_HEADLESS:-0}"; then
    START_ARGS+=(--headless)
fi

bash "${SCRIPT_DIR}/start_nvblox_demo.sh" "${START_ARGS[@]}"
================================================
FILE: reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../lib/common.sh"
# Debug configuration; MANAGED_ROOT and HEADLESS may be overridden by the
# CLI flags parsed below.
MANAGED_ROOT="${MANAGED_ROOT_DEFAULT}"
HEADLESS=0
USE_GUI=0
LAUNCH_FILE="orbbec_example.launch.py"
EXPECTED_CAMERA_INFO_FRAME="camera_color_optical_frame"
# Per-stage probe timeouts, in seconds.
DEBUG_CAMERA_VISIBILITY_TIMEOUT_SEC=20
DEBUG_STATIC_TF_TIMEOUT_SEC=20
DEBUG_RUNTIME_OUTPUT_TIMEOUT_SEC=30
CURRENT_STAGE=""
XHOST_GRANTED=0
HOST_CAMERA_PID=""
# Minimal CLI: --managed-root <path> and --headless.
while (($#)); do
    case "$1" in
        --managed-root)
            shift
            MANAGED_ROOT="$1"
            ;;
        --headless)
            HEADLESS=1
            ;;
        *)
            die "Unknown argument: $1"
            ;;
    esac
    shift
done
ensure_supported_user_context
if should_reexec_as_setup_user; then
    die "Do not invoke debug_runtime_connectivity.sh with sudo directly. Run it as the setup user."
fi
require_bootstrapped_managed_root "${MANAGED_ROOT}"
ensure_docker_access
# Well-known locations under the managed root.
HOST_WS="${MANAGED_ROOT}/ros2_ws"
CONTAINER_WS="${MANAGED_ROOT}/isaac_ros-dev"
CONTAINER_STAMP="${CONTAINER_WS}/.setup-nvbox/container_workspace.env"
IMAGE_STAMP="${MANAGED_ROOT}/.stamps/derived_image.env"
HOST_STAMP="${MANAGED_ROOT}/.stamps/host_workspace.env"
LOG_DIR="${MANAGED_ROOT}/logs"
HOST_CAMERA_LOG="${LOG_DIR}/host-camera-debug-$(date '+%Y%m%d-%H%M%S').log"
# Artifacts that must exist in the prepared container workspace.
PREPARED_CONTAINER_REQUIRED_PACKAGE="nvblox_examples_bringup"
PREPARED_CONTAINER_REQUIRED_PATHS=(
    "launch/orbbec_transforms.launch.py"
    "launch/orbbec_example.launch.py"
    "launch/orbbec_debug.launch.py"
    "launch/orbbec_nvblox_standalone.launch.py"
    "config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
begin_stage() {
    # Remember which stage is running so pass/fail messages can name it.
    CURRENT_STAGE="${1}"
    info "Stage ${CURRENT_STAGE}"
}
pass_stage() {
    # Announce success for the stage recorded by begin_stage.
    info "PASS ${CURRENT_STAGE}"
}
fail_stage() {
    # Abort, attributing the failure to the current stage.
    local reason="${1:-failed}"
    die "FAIL ${CURRENT_STAGE}: ${reason}"
}
build_base_container_args() {
    # Initialize (by name) the docker-run argument array shared by every
    # probe: ephemeral container, ROS distro env, container workspace
    # mount, Jetson hardware flags, and host-matching ROS discovery args.
    local docker_args_name="$1"
    local -n base_docker_args_ref="${docker_args_name}"
    base_docker_args_ref=(
        run
        --rm
        -e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
        -v "${CONTAINER_WS}:/workspaces/isaac_ros-dev"
    )
    append_jetson_container_args "${docker_args_name}"
    append_ros_discovery_container_args "${docker_args_name}"
}
append_gui_container_args() {
    # When GUI mode is enabled, pass X11 display access through to the
    # container; otherwise leave the argument array untouched.
    local gui_docker_args_name="$1"
    local -n gui_docker_args_ref="${gui_docker_args_name}"
    (( USE_GUI )) || return 0
    gui_docker_args_ref+=(
        -e "DISPLAY=${DISPLAY}"
        -e "QT_X11_NO_MITSHM=1"
        -v /tmp/.X11-unix:/tmp/.X11-unix:rw
    )
}
report_prepared_runtime_state() {
    # Validate that all prepared artifacts (host workspace, container
    # workspace, derived image) exist, and warn — without rebuilding —
    # whenever a stamp disagrees with the current repo state. Dies only on
    # hard failures: missing workspace/image/stamp or invalid install
    # artifacts.
    local current_context_hash=""
    local current_image_id=""
    info "Using existing prepared artifacts only. This debug path does not rebuild the image or workspace."
    [[ -f "${HOST_WS}/install/setup.bash" ]] || die "Host workspace is missing at ${HOST_WS}."
    [[ -f "${CONTAINER_WS}/install/setup.bash" ]] || die "Container workspace is missing at ${CONTAINER_WS}."
    [[ -f "${CONTAINER_STAMP}" ]] || die "Container workspace stamp is missing at ${CONTAINER_STAMP}."
    docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1 || die "Derived image ${DERIVED_IMAGE_TAG} is missing."
    current_context_hash="$(container_image_context_hash)"
    current_image_id="$(docker_image_id "${DERIVED_IMAGE_TAG}")"
    # Derived-image stamp: mismatches are informational in debug mode.
    if [[ -f "${IMAGE_STAMP}" ]]; then
        # shellcheck disable=SC1090
        source "${IMAGE_STAMP}"
        info "Prepared derived image context hash: ${STAMP_CONTEXT_HASH:-unknown}"
        info "Prepared derived image stamped at: ${STAMPED_AT:-unknown}"
        if [[ "${STAMP_CONTEXT_HASH:-}" != "${current_context_hash}" ]]; then
            warn "Prepared derived image context hash differs from the current repo state. Continuing with the existing image for diagnosis."
        fi
    else
        warn "Derived image stamp is missing at ${IMAGE_STAMP}. Continuing with the existing image for diagnosis."
    fi
    # Host-workspace stamp: version drift is also only warned about.
    if [[ -f "${HOST_STAMP}" ]]; then
        # shellcheck disable=SC1090
        source "${HOST_STAMP}"
        info "Prepared host Orbbec version: ${HOST_ORBBEC_VERSION:-unknown}"
        info "Prepared host workspace stamped at: ${HOST_STAMPED_AT:-unknown}"
        if [[ -n "${HOST_ORBBEC_VERSION:-}" && "${HOST_ORBBEC_VERSION:-}" != "${ORBBEC_VERSION}" ]]; then
            warn "Prepared host workspace version differs from the current repo target (${ORBBEC_VERSION}). Continuing with the prepared host workspace for diagnosis."
        fi
    else
        warn "Host workspace stamp is missing at ${HOST_STAMP}. Continuing with the prepared host workspace for diagnosis."
    fi
    # The host overlay must at least resolve the Orbbec driver package.
    source_ros_setup "${HOST_WS}"
    ros2 pkg prefix orbbec_camera >/dev/null 2>&1 || \
        die "Prepared host workspace cannot resolve orbbec_camera."
    # shellcheck disable=SC1090
    source "${CONTAINER_STAMP}"
    info "Prepared container workspace spec: ${STAMP_WORKSPACE_SPEC_VERSION:-unknown}"
    info "Prepared container workspace stamped at: ${STAMPED_AT:-unknown}"
    if [[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" != "${CONTAINER_WORKSPACE_SPEC_VERSION}" ]]; then
        warn "Prepared container workspace spec differs from the current repo target (${CONTAINER_WORKSPACE_SPEC_VERSION}). Continuing with the prepared workspace for diagnosis."
    fi
    if [[ "${STAMP_IMAGE_CONTEXT_HASH:-}" != "${current_context_hash}" ]]; then
        warn "Prepared container workspace context hash differs from the current repo state. Continuing with the prepared workspace for diagnosis."
    fi
    if [[ "${STAMP_IMAGE_ID:-}" != "${current_image_id}" ]]; then
        warn "Prepared container workspace was built against a different derived image. Continuing with the current prepared workspace for diagnosis."
    fi
    if ! validate_package_install_artifacts "${CONTAINER_WS}" "${PREPARED_CONTAINER_REQUIRED_PACKAGE}" "${PREPARED_CONTAINER_REQUIRED_PATHS[@]}"; then
        die "Prepared container install artifacts are missing or invalid."
    fi
    info "Validated prepared container artifacts: ${PREPARED_CONTAINER_REQUIRED_PATHS[*]}"
}
ensure_gemini2_ready_for_debug() {
    # Preflight the Gemini2 camera: clean up leftover driver processes,
    # then branch on the reported device state. Returns 0 when usable;
    # attempts exactly one automatic recovery when the USB device is
    # present but exposes no /dev/video nodes.
    local gemini2_state=""
    cleanup_residual_gemini2_processes "pre-debug Gemini2 cleanup" || true
    log_gemini2_device_state "Gemini2 device state before debug"
    gemini2_state="$(gemini2_device_state)"
    case "${gemini2_state}" in
        ready)
            return 0
            ;;
        usb_missing)
            return 1
            ;;
        usb_present_no_video)
            warn "Gemini2 USB device is present, but no /dev/video nodes were found. Attempting one automatic recovery."
            recover_gemini2_device "debug preflight" 0 1 1
            return $?
            ;;
        *)
            warn "Unexpected Gemini2 device state during debug preflight: ${gemini2_state}"
            return 1
            ;;
    esac
}
stop_host_camera_driver() {
    # Stop the backgrounded host camera driver with escalating signals
    # (INT, TERM, then KILL), waiting up to GEMINI2_SIGNAL_TIMEOUT_SECONDS
    # after each, then sweep any residual driver processes.
    local signal=""
    local deadline=0
    if [[ -n "${HOST_CAMERA_PID}" ]] && kill -0 "${HOST_CAMERA_PID}" 2>/dev/null; then
        info "Stopping host Gemini2 driver (pid=${HOST_CAMERA_PID})."
        for signal in INT TERM KILL; do
            kill "-${signal}" "${HOST_CAMERA_PID}" 2>/dev/null || true
            deadline=$((SECONDS + GEMINI2_SIGNAL_TIMEOUT_SECONDS))
            while ((SECONDS < deadline)); do
                if ! kill -0 "${HOST_CAMERA_PID}" 2>/dev/null; then
                    break 2  # process exited: leave both loops
                fi
                sleep 1
            done
        done
    fi
    HOST_CAMERA_PID=""
    cleanup_residual_gemini2_processes "post-debug Gemini2 cleanup" || true
    log_gemini2_device_state "Gemini2 device state after debug cleanup"
}
cleanup() {
    # Best-effort teardown on exit: stop the host camera driver and revoke
    # the temporary X11 grant if one was added.
    stop_host_camera_driver
    if (( XHOST_GRANTED )); then
        xhost -si:localuser:root >/dev/null 2>&1 || true
    fi
}
trap cleanup EXIT INT TERM
launch_host_camera() {
    # Start the Orbbec host camera driver in the background, capturing its
    # output in HOST_CAMERA_LOG and recording its pid in HOST_CAMERA_PID.
    # NOTE(review): the heredoc that builds launch_cmd was truncated during
    # extraction ("cat <>" is the damaged remnant) — the actual launch
    # command is missing here. Recover it from git history before editing.
    local launch_cmd=""
    launch_cmd=$(
    cat <>"${HOST_CAMERA_LOG}" 2>&1 &
    HOST_CAMERA_PID=$!
    info "Host camera log: ${HOST_CAMERA_LOG}"
}
wait_for_camera_streams_ready() {
    # Block (up to the 90 s enforced inside the embedded probe) until the
    # host camera publishes color/depth camera_info and image topics, and
    # verify both camera_info frame_ids match EXPECTED_CAMERA_INFO_FRAME.
    # Probe output is re-logged line by line; on failure it goes to stderr.
    local readiness_output=""
    source_ros_setup "${HOST_WS}"
    readiness_output="$(
        python3 - "${EXPECTED_CAMERA_INFO_FRAME}" <<'PY' 2>&1
import sys
import time
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import CameraInfo, Image
expected_frame = sys.argv[1]
timeout_seconds = 90.0
class CameraReadinessProbe(Node):
    def __init__(self):
        super().__init__('orbbec_host_readiness_probe')
        self.frames = {}
        self.received = {
            'color_info': False,
            'depth_info': False,
            'color_image': False,
            'depth_image': False,
        }
        self.create_subscription(
            CameraInfo,
            '/camera/color/camera_info',
            self._color_info_callback,
            qos_profile_sensor_data)
        self.create_subscription(
            CameraInfo,
            '/camera/depth/camera_info',
            self._depth_info_callback,
            qos_profile_sensor_data)
        self.create_subscription(
            Image,
            '/camera/color/image_raw',
            self._color_image_callback,
            qos_profile_sensor_data)
        self.create_subscription(
            Image,
            '/camera/depth/image_raw',
            self._depth_image_callback,
            qos_profile_sensor_data)
    def _color_info_callback(self, msg: CameraInfo):
        self.received['color_info'] = True
        self.frames['color_info'] = msg.header.frame_id
    def _depth_info_callback(self, msg: CameraInfo):
        self.received['depth_info'] = True
        self.frames['depth_info'] = msg.header.frame_id
    def _color_image_callback(self, msg: Image):
        self.received['color_image'] = True
    def _depth_image_callback(self, msg: Image):
        self.received['depth_image'] = True
def main():
    rclpy.init(args=None)
    node = CameraReadinessProbe()
    executor = SingleThreadedExecutor()
    executor.add_node(node)
    deadline = time.monotonic() + timeout_seconds
    try:
        while time.monotonic() < deadline:
            executor.spin_once(timeout_sec=0.2)
            if all(node.received.values()):
                break
        missing = [name for name, received in node.received.items() if not received]
        if missing:
            print(
                'Host stream readiness probe timed out waiting for: ' + ', '.join(missing),
                file=sys.stderr)
            return 1
        color_frame = node.frames.get('color_info', '')
        depth_frame = node.frames.get('depth_info', '')
        print(f'/camera/color/camera_info frame_id={color_frame}')
        print(f'/camera/depth/camera_info frame_id={depth_frame}')
        if color_frame != expected_frame:
            print(
                f'Unexpected /camera/color/camera_info frame_id: {color_frame} '
                f'(expected {expected_frame})',
                file=sys.stderr)
            return 1
        if depth_frame != expected_frame:
            print(
                f'Unexpected /camera/depth/camera_info frame_id: {depth_frame} '
                f'(expected {expected_frame})',
                file=sys.stderr)
            return 1
        return 0
    finally:
        executor.remove_node(node)
        node.destroy_node()
        rclpy.shutdown()
sys.exit(main())
PY
    )" || {
        printf '%s\n' "${readiness_output}" >&2
        return 1
    }
    while IFS= read -r readiness_line; do
        [[ -n "${readiness_line}" ]] || continue
        info "${readiness_line}"
    done <<< "${readiness_output}"
    return 0
}
probe_container_camera_visibility() {
    # From inside the derived container, subscribe to the host camera's
    # color/depth camera_info topics and verify their frame_ids. On probe
    # failure the embedded script dumps a discovery/topic/node snapshot to
    # help diagnose DDS connectivity; all output is re-logged via info.
    local probe_output=""
    local probe_args=()
    build_base_container_args probe_args
    probe_args+=(
        -e "EXPECTED_CAMERA_INFO_FRAME=${EXPECTED_CAMERA_INFO_FRAME}"
        -e "PROBE_TIMEOUT_SECONDS=${DEBUG_CAMERA_VISIBILITY_TIMEOUT_SEC}"
    )
    probe_output="$(
        docker_cmd "${probe_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "$(cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
if (( restore_nounset )); then
set -u
fi
print_discovery_snapshot() {
printf '[container-probe] Container ROS discovery env: ROS_DOMAIN_ID=%s, ROS_LOCALHOST_ONLY=%s, RMW_IMPLEMENTATION=%s, ROS_AUTOMATIC_DISCOVERY_RANGE=%s, ROS_STATIC_PEERS=%s, CYCLONEDDS_URI=%s, CYCLONEDDS_HOME=%s, FASTDDS_DEFAULT_PROFILES_FILE=%s, FASTRTPS_DEFAULT_PROFILES_FILE=%s\n' \
"${ROS_DOMAIN_ID:-}" \
"${ROS_LOCALHOST_ONLY:-}" \
"${RMW_IMPLEMENTATION:-}" \
"${ROS_AUTOMATIC_DISCOVERY_RANGE:-}" \
"${ROS_STATIC_PEERS:-}" \
"${CYCLONEDDS_URI:-}" \
"${CYCLONEDDS_HOME:-}" \
"${FASTDDS_DEFAULT_PROFILES_FILE:-}" \
"${FASTRTPS_DEFAULT_PROFILES_FILE:-}"
printf '[container-probe] ros2 topic list snapshot:\n'
ros2 topic list 2>&1 | sed 's/^/[container-probe][topic] /'
printf '[container-probe] ros2 node list snapshot:\n'
ros2 node list 2>&1 | sed 's/^/[container-probe][node] /'
}
probe_status=0
set +e
python3 - "${EXPECTED_CAMERA_INFO_FRAME}" "${PROBE_TIMEOUT_SECONDS}" <<'PY'
import sys
import time
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import CameraInfo
expected_frame = sys.argv[1]
timeout_seconds = float(sys.argv[2])
class CameraVisibilityProbe(Node):
    def __init__(self):
        super().__init__('orbbec_container_camera_visibility_probe')
        self.frames = {}
        self.create_subscription(
            CameraInfo,
            '/camera/color/camera_info',
            self._color_info_callback,
            qos_profile_sensor_data)
        self.create_subscription(
            CameraInfo,
            '/camera/depth/camera_info',
            self._depth_info_callback,
            qos_profile_sensor_data)
    def _color_info_callback(self, msg: CameraInfo):
        self.frames['color'] = msg.header.frame_id
    def _depth_info_callback(self, msg: CameraInfo):
        self.frames['depth'] = msg.header.frame_id
def main() -> int:
    print('[container-probe] Waiting for host camera_info topics inside the container', flush=True)
    rclpy.init(args=None)
    node = CameraVisibilityProbe()
    executor = SingleThreadedExecutor()
    executor.add_node(node)
    deadline = time.monotonic() + timeout_seconds
    try:
        while time.monotonic() < deadline:
            executor.spin_once(timeout_sec=0.2)
            if 'color' in node.frames and 'depth' in node.frames:
                break
        missing = []
        if 'color' not in node.frames:
            missing.append('/camera/color/camera_info')
        if 'depth' not in node.frames:
            missing.append('/camera/depth/camera_info')
        if missing:
            print(
                '[container-probe] Timed out waiting for: ' + ', '.join(missing),
                file=sys.stderr,
                flush=True)
            return 1
        print(f'[container-probe] Observed /camera/color/camera_info frame_id: {node.frames["color"]}', flush=True)
        print(f'[container-probe] Observed /camera/depth/camera_info frame_id: {node.frames["depth"]}', flush=True)
        if node.frames['color'] != expected_frame:
            print(
                f'[container-probe] Unexpected /camera/color/camera_info frame_id: {node.frames["color"]} '
                f'(expected {expected_frame})',
                file=sys.stderr,
                flush=True)
            return 1
        if node.frames['depth'] != expected_frame:
            print(
                f'[container-probe] Unexpected /camera/depth/camera_info frame_id: {node.frames["depth"]} '
                f'(expected {expected_frame})',
                file=sys.stderr,
                flush=True)
            return 1
        print('[container-probe] Container camera visibility probe passed.', flush=True)
        return 0
    finally:
        executor.remove_node(node)
        node.destroy_node()
        rclpy.shutdown()
sys.exit(main())
PY
probe_status=$?
set -e
if (( probe_status != 0 )); then
print_discovery_snapshot
exit "${probe_status}"
fi
EOF
)" 2>&1
    )" || {
        printf '%s\n' "${probe_output}" >&2
        return 1
    }
    while IFS= read -r probe_line; do
        [[ -n "${probe_line}" ]] || continue
        info "${probe_line}"
    done <<< "${probe_output}"
    return 0
}
probe_container_static_tf() {
    # Launch the managed static-transform publisher inside the container
    # and verify the odom <- base_link/camera_link/camera_color_optical_frame
    # chain becomes resolvable within the timeout. On failure, the tail of
    # the launch log is included in the probe output for diagnosis.
    local probe_output=""
    local probe_args=()
    build_base_container_args probe_args
    probe_args+=(-e "PROBE_TIMEOUT_SECONDS=${DEBUG_STATIC_TF_TIMEOUT_SEC}")
    probe_output="$(
        docker_cmd "${probe_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "$(cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
if (( restore_nounset )); then
set -u
fi
LOG_FILE="/tmp/orbbec-tf-probe.log"
LAUNCH_PID=""
LAUNCH_STOP_TIMEOUT=8
terminate_launch() {
local signal=""
local deadline=0
if [[ -z "${LAUNCH_PID}" ]] || ! kill -0 "${LAUNCH_PID}" 2>/dev/null; then
LAUNCH_PID=""
return 0
fi
for signal in INT TERM KILL; do
kill "-${signal}" "${LAUNCH_PID}" 2>/dev/null || true
deadline=$((SECONDS + LAUNCH_STOP_TIMEOUT))
while ((SECONDS < deadline)); do
if ! kill -0 "${LAUNCH_PID}" 2>/dev/null; then
wait "${LAUNCH_PID}" 2>/dev/null || true
LAUNCH_PID=""
return 0
fi
sleep 1
done
done
wait "${LAUNCH_PID}" 2>/dev/null || true
LAUNCH_PID=""
}
cleanup() {
terminate_launch
}
trap cleanup EXIT INT TERM
ros2 launch nvblox_examples_bringup orbbec_transforms.launch.py >"${LOG_FILE}" 2>&1 &
LAUNCH_PID=$!
status=0
python3 - "${PROBE_TIMEOUT_SECONDS}" <<'PY' || status=$?
import sys
import time
import rclpy
from rclpy.duration import Duration
from rclpy.time import Time
from tf2_ros import Buffer, TransformListener
timeout_seconds = float(sys.argv[1])
required_transforms = [
    ('odom', 'base_link'),
    ('odom', 'camera_link'),
    ('odom', 'camera_color_optical_frame'),
]
def main() -> int:
    print('[container-tf-probe] Waiting for managed static TF chain inside the container', flush=True)
    rclpy.init(args=None)
    node = rclpy.create_node('orbbec_container_tf_probe')
    tf_buffer = Buffer(cache_time=Duration(seconds=timeout_seconds))
    tf_listener = TransformListener(tf_buffer, node, spin_thread=False)
    deadline = time.monotonic() + timeout_seconds
    last_missing = []
    try:
        while time.monotonic() < deadline:
            rclpy.spin_once(node, timeout_sec=0.2)
            last_missing = []
            for target_frame, source_frame in required_transforms:
                if not tf_buffer.can_transform(
                        target_frame,
                        source_frame,
                        Time(),
                        timeout=Duration(seconds=0.1)):
                    last_missing.append(f'{target_frame} <- {source_frame}')
            if not last_missing:
                print(
                    '[container-tf-probe] TF probe passed for odom <- base_link, '
                    'odom <- camera_link, odom <- camera_color_optical_frame',
                    flush=True)
                return 0
        print(
            '[container-tf-probe] TF probe failed. Missing transforms: '
            + ', '.join(last_missing or ['unknown']),
            file=sys.stderr,
            flush=True)
        return 1
    finally:
        del tf_listener
        node.destroy_node()
        rclpy.shutdown()
sys.exit(main())
PY
if (( status != 0 )); then
printf '[container-tf-probe] Relevant launch log tail:\n'
tail -n 40 "${LOG_FILE}" 2>/dev/null || true
fi
terminate_launch
exit "${status}"
EOF
)" 2>&1
    )" || {
        printf '%s\n' "${probe_output}" >&2
        return 1
    }
    while IFS= read -r probe_line; do
        [[ -n "${probe_line}" ]] || continue
        info "${probe_line}"
    done <<< "${probe_output}"
    return 0
}
# End-to-end probe: launch the full demo inside the derived container and wait
# for the first nvblox map output (ESDF pointcloud or map slice). The embedded
# script runs the launch and an rclpy subscriber concurrently and supervises
# both, classifying launch-side failures from the launch log. Returns 0 when
# map output was observed; otherwise echoes the captured output to stderr and
# returns 1.
probe_full_demo_runtime_output() {
local probe_output=""
local probe_args=()
build_base_container_args probe_args
probe_args+=(
-e "NVBLOX_LAUNCH_FILE=${LAUNCH_FILE}"
-e "RUNTIME_PROBE_TIMEOUT_SECONDS=${DEBUG_RUNTIME_OUTPUT_TIMEOUT_SEC}"
)
# GUI args are appended so the probe exercises the same launch path the real
# demo will use (X11 mounts/env when available).
append_gui_container_args probe_args
probe_output="$(
docker_cmd "${probe_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "$(cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
source "/workspaces/isaac_ros-dev/.setup-nvbox/container_workspace.env"
if (( restore_nounset )); then
set -u
fi
PACKAGE_PREFIX="$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)"
[[ -n "${PACKAGE_PREFIX}" ]]
[[ -f "${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}" ]]
printf '[full-demo-probe] Workspace spec: %s\n' "${STAMP_WORKSPACE_SPEC_VERSION:-unknown}"
printf '[full-demo-probe] Launch file: %s\n' "${NVBLOX_LAUNCH_FILE}"
printf '[full-demo-probe] Managed static TF chain: odom -> base_link -> camera_link -> camera_color_optical_frame\n'
LAUNCH_LOG="/tmp/nvblox-full-demo-probe.log"
LAUNCH_PID=""
PROBE_PID=""
cleanup() {
if [[ -n "${PROBE_PID}" ]] && kill -0 "${PROBE_PID}" 2>/dev/null; then
kill -TERM "${PROBE_PID}" 2>/dev/null || true
wait "${PROBE_PID}" 2>/dev/null || true
fi
if [[ -n "${LAUNCH_PID}" ]] && kill -0 "${LAUNCH_PID}" 2>/dev/null; then
kill -INT "${LAUNCH_PID}" 2>/dev/null || true
wait "${LAUNCH_PID}" 2>/dev/null || true
fi
}
trap cleanup EXIT INT TERM
ros2 launch nvblox_examples_bringup "${NVBLOX_LAUNCH_FILE}" >"${LAUNCH_LOG}" 2>&1 &
LAUNCH_PID=$!
python3 - "${RUNTIME_PROBE_TIMEOUT_SECONDS}" <<'PY' &
import sys
import time
import rclpy
from nav_msgs.msg import OccupancyGrid
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import PointCloud2
timeout_seconds = float(sys.argv[1])
class NvbloxOutputProbe(Node):
def __init__(self):
super().__init__('nvblox_runtime_output_probe')
self.result = None
self.create_subscription(
PointCloud2,
'/nvblox_node/static_esdf_pointcloud',
self._pointcloud_callback,
qos_profile_sensor_data)
self.create_subscription(
OccupancyGrid,
'/nvblox_node/static_map_slice',
self._map_slice_callback,
10)
def _pointcloud_callback(self, msg: PointCloud2):
self.result = (
'/nvblox_node/static_esdf_pointcloud',
f'frame_id={msg.header.frame_id or ""} width={msg.width} height={msg.height}')
def _map_slice_callback(self, msg: OccupancyGrid):
self.result = (
'/nvblox_node/static_map_slice',
f'frame_id={msg.header.frame_id or ""} width={msg.info.width} '
f'height={msg.info.height} resolution={msg.info.resolution:.3f}')
def main() -> int:
print(
'[full-demo-probe] Waiting for /nvblox_node/static_esdf_pointcloud or '
'/nvblox_node/static_map_slice',
flush=True)
rclpy.init(args=None)
node = NvbloxOutputProbe()
executor = SingleThreadedExecutor()
executor.add_node(node)
deadline = time.monotonic() + timeout_seconds
try:
while time.monotonic() < deadline and node.result is None:
executor.spin_once(timeout_sec=0.2)
if node.result is None:
print(
'[full-demo-probe] Runtime output probe timed out waiting for '
'/nvblox_node/static_esdf_pointcloud or /nvblox_node/static_map_slice.',
file=sys.stderr,
flush=True)
return 2
topic_name, details = node.result
print(f'[full-demo-probe] Runtime output probe received {topic_name}: {details}', flush=True)
return 0
finally:
executor.remove_node(node)
node.destroy_node()
rclpy.shutdown()
sys.exit(main())
PY
PROBE_PID=$!
while true; do
if ! kill -0 "${LAUNCH_PID}" 2>/dev/null; then
wait "${LAUNCH_PID}" || launch_status=$?
launch_status="${launch_status:-0}"
if kill -0 "${PROBE_PID}" 2>/dev/null; then
kill -TERM "${PROBE_PID}" 2>/dev/null || true
wait "${PROBE_PID}" 2>/dev/null || true
fi
if grep -q 'Camera info readiness probe timed out waiting for:' "${LAUNCH_LOG}"; then
printf '[full-demo-probe] Launch failed during internal camera readiness probe.\n'
grep 'Camera info readiness probe timed out waiting for:' "${LAUNCH_LOG}" | tail -n 1
printf '[full-demo-probe] Relevant launch log tail:\n'
tail -n 40 "${LAUNCH_LOG}" 2>/dev/null || true
exit 1
fi
if grep -q 'TF readiness probe failed.' "${LAUNCH_LOG}"; then
printf '[full-demo-probe] Launch failed during TF readiness.\n'
grep 'TF readiness probe failed.' "${LAUNCH_LOG}" | tail -n 1
printf '[full-demo-probe] Relevant launch log tail:\n'
tail -n 40 "${LAUNCH_LOG}" 2>/dev/null || true
exit 1
fi
printf '[full-demo-probe] Launch exited before runtime output probe succeeded (status=%s).\n' "${launch_status}"
printf '[full-demo-probe] Relevant launch log tail:\n'
tail -n 40 "${LAUNCH_LOG}" 2>/dev/null || true
exit 1
fi
if ! kill -0 "${PROBE_PID}" 2>/dev/null; then
wait "${PROBE_PID}" || probe_status=$?
probe_status="${probe_status:-0}"
if (( probe_status == 0 )); then
printf '[full-demo-probe] Runtime output probe passed. Stopping demo launch.\n'
exit 0
fi
printf '[full-demo-probe] Runtime output probe finished without observing map output.\n'
printf '[full-demo-probe] Relevant launch log tail:\n'
tail -n 40 "${LAUNCH_LOG}" 2>/dev/null || true
exit 1
fi
sleep 1
done
EOF
)" 2>&1
)" || {
printf '%s\n' "${probe_output}" >&2
return 1
}
# Re-emit each non-empty probe line through the host logger for readability.
while IFS= read -r probe_line; do
[[ -n "${probe_line}" ]] || continue
info "${probe_line}"
done <<< "${probe_output}"
return 0
}
# Decide whether the demo can use the GUI launch file or must fall back to
# headless probing. Sets LAUNCH_FILE, and on success grants X11 access to the
# container (XHOST_GRANTED=1, USE_GUI=1); every fallback path forces
# HEADLESS=1 and the debug launch file.
configure_display() {
  local fallback_reason=""
  # Explicit headless request: nothing to probe.
  if (( HEADLESS )); then
    LAUNCH_FILE="orbbec_debug.launch.py"
    return 0
  fi
  # Guard chain: each GUI prerequisite either passes or records why it failed.
  if [[ -z "${DISPLAY:-}" ]]; then
    fallback_reason="DISPLAY is not set."
  elif [[ ! -d /tmp/.X11-unix ]]; then
    fallback_reason="/tmp/.X11-unix is missing."
  elif ! command -v xhost >/dev/null 2>&1; then
    fallback_reason="xhost is not available."
  elif xhost +si:localuser:root >/dev/null 2>&1; then
    # All prerequisites hold and root was granted X11 access: use the GUI.
    XHOST_GRANTED=1
    USE_GUI=1
    LAUNCH_FILE="orbbec_example.launch.py"
    return 0
  else
    fallback_reason="Failed to grant X11 access for the container."
  fi
  # Common headless fallback for every failed prerequisite above.
  warn "${fallback_reason} Falling back to headless launch probing."
  HEADLESS=1
  LAUNCH_FILE="orbbec_debug.launch.py"
}
# --- Runtime connectivity debug sequence -------------------------------------
# Prepare the managed FastDDS/UDP discovery runtime, then walk seven staged
# probes from device state all the way to full-demo map output.
enable_managed_fastdds_udp_runtime "${MANAGED_ROOT}"
export_effective_ros_discovery_env
configure_display
report_prepared_runtime_state
begin_stage "1/7 Gemini2 device state"
if ensure_gemini2_ready_for_debug; then
pass_stage
else
fail_stage "Gemini2 is not ready for runtime debugging."
fi
begin_stage "2/7 Host ROS discovery env"
log_ros_discovery_env "Host ROS discovery env"
pass_stage
begin_stage "3/7 Container ROS discovery env"
info "Container ROS discovery env: $(ros_discovery_env_summary)"
pass_stage
begin_stage "4/7 Host camera stream readiness"
launch_host_camera
if wait_for_camera_streams_ready; then
pass_stage
else
# Distinguish a dead driver process from a live-but-silent camera stream.
if ! kill -0 "${HOST_CAMERA_PID}" 2>/dev/null; then
fail_stage "Host Gemini2 driver exited before camera streams became ready. Check ${HOST_CAMERA_LOG}."
fi
fail_stage "Camera stream readiness probe failed. Check ${HOST_CAMERA_LOG}."
fi
begin_stage "5/7 Container camera visibility probe"
if probe_container_camera_visibility; then
pass_stage
else
fail_stage "The container cannot discover host camera_info topics with the current ROS discovery environment."
fi
begin_stage "6/7 Container static TF probe"
if probe_container_static_tf; then
pass_stage
else
fail_stage "The container managed static TF chain is not queryable."
fi
begin_stage "7/7 Full demo runtime output probe"
if probe_full_demo_runtime_output; then
pass_stage
else
fail_stage "The current prepared launch/runtime path did not reach stable map output."
fi
info "Runtime connectivity debug completed successfully."
================================================
FILE: reComputer/scripts/nvblox/scripts/preflight.sh
================================================
#!/usr/bin/env bash
# preflight.sh — sanity checks before preparing and/or running the nvblox demo.
# --prepare validates build-time prerequisites; --run validates runtime
# prerequisites (camera state, derived image). Both flags may be combined.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../lib/common.sh"
MANAGED_ROOT="${MANAGED_ROOT_DEFAULT}"
MODE_PREPARE=0
MODE_RUN=0
# Parse CLI flags.
while (($#)); do
case "$1" in
--managed-root)
shift
MANAGED_ROOT="$1"
;;
--prepare)
MODE_PREPARE=1
;;
--run)
MODE_RUN=1
;;
*)
die "Unknown argument: $1"
;;
esac
shift
done
(( MODE_PREPARE || MODE_RUN )) || die "preflight.sh requires --prepare, --run, or both."
ensure_supported_user_context
if should_reexec_as_setup_user; then
die "Do not invoke preflight.sh with sudo directly. Run reComputer run nvblox instead."
fi
guard_managed_root_path "${MANAGED_ROOT}"
# --prepare may create the managed root; --run alone requires it to exist.
if (( MODE_PREPARE )); then
bootstrap_managed_root "${MANAGED_ROOT}"
else
require_bootstrapped_managed_root "${MANAGED_ROOT}"
fi
assert_command sudo
assert_command git
assert_command bash
assert_supported_platform
check_apt_locks
ensure_docker_access
if (( MODE_PREPARE )); then
# Network endpoints are only needed on the prepare path; warn, don't fail.
warn_on_unreachable_endpoints "https://github.com" "https://packages.ros.org" "https://raw.githubusercontent.com/ros/rosdistro/master/ros.key"
if ! base_image="$(select_base_image)"; then
die "No supported local base image found. Run reComputer run nvblox to download and load the OneDrive archive, or ensure $(acceptable_base_image_hint) already exists."
fi
info "Selected base image: ${base_image}"
fi
if (( MODE_RUN )); then
gemini2_state="$(gemini2_device_state)"
log_gemini2_device_state "Gemini2 device state during preflight"
case "${gemini2_state}" in
ready)
;;
usb_missing)
die "Gemini2 is not connected. Current device state: usb_missing."
;;
usb_present_no_video)
# USB enumeration without /dev/video nodes can be transient; attempt one
# automatic recovery cycle before giving up.
warn "Gemini2 USB device is present, but no /dev/video nodes were found. Attempting one automatic recovery."
if ! recover_gemini2_device "run preflight" 1 1 1; then
gemini2_state="$(gemini2_device_state)"
die "Gemini2 USB device is present, but video nodes were not recovered. Current device state: ${gemini2_state}. Reconnect the camera if this persists."
fi
;;
*)
die "Unexpected Gemini2 device state during preflight: ${gemini2_state}"
;;
esac
# When --run is used without --prepare, the derived image must already exist.
if (( ! MODE_PREPARE )) && ! docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1; then
die "Derived image ${DERIVED_IMAGE_TAG} does not exist. Run with --prepare-only or the default mode first."
fi
fi
info "Preflight checks passed."
================================================
FILE: reComputer/scripts/nvblox/scripts/prepare_container.sh
================================================
#!/usr/bin/env bash
# prepare_container.sh — build the derived nvblox image and prepare the colcon
# workspace inside it. Idempotent via image/workspace stamps; --force-rebuild
# bypasses the stamps.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../lib/common.sh"
MANAGED_ROOT="${MANAGED_ROOT_DEFAULT}"
FORCE_REBUILD=0
# Parse CLI flags.
while (($#)); do
case "$1" in
--managed-root)
shift
MANAGED_ROOT="$1"
;;
--force-rebuild)
FORCE_REBUILD=1
;;
*)
die "Unknown argument: $1"
;;
esac
shift
done
ensure_supported_user_context
if should_reexec_as_setup_user; then
die "Do not invoke prepare_container.sh with sudo directly. Run reComputer run nvblox instead."
fi
bootstrap_managed_root "${MANAGED_ROOT}"
ensure_docker_access
# Paths and identity inputs for the stamp-based idempotency checks below.
CONTAINER_WS="${MANAGED_ROOT}/isaac_ros-dev"
IMAGE_STAMP="${MANAGED_ROOT}/.stamps/derived_image.env"
DOCKERFILE_PATH="${PROJECT_ROOT}/docker/Dockerfile.nvblox_orbbec"
CONTEXT_HASH="$(container_image_context_hash)"
# Launch/config artifacts that must exist in the prepared container install.
PREPARED_CONTAINER_REQUIRED_PACKAGE="nvblox_examples_bringup"
PREPARED_CONTAINER_REQUIRED_PATHS=(
"launch/orbbec_transforms.launch.py"
"launch/orbbec_example.launch.py"
"launch/orbbec_debug.launch.py"
"launch/orbbec_nvblox_standalone.launch.py"
"config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
# Smoke-test the NVIDIA container runtime: run a trivial command in the given
# image with the standard Jetson docker-run arguments. All output is captured
# into the optional log file (default /dev/null); docker's exit status is the
# function's return value.
probe_gpu_runtime() {
  local target_image="$1"
  local capture_log="${2:-/dev/null}"
  local run_args=(run --rm --entrypoint /bin/bash)
  append_jetson_container_args run_args
  docker_cmd "${run_args[@]}" "${target_image}" -lc 'echo runtime-ok >/dev/null' >"${capture_log}" 2>&1
}
# Verify the NVIDIA container runtime can start the base image; if the probe
# fails, switch the runtime mode to "csv" in the nvidia-container-runtime
# config, restart docker, and re-probe. Dies if the runtime still fails.
ensure_nvidia_runtime() {
local base_image="$1"
local config_file="/etc/nvidia-container-runtime/config.toml"
local probe_log
probe_log="$(mktemp)"
# Remove the temp log whenever this function returns, on any path.
trap 'rm -f "${probe_log}"' RETURN
if probe_gpu_runtime "${base_image}" "${probe_log}"; then
info "NVIDIA container runtime probe succeeded."
return 0
fi
warn "Initial NVIDIA runtime probe output:"
sed 's/^/[probe] /' "${probe_log}" >&2 || true
warn "NVIDIA runtime probe failed. Trying to switch the runtime to csv mode."
[[ -f "${config_file}" ]] || die "Runtime config file ${config_file} was not found."
# Jetson setups commonly need mode = "csv" instead of "auto"/"cdi".
run_sudo sed -i -E 's/mode = "(auto|cdi)"/mode = "csv"/' "${config_file}"
run_sudo systemctl restart docker
# Truncate the log, then probe again with the reconfigured runtime.
: > "${probe_log}"
if ! probe_gpu_runtime "${base_image}" "${probe_log}"; then
sed 's/^/[probe] /' "${probe_log}" >&2 || true
die "NVIDIA runtime probe still fails after switching to csv mode."
fi
info "NVIDIA container runtime is now working."
}
# Return 0 when the recorded derived-image stamp matches the current base
# image (ref and id) and build-context hash, and the derived image exists in
# the local docker store.
image_stamp_current() {
  [[ -f "${IMAGE_STAMP}" ]] || return 1
  # shellcheck disable=SC1090
  source "${IMAGE_STAMP}"
  if [[ "${STAMP_BASE_IMAGE_REF:-}" != "${BASE_IMAGE_REF}" ]] ||
     [[ "${STAMP_BASE_IMAGE_ID:-}" != "${BASE_IMAGE_ID}" ]] ||
     [[ "${STAMP_CONTEXT_HASH:-}" != "${CONTEXT_HASH}" ]]; then
    return 1
  fi
  docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1
}
# Record the base image ref/id, the build-context hash, and a timestamp into
# the derived-image stamp file, overwriting any previous stamp.
write_image_stamp() {
  local stamped_at
  stamped_at="$(date -Is 2>/dev/null || date)"
  {
    printf 'STAMP_BASE_IMAGE_REF=%q\n' "${BASE_IMAGE_REF}"
    printf 'STAMP_BASE_IMAGE_ID=%q\n' "${BASE_IMAGE_ID}"
    printf 'STAMP_CONTEXT_HASH=%q\n' "${CONTEXT_HASH}"
    printf 'STAMPED_AT=%q\n' "${stamped_at}"
  } > "${IMAGE_STAMP}"
}
# Build the derived image from the selected base image using the repository
# Dockerfile, passing the base image and ROS distro through as build args.
build_derived_image() {
  local build_args=(
    build
    --network host
    --build-arg "BASE_IMAGE=${BASE_IMAGE_REF}"
    --build-arg "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
    -t "${DERIVED_IMAGE_TAG}"
    -f "${DOCKERFILE_PATH}"
    "${PROJECT_ROOT}"
  )
  info "Building derived image ${DERIVED_IMAGE_TAG} from ${BASE_IMAGE_REF}."
  docker_cmd "${build_args[@]}"
}
# Run the in-image preparation script to populate (or refresh) the colcon
# workspace that is bind-mounted at /workspaces/isaac_ros-dev. Environment
# variables carry the expected spec version and image identity so the script
# can stamp the result for later validation.
prepare_container_workspace() {
local args=(run --rm)
mkdir -p "${CONTAINER_WS}/src" "${CONTAINER_WS}/.setup-nvbox"
info "Preparing container workspace in ${CONTAINER_WS}."
append_jetson_container_args args
args+=(
-e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}" \
-e "FORCE_REBUILD=${FORCE_REBUILD}" \
-e "EXPECTED_WORKSPACE_SPEC_VERSION=${CONTAINER_WORKSPACE_SPEC_VERSION}" \
-e "SETUP_IMAGE_ID=${DERIVED_IMAGE_ID}" \
-e "SETUP_IMAGE_CONTEXT_HASH=${CONTEXT_HASH}" \
-e "COMMUNITY_REPO_URL=${COMMUNITY_REPO_URL_DEFAULT}" \
-e "COMMUNITY_REPO_BRANCH=${COMMUNITY_REPO_BRANCH_DEFAULT}" \
-v "${CONTAINER_WS}:/workspaces/isaac_ros-dev" \
"${DERIVED_IMAGE_TAG}" \
/opt/nvblox/bin/prepare_container_workspace.sh
)
docker_cmd "${args[@]}"
}
# Validate the prepared container workspace: the install tree exists, its
# stamp matches the current spec version / context hash / image id, and the
# required launch and config artifacts are installed. Dies on any mismatch.
validate_prepared_container_workspace() {
local stamp_path="${CONTAINER_WS}/.setup-nvbox/container_workspace.env"
local current_image_id=""
[[ -f "${CONTAINER_WS}/install/setup.bash" ]] || die "Prepared container workspace is missing ${CONTAINER_WS}/install/setup.bash."
[[ -f "${stamp_path}" ]] || die "Prepared container workspace stamp is missing at ${stamp_path}."
current_image_id="$(docker_image_id "${DERIVED_IMAGE_TAG}")"
# shellcheck disable=SC1090
source "${stamp_path}"
[[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" == "${CONTAINER_WORKSPACE_SPEC_VERSION}" ]] || \
die "Prepared container workspace spec is ${STAMP_WORKSPACE_SPEC_VERSION:-unknown}, expected ${CONTAINER_WORKSPACE_SPEC_VERSION}."
[[ "${STAMP_IMAGE_CONTEXT_HASH:-}" == "${CONTEXT_HASH}" ]] || \
die "Prepared container workspace context hash is stale. Expected ${CONTEXT_HASH}, got ${STAMP_IMAGE_CONTEXT_HASH:-unknown}."
[[ "${STAMP_IMAGE_ID:-}" == "${current_image_id}" ]] || \
die "Prepared container workspace was built against image ${STAMP_IMAGE_ID:-unknown}, expected ${current_image_id}."
if ! validate_package_install_artifacts "${CONTAINER_WS}" "${PREPARED_CONTAINER_REQUIRED_PACKAGE}" "${PREPARED_CONTAINER_REQUIRED_PATHS[@]}"; then
die "Prepared container install artifacts are missing or invalid inside the container workspace."
fi
info "Prepared container workspace spec: ${STAMP_WORKSPACE_SPEC_VERSION}"
info "Prepared container workspace stamped at: ${STAMPED_AT:-unknown}"
info "Verified prepared launch artifacts: ${PREPARED_CONTAINER_REQUIRED_PATHS[*]}"
}
# --- Main flow ---------------------------------------------------------------
BASE_IMAGE_REF="$(select_base_image || true)"
[[ -n "${BASE_IMAGE_REF}" ]] || die "No supported local base image found. Run reComputer run nvblox to download and load the OneDrive archive, or ensure $(acceptable_base_image_hint) already exists."
BASE_IMAGE_ID="$(docker_image_id "${BASE_IMAGE_REF}")"
ensure_nvidia_runtime "${BASE_IMAGE_REF}"
# Rebuild only when forced or when the stamp no longer matches reality.
if (( FORCE_REBUILD )) || ! image_stamp_current; then
build_derived_image
write_image_stamp
else
info "Derived image ${DERIVED_IMAGE_TAG} is current. Skipping rebuild."
fi
DERIVED_IMAGE_ID="$(docker_image_id "${DERIVED_IMAGE_TAG}")"
prepare_container_workspace
# The container may have written root-owned files into the bind mount.
repair_managed_root_ownership "${MANAGED_ROOT}"
validate_prepared_container_workspace
info "Container preparation complete."
================================================
FILE: reComputer/scripts/nvblox/scripts/prepare_host.sh
================================================
#!/usr/bin/env bash
# prepare_host.sh — install ROS 2 and build the host-side Orbbec camera
# workspace. Idempotent via a stamp file; --force-rebuild forces a clean
# colcon build.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../lib/common.sh"
MANAGED_ROOT="${MANAGED_ROOT_DEFAULT}"
FORCE_REBUILD=0
# Parse CLI flags.
while (($#)); do
case "$1" in
--managed-root)
shift
MANAGED_ROOT="$1"
;;
--force-rebuild)
FORCE_REBUILD=1
;;
*)
die "Unknown argument: $1"
;;
esac
shift
done
ensure_supported_user_context
if should_reexec_as_setup_user; then
die "Do not invoke prepare_host.sh with sudo directly. Run reComputer run nvblox instead."
fi
bootstrap_managed_root "${MANAGED_ROOT}"
# Host workspace layout and stamp path.
HOST_WS="${MANAGED_ROOT}/ros2_ws"
HOST_REPO="${HOST_WS}/src/OrbbecSDK_ROS2"
HOST_STAMP="${MANAGED_ROOT}/.stamps/host_workspace.env"
# Ensure the en_US.UTF-8 locale exists and is active for this process.
# Idempotent: detects an already-generated locale and skips regeneration.
ensure_locale() {
  install_packages_if_missing locales
  # `locale -a` normalizes the codeset name to "utf8" (no hyphen), so accept
  # both spellings. The previous hyphen-only pattern ('^en_US\.utf-8$') never
  # matched that output, which made every run append another line to
  # /etc/locale.gen and re-run locale-gen.
  if ! locale -a 2>/dev/null | grep -qiE '^en_US\.utf-?8$'; then
    info "Generating en_US.UTF-8 locale."
    # Only append the locale.gen entry if it is not already present, to keep
    # repeated runs from accumulating duplicate lines.
    if ! grep -qE '^en_US\.UTF-8 UTF-8$' /etc/locale.gen 2>/dev/null; then
      printf 'en_US.UTF-8 UTF-8\n' | run_sudo tee -a /etc/locale.gen >/dev/null
    fi
    run_sudo locale-gen en_US.UTF-8
  fi
  run_sudo update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
  export LANG=en_US.UTF-8
  export LC_ALL=en_US.UTF-8
}
# Add the ROS 2 apt repository (signing key + source list). No-op when ROS is
# already installed under /opt/ros/<distro>.
ensure_ros2_repository() {
local ros_keyring="/usr/share/keyrings/ros-archive-keyring.gpg"
local ros_source="/etc/apt/sources.list.d/ros2.list"
local repo_line=""
if [[ -f "/opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash" ]]; then
return 0
fi
info "Installing ROS 2 apt repository."
install_packages_if_missing curl gnupg lsb-release ca-certificates software-properties-common
run_sudo add-apt-repository universe -y
if [[ ! -f "${ros_keyring}" ]]; then
run_sudo curl -fsSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o "${ros_keyring}"
fi
# UBUNTU_CODENAME used below is provided by /etc/os-release.
# shellcheck disable=SC1091
source /etc/os-release
repo_line="deb [arch=$(dpkg --print-architecture) signed-by=${ros_keyring}] http://packages.ros.org/ros2/ubuntu ${UBUNTU_CODENAME} main"
# Write the source list only if the exact line is not already present.
if [[ ! -f "${ros_source}" ]] || ! grep -Fqx "${repo_line}" "${ros_source}" 2>/dev/null; then
printf '%s\n' "${repo_line}" | run_sudo tee "${ros_source}" >/dev/null
fi
}
# Install ROS 2 (desktop variant) plus workspace tooling, unless the distro
# setup script already exists on this machine.
ensure_ros2_humble() {
  local distro_setup="/opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash"
  if [[ -f "${distro_setup}" ]]; then
    info "ROS 2 ${ROS_DISTRO_DEFAULT} already installed."
    return 0
  fi
  ensure_locale
  ensure_ros2_repository
  install_packages_if_missing "ros-${ROS_DISTRO_DEFAULT}-desktop" python3-rosdep python3-vcstool python3-colcon-common-extensions
}
# Install rosdep/colcon/vcs tooling, initialize the rosdep database on first
# use, and refresh it.
ensure_rosdep_ready() {
  local rosdep_default_list="/etc/ros/rosdep/sources.list.d/20-default.list"
  install_packages_if_missing python3-rosdep python3-vcstool python3-colcon-common-extensions python3-pip build-essential git curl
  if [[ ! -f "${rosdep_default_list}" ]]; then
    info "Initializing rosdep."
    run_sudo rosdep init
  fi
  info "Updating rosdep."
  rosdep update
}
# Clone or refresh the pinned OrbbecSDK_ROS2 checkout under the managed
# workspace. Refuses to touch a checkout that has local modifications.
sync_orbbec_repo() {
mkdir -p "${HOST_WS}/src"
if [[ ! -d "${HOST_REPO}/.git" ]]; then
info "Cloning OrbbecSDK_ROS2 ${ORBBEC_VERSION}."
git clone --branch "${ORBBEC_VERSION}" --depth 1 "${ORBBEC_REPO_URL}" "${HOST_REPO}"
return 0
fi
if [[ -n "$(git -C "${HOST_REPO}" status --porcelain)" ]]; then
die "Managed Orbbec repo at ${HOST_REPO} has local changes. Clean it or remove ${MANAGED_ROOT} before retrying."
fi
info "Refreshing OrbbecSDK_ROS2 checkout."
# Shallow-fetch only the pinned tag, then force-checkout it.
git -C "${HOST_REPO}" fetch --depth 1 origin "refs/tags/${ORBBEC_VERSION}:refs/tags/${ORBBEC_VERSION}"
git -C "${HOST_REPO}" checkout -f "${ORBBEC_VERSION}"
}
# Install the vendor udev rules (so the camera is accessible without root),
# then reload and re-trigger udev for the rules to take effect immediately.
install_orbbec_udev_rules() {
info "Installing Orbbec udev rules."
(
# Subshell keeps the cd from leaking into the rest of the script.
cd "${HOST_REPO}/orbbec_camera/scripts"
run_sudo bash install_udev_rules.sh
)
run_sudo udevadm control --reload-rules
run_sudo udevadm trigger
}
# Check that the host workspace build is usable: the install setup script
# exists and the orbbec_camera package resolves to a populated share dir.
# Returns non-zero on the first failed check.
verify_host_workspace() {
  local resolved_prefix=""
  [[ -f "${HOST_WS}/install/setup.bash" ]] || return 1
  source_ros_setup "${HOST_WS}"
  resolved_prefix="$(ros2 pkg prefix orbbec_camera 2>/dev/null || true)"
  [[ -n "${resolved_prefix}" && -d "${resolved_prefix}/share/orbbec_camera" ]]
}
# The host stamp is current when it exists, records the expected Orbbec SDK
# version, and the built workspace still verifies.
host_stamp_current() {
  [[ -f "${HOST_STAMP}" ]] || return 1
  # shellcheck disable=SC1090
  source "${HOST_STAMP}"
  if [[ "${HOST_ORBBEC_VERSION:-}" != "${ORBBEC_VERSION}" ]]; then
    return 1
  fi
  verify_host_workspace
}
# Persist the prepared Orbbec version and a timestamp into the host stamp
# file, overwriting any previous stamp.
write_host_stamp() {
  local stamp_time
  stamp_time="$(date -Is 2>/dev/null || date)"
  {
    printf 'HOST_ORBBEC_VERSION=%q\n' "${ORBBEC_VERSION}"
    printf 'HOST_STAMPED_AT=%q\n' "${stamp_time}"
  } > "${HOST_STAMP}"
}
# --- Main flow ---------------------------------------------------------------
ensure_locale
ensure_ros2_humble
ensure_rosdep_ready
# Build dependencies for the Orbbec camera driver and its ROS interfaces.
install_packages_if_missing \
libgflags-dev \
nlohmann-json3-dev \
libdw-dev \
libssl-dev \
mesa-utils \
libgl1 \
libgoogle-glog-dev \
"ros-${ROS_DISTRO_DEFAULT}-image-transport" \
"ros-${ROS_DISTRO_DEFAULT}-image-transport-plugins" \
"ros-${ROS_DISTRO_DEFAULT}-compressed-image-transport" \
"ros-${ROS_DISTRO_DEFAULT}-image-publisher" \
"ros-${ROS_DISTRO_DEFAULT}-camera-info-manager" \
"ros-${ROS_DISTRO_DEFAULT}-diagnostic-updater" \
"ros-${ROS_DISTRO_DEFAULT}-diagnostic-msgs" \
"ros-${ROS_DISTRO_DEFAULT}-statistics-msgs" \
"ros-${ROS_DISTRO_DEFAULT}-xacro" \
"ros-${ROS_DISTRO_DEFAULT}-backward-ros"
sync_orbbec_repo
install_orbbec_udev_rules
# Fast path: nothing to do when the stamp is current and no rebuild is forced.
if (( FORCE_REBUILD == 0 )) && host_stamp_current; then
info "Host Orbbec workspace is already prepared. Skipping rebuild."
exit 0
fi
source_ros_setup
info "Installing host workspace rosdep dependencies."
(
cd "${HOST_WS}"
rosdep install --from-paths src --ignore-src -r -y --rosdistro "${ROS_DISTRO_DEFAULT}"
)
info "Building host Orbbec workspace."
(
cd "${HOST_WS}"
# A forced rebuild starts from a clean colcon state.
if (( FORCE_REBUILD )); then
rm -rf build install log
fi
colcon build --event-handlers console_direct+ --cmake-args -DCMAKE_BUILD_TYPE=Release
)
verify_host_workspace || die "Host Orbbec workspace verification failed."
write_host_stamp
info "Host preparation complete."
================================================
FILE: reComputer/scripts/nvblox/scripts/run_demo.sh
================================================
#!/usr/bin/env bash
# run_demo.sh — validate the prepared host/container state, then launch the
# Orbbec + nvblox demo (GUI when X11 is usable, headless otherwise).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../lib/common.sh"
MANAGED_ROOT="${MANAGED_ROOT_DEFAULT}"
HEADLESS=0
# Parse CLI flags.
while (($#)); do
case "$1" in
--managed-root)
shift
MANAGED_ROOT="$1"
;;
--headless)
HEADLESS=1
;;
*)
die "Unknown argument: $1"
;;
esac
shift
done
ensure_supported_user_context
if should_reexec_as_setup_user; then
die "Do not invoke run_demo.sh with sudo directly. Run reComputer run nvblox instead."
fi
require_bootstrapped_managed_root "${MANAGED_ROOT}"
ensure_docker_access
# Prepared-state locations and runtime defaults.
HOST_WS="${MANAGED_ROOT}/ros2_ws"
CONTAINER_WS="${MANAGED_ROOT}/isaac_ros-dev"
CONTAINER_STAMP="${CONTAINER_WS}/.setup-nvbox/container_workspace.env"
IMAGE_STAMP="${MANAGED_ROOT}/.stamps/derived_image.env"
HOST_STAMP="${MANAGED_ROOT}/.stamps/host_workspace.env"
LOG_DIR="${MANAGED_ROOT}/logs"
CONTAINER_NAME="${CONTAINER_NAME_DEFAULT}"
HOST_CAMERA_LOG="${LOG_DIR}/host-camera-$(date '+%Y%m%d-%H%M%S').log"
HOST_CAMERA_PID=""
XHOST_GRANTED=0
LAUNCH_FILE="orbbec_example.launch.py"
USE_GUI=0
EXPECTED_CAMERA_INFO_FRAME="camera_color_optical_frame"
PREPARE_HINT="Run NVBLOX_MODE=prepare NVBLOX_FORCE_REBUILD=1 reComputer run nvblox."
CONTAINER_PREPARE_HINT="Prepared container workspace is invalid. ${PREPARE_HINT}"
# Fail fast when the prepared artifacts are missing entirely.
[[ -f "${HOST_WS}/install/setup.bash" ]] || die "Host workspace is missing at ${HOST_WS}. ${PREPARE_HINT}"
[[ -f "${CONTAINER_WS}/install/setup.bash" ]] || die "Container workspace is missing at ${CONTAINER_WS}. ${PREPARE_HINT}"
[[ -f "${CONTAINER_STAMP}" ]] || die "Container workspace stamp is missing at ${CONTAINER_STAMP}. ${PREPARE_HINT}"
docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1 || die "Derived image ${DERIVED_IMAGE_TAG} is missing. ${PREPARE_HINT}"
# Launch/config artifacts that must exist in the prepared container install.
PREPARED_CONTAINER_REQUIRED_PACKAGE="nvblox_examples_bringup"
PREPARED_CONTAINER_REQUIRED_PATHS=(
"launch/orbbec_transforms.launch.py"
"launch/orbbec_example.launch.py"
"launch/orbbec_debug.launch.py"
"launch/orbbec_nvblox_standalone.launch.py"
"config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
CONTAINER_STATIC_TF_TIMEOUT_SEC=20
# Die unless the derived image exists and its stamp matches the current
# build-context hash (i.e. the image is not stale for this repo state).
validate_prepared_image_state() {
local current_context_hash=""
[[ -f "${IMAGE_STAMP}" ]] || die "Derived image stamp is missing at ${IMAGE_STAMP}. ${PREPARE_HINT}"
docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1 || die "Derived image ${DERIVED_IMAGE_TAG} is missing. ${PREPARE_HINT}"
current_context_hash="$(container_image_context_hash)"
# shellcheck disable=SC1090
source "${IMAGE_STAMP}"
[[ "${STAMP_CONTEXT_HASH:-}" == "${current_context_hash}" ]] || \
die "Derived image ${DERIVED_IMAGE_TAG} is stale for the current repo state. ${PREPARE_HINT}"
info "Prepared derived image context hash: ${STAMP_CONTEXT_HASH}"
info "Prepared derived image stamped at: ${STAMPED_AT:-unknown}"
}
# Die unless the host workspace stamp matches the pinned Orbbec version and
# the built workspace can still resolve the orbbec_camera package.
validate_prepared_host_workspace() {
[[ -f "${HOST_STAMP}" ]] || die "Host workspace stamp is missing at ${HOST_STAMP}. ${PREPARE_HINT}"
[[ -f "${HOST_WS}/install/setup.bash" ]] || die "Host workspace is missing at ${HOST_WS}. ${PREPARE_HINT}"
# shellcheck disable=SC1090
source "${HOST_STAMP}"
[[ "${HOST_ORBBEC_VERSION:-}" == "${ORBBEC_VERSION}" ]] || \
die "Prepared host workspace version is ${HOST_ORBBEC_VERSION:-unknown}, expected ${ORBBEC_VERSION}. ${PREPARE_HINT}"
source_ros_setup "${HOST_WS}"
ros2 pkg prefix orbbec_camera >/dev/null 2>&1 || \
die "Prepared host workspace cannot resolve orbbec_camera. ${PREPARE_HINT}"
info "Prepared host Orbbec version: ${HOST_ORBBEC_VERSION}"
info "Prepared host workspace stamped at: ${HOST_STAMPED_AT:-unknown}"
}
# Die unless the prepared container workspace stamp matches the current spec
# version, build-context hash, and derived image id, and the required launch
# artifacts are installed in the container workspace.
validate_prepared_container_workspace_state() {
local current_context_hash=""
local current_image_id=""
[[ -f "${CONTAINER_WS}/install/setup.bash" ]] || die "Container workspace is missing at ${CONTAINER_WS}. ${PREPARE_HINT}"
[[ -f "${CONTAINER_STAMP}" ]] || die "Container workspace stamp is missing at ${CONTAINER_STAMP}. ${PREPARE_HINT}"
current_context_hash="$(container_image_context_hash)"
current_image_id="$(docker_image_id "${DERIVED_IMAGE_TAG}")"
# shellcheck disable=SC1090
source "${CONTAINER_STAMP}"
[[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" == "${CONTAINER_WORKSPACE_SPEC_VERSION}" ]] || \
die "Prepared container workspace spec is ${STAMP_WORKSPACE_SPEC_VERSION:-unknown}, expected ${CONTAINER_WORKSPACE_SPEC_VERSION}. ${PREPARE_HINT}"
[[ "${STAMP_IMAGE_CONTEXT_HASH:-}" == "${current_context_hash}" ]] || \
die "Prepared container workspace is stale for the current repo state. ${PREPARE_HINT}"
[[ "${STAMP_IMAGE_ID:-}" == "${current_image_id}" ]] || \
die "Prepared container workspace was built against image ${STAMP_IMAGE_ID:-unknown}, expected ${current_image_id}. ${PREPARE_HINT}"
if ! validate_package_install_artifacts "${CONTAINER_WS}" "${PREPARED_CONTAINER_REQUIRED_PACKAGE}" "${PREPARED_CONTAINER_REQUIRED_PATHS[@]}"; then
die "Prepared container install artifacts are missing or invalid. ${PREPARE_HINT}"
fi
info "Prepared container workspace spec: ${STAMP_WORKSPACE_SPEC_VERSION}"
info "Prepared container workspace stamped at: ${STAMPED_AT:-unknown}"
info "Validated prepared container artifacts: ${PREPARED_CONTAINER_REQUIRED_PATHS[*]}"
}
# Run a short-lived container that subscribes to the host camera_info topics
# and verifies both color and depth are visible with the expected frame_id.
# Echoes the captured probe output through info on success; on failure the raw
# output goes to stderr and 1 is returned.
probe_container_camera_visibility() {
local probe_output=""
local probe_args=(
run
--rm
-e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
-e "EXPECTED_CAMERA_INFO_FRAME=${EXPECTED_CAMERA_INFO_FRAME}"
-e "PROBE_TIMEOUT_SECONDS=20"
-v "${CONTAINER_WS}:/workspaces/isaac_ros-dev"
)
append_jetson_container_args probe_args
append_ros_discovery_container_args probe_args
# NOTE: the quoted <<'EOF' heredoc below runs verbatim inside the container.
probe_output="$(
docker_cmd "${probe_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "$(cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
if (( restore_nounset )); then
set -u
fi
python3 - "${EXPECTED_CAMERA_INFO_FRAME}" "${PROBE_TIMEOUT_SECONDS}" <<'PY'
import sys
import time
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import CameraInfo
expected_frame = sys.argv[1]
timeout_seconds = float(sys.argv[2])
class CameraVisibilityProbe(Node):
def __init__(self):
super().__init__('orbbec_container_camera_visibility_probe')
self.frames = {}
self.create_subscription(
CameraInfo,
'/camera/color/camera_info',
self._color_info_callback,
qos_profile_sensor_data)
self.create_subscription(
CameraInfo,
'/camera/depth/camera_info',
self._depth_info_callback,
qos_profile_sensor_data)
def _color_info_callback(self, msg: CameraInfo):
self.frames['color'] = msg.header.frame_id
def _depth_info_callback(self, msg: CameraInfo):
self.frames['depth'] = msg.header.frame_id
def main() -> int:
print('[container-probe] Waiting for host camera_info topics inside the container', flush=True)
rclpy.init(args=None)
node = CameraVisibilityProbe()
executor = SingleThreadedExecutor()
executor.add_node(node)
deadline = time.monotonic() + timeout_seconds
try:
while time.monotonic() < deadline:
executor.spin_once(timeout_sec=0.2)
if 'color' in node.frames and 'depth' in node.frames:
break
missing = []
if 'color' not in node.frames:
missing.append('/camera/color/camera_info')
if 'depth' not in node.frames:
missing.append('/camera/depth/camera_info')
if missing:
print(
'[container-probe] Timed out waiting for: ' + ', '.join(missing),
file=sys.stderr,
flush=True)
return 1
print(f'[container-probe] Observed /camera/color/camera_info frame_id: {node.frames["color"]}', flush=True)
print(f'[container-probe] Observed /camera/depth/camera_info frame_id: {node.frames["depth"]}', flush=True)
if node.frames['color'] != expected_frame:
print(
f'[container-probe] Unexpected /camera/color/camera_info frame_id: {node.frames["color"]} '
f'(expected {expected_frame})',
file=sys.stderr,
flush=True)
return 1
if node.frames['depth'] != expected_frame:
print(
f'[container-probe] Unexpected /camera/depth/camera_info frame_id: {node.frames["depth"]} '
f'(expected {expected_frame})',
file=sys.stderr,
flush=True)
return 1
print('[container-probe] Container camera visibility probe passed.', flush=True)
return 0
finally:
executor.remove_node(node)
node.destroy_node()
rclpy.shutdown()
sys.exit(main())
PY
EOF
)" 2>&1
)" || {
printf '%s\n' "${probe_output}" >&2
return 1
}
# Re-emit each non-empty probe line through the host logger.
while IFS= read -r probe_line; do
[[ -n "${probe_line}" ]] || continue
info "${probe_line}"
done <<< "${probe_output}"
return 0
}
# Verify that the managed static TF chain is queryable from inside the
# container. A disposable container starts the orbbec_transforms launch file
# in the background, then an embedded rclpy probe waits (up to
# CONTAINER_STATIC_TF_TIMEOUT_SEC) for odom <- base_link, odom <- camera_link
# and odom <- camera_color_optical_frame to become available. On failure the
# captured output (including a launch-log tail) is dumped to stderr and 1 is
# returned; on success the output is replayed through info().
probe_container_static_tf() {
local probe_output=""
# Base docker-run arguments; append_* helpers add Jetson runtime flags and
# the shared ROS discovery environment.
local probe_args=(
run
--rm
-e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
-e "PROBE_TIMEOUT_SECONDS=${CONTAINER_STATIC_TF_TIMEOUT_SEC}"
-v "${CONTAINER_WS}:/workspaces/isaac_ros-dev"
)
append_jetson_container_args probe_args
append_ros_discovery_container_args probe_args
# The quoted heredoc below runs verbatim inside the container: it sources
# the workspace, launches the transforms, runs the Python TF probe, always
# terminates the launch process (escalating INT/TERM/KILL), and propagates
# the probe's exit status.
probe_output="$(
docker_cmd "${probe_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "$(cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
if (( restore_nounset )); then
set -u
fi
LOG_FILE="/tmp/orbbec-tf-probe.log"
LAUNCH_PID=""
LAUNCH_STOP_TIMEOUT=8
terminate_launch() {
local signal=""
local deadline=0
if [[ -z "${LAUNCH_PID}" ]] || ! kill -0 "${LAUNCH_PID}" 2>/dev/null; then
LAUNCH_PID=""
return 0
fi
for signal in INT TERM KILL; do
kill "-${signal}" "${LAUNCH_PID}" 2>/dev/null || true
deadline=$((SECONDS + LAUNCH_STOP_TIMEOUT))
while ((SECONDS < deadline)); do
if ! kill -0 "${LAUNCH_PID}" 2>/dev/null; then
wait "${LAUNCH_PID}" 2>/dev/null || true
LAUNCH_PID=""
return 0
fi
sleep 1
done
done
wait "${LAUNCH_PID}" 2>/dev/null || true
LAUNCH_PID=""
}
cleanup() {
terminate_launch
}
trap cleanup EXIT INT TERM
ros2 launch nvblox_examples_bringup orbbec_transforms.launch.py >"${LOG_FILE}" 2>&1 &
LAUNCH_PID=$!
status=0
python3 - "${PROBE_TIMEOUT_SECONDS}" <<'PY' || status=$?
import sys
import time
import rclpy
from rclpy.duration import Duration
from rclpy.time import Time
from tf2_ros import Buffer, TransformListener
timeout_seconds = float(sys.argv[1])
required_transforms = [
('odom', 'base_link'),
('odom', 'camera_link'),
('odom', 'camera_color_optical_frame'),
]
def main() -> int:
print('[container-tf-probe] Waiting for managed static TF chain inside the container', flush=True)
rclpy.init(args=None)
node = rclpy.create_node('orbbec_container_tf_probe')
tf_buffer = Buffer(cache_time=Duration(seconds=timeout_seconds))
tf_listener = TransformListener(tf_buffer, node, spin_thread=False)
deadline = time.monotonic() + timeout_seconds
last_missing = []
try:
while time.monotonic() < deadline:
rclpy.spin_once(node, timeout_sec=0.2)
last_missing = []
for target_frame, source_frame in required_transforms:
if not tf_buffer.can_transform(
target_frame,
source_frame,
Time(),
timeout=Duration(seconds=0.1)):
last_missing.append(f'{target_frame} <- {source_frame}')
if not last_missing:
print(
'[container-tf-probe] TF probe passed for odom <- base_link, '
'odom <- camera_link, odom <- camera_color_optical_frame',
flush=True)
return 0
print(
'[container-tf-probe] TF probe failed. Missing transforms: '
+ ', '.join(last_missing or ['unknown']),
file=sys.stderr,
flush=True)
return 1
finally:
del tf_listener
node.destroy_node()
rclpy.shutdown()
sys.exit(main())
PY
if (( status != 0 )); then
printf '[container-tf-probe] Relevant launch log tail:\n'
tail -n 40 "${LOG_FILE}" 2>/dev/null || true
fi
terminate_launch
exit "${status}"
EOF
)" 2>&1
)" || {
printf '%s\n' "${probe_output}" >&2
return 1
}
# Success: re-emit every non-empty probe line through the standard logger.
while IFS= read -r probe_line; do
[[ -n "${probe_line}" ]] || continue
info "${probe_line}"
done <<< "${probe_output}"
return 0
}
# Ensure the Orbbec Gemini2 camera is usable before launching the host
# driver. Cleans up leftover Gemini2 processes, logs the current device
# state, then dispatches on gemini2_device_state():
#   ready                -> proceed
#   usb_missing          -> abort (camera not connected)
#   usb_present_no_video -> attempt one automatic recovery, abort on failure
#   anything else        -> abort as unexpected
ensure_gemini2_ready_for_run() {
local gemini2_state=""
# Best-effort cleanup; a failure here must not stop the run.
cleanup_residual_gemini2_processes "pre-run Gemini2 cleanup" || true
log_gemini2_device_state "Gemini2 device state before host launch"
gemini2_state="$(gemini2_device_state)"
case "${gemini2_state}" in
ready)
return 0
;;
usb_missing)
die "Gemini2 is not connected. Current device state: usb_missing."
;;
usb_present_no_video)
warn "Gemini2 USB device is present, but no /dev/video nodes were found before host launch. Attempting one automatic recovery."
if ! recover_gemini2_device "pre-run host launch" 0 1 1; then
# Re-query the state so the abort message reflects reality.
gemini2_state="$(gemini2_device_state)"
die "Gemini2 USB device is present, but video nodes were not recovered before launch. Current device state: ${gemini2_state}."
fi
;;
*)
die "Unexpected Gemini2 device state before host launch: ${gemini2_state}"
;;
esac
}
# Stop the background host Gemini2 driver (tracked in HOST_CAMERA_PID),
# escalating INT -> TERM -> KILL with a per-signal timeout, then clean up
# stray Gemini2 processes and attempt a best-effort device recovery when the
# USB device survived but its /dev/video nodes disappeared.
stop_host_camera_driver() {
local signal=""
local deadline=0
if [[ -n "${HOST_CAMERA_PID}" ]] && kill -0 "${HOST_CAMERA_PID}" 2>/dev/null; then
info "Stopping host Gemini2 driver (pid=${HOST_CAMERA_PID})."
for signal in INT TERM KILL; do
kill "-${signal}" "${HOST_CAMERA_PID}" 2>/dev/null || true
deadline=$((SECONDS + GEMINI2_SIGNAL_TIMEOUT_SECONDS))
while ((SECONDS < deadline)); do
if ! kill -0 "${HOST_CAMERA_PID}" 2>/dev/null; then
# Process exited: leave both the wait loop and the signal loop.
break 2
fi
sleep 1
done
done
fi
HOST_CAMERA_PID=""
cleanup_residual_gemini2_processes "post-run Gemini2 cleanup" || true
# If the device lost its video nodes, try a full recovery; failure is only
# warned about since the run itself is already over.
if [[ "$(gemini2_device_state)" == "usb_present_no_video" ]]; then
warn "Gemini2 USB device is still present, but /dev/video nodes are missing after cleanup. Attempting full recovery."
if ! recover_gemini2_device "post-run cleanup" 0 1 0; then
warn "Gemini2 full recovery did not restore /dev/video nodes after cleanup."
fi
fi
log_gemini2_device_state "Gemini2 device state after cleanup"
}
# Release runtime resources on any exit path: stop the host camera driver
# and, if X11 access was granted to root earlier (XHOST_GRANTED), revoke it.
cleanup() {
stop_host_camera_driver
if (( XHOST_GRANTED )); then
xhost -si:localuser:root >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT INT TERM
# Start the host-side Gemini2 camera driver in the background, recording its
# PID in HOST_CAMERA_PID and appending its output to HOST_CAMERA_LOG.
launch_host_camera() {
local launch_cmd
ensure_gemini2_ready_for_run
# NOTE(review): the heredoc that should populate launch_cmd appears to have
# been lost ("cat <>" is not valid shell) — the actual launch command body
# is missing here. Restore it from the upstream repository before relying
# on this function.
launch_cmd=$(
cat <>"${HOST_CAMERA_LOG}" 2>&1 &
HOST_CAMERA_PID=$!
info "Host camera log: ${HOST_CAMERA_LOG}"
}
# Wait (up to 90 s, fixed inside the embedded probe) for the host camera
# streams to become ready: both camera_info topics and both image topics
# must publish, and both camera_info frame_ids must match
# EXPECTED_CAMERA_INFO_FRAME. On failure the probe output is dumped to
# stderr and 1 is returned; on success it is replayed through info().
wait_for_camera_streams_ready() {
local readiness_output=""
# Run the probe in the host ROS environment (not in a container).
source_ros_setup "${HOST_WS}"
readiness_output="$(
python3 - "${EXPECTED_CAMERA_INFO_FRAME}" <<'PY' 2>&1
import sys
import time
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import CameraInfo, Image
expected_frame = sys.argv[1]
timeout_seconds = 90.0
class CameraReadinessProbe(Node):
def __init__(self):
super().__init__('orbbec_host_readiness_probe')
self.frames = {}
self.received = {
'color_info': False,
'depth_info': False,
'color_image': False,
'depth_image': False,
}
self.create_subscription(
CameraInfo,
'/camera/color/camera_info',
self._color_info_callback,
qos_profile_sensor_data)
self.create_subscription(
CameraInfo,
'/camera/depth/camera_info',
self._depth_info_callback,
qos_profile_sensor_data)
self.create_subscription(
Image,
'/camera/color/image_raw',
self._color_image_callback,
qos_profile_sensor_data)
self.create_subscription(
Image,
'/camera/depth/image_raw',
self._depth_image_callback,
qos_profile_sensor_data)
def _color_info_callback(self, msg: CameraInfo):
self.received['color_info'] = True
self.frames['color_info'] = msg.header.frame_id
def _depth_info_callback(self, msg: CameraInfo):
self.received['depth_info'] = True
self.frames['depth_info'] = msg.header.frame_id
def _color_image_callback(self, msg: Image):
self.received['color_image'] = True
def _depth_image_callback(self, msg: Image):
self.received['depth_image'] = True
def main():
rclpy.init(args=None)
node = CameraReadinessProbe()
executor = SingleThreadedExecutor()
executor.add_node(node)
deadline = time.monotonic() + timeout_seconds
try:
while time.monotonic() < deadline:
executor.spin_once(timeout_sec=0.2)
if all(node.received.values()):
break
missing = [name for name, received in node.received.items() if not received]
if missing:
print(
'Host stream readiness probe timed out waiting for: ' + ', '.join(missing),
file=sys.stderr)
return 1
color_frame = node.frames.get('color_info', '')
depth_frame = node.frames.get('depth_info', '')
print(f'/camera/color/camera_info frame_id={color_frame}')
print(f'/camera/depth/camera_info frame_id={depth_frame}')
if color_frame != expected_frame:
print(
f'Unexpected /camera/color/camera_info frame_id: {color_frame} '
f'(expected {expected_frame})',
file=sys.stderr)
return 1
if depth_frame != expected_frame:
print(
f'Unexpected /camera/depth/camera_info frame_id: {depth_frame} '
f'(expected {expected_frame})',
file=sys.stderr)
return 1
return 0
finally:
executor.remove_node(node)
node.destroy_node()
rclpy.shutdown()
sys.exit(main())
PY
)" || {
printf '%s\n' "${readiness_output}" >&2
return 1
}
# Success: re-emit every non-empty probe line through the standard logger.
while IFS= read -r readiness_line; do
[[ -n "${readiness_line}" ]] || continue
info "${readiness_line}"
done <<< "${readiness_output}"
return 0
}
# Verify, inside a disposable container, that the prepared workspace can
# resolve the nvblox_examples_bringup package, that the stamped workspace
# spec version matches CONTAINER_WORKSPACE_SPEC_VERSION, and that the
# selected launch file (LAUNCH_FILE) exists in the installed share tree.
# All output is discarded; callers use only the exit status.
validate_container_launch_artifact() {
local validate_cmd=""
local validate_args=(
run
--rm
-e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
-e "NVBLOX_LAUNCH_FILE=${LAUNCH_FILE}"
-e "EXPECTED_WORKSPACE_SPEC_VERSION=${CONTAINER_WORKSPACE_SPEC_VERSION}"
-v "${CONTAINER_WS}:/workspaces/isaac_ros-dev"
)
append_jetson_container_args validate_args
append_ros_discovery_container_args validate_args
# Quoted heredoc: executed verbatim inside the container.
validate_cmd=$(
cat <<'EOF'
set -euo pipefail
restore_nounset=0
if [[ $- == *u* ]]; then
restore_nounset=1
set +u
fi
source "/opt/ros/${ROS_DISTRO}/setup.bash"
source "/workspaces/isaac_ros-dev/install/setup.bash"
source "/workspaces/isaac_ros-dev/.setup-nvbox/container_workspace.env"
if (( restore_nounset )); then
set -u
fi
PACKAGE_PREFIX="$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)"
[[ -n "${PACKAGE_PREFIX}" ]]
[[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" == "${EXPECTED_WORKSPACE_SPEC_VERSION}" ]]
[[ -f "${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}" ]]
EOF
)
info "Validating prepared launch artifact inside the container."
docker_cmd "${validate_args[@]}" "${DERIVED_IMAGE_TAG}" bash -lc "${validate_cmd}" >/dev/null 2>&1
}
# Decide whether the demo can run with a GUI. Checks X11 prerequisites in
# order (DISPLAY set, X socket present, xhost available, grant succeeds);
# the first failure warns and falls back to headless mode (HEADLESS=1).
# When everything succeeds, root is granted X access (XHOST_GRANTED=1),
# USE_GUI is enabled and the GUI launch file is selected.
configure_display() {
local fallback_reason=""
if (( HEADLESS )); then
return 0
fi
if [[ -z "${DISPLAY:-}" ]]; then
fallback_reason="DISPLAY is not set."
elif [[ ! -d /tmp/.X11-unix ]]; then
fallback_reason="/tmp/.X11-unix is missing."
elif ! command -v xhost >/dev/null 2>&1; then
fallback_reason="xhost is not available."
elif ! xhost +si:localuser:root >/dev/null 2>&1; then
fallback_reason="Failed to grant X11 access for the container."
else
# All prerequisites satisfied: enable the GUI path.
XHOST_GRANTED=1
USE_GUI=1
LAUNCH_FILE="orbbec_example.launch.py"
return 0
fi
warn "${fallback_reason} Falling back to headless mode."
HEADLESS=1
return 0
}
# --- Main run sequence ---
# Select the headless launch file up front; configure_display may itself
# fall back to headless, so the check is repeated afterwards.
if (( HEADLESS )); then
LAUNCH_FILE="orbbec_debug.launch.py"
fi
configure_display
if (( HEADLESS )); then
LAUNCH_FILE="orbbec_debug.launch.py"
fi
# Set up and log the shared ROS discovery environment used by both the host
# driver and the container.
enable_managed_fastdds_udp_runtime "${MANAGED_ROOT}"
export_effective_ros_discovery_env
log_ros_discovery_env "Host ROS discovery env"
info "Container ROS discovery env: $(ros_discovery_env_summary)"
# Validate every prepared artifact before touching hardware.
validate_prepared_image_state
validate_prepared_host_workspace
validate_prepared_container_workspace_state
if ! validate_container_launch_artifact; then
die "${CONTAINER_PREPARE_HINT}"
fi
# Start the host camera and gate the container launch on three probes:
# stream readiness, container topic visibility, and the static TF chain.
launch_host_camera
if ! wait_for_camera_streams_ready; then
if ! kill -0 "${HOST_CAMERA_PID}" 2>/dev/null; then
die "Host Gemini2 driver exited before camera streams became ready. Check ${HOST_CAMERA_LOG}."
fi
die "Camera stream readiness probe failed. Check ${HOST_CAMERA_LOG}."
fi
info "Camera streams and frame IDs are ready."
if ! probe_container_camera_visibility; then
die "Host camera streams are ready, but the container cannot discover host camera topics. Check the ROS discovery environment shown above, or run bash reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh for a discovery snapshot."
fi
if ! probe_container_static_tf; then
die "Host camera streams and container camera visibility are ready, but the managed static TF chain is not queryable inside the container."
fi
# Remove any stale container with the same name, then assemble the final
# docker-run invocation.
docker_cmd rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true
DOCKER_ARGS=(
run
--rm
--name "${CONTAINER_NAME}"
-e "ROS_DISTRO=${ROS_DISTRO_DEFAULT}"
-e "NVBLOX_LAUNCH_FILE=${LAUNCH_FILE}"
-e "EXPECTED_WORKSPACE_SPEC_VERSION=${CONTAINER_WORKSPACE_SPEC_VERSION}"
-v "${CONTAINER_WS}:/workspaces/isaac_ros-dev"
-v "${PROJECT_ROOT}/docker/launch_nvblox.sh:/opt/nvblox/bin/launch_nvblox.sh:ro"
)
append_jetson_container_args DOCKER_ARGS
append_ros_discovery_container_args DOCKER_ARGS
# Allocate a TTY only when we actually have one.
if [[ -t 0 && -t 1 ]]; then
DOCKER_ARGS+=(-it)
else
DOCKER_ARGS+=(-i)
fi
# GUI mode forwards the X display into the container.
if (( USE_GUI )); then
DOCKER_ARGS+=(
-e "DISPLAY=${DISPLAY}"
-e "QT_X11_NO_MITSHM=1"
-v /tmp/.X11-unix:/tmp/.X11-unix:rw
)
else
info "Starting in headless mode with ${LAUNCH_FILE}."
fi
info "Launching NVBlox demo in container ${CONTAINER_NAME}."
docker_cmd "${DOCKER_ARGS[@]}" "${DERIVED_IMAGE_TAG}" bash /opt/nvblox/bin/launch_nvblox.sh
================================================
FILE: reComputer/scripts/nvblox/start_nvblox_demo.sh
================================================
#!/usr/bin/env bash
# Entry point for the NVBlox demo: prepares the base image and workspaces,
# then runs the demo. Defaults and flag state are declared here; shared
# helpers come from lib/common.sh.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/lib/common.sh"
# Default mode runs both phases; --prepare-only / --run-only narrow it.
MODE_PREPARE=1
MODE_RUN=1
FORCE_REBUILD=0
HEADLESS=0
# MANAGED_ROOT may be overridden via the environment.
MANAGED_ROOT="${MANAGED_ROOT:-${MANAGED_ROOT_DEFAULT}}"
# Preserve the original argv so the script can re-exec itself as the setup
# user with identical arguments.
ORIGINAL_ARGS=("$@")
# Ensure a supported NVBlox base image exists locally. If one is already
# loaded, nothing is downloaded; otherwise the image archive is fetched from
# the configured OneDrive share (via onedrive_downloader.py), loaded with
# `docker load`, and the result re-checked. Dies on any failure.
ensure_base_image() {
local base_image=""
local share_url=""
local archive_name=""
local cache_dir=""
local archive_path=""
assert_command python3
ensure_docker_access
# Fast path: a supported base image is already present locally.
base_image="$(select_base_image || true)"
if [[ -n "${base_image}" ]]; then
info "Base image already present: ${base_image}. Skipping OneDrive download and docker load."
return 0
fi
# The downloader needs requests/tqdm; install them only if missing.
install_packages_if_missing python3-requests python3-tqdm
share_url="$(resolve_nvblox_image_share_url)"
archive_name="$(resolve_nvblox_image_archive_name)"
cache_dir="$(resolve_nvblox_image_cache_dir)"
archive_path="$(resolve_nvblox_image_archive_path "${cache_dir}" "${archive_name}")"
mkdir -p "${cache_dir}"
# Drop leftovers from interrupted downloads before resuming.
cleanup_nvblox_partial_downloads "${cache_dir}"
info "Ensuring NVBlox base image archive at ${archive_path}"
python3 "${SCRIPT_DIR}/onedrive_downloader.py" "${share_url}" --filename "${archive_name}" --output-dir "${cache_dir}"
[[ -f "${archive_path}" ]] || die "Base image archive was not created at ${archive_path}."
info "Loading Docker image archive ${archive_path}"
docker_cmd load -i "${archive_path}"
# Re-detect after load to confirm a supported image actually arrived.
base_image="$(select_base_image || true)"
[[ -n "${base_image}" ]] || die "docker load finished, but no supported local base image was detected. Expected $(acceptable_base_image_hint)."
info "Base image ready: ${base_image}"
}
# Print the CLI help text (flags and environment overrides) to stdout.
usage() {
cat <<'EOF'
Usage:
./start_nvblox_demo.sh
./start_nvblox_demo.sh --prepare-only
./start_nvblox_demo.sh --run-only
./start_nvblox_demo.sh --force-rebuild
./start_nvblox_demo.sh --headless
Environment:
MANAGED_ROOT Override managed workspace root. Default: ~/nvblox_demo
NVBLOX_IMAGE_SHARE_URL Override the default OneDrive share link
NVBLOX_IMAGE_ARCHIVE_NAME Override the downloaded archive filename
NVBLOX_IMAGE_CACHE_DIR Override the Docker archive cache directory
EOF
}
# Parse command-line flags; unknown arguments abort the script.
while (($#)); do
case "$1" in
--prepare-only)
MODE_PREPARE=1
MODE_RUN=0
;;
--run-only)
MODE_PREPARE=0
MODE_RUN=1
;;
--force-rebuild)
FORCE_REBUILD=1
;;
--headless)
HEADLESS=1
;;
-h|--help)
usage
exit 0
;;
*)
die "Unknown argument: $1"
;;
esac
shift
done
# Sanity-check the selected modes, then drive the prepare and run phases.
if (( MODE_PREPARE == 0 && MODE_RUN == 0 )); then
die "Nothing to do. Use the default mode, --prepare-only, or --run-only."
fi
ensure_supported_user_context
# Re-exec under the designated setup user when necessary, preserving argv.
if should_reexec_as_setup_user; then
printf '[reComputer][nvblox] Re-entering as %s.\n' "${SETUP_USER_NAME}" >&2
reexec_as_setup_user "${SCRIPT_DIR}/start_nvblox_demo.sh" "${ORIGINAL_ARGS[@]}"
fi
guard_managed_root_path "${MANAGED_ROOT}"
# Prepare mode may create/repair the managed root; run-only requires it to
# already be bootstrapped.
if (( MODE_PREPARE )); then
repair_managed_root_ownership "${MANAGED_ROOT}"
bootstrap_managed_root "${MANAGED_ROOT}"
else
require_bootstrapped_managed_root "${MANAGED_ROOT}"
fi
mkdir -p "${MANAGED_ROOT}/logs"
RUN_LOG="${MANAGED_ROOT}/logs/run-$(date '+%Y%m%d-%H%M%S').log"
# Mirror all further output (stdout and stderr) into the run log.
exec > >(tee -a "${RUN_LOG}") 2>&1
info "Managed root: ${MANAGED_ROOT}"
info "Run log: ${RUN_LOG}"
info "Mode: prepare=${MODE_PREPARE} run=${MODE_RUN} force_rebuild=${FORCE_REBUILD} headless=${HEADLESS}"
if (( MODE_PREPARE )); then
ensure_base_image
fi
# Preflight checks run for whichever phases are enabled.
PREFLIGHT_ARGS=(--managed-root "${MANAGED_ROOT}")
if (( MODE_PREPARE )); then
PREFLIGHT_ARGS+=(--prepare)
fi
if (( MODE_RUN )); then
PREFLIGHT_ARGS+=(--run)
fi
bash "${SCRIPT_DIR}/scripts/preflight.sh" "${PREFLIGHT_ARGS[@]}"
# Prepare host and container workspaces, then run the demo.
if (( MODE_PREPARE )); then
PREPARE_ARGS=(--managed-root "${MANAGED_ROOT}")
if (( FORCE_REBUILD )); then
PREPARE_ARGS+=(--force-rebuild)
fi
bash "${SCRIPT_DIR}/scripts/prepare_host.sh" "${PREPARE_ARGS[@]}"
bash "${SCRIPT_DIR}/scripts/prepare_container.sh" "${PREPARE_ARGS[@]}"
fi
if (( MODE_RUN )); then
RUN_ARGS=(--managed-root "${MANAGED_ROOT}")
if (( HEADLESS )); then
RUN_ARGS+=(--headless)
fi
bash "${SCRIPT_DIR}/scripts/run_demo.sh" "${RUN_ARGS[@]}"
fi
info "Done."
================================================
FILE: reComputer/scripts/ollama/clean.sh
================================================
#!/bin/bash
# Remove the locally built ollama container image and optionally delete the
# downloaded ollama model data.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Look up the locally tagged ollama image via jetson-containers' autotag.
img_tag=$("$JETSON_REPO_PATH/autotag" -p local ollama)
# Capture autotag's exit status immediately: $? is overwritten by the next
# command, so the original script reported the status of the [ ] test
# instead of autotag's actual error code.
autotag_status=$?
if [ $autotag_status -eq 0 ]; then
echo "Found Image successfully."
sudo docker rmi "$img_tag"
else
echo "[warn] Found Image failed with error code $autotag_status. skip delete Image."
fi
# Optionally wipe the ollama model data directory.
read -p "Delete all data for ollama? (y/n): " choice
if [[ $choice == "y" || $choice == "Y" ]]; then
echo "Delete=> $JETSON_REPO_PATH/data/models/ollama/"
sudo rm -rf "$JETSON_REPO_PATH/data/models/ollama/"
echo "Clean Data Done."
else
echo "[warn] Skip Clean Data."
fi
================================================
FILE: reComputer/scripts/ollama/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/ollama/init.sh
================================================
#!/bin/bash
# Prepare the runtime environment for the ollama example: validate the host
# against config.yaml, then clone and install dusty-nv/jetson-containers if
# it is not already present.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
# Quote the path test and guard the cd calls: the original unquoted,
# unchecked versions could clone or install into the wrong directory when a
# cd failed or the path contained spaces.
if [ -d "$JETSON_REPO_PATH" ]; then
echo "jetson-ai-lab existed."
else
echo "jetson-ai-lab does not installed. start init..."
cd "$BASE_PATH/" || exit 1
git clone --depth=1 "$BASE_JETSON_LAB_GIT"
cd "$JETSON_REPO_PATH" || exit 1
bash install.sh
fi
================================================
FILE: reComputer/scripts/ollama/run.sh
================================================
#!/bin/bash
# Start the ollama server container through jetson-containers.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Abort if the jetson-containers checkout is missing: the original unchecked
# cd let ./run.sh execute from whatever the current directory happened to be.
cd "$JETSON_REPO_PATH" || exit 1
# try stop old server
docker rm -f ollama
# run Front-end
./run.sh $(./autotag ollama)
# user only can access with http://ip:11434
================================================
FILE: reComputer/scripts/parler-tts/clean.sh
================================================
#!/bin/bash
# Remove the parler-tts docker image for the detected L4T version.
# getVersion.sh sets IMAGE_TAG (the original referenced the undefined
# variable TAG_IMAGE, so the rmi never matched an image).
source ./getVersion.sh
# The repository name is feiticeir0/parler_tts (underscore), matching run.sh
# and the README; the original used "parler-tts" with a hyphen.
sudo docker rmi feiticeir0/parler_tts:${IMAGE_TAG}
================================================
FILE: reComputer/scripts/parler-tts/getVersion.sh
================================================
#!/bin/bash
# Detect the L4T (Jetson Linux) release and derive IMAGE_TAG for the
# prebuilt parler-tts docker images. Exits non-zero on unsupported
# architectures or untested L4T versions.
# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh
# and llama-factory init script
# we only have images for these - 36.2.0 works on 36.3.0
# NOTE: bash array elements are whitespace-separated. The original used
# commas, which became part of the elements ("35.3.1,") so every version
# except the last one failed the allowed-list check below.
L4T_VERSIONS=("35.3.1" "35.4.1" "36.2.0" "36.3.0")
ARCH=$(uname -i)
if [ "$ARCH" = "aarch64" ]; then
# Prefer /etc/nv_tegra_release; fall back to dpkg metadata when absent.
L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release 2>/dev/null)
if [ -z "$L4T_VERSION_STRING" ]; then
L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
L4T_REVISION=${L4T_VERSION_ARRAY[1]}
else
# Example line: "# R36 (release), REVISION: 3.0, ..."
L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
fi
L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
L4T_REVISION_MINOR=${L4T_REVISION:2:1}
L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
IMAGE_TAG=$L4T_VERSION
elif [ "$ARCH" != "x86_64" ]; then
echo "unsupported architecture: $ARCH"
exit 1
fi
# Abort when the detected version is not one we ship images for.
if [[ ! " ${L4T_VERSIONS[@]} " =~ " ${L4T_VERSION} " ]]; then
echo "L4T_VERSION is not in the allowed versions list. Exiting."
exit 1
fi
# All 36.x releases share the 36.2.0 image tag.
if [ "${L4T_RELEASE}" -eq 36 ]; then
IMAGE_TAG="36.2.0"
fi
================================================
FILE: reComputer/scripts/parler-tts/init.sh
================================================
#!/bin/bash
# Create the host-side models cache directory mounted into the parler-tts
# container (used as HF_HOME so model downloads persist between runs).
echo "Creating models directory at /home/$USER/models"
# -p makes the script idempotent: the original plain mkdir errored on every
# run after the first because the directory already existed.
mkdir -p /home/$USER/models
================================================
FILE: reComputer/scripts/parler-tts/readme.md
================================================
# Parler TTS Mini: Expresso
Parler-TTS Mini: Expresso is a fine-tuned version of Parler-TTS Mini v0.1 on the Expresso dataset. It is a lightweight text-to-speech (TTS) model that can generate high-quality, natural sounding speech. Compared to the original model, Parler-TTS Expresso provides superior control over emotions (happy, confused, laughing, sad) and consistent voices (Jerry, Thomas, Elisabeth, Talia).
[You can get more information on HuggingFace](https://huggingface.co/parler-tts/parler-tts-mini-expresso)
![Gradio Interface](audio1.png)
![Gradio Interface result](audio2.png)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Audio speakers
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
### Method 1
##### If you're running inside your reComputer
1. Type the following command in a terminal
```bash
reComputer run parler-tts
```
2. Open a web browser and go to [http://localhost:7860](http://localhost:7860)
3. A Gradio interface will appear with two text boxes
1. The first for you to write the text that will be converted to audio
2. A second one for you to describe the speaker: Male/Female, tone, pitch, mood, etc.. See the examples in Parler-tts page.
4. When you press submit, after a while, the audio will appear on the right box. You can also download the file if you want.
### Method 2
##### If you want to connect remotely with ssh to the reComputer
1. Connect using SSH but redirecting the 7860 port
```bash
ssh -L 7860:localhost:7860 <username>@<reComputer_IP>
```
2. Type the following command in a terminal
```bash
reComputer run parler-tts
```
3. Open a web browser (on your machine) and go to [http://localhost:7860](http://localhost:7860)
4. The same instructions above.
## Manual Run
If you want to run the docker image outside jetson-examples, here's the command:
```bash
docker run --rm -p 7860:7860 --runtime=nvidia -v ${MODELS_DIR}:/app feiticeir0/parler_tts:r36.2.0
```
**MODELS_DIR** is a directory where HuggingFace will place the models downloaded from its hub. If you want to run the image several times, the code will only download the model once, if that directory stays the same.
This is controlled by an environment variable called HF_HOME.
[More info about HF environment variables](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables)
================================================
FILE: reComputer/scripts/parler-tts/run.sh
================================================
#!/bin/bash
# Launch the parler-tts Gradio server in docker, caching HuggingFace models
# in $MODELS_DIR on the host (port 7860).
MODELS_DIR=/home/$USER/models
# get L4T version
# it exports a variable IMAGE_TAG
source ./getVersion.sh
# pull docker image
echo "docker pull feiticeir0/parler_tts:${IMAGE_TAG}"
docker run \
--rm \
-p 7860:7860 \
--runtime=nvidia \
-v "${MODELS_DIR}:/app" \
feiticeir0/parler_tts:${IMAGE_TAG}
# NOTE: the original used $(MODELS_DIR), which is command substitution in
# bash (it tried to execute "MODELS_DIR" and produced an empty volume spec);
# ${MODELS_DIR} is the intended variable expansion.
================================================
FILE: reComputer/scripts/qwen3.5-4b/Dockerfile.jetson
================================================
# Jetson Orin (sm_87) llama.cpp inference image
# Build flow is maintained outside this repo; this file is kept here as the
# reference runtime image definition used by the demo script.
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
# libgomp1: OpenMP runtime required by llama.cpp; curl: used by HEALTHCHECK.
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libgomp1 curl && \
rm -rf /var/lib/apt/lists/*
# Prebuilt llama.cpp binaries and shared libraries copied from the external
# build's dist/ output.
COPY dist/bin/llama-server /usr/local/bin/
COPY dist/bin/llama-cli /usr/local/bin/
RUN mkdir -p /usr/local/lib/llama
COPY dist/lib/ /usr/local/lib/llama/
# Register the llama library directory with the dynamic linker.
RUN echo "/usr/local/lib/llama" > /etc/ld.so.conf.d/llama.conf && ldconfig
# CUDA is provided by the Jetson container runtime mounts at run time.
ENV PATH=/usr/local/cuda/bin:$PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/lib/llama
# GGUF models are bind-mounted here by the demo script.
VOLUME ["/models"]
WORKDIR /models
# llama-server reads its listen address/port from these variables.
ENV LLAMA_ARG_HOST=0.0.0.0
ENV LLAMA_ARG_PORT=8080
EXPOSE 8080
HEALTHCHECK --interval=30s --timeout=5s \
CMD curl -f http://localhost:8080/health || exit 1
ENTRYPOINT ["llama-server"]
================================================
FILE: reComputer/scripts/qwen3.5-4b/README.md
================================================
# Jetson-Example: Run Qwen3.5-4B on NVIDIA Jetson
This example runs **Qwen3.5-4B** on Jetson Orin with **llama.cpp** and exposes an OpenAI-compatible API server.
It uses:
- a prebuilt Docker image archive imported locally on first run
- the `unsloth/Qwen3.5-4B-GGUF` model in `Q4_K_M` format
Supported JetPack/L4T targets:
- JetPack 6.1 -> L4T 36.3.0
- JetPack 6.2 -> L4T 36.4.0
- JetPack 6.2.1 -> L4T 36.4.3 / 36.4.4
Test status:
- validated on JetPack 6.2
- expected to work on JetPack 6.1 to 6.2.1
## Getting Started
### Prerequisites
- NVIDIA Jetson Orin device
- Docker installed and available
- `aria2` installed
### Installation
PyPI:
```sh
pip install jetson-examples
```
GitHub:
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Usage
Start the demo:
```sh
reComputer run qwen3.5-4b
```
The first run downloads the image archive and model, then starts the server on:
```text
http://127.0.0.1:8080
```
Check the model list:
```sh
curl http://127.0.0.1:8080/v1/models
```
Chat via OpenAI-compatible API:
```sh
curl http://127.0.0.1:8080/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "qwen",
"messages": [{"role": "user", "content": "Hello!"}],
"max_tokens": 512
}'
```
Python example:
```python
from openai import OpenAI
client = OpenAI(base_url="http://127.0.0.1:8080/v1", api_key="none")
response = client.chat.completions.create(
model="qwen",
messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```
## Environment Variables
- `QWEN35_PORT`: host port, default `8080`
- `QWEN35_CTX_SIZE`: context length, default `8192`
- `QWEN35_GPU_LAYERS`: override automatic GPU layer selection
- `QWEN35_MODELS_DIR`: model cache directory, default `$HOME/models`
## Cleanup
Stop and remove the container:
```sh
reComputer clean qwen3.5-4b
```
The downloaded image and model cache are kept for faster startup next time.
================================================
FILE: reComputer/scripts/qwen3.5-4b/clean.sh
================================================
#!/bin/bash
# Stop and remove the qwen3.5-4b container. The Docker image and the
# downloaded model cache are intentionally kept for a fast next startup.
set -euo pipefail
CONTAINER_NAME="qwen3.5-4b"
# Verify docker is installed and usable by the current user. When group
# membership is the only problem, offer to add the user to the docker
# group; every failure path exits non-zero with guidance.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
# Daemon reachable: nothing else to do.
if docker info >/dev/null 2>&1; then
return 0
fi
# Already in the docker group yet daemon unreachable -> daemon problem.
if id -nG "$USER" | grep -qw docker; then
echo "Current user is already in docker group, but docker is still unavailable."
echo "Please make sure Docker daemon is running, for example:"
echo "sudo systemctl enable --now docker"
exit 1
fi
echo "Current user has no docker permission."
read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
case "$choice" in
y|Y)
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
if ! getent group docker >/dev/null 2>&1; then
sudo groupadd docker
fi
sudo usermod -aG docker "$USER"
echo "Added $USER to docker group."
echo "Please log out and log back in (or reboot), then rerun:"
echo "reComputer clean qwen3.5-4b"
# New group membership only takes effect in a fresh login session.
exit 1
;;
*)
echo "Skipped docker group setup."
echo "You can run this manually:"
echo "sudo usermod -aG docker $USER"
exit 1
;;
esac
}
ensure_docker_access
DOCKER_CMD=(docker)
# Stop the container first if it is currently running...
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" stop "$CONTAINER_NAME" >/dev/null
fi
# ...then remove it if it exists at all (running or stopped).
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" rm "$CONTAINER_NAME" >/dev/null
echo "Container $CONTAINER_NAME removed."
else
echo "Container $CONTAINER_NAME does not exist."
fi
echo "Image and model cache are kept locally for faster next startup."
================================================
FILE: reComputer/scripts/qwen3.5-4b/config.yaml
================================================
# Tested and compatible JetPack/L4T versions.
ALLOWED_L4T_VERSIONS:
- 36.3.0
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 15 # in GB (image archive + GGUF model)
REQUIRED_MEM_SPACE: 7 # in GB
PACKAGES:
- nvidia-jetpack
- aria2 # run.sh downloads the image archive and model with aria2c
# NOTE(review): ENABLE is false, so the shared setup does not reconfigure
# Docker for this example; run.sh performs its own docker access checks.
# Confirm this is intended, since the example still requires Docker.
DOCKER:
ENABLE: false
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/qwen3.5-4b/init.sh
================================================
#!/bin/bash
# Validate the base environment for the qwen3.5-4b example against its config.
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
source "${SCRIPT_DIR}/../utils.sh"
check_base_env "${SCRIPT_DIR}/config.yaml"
================================================
FILE: reComputer/scripts/qwen3.5-4b/run.sh
================================================
#!/bin/bash
# Run Qwen3.5-4B with llama.cpp in Docker and expose an OpenAI-compatible
# HTTP API. Every default below can be overridden via QWEN35_* env vars.
set -euo pipefail
CONTAINER_NAME="qwen3.5-4b"
# Local tag of the prebuilt llama.cpp image (imported from an archive).
IMAGE_NAME="${QWEN35_IMAGE_NAME:-llama-jetson}"
# SharePoint direct-download link for the prebuilt image archive.
IMAGE_ARCHIVE_URL="${QWEN35_IMAGE_ARCHIVE_URL:-https://seeedstudio88-my.sharepoint.com/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/_layouts/15/download.aspx?share=IQBA2papRoneTrRhf5DQ_dOnAV3EvVgvJ3LKb1q8qltMlSM}"
# GGUF weights (Q4_K_M quantization) fetched from Hugging Face.
MODEL_URL="${QWEN35_MODEL_URL:-https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/Qwen3.5-4B-Q4_K_M.gguf}"
MODELS_DIR="${QWEN35_MODELS_DIR:-$HOME/models}"
MODEL_FILE="${QWEN35_MODEL_FILE:-$MODELS_DIR/Qwen3.5-4B-Q4_K_M.gguf}"
HOST_PORT="${QWEN35_PORT:-8080}"
CONTAINER_PORT=8080
# Max seconds to wait for the server to finish loading the model.
STARTUP_TIMEOUT="${QWEN35_STARTUP_TIMEOUT:-600}"
# Populated later by probe_gpu_mode / collect_library_mounts.
GPU_FLAGS=()
LIB_MOUNTS=()
# Verify docker is installed and usable by the current user. When group
# membership is the only problem, offer to add the user to the docker
# group; every failure path exits non-zero with instructions.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
# Daemon reachable: nothing else to do.
if docker info >/dev/null 2>&1; then
return 0
fi
# Already in the docker group yet daemon unreachable -> daemon problem.
if id -nG "$USER" | grep -qw docker; then
echo "Current user is already in docker group, but docker is still unavailable."
echo "Please make sure Docker daemon is running, for example:"
echo "sudo systemctl enable --now docker"
exit 1
fi
echo "Current user has no docker permission."
read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
case "$choice" in
y|Y)
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
if ! getent group docker >/dev/null 2>&1; then
sudo groupadd docker
fi
sudo usermod -aG docker "$USER"
echo "Added $USER to docker group."
echo "Please log out and log back in (or reboot), then rerun:"
echo "reComputer run qwen3.5-4b"
# New group membership only takes effect in a fresh login session.
exit 1
;;
*)
echo "Skipped docker group setup."
echo "You can run this manually:"
echo "sudo usermod -aG docker $USER"
exit 1
;;
esac
}
ensure_docker_access
# Single definition point for how docker is invoked throughout the script.
DOCKER_CMD=(docker)
# Import the prebuilt llama.cpp Docker image. If it is not already loaded,
# download the archive with aria2c (resumable, multi-connection) to a temp
# file, docker-load it, then delete the archive.
ensure_image() {
if "${DOCKER_CMD[@]}" image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
echo "Docker image already exists locally: $IMAGE_NAME"
return 0
fi
local archive_path
archive_path="$(mktemp /tmp/qwen3.5-4b-image.XXXXXX.tar.gz)"
echo "Downloading Docker image archive..."
# --max-tries=0 retries indefinitely; --continue resumes partial downloads.
aria2c \
--continue=true \
--max-connection-per-server=8 \
--split=8 \
--min-split-size=10M \
--retry-wait=5 \
--max-tries=0 \
--dir="$(dirname "$archive_path")" \
--out="$(basename "$archive_path")" \
"$IMAGE_ARCHIVE_URL"
echo "Importing Docker image..."
"${DOCKER_CMD[@]}" load -i "$archive_path"
# The archive is only needed once; the loaded image persists in Docker.
rm -f "$archive_path"
}
# Fetch the GGUF model into the local cache unless it is already present.
ensure_model() {
    mkdir -p "$MODELS_DIR"
    if [ -f "$MODEL_FILE" ]; then
        echo "Model already exists locally: $MODEL_FILE"
        return 0
    fi
    echo "Downloading model..."
    local out_name
    out_name="$(basename "$MODEL_FILE")"
    # Resumable, multi-connection download with unlimited retries.
    aria2c --continue=true \
        --max-connection-per-server=8 --split=8 --min-split-size=10M \
        --retry-wait=5 --max-tries=0 \
        --dir="$MODELS_DIR" --out="$out_name" \
        "$MODEL_URL"
}
# Echo a default --n-gpu-layers value based on total system RAM in MB.
select_gpu_layers() {
    local mem_mb
    mem_mb="$(free -m | awk '/^Mem:/{print $2}')"
    if [ "$mem_mb" -lt 7000 ]; then
        echo 20
    elif [ "$mem_mb" -lt 14000 ]; then
        echo 40
    elif [ "$mem_mb" -lt 60000 ]; then
        echo 80
    else
        echo 99
    fi
}
# Detect which Docker GPU flag works on this host by launching a throwaway
# container (ENTRYPOINT --help is cheap). Sets GPU_FLAGS on success; exits
# non-zero when neither mode works.
probe_gpu_mode() {
if "${DOCKER_CMD[@]}" run --rm --runtime nvidia "$IMAGE_NAME" --help >/dev/null 2>&1; then
GPU_FLAGS=(--runtime nvidia)
echo "Using GPU mode: --runtime nvidia"
return 0
fi
if "${DOCKER_CMD[@]}" run --rm --gpus all "$IMAGE_NAME" --help >/dev/null 2>&1; then
GPU_FLAGS=(--gpus all)
echo "Using GPU mode: --gpus all"
return 0
fi
echo "Failed to detect a working Docker GPU mode."
echo "Tried: --runtime nvidia and --gpus all"
echo "Please check Docker + NVIDIA Container Runtime on this device."
exit 1
}
# Append read-only bind mounts for host CUDA/NVIDIA libraries that exist,
# so the container can resolve them at runtime. Fills LIB_MOUNTS.
collect_library_mounts() {
    local spec host_path
    local specs=(
        "/usr/local/cuda/lib64:/usr/local/cuda/lib64:ro"
        "/usr/lib/aarch64-linux-gnu/nvidia:/usr/lib/aarch64-linux-gnu/nvidia:ro"
        "/usr/lib/aarch64-linux-gnu/libcuda.so.1:/usr/lib/aarch64-linux-gnu/libcuda.so.1:ro"
    )
    for spec in "${specs[@]}"; do
        host_path="${spec%%:*}"
        if [ -e "$host_path" ]; then
            LIB_MOUNTS+=(-v "$spec")
        fi
    done
}
# Poll /v1/models until the server answers 200 with a "data" payload.
# Returns 1 (after dumping recent container logs) if the container dies or
# STARTUP_TIMEOUT elapses. Returns 0 immediately when curl is unavailable,
# trusting the container to come up on its own.
wait_for_server_ready() {
local endpoint="http://127.0.0.1:${HOST_PORT}/v1/models"
local elapsed=0
local interval=5
local raw_response=""
local response_body=""
local http_code="000"
if ! command -v curl >/dev/null 2>&1; then
echo "curl not found, skip readiness probing."
return 0
fi
echo "Waiting for Qwen server to be ready at ${endpoint} (timeout: ${STARTUP_TIMEOUT}s)..."
while [ "$elapsed" -lt "$STARTUP_TIMEOUT" ]; do
# Bail out early if the container is no longer running.
if [ -z "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
echo "Container exited before model became ready."
echo "Recent logs:"
"${DOCKER_CMD[@]}" logs --tail 80 "$CONTAINER_NAME"
return 1
fi
# -w appends the HTTP status as the last line; split body/status below.
raw_response="$(curl -s --max-time 3 -w "\n%{http_code}" "$endpoint" 2>/dev/null || true)"
http_code="$(printf '%s' "$raw_response" | tail -n 1)"
response_body="$(printf '%s' "$raw_response" | sed '$d')"
if [ "$http_code" = "200" ] && echo "$response_body" | grep -q "\"data\""; then
return 0
fi
# The server answers 503 with "Loading model" while weights are loading.
if [ "$http_code" = "503" ] && echo "$response_body" | grep -q "Loading model"; then
# Progress messages only every 30s to keep the log readable.
if [ $((elapsed % 30)) -eq 0 ]; then
echo "Model is still loading... (${elapsed}s)"
fi
sleep "$interval"
elapsed=$((elapsed + interval))
continue
fi
# Any other status (connection refused, 000, etc.): keep polling.
if [ $((elapsed % 30)) -eq 0 ]; then
echo "Waiting model readiness... (${elapsed}s, http=${http_code})"
fi
sleep "$interval"
elapsed=$((elapsed + interval))
done
echo "Model is still not ready after ${STARTUP_TIMEOUT}s."
echo "Recent logs:"
"${DOCKER_CMD[@]}" logs --tail 80 "$CONTAINER_NAME"
return 1
}
# ---- main ----
ensure_image
ensure_model
probe_gpu_mode
collect_library_mounts
GPU_LAYERS="${QWEN35_GPU_LAYERS:-$(select_gpu_layers)}"
echo "Using --n-gpu-layers ${GPU_LAYERS}"

# Launch llama-server detached. Shared by both the "recreate" and the
# "fresh start" paths below (previously two identical inline copies).
start_container() {
    "${DOCKER_CMD[@]}" run -d \
        --name "$CONTAINER_NAME" \
        "${GPU_FLAGS[@]}" \
        -p "${HOST_PORT}:${CONTAINER_PORT}" \
        -v "$MODELS_DIR":/models \
        "${LIB_MOUNTS[@]}" \
        -e LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/lib/aarch64-linux-gnu/nvidia:/usr/lib/aarch64-linux-gnu:/usr/local/lib/llama \
        "$IMAGE_NAME" \
        --model "/models/$(basename "$MODEL_FILE")" \
        --ctx-size "${QWEN35_CTX_SIZE:-8192}" \
        --host 0.0.0.0 \
        --port "${CONTAINER_PORT}" \
        --n-gpu-layers "${GPU_LAYERS}" >/dev/null
}

if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container $CONTAINER_NAME is already running."
elif [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    # A stopped container may carry stale settings; recreate it.
    echo "Container $CONTAINER_NAME already exists but is not running."
    echo "Recreating with current settings..."
    "${DOCKER_CMD[@]}" rm -f "$CONTAINER_NAME" >/dev/null
    start_container
else
    echo "Creating and starting container $CONTAINER_NAME..."
    start_container
fi

if ! wait_for_server_ready; then
    exit 1
fi
echo "Qwen3.5-4B server is ready at: http://127.0.0.1:${HOST_PORT}"
echo "Check models:"
echo "curl http://127.0.0.1:${HOST_PORT}/v1/models"
echo "Chat API example:"
echo "curl http://127.0.0.1:${HOST_PORT}/v1/chat/completions -H 'Content-Type: application/json' -d '{\"model\":\"qwen\",\"messages\":[{\"role\":\"user\",\"content\":\"Hello!\"}]}'"
echo "Follow server logs:"
echo "${DOCKER_CMD[*]} logs -f $CONTAINER_NAME"
================================================
FILE: reComputer/scripts/ros1-jp6/README.md
================================================
# Jetson-Example: Run ROS 1 Noetic on NVIDIA Jetson
This example downloads a prebuilt ROS 1 Noetic Docker archive from a public OneDrive/SharePoint link, loads it into Docker as:
```sh
ros:noetic
```
Archive size: about **1.27 GB**
Supported JetPack/L4T versions:
- JetPack 6.1 -> L4T 36.4.0
- JetPack 6.2 -> L4T 36.4.3
- JetPack 6.2.1 -> L4T 36.4.4
## Getting Started
PyPI (recommended):
```sh
pip install jetson-examples
```
GitHub (developer):
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Usage
Launch an interactive shell in the container:
```sh
reComputer run ros1-jp6
```
The example will:
1. Download the Docker archive from SharePoint if it is not cached
2. Run `docker load -i` to import the image
3. Start the container with Jetson-friendly Docker flags
The SharePoint share link is a normal `:u:/...` public link. The downloader automatically appends `download=1`, so you do not need to manually rewrite the URL.
Cache location:
```sh
~/.cache/jetson-examples/ros1-jp6/ros-noetic-jp6.tar
```
## Verify The Image
Only prepare the image and skip container startup:
```sh
ROS1_JP6_SKIP_RUN=1 reComputer run ros1-jp6
```
Run a non-interactive ROS smoke test:
```sh
ROS1_JP6_COMMAND='source /opt/ros/noetic/setup.bash && rosversion -d' reComputer run ros1-jp6
```
## Export With docker save
After the image is loaded locally, save it back to a tar archive:
```sh
ROS1_JP6_SKIP_RUN=1 \
ROS1_JP6_SAVE_PATH=/tmp/ros-noetic-jp6.tar \
reComputer run ros1-jp6
```
This is equivalent to:
```sh
docker save -o /tmp/ros-noetic-jp6.tar ros:noetic
```
## Environment Variables
You can override the default behavior with these variables:
```sh
ROS1_JP6_SHARE_URL
ROS1_JP6_ARCHIVE_NAME
ROS1_JP6_CACHE_DIR
ROS1_JP6_IMAGE
ROS1_JP6_CONTAINER_NAME
ROS1_JP6_COMMAND
ROS1_JP6_SKIP_RUN
ROS1_JP6_SAVE_PATH
```
## Cleanup
Only remove the container:
```sh
reComputer clean ros1-jp6
```
The local image cache and the downloaded archive are kept.
================================================
FILE: reComputer/scripts/ros1-jp6/clean.sh
================================================
#!/bin/bash
# Remove the ros1-jp6 container; the loaded image and the cached archive
# are intentionally kept so the next start is fast.
set -euo pipefail
CONTAINER_NAME="${ROS1_JP6_CONTAINER_NAME:-ros1-jp6}"
# Fail fast when docker is missing or the daemon is unreachable.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
if docker info >/dev/null 2>&1; then
return 0
fi
echo "Docker daemon is not available to the current user."
echo "Please make sure Docker is running and your user can access /var/run/docker.sock."
exit 1
}
ensure_docker_access
# Stop the container if it is running, then remove it if it exists at all.
if [ "$(docker ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
docker stop "${CONTAINER_NAME}"
fi
if [ "$(docker ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
docker rm "${CONTAINER_NAME}"
echo "Container ${CONTAINER_NAME} removed."
else
echo "Container ${CONTAINER_NAME} does not exist."
fi
echo "Image cache and downloaded archive are kept locally."
================================================
FILE: reComputer/scripts/ros1-jp6/config.yaml
================================================
# Tested and compatible JetPack/L4T versions.
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 10 # in GB (archive + loaded image)
REQUIRED_MEM_SPACE: 4 # in GB
PACKAGES:
- nvidia-jetpack
- python3-requests # used by the shared OneDrive downloader script
- python3-tqdm # download progress bars for the same script
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/ros1-jp6/init.sh
================================================
#!/bin/bash
# Validate the base environment for the ros1-jp6 example against its config.
here="$(dirname "$(realpath "$0")")"
source "${here}/../utils.sh"
check_base_env "${here}/config.yaml"
================================================
FILE: reComputer/scripts/ros1-jp6/run.sh
================================================
#!/bin/bash
# Download (if needed), import, and run the prebuilt ROS 1 Noetic image.
# Behavior is configurable through the ROS1_JP6_* environment variables.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Reuses the OneDrive/SharePoint downloader shipped with the nvblox example.
DOWNLOADER_SCRIPT="${SCRIPT_DIR}/../nvblox/onedrive_downloader.py"
IMAGE_NAME="${ROS1_JP6_IMAGE:-ros:noetic}"
CONTAINER_NAME="${ROS1_JP6_CONTAINER_NAME:-ros1-jp6}"
# Public share link; the downloader appends download=1 itself.
SHARE_URL="${ROS1_JP6_SHARE_URL:-https://seeedstudio88-my.sharepoint.com/:u:/g/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/IQCOgjRBDytqT4jKdktOzhdIAUf97NfnQJ4lk_DAHpLTaRY?e=Nw0RjJ}"
CACHE_DIR="${ROS1_JP6_CACHE_DIR:-$HOME/.cache/jetson-examples/ros1-jp6}"
ARCHIVE_NAME="${ROS1_JP6_ARCHIVE_NAME:-ros-noetic-jp6.tar}"
ARCHIVE_PATH="${CACHE_DIR%/}/${ARCHIVE_NAME}"
# Optional docker-save target; empty disables the export step.
SAVE_PATH="${ROS1_JP6_SAVE_PATH:-}"
# "1" prepares the image only and skips container startup.
SKIP_RUN="${ROS1_JP6_SKIP_RUN:-0}"
CONTAINER_COMMAND="${ROS1_JP6_COMMAND:-bash}"
# Populated by prepare_run_flags.
DOCKER_RUN_FLAGS=()
# Verify docker is installed and usable by the current user. When group
# membership is the only problem, offer to add the user to the docker
# group; every failure path exits non-zero with instructions.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
# Daemon reachable: nothing else to do.
if docker info >/dev/null 2>&1; then
return 0
fi
# Already in the docker group yet daemon unreachable -> daemon problem.
if id -nG "$USER" | grep -qw docker; then
echo "Current user is already in docker group, but docker is still unavailable."
echo "Please make sure Docker daemon is running, for example:"
echo "sudo systemctl enable --now docker"
exit 1
fi
echo "Current user has no docker permission."
read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
case "$choice" in
y|Y)
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
if ! getent group docker >/dev/null 2>&1; then
sudo groupadd docker
fi
sudo usermod -aG docker "$USER"
echo "Added $USER to docker group."
echo "Please log out and log back in (or reboot), then rerun:"
echo "reComputer run ros1-jp6"
# New group membership only takes effect in a fresh login session.
exit 1
;;
*)
echo "Skipped docker group setup."
echo "You can run this manually:"
echo "sudo usermod -aG docker $USER"
exit 1
;;
esac
}
# Abort when the shared OneDrive downloader helper script is missing.
require_downloader() {
    [[ -f "${DOWNLOADER_SCRIPT}" ]] && return 0
    echo "OneDrive downloader not found: ${DOWNLOADER_SCRIPT}"
    exit 1
}
# Download the image archive into the cache dir unless a non-empty cached
# copy already exists. Delegates the transfer to the shared downloader.
ensure_archive() {
mkdir -p "${CACHE_DIR}"
# -s guards against an empty file left over from an interrupted download.
if [[ -f "${ARCHIVE_PATH}" && -s "${ARCHIVE_PATH}" ]]; then
echo "Using cached archive: ${ARCHIVE_PATH}"
return 0
fi
require_downloader
echo "Downloading ROS 1 archive from SharePoint..."
python3 "${DOWNLOADER_SCRIPT}" "${SHARE_URL}" --filename "${ARCHIVE_NAME}" --output-dir "${CACHE_DIR}"
}
# Import the ROS image from the cached archive unless it is already loaded,
# then verify the expected tag actually appeared.
ensure_image() {
    if docker image inspect "${IMAGE_NAME}" >/dev/null 2>&1; then
        echo "Docker image already present: ${IMAGE_NAME}"
        return 0
    fi
    ensure_archive
    echo "Loading Docker image archive: ${ARCHIVE_PATH}"
    docker load -i "${ARCHIVE_PATH}"
    # Guard against an archive that does not contain the expected tag.
    docker image inspect "${IMAGE_NAME}" >/dev/null 2>&1 && return 0
    echo "Expected image not found after docker load: ${IMAGE_NAME}"
    exit 1
}
# Export the loaded image with docker save when ROS1_JP6_SAVE_PATH is set;
# otherwise do nothing.
maybe_save_image() {
    [[ -n "${SAVE_PATH}" ]] || return 0
    local save_dir
    save_dir="$(dirname "${SAVE_PATH}")"
    mkdir -p "${save_dir}"
    echo "Saving image ${IMAGE_NAME} to ${SAVE_PATH}"
    docker save -o "${SAVE_PATH}" "${IMAGE_NAME}"
}
# Detect a working Docker GPU mode by running a no-op container, and append
# the winning flag to DOCKER_RUN_FLAGS. Unlike the other examples this one
# deliberately falls back to a CPU-only start instead of exiting.
prepare_run_flags() {
if docker run --rm --runtime nvidia "${IMAGE_NAME}" /bin/sh -lc "exit 0" >/dev/null 2>&1; then
DOCKER_RUN_FLAGS+=(--runtime nvidia)
echo "Using GPU mode: --runtime nvidia"
return 0
fi
if docker run --rm --gpus all "${IMAGE_NAME}" /bin/sh -lc "exit 0" >/dev/null 2>&1; then
DOCKER_RUN_FLAGS+=(--gpus all)
echo "Using GPU mode: --gpus all"
return 0
fi
echo "Warning: no GPU runtime detected. Falling back to CPU-only container start."
}
# Start the ROS 1 container with host networking, full device access, and
# optional X11 / ROS_* environment forwarding. The container runs
# CONTAINER_COMMAND via a login bash shell and is removed on exit (--rm).
run_container() {
local tty_args=()
local docker_args=(
--rm
--name "${CONTAINER_NAME}"
--network host
--ipc host
--privileged
-v /dev:/dev
)
# Allocate an interactive TTY only when stdin/stdout are terminals
# (keeps non-interactive invocations, e.g. CI, working).
if [[ -t 0 && -t 1 ]]; then
tty_args=(-it)
fi
# Forward X11 so GUI tools inside the container can display on the host.
if [[ -n "${DISPLAY:-}" ]]; then
docker_args+=(
-e "DISPLAY=${DISPLAY}"
-e QT_X11_NO_MITSHM=1
-v /tmp/.X11-unix:/tmp/.X11-unix
)
fi
# Pass through ROS networking configuration when set on the host.
if [[ -n "${ROS_MASTER_URI:-}" ]]; then
docker_args+=(-e "ROS_MASTER_URI=${ROS_MASTER_URI}")
fi
if [[ -n "${ROS_IP:-}" ]]; then
docker_args+=(-e "ROS_IP=${ROS_IP}")
fi
if [[ -n "${ROS_HOSTNAME:-}" ]]; then
docker_args+=(-e "ROS_HOSTNAME=${ROS_HOSTNAME}")
fi
# Remove any stale container holding the same name before starting.
if docker ps -a -q -f name="^/${CONTAINER_NAME}$" | grep -q .; then
docker rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true
fi
echo "Starting ${IMAGE_NAME}"
docker run "${tty_args[@]}" "${DOCKER_RUN_FLAGS[@]}" "${docker_args[@]}" "${IMAGE_NAME}" /bin/bash -lc "${CONTAINER_COMMAND}"
}
# ---- main ----
ensure_docker_access
ensure_image
maybe_save_image
# ROS1_JP6_SKIP_RUN=1: prepare/export the image only, skip container start.
if [[ "${SKIP_RUN}" == "1" ]]; then
echo "ROS1_JP6_SKIP_RUN=1, image preparation finished."
exit 0
fi
prepare_run_flags
run_container
================================================
FILE: reComputer/scripts/run.sh
================================================
#!/bin/bash
# Dispatcher: runs reComputer/scripts/<example>/init.sh then run.sh.
# $1 is the example name (e.g. "qwen3.5-4b").

# Abort the whole run on the first unhandled command failure.
handle_error() {
    echo "An error occurred. Exiting..."
    exit 1
}
trap 'handle_error' ERR

# Confirm this machine looks like an NVIDIA Jetson by inspecting the
# device-tree model string; exit non-zero otherwise.
check_is_jetson_or_not() {
    local model_file="/proc/device-tree/model"
    if [ -f "$model_file" ]; then
        local model
        # Strip NUL terminators and normalize case for the pattern match.
        model=$(tr -d '\0' < "$model_file" | tr '[:upper:]' '[:lower:]')
        if [[ $model =~ jetson|orin|nv|agx ]]; then
            echo "INFO: machine[$model] confirmed..."
        else
            echo "WARNING: machine[$model] maybe not support..."
            exit 1
        fi
    else
        # Fix: the original interpolated the unset $model here.
        echo "ERROR: machine[unknown] not support this..."
        exit 1
    fi
}
check_is_jetson_or_not

echo "run example:$1"
BASE_PATH="/home/$USER/reComputer"
# NOTE(review): JETSON_REPO_PATH is never assigned in this script, so the
# original unquoted `cd $JETSON_REPO_PATH` fell back to $HOME. Preserve
# that behavior explicitly while keeping any exported override working.
cd "${JETSON_REPO_PATH:-$HOME}"
script_dir=$(dirname "$0")

# init.sh is optional: warn but continue when an example has none.
init_script="$script_dir/$1/init.sh"
if [ -f "$init_script" ]; then
    echo "----example init----"
    bash "$init_script"
else
    echo "WARN: Example[$1] init.sh Not Found."
fi

# run.sh is the example entry point.
start_script="$script_dir/$1/run.sh"
if [ -f "$start_script" ]; then
    echo "----example start----"
    bash "$start_script"
else
    echo "ERROR: Example[$1] run.sh Not Found."
fi
echo "----example done----"
================================================
FILE: reComputer/scripts/stable-diffusion-webui/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7 # in GB
PACKAGES:
- nvidia-jetpack
# Docker daemon configuration applied by the shared environment check.
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/stable-diffusion-webui/init.sh
================================================
#!/bin/bash
# Check the runtime environment and make sure jetson-containers is installed.
# Fixes: quoted all path expansions and guarded the cd calls so a failed
# directory change can no longer run git/install.sh in the wrong directory.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT"
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/stable-diffusion-webui/run.sh
================================================
#!/bin/bash
# Launch the stable-diffusion-webui container via jetson-containers.
BASE_PATH="/home/$USER/reComputer"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Guard the cd: without it a failed directory change would execute whatever
# ./run.sh exists in the current directory — potentially this very script,
# causing infinite recursion.
cd "$JETSON_REPO_PATH" || exit 1
./run.sh $(./autotag stable-diffusion-webui)
================================================
FILE: reComputer/scripts/text-generation-webui/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7 # in GB
PACKAGES:
- nvidia-jetpack
# Docker daemon configuration applied by the shared environment check.
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/text-generation-webui/init.sh
================================================
#!/bin/bash
# Check the runtime environment and make sure jetson-containers is installed.
# Fixes: quoted all path expansions and guarded the cd calls so a failed
# directory change can no longer run git/install.sh in the wrong directory.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT"
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/text-generation-webui/run.sh
================================================
#!/bin/bash
# Download the demo LLM weights, then start text-generation-webui.
BASE_PATH="/home/$USER/reComputer"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Guard the cd: without it a failed directory change would execute whatever
# ./run.sh exists in the current directory — potentially this very script,
# causing infinite recursion.
cd "$JETSON_REPO_PATH" || exit 1
# download llm model
./run.sh --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \
'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'
# run text-generation-webui
./run.sh $(./autotag text-generation-webui)
================================================
FILE: reComputer/scripts/ultralytics-yolo/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/ultralytics-yolo/README.md
================================================
# Jetson-Example: Run Ultralytics YOLO Platform Service on NVIDIA Jetson Orin 🚀(**Supported YOLOV11**)
## One-Click Quick Deployment of Plug-and-Play All Ultralytics YOLO for All Task Models with Web UI and HTTP API Interface
## Introduction 📘
In this project, you can quickly deploy all Ultralytics YOLO task models on Nvidia Jetson Orin devices with one click. This setup enables object detection, segmentation, human pose estimation, and classification. It supports uploading local videos, images, and using a webcam, and also allows one-click TensorRT model conversion. By accessing [http://127.0.0.1:5000](http://127.0.0.1:5000) on your local machine or within the same LAN, you can quickly start using Ultralytics YOLO. Additionally, an HTTP API method has been added at [http://127.0.0.1:5000/results](http://127.0.0.1:5000/results) to display detection data results for any task, and an additional Python script is provided to read YOLO detection data within Docker.
## **Key Features**:
1. **One-Click Deployment and Plug-and-Play**: Quickly deploy all YOLO task models on Nvidia Jetson Orin devices.
2. **Comprehensive Task Support**: Enables object detection, segmentation, human pose estimation, and classification.
3. **Versatile Input Options**: Supports uploading local videos, images, and using a webcam.
4. **TensorRT Model Conversion**: Allows one-click conversion of models to TensorRT.
5. **Web UI Access**: Easy access via [`http://127.0.0.1:5000`](http://127.0.0.1:5000) on the local machine or within the same LAN.
6. **HTTP API Interface**: Added HTTP API at [`http://127.0.0.1:5000/results`](http://127.0.0.1:5000/results) to display detection data results.
7. **Python Script Support**: Provides an additional Python script to read YOLO detection data within Docker.
[](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models)
All models implemented in this project are from the official [Ultralytics Yolo](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models).
# Supported Task Models
| Model Type | Pre-trained Weights / Filenames | Task | Inference | Validation | Training | Export |
|-------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------|-----------|------------|----------|--------|
| YOLOv5u | yolov5nu, yolov5su, yolov5mu, yolov5lu, yolov5xu, yolov5n6u, yolov5s6u, yolov5m6u, yolov5l6u, yolov5x6u | Object Detection | ✅ | ✅ | ✅ | ✅ |
| YOLOv8 | yolov8n.pt, yolov8s.pt, yolov8m.pt, yolov8l.pt, yolov8x.pt | Detection | ✅ | ✅ | ✅ | ✅ |
| YOLOv8-seg | yolov8n-seg.pt, yolov8s-seg.pt, yolov8m-seg.pt, yolov8l-seg.pt, yolov8x-seg.pt | Instance Segmentation | ✅ | ✅ | ✅ | ✅ |
| YOLOv8-pose | yolov8n-pose.pt, yolov8s-pose.pt, yolov8m-pose.pt, yolov8l-pose.pt, yolov8x-pose-p6.pt | Pose/Keypoints | ✅ | ✅ | ✅ | ✅ |
| YOLOv8-obb | yolov8n-obb.pt, yolov8s-obb.pt, yolov8m-obb.pt, yolov8l-obb.pt, yolov8x-obb.pt | Oriented Detection | ✅ | ✅ | ✅ | ✅ |
| YOLOv8-cls | yolov8n-cls.pt, yolov8s-cls.pt, yolov8m-cls.pt, yolov8l-cls.pt, yolov8x-cls.pt | Classification | ✅ | ✅ | ✅ | ✅ |
| YOLOv11 | yolov11n.pt, yolov11s.pt, yolov11m.pt, yolov11l.pt, yolov11x.pt | Detection | ✅ | ✅ | ✅ | ✅ |
| YOLOv11-seg | yolov11n-seg.pt, yolov11s-seg.pt, yolov11m-seg.pt, yolov11l-seg.pt, yolov11x-seg.pt | Instance Segmentation | ✅ | ✅ | ✅ | ✅ |
| YOLOv11-pose| yolov11n-pose.pt, yolov11s-pose.pt, yolov11m-pose.pt, yolov11l-pose.pt, yolov11x-pose.pt | Pose/Keypoints | ✅ | ✅ | ✅ | ✅ |
| YOLOv11-obb | yolov11n-obb.pt, yolov11s-obb.pt, yolov11m-obb.pt, yolov11l-obb.pt, yolov11x-obb.pt | Oriented Detection | ✅ | ✅ | ✅ | ✅ |
| YOLOv11-cls | yolov11n-cls.pt, yolov11s-cls.pt, yolov11m-cls.pt, yolov11l-cls.pt, yolov11x-cls.pt | Classification | ✅ | ✅ | ✅ | ✅ |
### Get a Jetson Orin Device 🛒
| Device Model | Description | Link |
|--------------|-------------|------|
| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |
| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
## Quickstart ⚡
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### Installation via PyPI (Recommended) 🐍
1. Install the package:
```sh
pip install jetson-examples
```
2. Restart your reComputer:
```sh
sudo reboot
```
3. Run Ultralytics YOLO on Jetson with one command:
```sh
reComputer run ultralytics-yolo
```
4. Enter [`http://127.0.0.1:5001`](http://127.0.0.1:5001) or http://device_IP:5001 in your browser to access the Web UI.
- **Choose Model**: Select Yolo version and models for various tasks such as object detection, classification, segmentation, human pose estimation, OBB, etc.
- **Upload Custom Model**: Users can upload their own trained YOLO models.
- **Choose Input Type**: Users can select to input locally uploaded images, videos, or real-time camera devices.
- **Enable TensorRT**: Choose whether to convert and use the TensorRT model. The initial conversion may require varying amounts of time.
5. If you want to see the detection result data, you can enter [`http://127.0.0.1:5000/results`](http://127.0.0.1:5000/results) in your browser to view the `JSON` formatted data results. These results include `boxes` for object detection, `masks` for segmentation, `keypoints` for human pose estimation, and the `names` corresponding to all numerical categories.
We also provide a Python script to help users integrate the data into their own programs.
```python
import requests
def fetch_results():
response = requests.get('http://localhost:5001/results')
if response.status_code == 200:
results = response.json()
return results
else:
print('Failed to fetch results')
return None
results = fetch_results()
print(results)
```
## Notes 📝
- To stop detection at any time, press the Stop button.
- When accessing the WebUI from other devices within the same LAN, use the URL: `http://{Jetson_IP}:5000`.
- You can view the JSON formatted detection results by accessing http://{Jetson_IP}:5000/results.
- The first model conversion may require different amounts of time depending on the hardware and network environment, so please be patient.
## Further Development 🔧
- [Training a YOLO Model](https://wiki.seeedstudio.com/How_to_Train_and_Deploy_YOLOv8_on_reComputer/)
- [TensorRT Acceleration](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/)
- [Multistreams using Deepstream](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/#multistream-model-benchmarks) Tutorials.
## License
This project is licensed under the MIT License.
================================================
FILE: reComputer/scripts/ultralytics-yolo/clean.sh
================================================
#!/bin/bash
# Remove the ultralytics-yolo example: the host-side models dir, the
# container, and the L4T-version-specific image.
CONTAINER_NAME="ultralytics-yolo"

# Function to get L4T version (e.g. "36.4.3") from /etc/nv_tegra_release.
get_l4t_version() {
    local l4t_version=""
    local release_line=$(head -n 1 /etc/nv_tegra_release)
    if [[ $release_line =~ R([0-9]+)\ *\(release\),\ REVISION:\ ([0-9]+\.[0-9]+) ]]; then
        local major="${BASH_REMATCH[1]}"
        local revision="${BASH_REMATCH[2]}"
        l4t_version="${major}.${revision}"
    fi
    echo "$l4t_version"
}

L4T_VERSION=$(get_l4t_version)
echo "Detected L4T version: $L4T_VERSION"

# Determine the Docker image based on L4T version.
# Keep this mapping in sync with run.sh and config.yaml: 36.4.0 and 36.4.3
# were previously missing here, so clean.sh exited with an error on those
# JetPack releases and never removed the container or image.
if [[ "$L4T_VERSION" == "32.6.1" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack4:1.0"
elif [[ "$L4T_VERSION" == "35.3.1" || "$L4T_VERSION" == "35.4.1" || "$L4T_VERSION" == "35.5.0" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack5:1.0"
elif [[ "$L4T_VERSION" == "36.3.0" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack6:1.0"
elif [[ "$L4T_VERSION" == "36.4.0" || "$L4T_VERSION" == "36.4.3" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack61:v1.0"
else
    echo "Error: L4T version $L4T_VERSION is not supported."
    exit 1
fi

echo "Using Docker image: $IMAGE_NAME"
# -f: do not fail noisily if the models dir was already removed.
sudo rm -rf ~/yolo_models
sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
sudo docker rmi $IMAGE_NAME
================================================
FILE: reComputer/scripts/ultralytics-yolo/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 32.6.1
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
- 36.4.0
- 36.4.3
REQUIRED_DISK_SPACE: 16 # in GB
REQUIRED_MEM_SPACE: 2
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/ultralytics-yolo/init.sh
================================================
#!/bin/bash
# Check the runtime environment against this example's config.yaml.
# Quote the command substitutions so a script path containing spaces
# does not undergo word splitting.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/ultralytics-yolo/run.sh
================================================
#!/bin/bash
# Pull the L4T-version-specific ultralytics image and start (or re-attach to)
# the demo container. WebUI is served on port 5000.
CONTAINER_NAME="ultralytics-yolo"

# Function to get L4T version (e.g. "36.4.3") from /etc/nv_tegra_release.
get_l4t_version() {
    local l4t_version=""
    local release_line=$(head -n 1 /etc/nv_tegra_release)
    if [[ $release_line =~ R([0-9]+)\ *\(release\),\ REVISION:\ ([0-9]+\.[0-9]+) ]]; then
        local major="${BASH_REMATCH[1]}"
        local revision="${BASH_REMATCH[2]}"
        l4t_version="${major}.${revision}"
    fi
    echo "$l4t_version"
}

L4T_VERSION=$(get_l4t_version)
echo "Detected L4T version: $L4T_VERSION"

# Determine the Docker image based on L4T version (keep in sync with
# clean.sh and config.yaml).
if [[ "$L4T_VERSION" == "32.6.1" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack4:1.0"
elif [[ "$L4T_VERSION" == "35.3.1" || "$L4T_VERSION" == "35.4.1" || "$L4T_VERSION" == "35.5.0" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack5:1.0"
elif [[ "$L4T_VERSION" == "36.3.0" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack6:1.0"
elif [[ "$L4T_VERSION" == "36.4.0" || "$L4T_VERSION" == "36.4.3" ]]; then
    IMAGE_NAME="yaohui1998/ultralytics-jetpack61:v1.0"
else
    echo "Error: L4T version $L4T_VERSION is not supported."
    exit 1
fi

echo "Using Docker image: $IMAGE_NAME"
# Pull the Docker image
docker pull $IMAGE_NAME
# Host-side dir for saving models; -p keeps reruns from printing an error
# when the directory already exists.
mkdir -p ~/yolo_models
# Check if the container with the specified name already exists.
if [ -n "$(docker ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    echo "Please open http://127.0.0.1:5000 to access the WebUI."
    docker start $CONTAINER_NAME
    docker exec -it $CONTAINER_NAME /bin/bash
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    # NOTE(review): "-v /dev/*:/dev/*" relies on the glob not matching (no
    # path contains ":"), so docker receives the literal pattern; the
    # container is already --privileged, which grants device access.
    docker run -it \
        --name $CONTAINER_NAME \
        --privileged \
        --network host \
        -v ~/yolo_models/:/usr/src/ultralytics/models/ \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev/*:/dev/* \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        $IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/update.sh
================================================
#!/bin/bash
# Install or update the local jetson-containers checkout used by the examples.
echo "--update jetson-containers repo--"
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# A GitHub /tree/<commit> URL is not cloneable; keep the repo URL and the
# pinned commit separate and check the commit out after cloning.
JETSON_REPO_URL="https://github.com/dusty-nv/jetson-containers"
JETSON_REPO_COMMIT="d1573a3e8d7ba3fef36ebb23a7391e60eaf64db7"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
    read -p "follow the newest version maybe bring bugs, are you sure about the update? (y/n): " choice
    if [[ $choice == "y" || $choice == "Y" ]]; then
        cd "$JETSON_REPO_PATH" || exit 1
        git pull
        pip3 install -r requirements.txt
    else
        echo "skip update."
    fi
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Full clone (not --depth=1): a shallow clone cannot check out an
    # arbitrary pinned commit.
    git clone "$JETSON_REPO_URL" "$JETSON_REPO_PATH"
    cd "$JETSON_REPO_PATH" || exit 1
    git checkout "$JETSON_REPO_COMMIT"
    sudo apt update; sudo apt install -y python3-pip
    pip3 install -r requirements.txt
fi
================================================
FILE: reComputer/scripts/utils.sh
================================================
#!/bin/bash
# check_base_env CONFIG_YAML
#
# Validate that the current Jetson device satisfies an example's
# requirements (L4T version, disk, memory, apt packages) as declared in
# the given config.yaml, and prepare the Docker environment (install
# docker, write /etc/docker/daemon.json, fix user permissions).
# Exits non-zero on any unmet requirement.
check_base_env()
{
    # 1. Set color value
    RED=$(tput setaf 1)
    GREEN=$(tput setaf 2)
    YELLOW=$(tput setaf 3)
    BLUE=$(tput setaf 4)
    MAGENTA=$(tput setaf 5)
    CYAN=$(tput setaf 6)
    RESET=$(tput sgr0)

    # 2. Load config file
    local CONFIG_FILE=$1
    echo "CONFIG_FILE_PATH=$CONFIG_FILE"
    if [[ ! -f "$CONFIG_FILE" ]]; then
        echo "Error: YAML file '$CONFIG_FILE' not found."
        exit 1
    fi

    # Install yq for parsing YAML file
    if ! command -v yq &> /dev/null
    then
        echo "yq is not installed. Installing yq with pip3..."
        pip3 install yq
        if command -v yq &> /dev/null
        then
            echo "yq has been successfully installed."
        else
            echo "Failed to install yq."
            exit 1
        fi
    else
        echo "yq is already installed."
    fi

    # jq is needed both by yq and for inspecting daemon.json below.
    if ! command -v jq &> /dev/null
    then
        echo "jq is not installed. Installing jq..."
        sudo apt-get update
        sudo apt-get install -y jq
        if command -v jq &> /dev/null
        then
            echo "jq has been successfully installed."
            jq --version
        else
            echo "Failed to install jq."
            exit 1
        fi
    else
        echo "jq is already installed."
        jq --version
    fi

    ALLOWED_L4T_VERSIONS=($(yq -r '.ALLOWED_L4T_VERSIONS[]' $CONFIG_FILE))
    REQUIRED_DISK_SPACE=$(yq -r '.REQUIRED_DISK_SPACE' $CONFIG_FILE)
    REQUIRED_MEM_SPACE=$(yq -r '.REQUIRED_MEM_SPACE' $CONFIG_FILE)
    PACKAGES=($(yq -r '.PACKAGES[]' $CONFIG_FILE))
    DOCKER=$(yq -r '.DOCKER.ENABLE' $CONFIG_FILE)
    DESIRED_DAEMON_JSON=$(yq -r '.DOCKER.DAEMON' $CONFIG_FILE)
    echo "${ALLOWED_L4T_VERSIONS[@]}"

    # 3. Check L4T version
    ARCH=$(uname -i)
    if [ "$ARCH" = "aarch64" ]; then
        # Check for L4T version string
        L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
        if [ -z "$L4T_VERSION_STRING" ]; then
            L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
        fi
        L4T_RELEASE=$(echo "$L4T_VERSION_STRING" | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
        L4T_REVISION=$(echo "$L4T_VERSION_STRING" | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
        L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
    else
        # Any non-aarch64 host is not a Jetson; previously only x86_64 was
        # rejected and other architectures fell through with L4T_VERSION
        # unset, producing a confusing version-check failure below.
        echo "${RED}Unsupported architecture: $ARCH${RESET}"
        exit 1
    fi

    if [[ " ${ALLOWED_L4T_VERSIONS[@]} " =~ " ${L4T_VERSION} " ]]; then
        echo "L4T VERSION ${GREEN}${L4T_VERSION}${RESET} is in the allowed: ${GREEN}OK!${RESET}"
    else
        echo "${RED}L4T VERSION ${GREEN}${L4T_VERSION}${RESET}${RED} is not in the allowed versions list.${RESET}"
        echo "${RED}The JetPack versions currently supported by this container are: ${GREEN}${ALLOWED_L4T_VERSIONS[@]}${RESET}${RED}. ${RESET}"
        echo "${RED}For more information : https://github.com/Seeed-Projects/jetson-examples ${RESET}"
        exit 1
    fi

    # Install additional apt packages.
    # "${PACKAGES[@]}" iterates the whole array; the previous "$PACKAGES"
    # expanded only the first element, so only one package was ever checked.
    for PACKAGE in "${PACKAGES[@]}"; do
        if ! dpkg -l | grep -qw "$PACKAGE"; then
            echo "Installing $PACKAGE..."
            sudo apt-get install -y $PACKAGE
        fi
        echo "$PACKAGE is installed: ${GREEN}OK!${RESET}"
    done

    # 4. Check disk space
    CURRENT_DISK_SPACE=$(df -BG --output=avail / | tail -1 | sed 's/[^0-9]*//g')
    if [ "$CURRENT_DISK_SPACE" -lt "$REQUIRED_DISK_SPACE" ]; then
        echo "${RED}Insufficient disk space. Required: ${REQUIRED_DISK_SPACE}G, Available: ${CURRENT_DISK_SPACE}G. ${RESET}"
        exit 1
    else
        echo "Required ${GREEN}${REQUIRED_DISK_SPACE}GB${RESET}/${GREEN}${CURRENT_DISK_SPACE}GB${RESET} disk space: ${GREEN}OK!${RESET}"
    fi

    # 5. Check memory space
    CURRENT_MEM_SPACE=$(free -g | awk '/^Mem:/{print $2}')
    if [ "$CURRENT_MEM_SPACE" -lt "$REQUIRED_MEM_SPACE" ]; then
        echo "${RED}Insufficient memory: $CURRENT_MEM_SPACE GB (minimum required: $REQUIRED_MEM_SPACE GB).${RESET}"
        exit 1
    else
        echo "Required ${GREEN}${REQUIRED_MEM_SPACE}GB${RESET}/${GREEN}${CURRENT_MEM_SPACE}GB${RESET} memory space: ${GREEN}OK!${RESET}"
    fi

    # 6. Prepare Docker env
    if [ "$DOCKER" = "true" ]; then
        # 6.1 Check if Docker is installed
        if ! command -v docker &> /dev/null; then
            echo "${BLUE}Docker is not installed. Installing Docker...${RESET}"
            sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
            sudo add-apt-repository "deb [arch=arm64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
            sudo apt-get update
            sudo apt-get install -y docker-ce
            sudo systemctl enable docker
            sudo systemctl start docker
            sudo usermod -aG docker $USER
            sudo systemctl restart docker
            echo "${BLUE}Permissions added. Please rerun the command.${RESET}"
            newgrp docker
            echo "Docker has been installed and configured."
        fi

        # 6.2 Modify the Docker configuration file.
        # Only the nvidia-runtime keys are checked (via jq); if they are
        # missing/wrong, the old file is backed up and replaced wholesale
        # with the daemon.json from config.yaml.
        DAEMON_JSON_PATH="/etc/docker/daemon.json"
        if [ ! -f "$DAEMON_JSON_PATH" ]; then
            echo "${BLUE}Creating $DAEMON_JSON_PATH with the desired content...${RESET}"
            echo "$DESIRED_DAEMON_JSON" | sudo tee $DAEMON_JSON_PATH > /dev/null
            sudo systemctl restart docker
            echo "${GREEN}$DAEMON_JSON_PATH has been created.${RESET}"
        elif [ "$(jq -e '.["default-runtime"] == "nvidia" and .runtimes.nvidia.path == "nvidia-container-runtime" and (.runtimes.nvidia.runtimeArgs | length == 0)' "$DAEMON_JSON_PATH")" != "true" ]; then
            echo "${BLUE}Backing up the existing $DAEMON_JSON_PATH to /etc/docker/daemon_backup.json ...${RESET}"
            sudo cp "$DAEMON_JSON_PATH" "/etc/docker/daemon_backup.json"
            echo "${GREEN}Backup completed.${RESET}"
            echo "${BLUE}Updating $DAEMON_JSON_PATH with the desired content...${RESET}"
            echo "$DESIRED_DAEMON_JSON" | sudo tee $DAEMON_JSON_PATH > /dev/null
            sudo systemctl restart docker
            echo "${GREEN}$DAEMON_JSON_PATH has been updated.${RESET}"
        else
            echo "${GREEN}$DAEMON_JSON_PATH already exists and has the correct content.${RESET}"
        fi

        # 6.3 Check permissions
        if ! docker info &> /dev/null; then
            echo "The current user does not have permissions to use Docker. Adding permissions..."
            sudo usermod -aG docker $USER
            sudo systemctl restart docker
            echo "${BLUE}Permissions added. Please rerun the command.${RESET}"
            newgrp docker
        else
            echo "${GREEN}Docker is installed and the current user has permissions to use it.${RESET}"
        fi
    else
        echo "No need to configure Docker."
    fi
}
================================================
FILE: reComputer/scripts/whisper/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/whisper/init.sh
================================================
#!/bin/bash
# check the runtime environment.
# Quoted command substitutions so paths with spaces survive word splitting.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    # Guard the cd calls: if either fails, do not clone/install into the
    # wrong directory.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT"
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/whisper/run.sh
================================================
#!/bin/bash
# Launch the whisper container via jetson-containers' run/autotag helpers.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Abort if the repo is missing (init.sh not run) instead of executing
# ./run.sh from whatever the current directory happens to be.
cd "$JETSON_REPO_PATH" || exit 1
./run.sh $(./autotag whisper)
================================================
FILE: reComputer/scripts/yolov10/Dockerfile
================================================
# YOLOv10 demo image built on the Jetson (L4T r35.3.1) PyTorch base image.
FROM dustynv/l4t-pytorch:r35.3.1
WORKDIR /opt
# Pinned Gradio release for the demo WebUI started by app.py below.
RUN pip3 install --no-cache-dir --verbose gradio==4.31.5
# Clone YOLOv10 and install it in editable mode. The sed lines comment out
# opencv/torch/torchvision in pyproject.toml so pip does not pull generic
# wheels over the builds already shipped in the base image.
RUN git clone https://github.com/THU-MIG/yolov10.git && \
cd yolov10 && \
sed -i '/opencv-python>=4.6.0/ s/^/# /' pyproject.toml && \
sed -i '/torch>=1.8.0/ s/^/# /' pyproject.toml && \
sed -i '/torchvision>=0.9.0/ s/^/# /' pyproject.toml && \
pip3 install -e . && \
mkdir weights
# "weights" is populated at run time via a host volume mount (see run.sh);
# `ls weights` just echoes what models are available before the app starts.
CMD cd /opt/yolov10 && ls weights && python3 app.py
================================================
FILE: reComputer/scripts/yolov10/README.md
================================================
# Quickly Experience YOLOv10 on Jetson
## Hello
💡 Here's an example of quickly deploying YOLOv10 on a Jetson device.
🔥 Highlights:
- **Yolov10** is a state-of-the-art real-time object detection model. 🚀🔍
- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨
- **Jetson** is a powerful AI hardware platform for edge computing. 💻
🛠️ Follow the tutorial below to quickly experience the performance of YOLOv10 on edge computing devices.
## Get a Jetson Orin Device 🛒
| Device Model | Description | Link |
|--------------|-------------|------|
| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |
| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
## Getting Started
- install **jetson-examples** by pip:
```sh
pip3 install jetson-examples
```
- restart reComputer
```sh
sudo reboot
```
- run yolov10 on jetson in one line:
```sh
reComputer run yolov10
```
- Please visit http://127.0.0.1:7860
## Change Model
This example will automatically download the YOLOv10s model at startup. If you want to try different models, please use the following command to download the model and then select the appropriate model through the WebUI.
> **Note:** You can also download the model via a browser and copy the model to `/home/$USER/reComputer/yolov10/weights`.
| Model | Download Command |
| :------------: | :----------------------: |
| [YOLOv10-N](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10n.pt) | `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10n.pt` |
| [YOLOv10-S](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10s.pt) | `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10s.pt` |
| [YOLOv10-M](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10m.pt) | `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10m.pt` |
| [YOLOv10-B](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10b.pt) | `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10b.pt` |
| [YOLOv10-L](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10l.pt) | `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10l.pt` |
| [YOLOv10-X](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10x.pt) | `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10x.pt` |
## Build Docker Image
Our provided container is built based on `jetson-containers`. This example provides a Dockerfile, allowing you to build a more suitable container according to your needs.
```sh
sudo docker build -t yolov10-jetson .
```
> **Note:** Additionally, you can train models, test models, and export models within the Docker container environment. For detailed information, please refer to `THU-MIG/yolov10`.
## Reference
- https://github.com/THU-MIG/yolov10
- https://github.com/dusty-nv/jetson-containers
================================================
FILE: reComputer/scripts/yolov10/clean.sh
================================================
#!/bin/bash
# Remove the YOLOv10 demo image and the host-side workspace
# (weights/runs) created by init.sh and run.sh.
sudo docker rmi youjiang9977/yolov10-jetson:5.1.1
sudo rm -rf /home/$USER/reComputer/yolov10
================================================
FILE: reComputer/scripts/yolov10/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 20 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/yolov10/init.sh
================================================
#!/bin/bash
# Check the runtime environment against this example's config.yaml.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

# make dirs
BASE_PATH=/home/$USER/reComputer
sudo mkdir -p "$BASE_PATH/yolov10/weights"
# run.sh mounts "$BASE_PATH/yolov10/runs" into the container, so create
# "runs" here — the previous "run" directory was never mounted.
sudo mkdir -p "$BASE_PATH/yolov10/runs"
echo "create workspace at $BASE_PATH/yolov10"

# download models
echo "download yolov10 models"
WEIGHTS_FILE="$BASE_PATH/yolov10/weights/yolov10s.pt"
if [ ! -f "$WEIGHTS_FILE" ]; then
    sudo wget -P "$BASE_PATH/yolov10/weights" https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10s.pt
else
    echo "Weights file already exists: $WEIGHTS_FILE"
fi
================================================
FILE: reComputer/scripts/yolov10/run.sh
================================================
#!/bin/bash
# Launch the YOLOv10 demo container (Gradio WebUI; see README: port 7860).
# --rm: throwaway container — weights and run outputs persist on the host
# through the two volume mounts below.
sudo docker run -it --rm --net=host --runtime nvidia \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /home/$USER/reComputer/yolov10/weights:/opt/yolov10/weights \
-v /home/$USER/reComputer/yolov10/runs:/opt/yolov10/runs \
youjiang9977/yolov10-jetson:5.1.1
================================================
FILE: reComputer/scripts/yolov8-rail-inspection/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 20 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/yolov8-rail-inspection/init.sh
================================================
#!/bin/bash
# Check the runtime environment against this example's config.yaml.
# Quote the command substitutions so a script path containing spaces
# does not undergo word splitting.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/yolov8-rail-inspection/readme.md
================================================
# Abstract
This project harnesses YOLOv8 technology, specifically tailored for precise identification and counting of bolts at fixed distances along a designated track, as well as for estimating odometer readings and vehicle speed calculations. It incorporates a test video stored within the ```/video``` directory of a Docker container, with the outcomes of these tests saved in the ```/result``` directory, subsequently relayed to the host machine's home directory via Docker mechanisms. Furthermore, the system offers real-time visualization of these processes through a WebUI accessible at ```http://127.0.0.1:5000``` within the local network.
## Install
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Quickstart
```sh
reComputer run yolov8-rail-inspection
```
## Note
The display feature of the WebUI is experimental. Opening the WebUI visualization requires waiting for loading time of less than one minute. Optimization for this issue will be addressed in future updates.
## FAQs
1. The project has been tested on the Jetson Orin platform, and its execution entails the use of Docker; therefore, it is essential to ensure that all necessary Docker components are fully installed and functional.
2. During program execution, you may encounter an ```ERROR: Could not open requirements file.``` This error message does not impact the normal operation of the program and can be safely ignored.
3. The ultimate visualization of the results is presented through a web interface. Upon executing the command ```reComputer run yolov8-rail-inspection```, the terminal will output the URL for the visualization webpage. Upon clicking the link, you may need to wait a few seconds for the program to initialize and commence operation.
================================================
FILE: reComputer/scripts/yolov8-rail-inspection/run.sh
================================================
#!/bin/bash
# Run the YOLOv8 rail (bolt) inspection demo; results are copied back to
# the user's home directory.
docker pull yaohui1998/bolt_inspection:1.0
if [ "$(docker ps -aq -f name=yolov8_rain_inspection)" ]; then
    echo "Found existing container named yolov8_rain_inspection. Executing Python script inside the container..."
    docker start yolov8_rain_inspection
    docker exec yolov8_rain_inspection python3 bolt_inspection.py
    # Copy the inspection output out of the container into $HOME.
    docker cp yolov8_rain_inspection:/usr/src/ultralytics/Jetson-example/result/ ~/
else
    # Fixed stale message: it previously referred to a container named
    # "counter" left over from another example.
    echo "No existing container named yolov8_rain_inspection found. Pulling image and running container..."
    # NOTE(review): "--device=/dev/*:/dev/*" relies on the glob not matching
    # (no path contains ":"), so docker receives the literal pattern; the
    # container is already --privileged, which grants device access.
    docker run -it --rm --network host \
        --ipc=host \
        --runtime=nvidia \
        -v /tmp/.X11-unix:/tmp/.X11-unix:ro \
        -v /home:/home \
        -e DISPLAY=:0 \
        --privileged \
        --name yolov8_rain_inspection \
        --device=/dev/*:/dev/* \
        yaohui1998/bolt_inspection:1.0
fi
================================================
FILE: setup.py
================================================
from pathlib import Path
from setuptools import setup
# The PyPI long description is taken verbatim from the repository README.
README_PATH = Path(__file__).parent / "README.md"
LONG_DESCRIPTION = README_PATH.read_text(encoding="utf-8")
PACKAGE_ROOT = Path(__file__).parent / "reComputer"


def package_files(root: Path):
    """Collect package-data paths under *root*, relative to PACKAGE_ROOT.

    Walks the tree recursively, skipping anything inside a ``__pycache__``
    directory and compiled bytecode files, and returns a sorted list of
    POSIX-style relative path strings.
    """

    def _is_wanted(candidate: Path) -> bool:
        # Only regular files that are not cache or bytecode artifacts.
        return (
            candidate.is_file()
            and "__pycache__" not in candidate.parts
            and candidate.suffix not in {".pyc", ".pyo"}
        )

    return sorted(
        candidate.relative_to(PACKAGE_ROOT).as_posix()
        for candidate in root.rglob("*")
        if _is_wanted(candidate)
    )
# Build/packaging metadata for the jetson-examples distribution.
setup(
    name="jetson-examples",
    version="0.2.5",
    author="luozhixin",
    author_email="zhixin.luo@seeed.cc",
    description="Running Gen AI models and applications on NVIDIA Jetson devices with one-line command",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    python_requires=">=3.8",
    keywords=[
        "llama",
        "llava",
        "gpt",
        "llm",
        "nvidia",
        "jetson",
        "multimodal",
        "jetson orin",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=["reComputer"],
    include_package_data=True,
    # Ship every file under reComputer/scripts as package data so the
    # installed CLI can locate and execute the per-example shell scripts.
    package_data={"reComputer": package_files(PACKAGE_ROOT / "scripts")},
    # Installs the `reComputer` console command, dispatching to main.run_script.
    entry_points={
        "console_scripts": [
            "reComputer=reComputer.main:run_script",
        ]
    },
    project_urls={
        "Homepage": "https://github.com/Seeed-Projects/jetson-examples",
        "Issues": "https://github.com/Seeed-Projects/jetson-examples/issues",
    },
)