Showing preview only (391K chars total). Download the full file or copy to clipboard to get everything.
Repository: Seeed-Projects/jetson-examples
Branch: main
Commit: 0a79b9978d5e
Files: 177
Total size: 348.5 KB
Directory structure:
gitextract_qsc773b7/
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── build.sh
├── docs/
│ ├── develop.md
│ ├── examples.md
│ ├── install.md
│ └── publish.md
├── install.sh
├── pyproject.toml
├── reComputer/
│ ├── __init__.py
│ ├── main.py
│ └── scripts/
│ ├── MoveNet-Lightning/
│ │ ├── clean.sh
│ │ ├── getVersion.sh
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── MoveNet-Thunder/
│ │ ├── clean.sh
│ │ ├── getVersion.sh
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── MoveNetJS/
│ │ ├── clean.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── Sheared-LLaMA-2.7B-ShareGPT/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── audiocraft/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── check.sh
│ ├── clean.sh
│ ├── comfyui/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── deep-live-cam/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── depth-anything/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── depth-anything-v2/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── depth-anything-v3/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── gpt-oss/
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── live-llava/
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llama-factory/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llama3/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llama3.2/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llava/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llava-v1.5-7b/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── llava-v1.6-vicuna-7b/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── nanodb/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── nanoowl/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── nvblox/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config/
│ │ │ ├── orbbec_stereo_capability_probe.yaml
│ │ │ └── orbbec_vslam_mobile.yaml
│ │ ├── config.yaml
│ │ ├── docker/
│ │ │ ├── Dockerfile.nvblox_orbbec
│ │ │ ├── launch_nvblox.sh
│ │ │ └── prepare_container_workspace.sh
│ │ ├── host/
│ │ │ └── orbbec_mobile_host.launch.py
│ │ ├── init.sh
│ │ ├── lib/
│ │ │ └── common.sh
│ │ ├── onedrive_downloader.py
│ │ ├── run.sh
│ │ ├── scripts/
│ │ │ ├── debug_runtime_connectivity.sh
│ │ │ ├── preflight.sh
│ │ │ ├── prepare_container.sh
│ │ │ ├── prepare_host.sh
│ │ │ └── run_demo.sh
│ │ └── start_nvblox_demo.sh
│ ├── ollama/
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── parler-tts/
│ │ ├── clean.sh
│ │ ├── getVersion.sh
│ │ ├── init.sh
│ │ ├── readme.md
│ │ └── run.sh
│ ├── qwen3.5-4b/
│ │ ├── Dockerfile.jetson
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── ros1-jp6/
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── run.sh
│ ├── stable-diffusion-webui/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── text-generation-webui/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── ultralytics-yolo/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── update.sh
│ ├── utils.sh
│ ├── whisper/
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ ├── yolov10/
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── clean.sh
│ │ ├── config.yaml
│ │ ├── init.sh
│ │ └── run.sh
│ └── yolov8-rail-inspection/
│ ├── config.yaml
│ ├── init.sh
│ ├── readme.md
│ └── run.sh
└── setup.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
.github/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
!reComputer/scripts/nvblox/lib/
!reComputer/scripts/nvblox/lib/**
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2024 luozhixin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: MANIFEST.in
================================================
recursive-include reComputer/scripts *
global-exclude __pycache__
global-exclude *.pyc *.pyo
global-exclude *.png *.jpg *.jpeg *.gif *.bmp
================================================
FILE: README.md
================================================
# jetson-examples
<div align="center">
<img alt="jetson" width="1200px" src="https://files.seeedstudio.com/wiki/reComputer-Jetson/jetson-examples/Jetson1200x300.png">
</div>
[Join our Discord](https://discord.gg/5BQCkty7vN)
This repository provides examples for running AI models and applications on [NVIDIA Jetson devices](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) with a single command.
This repo builds upon the work of the [jetson-containers](https://github.com/dusty-nv/jetson-containers), [ultralytics](https://github.com/ultralytics/ultralytics) and other excellent projects.
## Features
- 🚀 **Easy Deployment:** Deploy state-of-the-art AI models on Jetson devices in one line.
- 🔄 **Versatile Examples:** Supports text generation, image generation, computer vision and so on.
- ⚡ **Optimized for Jetson:** Leverages Nvidia Jetson hardware for efficient performance.
## Install
To install the package, run:
```sh
pip3 install jetson-examples
```
> Notes:
> - Check [here](./docs/install.md) for more installation methods
> - To upgrade to the latest version, use: `pip3 install jetson-examples --upgrade`.
## Quickstart
To run and chat with [LLaVA](https://www.jetson-ai-lab.com/tutorial_llava.html), execute:
```sh
reComputer run llava
```
<div align="center">
<img alt="jetson" width="1200px" src="./docs/assets/llava.png">
</div>
## Example list
Here are some examples that can be run:
| Example | Type | Model/Data Size | Docker Image Size | Command | Supported JetPack |
| ------------------------------------------------ | ------------------------ | --------------- | ---------- | --------------------------------------- | ------------------------------------------------ |
| 🆕 [Ultralytics-yolo](/reComputer/scripts/ultralytics-yolo/README.md) | Computer Vision | | 15.4GB | `reComputer run ultralytics-yolo` | 4.6, 5.1.1, 5.1.2, 5.1.3, 6.0, 6.1, 6.2 |
| 🆕 [Deep-Live-Cam](/reComputer/scripts/deep-live-cam/README.md) | Face-swapping | 0.5GB | 20GB | `reComputer run deep-live-cam` | 6.0 |
| 🆕 llama-factory | Finetune LLM | | 13.5GB | `reComputer run llama-factory` | 5.1.1, 5.1.2, 5.1.3 |
| 🆕 [ComfyUI](/reComputer/scripts/comfyui/README.md) |Computer Vision | | 20GB | `reComputer run comfyui` | 5.1.1, 5.1.2, 5.1.3 |
| [Depth-Anything-V2](/reComputer/scripts/depth-anything-v2/README.md) |Computer Vision | | 15GB | `reComputer run depth-anything-v2` | 5.1.1, 5.1.2, 5.1.3 |
| [Depth-Anything-V3](/reComputer/scripts/depth-anything-v3/README.md) |Computer Vision | | 7.6GB | `reComputer run depth-anything-v3` | 6.1, 6.2, 6.2.1 |
| 🆕 [Qwen3.5-4B](/reComputer/scripts/qwen3.5-4b/README.md) | Text (LLM) | 2.5GB | 0.2GB | `reComputer run qwen3.5-4b` | 6.1, 6.2, 6.2.1 |
| [Depth-Anything](/reComputer/scripts/depth-anything/README.md) |Computer Vision | | 12.9GB | `reComputer run depth-anything` | 5.1.1, 5.1.2, 5.1.3 |
| [Yolov10](/reComputer/scripts/yolov10/README.md) | Computer Vision | 7.2M | 5.74 GB | `reComputer run yolov10` | 5.1.1, 5.1.2, 5.1.3, 6.0 |
| Llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` | 5.1.1, 5.1.2, 5.1.3, 6.0 |
| [gpt-oss](/reComputer/scripts/gpt-oss/README.md) | Text (LLM) | 39GB | 31.28GB | `reComputer run gpt-oss` | 6.1, 6.2, 6.2.1 |
| [ros1-jp6](/reComputer/scripts/ros1-jp6/README.md) | Robotics / ROS 1 | * | 1.27GB | `reComputer run ros1-jp6` | 6.1, 6.2, 6.2.1 |
| [nvblox](/reComputer/scripts/nvblox/README.md) | Robotics / Mapping | * | 20.5GB+ | `reComputer run nvblox` | 6.x |
> Note: You need enough free disk space to run an example — `LLaVA`, for instance, requires at least `27.4GB` in total
More Examples can be found [examples.md](./docs/examples.md)
## Calling Contributors Join Us!
### How to work with us?
Want to add your own example? Check out the [development guide](./docs/develop.md).
We welcome contributions to improve jetson-examples! If you have an example you'd like to share, please submit a pull request. Thank you to all of our contributors! 🙏
This open call is listed in our [Contributor Project](https://github.com/orgs/Seeed-Studio/projects/6/views/1?filterQuery=jetson&pane=issue&itemId=64891723). If this is your first time joining us, [click here](https://github.com/orgs/Seeed-Studio/projects/6/views/1?pane=issue&itemId=30957479) to learn how the project works. We follow the steps with:
- Assignments: We offer a variety of assignments to enhance wiki content, each with a detailed description.
- Submission: Contributors can submit their content via a Pull Request after completing the assignments.
- Review: Maintainers will merge the submission and record the contributions.
**Contributors receive a $250 cash bonus as a token of appreciation.**
For any questions or further information, feel free to reach out via the GitHub issues page or contact edgeai@seeed.cc
## TODO List
- [ ] detect host environment and install what we need
- [ ] all type jetson support checking list
- [ ] try jetpack 6.0
- [ ] check disk space enough or not before run
- [ ] allow to setting some configs, such as `BASE_PATH`
- [ ] support jetson-containers update
- [ ] better table to show example's difference
### 👥 Contributors
<p align="center"><a href="https://github.com/Seeed-Projects/jetson-examples/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=Seeed-Projects/jetson-examples" />
</a></p>
## License
This project is licensed under the MIT License.
## Resources
- https://github.com/dusty-nv/jetson-containers
- https://www.jetson-ai-lab.com/
- https://www.ultralytics.com/
================================================
FILE: build.sh
================================================
#!/bin/bash
# Local release helper: reinstall the package, then optionally build a wheel
# and publish it to Test PyPI and/or production PyPI.

# 1 try clean older version
pip uninstall jetson-examples -y

# 2 clean last build files
rm -rf build/

# 3 install latest version
pip install .

# 5 build whl
read -p "build whl ? (y/n): " choice
case "$choice" in
    y | Y)
        python3 -m pip install --upgrade build
        echo "building..."
        rm -rf dist/
        python3 -m build
        echo "build done."
        ;;
    *)
        echo "skip build."
        ;;
esac

# 6 publish to Test PyPI
read -p "publish to test PyPI ? (y/n): " choice
case "$choice" in
    y | Y)
        python3 -m pip install --upgrade twine
        keyring --disable # https://github.com/pypa/twine/issues/847
        echo "publishing to Test PyPI..."
        python3 -m twine upload --repository testpypi dist/*
        ;;
    *)
        echo "skip publish."
        ;;
esac

# 7 publish to PyPI
read -p "[Danger!!] publish to PyPI ? (confirm/*): " choice
case "$choice" in
    confirm | CONFIRM)
        python3 -m pip install --upgrade twine
        keyring --disable # https://twine.readthedocs.io/en/stable/#disabling-keyring
        echo "publishing to Prod PyPI..."
        python3 -m twine upload --repository pypi dist/*
        ;;
    *)
        echo "skip publish."
        ;;
esac

echo 'clean & build & publish ok.'
================================================
FILE: docs/develop.md
================================================
# Develop
This section provides guidance on how to contribute to the `jetson-examples` repository. It is highly recommended to develop and run your project on a Jetson device for the best experience.
## 0. Preparation
Follow these steps to get started:
```sh
# Clone the repository
git clone https://github.com/Seeed-Projects/jetson-examples.git
# Navigate to the repository
cd jetson-examples
# Install in 'develop mode'
pip install .
# Test the installed module
reComputer check
# If everything is okay, you should see the following output:
# Docker version...
# Python 3...
# ...
```
## 1. Project Structure
The project is structured as follows:
- `docs/`: This directory contains the project's documents.
- `assets/`: This directory contains document assets, such as images.
- `reComputer/`: This is the main directory of the Python module.
- `__init__.py`: This file is the initialization file for the Python module.
- `main.py`: This file contains the main logic code for the Python module.
- `scripts/`: This directory is used to store examples.
- `xxxxx/`: This is an example directory. Everything inside this directory will be installed into the system. You can save files of any type, such as images, Python scripts, executable files, etc.
- `init.sh`: **(optional)** This is the example init script. To initialize the project's initial data and environment.
- `run.sh`: **(MOST IMPORTANT)** This is the example startup script. It is the only entry point for your project.
- `readme.md`: **(optional)** This file provides an introduction to the example.
- `check.sh`: This is the checking script **(Not Finished yet)**.
- `run.sh`: This is the common startup script for examples.
- `install.sh`: This script uses `curl` and `github` to install `jetson-examples`.
- `pyproject.toml`: This file contains information on how to build and install `jetson-examples`.
## 2. Create Your Project
<img src="assets/lifetime.png" width="500px">
Follow these steps to create an `example` in this project:
```sh
# 1 Declare your project name as an environment variable
my_project=hello-world
# 2 Create a directory for your project
mkdir -p reComputer/scripts/$my_project
# 3 [required] Create the run.sh file
echo "echo 'hello world'" > reComputer/scripts/$my_project/run.sh
# 4 [option] Create the readme.md file
echo -e "# hello-world\n\n- Print \`hello-world\` to show how to add your project to this package" > reComputer/scripts/$my_project/readme.md
# 5 [option] Create the init.sh file
echo "echo 'init env'" > reComputer/scripts/$my_project/init.sh
# 6 [option] Create the clean.sh file
echo "echo 'clean data'" > reComputer/scripts/$my_project/clean.sh
```
After completing these steps, you should see the file changes as shown in the image below:

If you are familiar with creating and editing directories or files, you can use your preferred method.
## 3. Edit `$my_project/run.sh` to Customize Your Project
Use your preferred IDE (e.g., Vim, VS Code) to edit `reComputer/scripts/$my_project/run.sh` and add the desired functionality:
```sh
# Inside reComputer/scripts/$my_project/run.sh
echo 'hello world'
# TODO: Add code to achieve your desired functionality
# ...
```
## 4. Test Your Project
To test your project, follow these steps:
```sh
# Reinstall to make your new project work with `reComputer`
pip install .
# Run your new project with a one-line command
reComputer run hello-world
# INFO: Machine [Jetson AGX Orin] confirmed...
# Running example: hello-world
# ---- Example initialization ----
# jetson-ai-lab existed.
# ---- Example start ----
# hello world
# ---- Example done ----
```
## 5. (Optional) Add a `readme.md` File
If you want to provide additional information about your project, you can add a `readme.md` file. Use your preferred IDE to edit `reComputer/scripts/$my_project/readme.md`:
```sh
# hello-world
- Print hello-world to show how to add your project to this package
```
## 6. (Optional) Submit a New Pull Request
If you wish to contribute your project to the `jetson-examples` repository, you can follow these steps:
- 5.1 Fork this project.
- 5.2 Create a new branch in your project.
- 5.3 Commit the changes you made.
- 5.4 Push the changes to your project.
- 5.5 Create a pull request (`origin-git-repo/main <- your-git-repo/newbranch`) at [https://github.com/Seeed-Projects/jetson-examples/pulls](https://github.com/Seeed-Projects/jetson-examples/pulls).
- 5.6 Wait for a code review.
- 5.7 Once your code passes the review, it will be merged.
- 5.8 Thank you for your contribution!
================================================
FILE: docs/examples.md
================================================
# Example list
All examples that can be run:
| Example | Type | Model Size | Image Size | Command | Device |
| ------------------------------------------------ | ------------------------ | ---------- | ---------- | -------------------------------------------- | -------- |
| whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` | USB-CAM* |
| [yolov8-rail-inspection](/reComputer/scripts/yolov8-rail-inspection/readme.md) |Computer Vision(CV) | 6M | 13.8GB | `reComputer run yolov8-rail-inspection` | |
| [ultralytics-yolo](/reComputer/scripts/ultralytics-yolo/README.md) |Computer Vision(CV) | * | 15.4GB | `reComputer run ultralytics-yolo` | |
| [depth-anything](/reComputer/scripts/depth-anything/README.md) |Computer Vision(CV) | * | 12.9GB | `reComputer run depth-anything` | |
| [depth-anything-v3](/reComputer/scripts/depth-anything-v3/README.md) |Computer Vision(CV) | * | 7.6GB | `reComputer run depth-anything-v3` | |
| [qwen3.5-4b](/reComputer/scripts/qwen3.5-4b/README.md) | Text (LLM) | 2.5GB | * | `reComputer run qwen3.5-4b` | |
| [yolov10](/reComputer/scripts/yolov10/README.md) | Computer Vision(CV) | 7.2M | 5.74 GB | `reComputer run yolov10` | |
| text-generation-webui | Text (LLM) | 3.9GB | 14.8GB | `reComputer run text-generation-webui` | |
| llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` | |
| [gpt-oss](/reComputer/scripts/gpt-oss/README.md) | Text (LLM) | * | 31.28GB | `reComputer run gpt-oss` | |
| [ros1-jp6](/reComputer/scripts/ros1-jp6/README.md) | Robotics / ROS 1 | * | 1.27GB | `reComputer run ros1-jp6` | |
| [nvblox](/reComputer/scripts/nvblox/README.md) | Robotics / Mapping | * | 20.5GB+ | `reComputer run nvblox` | Gemini2 |
| Sheared-LLaMA | Text (LLM) | 1.5GB | 10.5GB | `reComputer run Sheared-LLaMA-2.7B-ShareGPT` | |
| llava-v1.5 | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava-v1.5-7b` | |
| llava-v1.6 | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run llava-v1.6-vicuna-7b` | |
| LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` | |
| Live LLaVA | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run live-llava` | USB-CAM* |
| stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` | |
| nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` | USB-CAM* |
| [nanodb](../reComputer/scripts/nanodb/readme.md) | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` | |
| [ollama](https://github.com/ollama/ollama) | Inference Server | * | 10.5GB | `reComputer run ollama` | |
| [TensorFlow MoveNet Thunder](/reComputer/scripts/MoveNet-Thunder/readme.md) |Computer Vision | | 7.7GB | `reComputer run MoveNet-Thunder` | USB-CAM*
| [TensorFlow MoveNet Lightning](/reComputer/scripts/MoveNet-Lightning/readme.md) |Computer Vision | | 7.48GB | `reComputer run MoveNet-Lightning` | USB-CAM*
| [TensorFlow MoveNet JS](/reComputer/scripts/MoveNetJS/readme.md) |Computer Vision | | 56.21MB | `reComputer run MoveNetJS` | USB-CAM*
| [Parler-TTS mini: expresso](/reComputer/scripts/parler-tts/readme.md) |Audio | | 6.9GB | `reComputer run parler-tts` |
================================================
FILE: docs/install.md
================================================
# Install
- use the way you like to install
## PyPI(recommend)
```sh
pip install jetson-examples
```
## Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
## Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
================================================
FILE: docs/publish.md
================================================
# publish
## pypi.org
```sh
# tools update
python3 -m pip install --upgrade build
python3 -m pip install --upgrade twine
```
### Test
```sh
# 1 build
python3 -m build
# 2 publish
python3 -m twine upload --repository testpypi dist/*
### WARNING: do not share your API token !!
# 3 test
pip install -i https://test.pypi.org/simple/ jetson-examples
### make sure version number right
```
### Prod
```sh
# 1 build
python3 -m build
# 2 publish
python3 -m twine upload --repository pypi dist/*
### WARNING: do not share your API token !!
# 3 test
pip install jetson-examples --upgrade
### make sure version number right
```
================================================
FILE: install.sh
================================================
#!/bin/bash
# TODO: make sure python3 in host is OK
# Fetch the repository into /tmp and install it; stop at the first failing step.
set -e
cd /tmp
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
echo "reComputer installed. try 'reComputer run whisper' to enjoy!"
================================================
FILE: pyproject.toml
================================================
[build-system]
requires = ["setuptools>=61.0.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "jetson-examples"
version = "0.2.5"
authors = [{ name = "luozhixin", email = "zhixin.luo@seeed.cc" }]
description = "Running Gen AI models and applications on NVIDIA Jetson devices with one-line command"
readme = "README.md"
requires-python = ">=3.8"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
keywords = [
"llama",
"llava",
"gpt",
"llm",
"nvidia",
"jetson",
"multimodal",
"jetson orin",
]
[project.scripts]
reComputer = "reComputer.main:run_script"
[project.urls]
Homepage = "https://github.com/Seeed-Projects/jetson-examples"
Issues = "https://github.com/Seeed-Projects/jetson-examples/issues"
# Tools settings -------------------------------------------------------------------------------------------------------
[tool.setuptools.packages.find]
where = ["."]
include = ["reComputer"]
[tool.setuptools]
include-package-data = true
[tool.setuptools.package-data]
"reComputer" = ["scripts/**/*"]
================================================
FILE: reComputer/__init__.py
================================================
__version__ = "0.1.3"
================================================
FILE: reComputer/main.py
================================================
import os
import subprocess
import sys
from pathlib import Path
def scripts_roots():
    """Return candidate `scripts` directories in priority order, deduplicated.

    Order: the current working directory's checkout first, then (if the
    JETSON_EXAMPLES_SOURCE environment variable is set) that source tree,
    then the installed package's own `scripts` directory.
    """
    pkg_root = Path(__file__).resolve().parent
    candidates = [
        Path.cwd() / "reComputer" / "scripts",
        pkg_root / "scripts",
    ]
    source_hint = os.environ.get("JETSON_EXAMPLES_SOURCE")
    if source_hint:
        hint_root = Path(source_hint).expanduser().resolve()
        candidates.insert(1, hint_root / "reComputer" / "scripts")
    # Deduplicate by string path while preserving priority order.
    ordered = {}
    for candidate in candidates:
        ordered.setdefault(str(candidate), candidate)
    return list(ordered.values())
def scripts_root():
    """Return the first candidate scripts directory that exists on disk.

    Falls back to the highest-priority candidate path even when no candidate
    directory exists.
    """
    existing = [root for root in scripts_roots() if root.is_dir()]
    if existing:
        return str(existing[0])
    # fallback to package default path
    return str(scripts_roots()[0])
def path_of_script(name):
    """Resolve helper script `name` across candidate roots.

    Returns the first existing match; if none exists, returns the path it
    would have under the highest-priority root.
    """
    roots = scripts_roots()
    for candidate in (root / name for root in roots):
        if candidate.exists():
            return str(candidate)
    return str(roots[0] / name)
def list_all_examples(folder_path):
    """Return the names of the example directories directly under `folder_path`.

    Only directories count as examples; regular files are ignored. The result
    is sorted alphabetically so that `reComputer list` output is deterministic
    (os.listdir order is filesystem-dependent).
    """
    return sorted(
        item
        for item in os.listdir(folder_path)
        if os.path.isdir(os.path.join(folder_path, item))
    )
def run_script():
    """CLI entry point: dispatch `reComputer <command> [example]` to shell helpers."""
    argv = sys.argv
    argc = len(argv)
    if argc == 3:
        command, example_name = argv[1], argv[2]
        if command == "run":
            # TODO: maybe use python instead of shell is better
            subprocess.run(["bash", path_of_script("run.sh"), example_name])
        elif command == "clean":
            subprocess.run(["bash", path_of_script("clean.sh"), example_name])
        else:
            print("Only Support `run` or `clean` for now. try `reComputer run llava` .")
    elif argc == 2:
        command = argv[1]
        if command == "check":
            subprocess.run(["bash", path_of_script("check.sh")])
        elif command == "update":
            subprocess.run(["bash", path_of_script("update.sh")])
        elif command == "list":
            print("example list:")
            for index, directory in enumerate(list_all_examples(scripts_root()), start=1):
                print("{:03d}".format(index), "|", directory)
            print("-end-")
        else:
            print("reComputer help:")
            print("---")
            print("`reComputer check` | check system.")
            print("`reComputer update` | update jetson-ai-lab.")
            print("`reComputer list` | list all examples.")
            print("`reComputer run xxx` | run an example.")
            print("`reComputer clean xxx` | clean an example's data.")
            print("---")
    else:
        print("Error Usage! try `reComputer help`.")


if __name__ == "__main__":
    pass
================================================
FILE: reComputer/scripts/MoveNet-Lightning/clean.sh
================================================
#!/bin/bash
# Remove the MoveNet Lightning docker image pulled by run.sh.
# getVersion.sh exports IMAGE_TAG; source it relative to this script's own
# directory so the cleanup works regardless of the caller's CWD.
source "$(dirname "${BASH_SOURCE[0]}")/getVersion.sh"
# remove docker image — the repository name must match what run.sh pulls
# (feiticeir0/movenet-lightning; the original "feiticeir0/movenet" never matched)
sudo docker rmi feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNet-Lightning/getVersion.sh
================================================
#!/bin/bash
# Detect the L4T (Jetson Linux) release and export IMAGE_TAG for the demo images.
# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh
# and llama-factory init script
# we only have images for these - 36.2.0 works on 36.3.0
# NOTE: bash array elements are whitespace-separated; the original comma-separated
# list made every element except the last carry a trailing ",", so the
# allowed-version check below failed for 35.3.1 / 35.4.1 / 36.2.0.
L4T_VERSIONS=("35.3.1" "35.4.1" "36.2.0" "36.3.0")
ARCH=$(uname -i)
if [ "$ARCH" = "aarch64" ]; then
    L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
    if [ -z "$L4T_VERSION_STRING" ]; then
        # /etc/nv_tegra_release missing or empty: read the version from dpkg
        L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
        L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
        L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
        L4T_REVISION=${L4T_VERSION_ARRAY[1]}
    else
        # parse "# R35 (release), REVISION: 4.1, ..." from /etc/nv_tegra_release
        L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
        L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
    fi
    L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
    L4T_REVISION_MINOR=${L4T_REVISION:2:1}
    L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
    IMAGE_TAG=$L4T_VERSION
elif [ "$ARCH" != "x86_64" ]; then
    echo "unsupported architecture: $ARCH"
    exit 1
fi
# abort when the detected version has no matching docker image
if [[ ! " ${L4T_VERSIONS[@]} " =~ " ${L4T_VERSION} " ]]; then
    echo "L4T_VERSION is not in the allowed versions list. Exiting."
    exit 1
fi
# all r36.x devices share the 36.2.0 image tag
if [ "${L4T_RELEASE}" -eq "36" ]; then
    # image tag will be 2.0
    IMAGE_TAG="36.2.0"
fi
================================================
FILE: reComputer/scripts/MoveNet-Lightning/init.sh
================================================
#!/bin/bash
# Example init hook: grant local docker containers access to the host X server,
# so run.sh's containerized GUI window can be displayed.
# Let's allow connections
xhost +local:docker
================================================
FILE: reComputer/scripts/MoveNet-Lightning/readme.md
================================================
# MoveNet
MoveNet is an ultra-fast and accurate pose detection model.
We're demonstrating here using reComputer J402 and with MoveNet Lightning version

You can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Webcam connected to reComputer
* Graphical desktop
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
1. Type the following command in a terminal
```bash
reComputer run MoveNet-Lightning
```
2. Start moving in front of the camera
================================================
FILE: reComputer/scripts/MoveNet-Lightning/run.sh
================================================
#!/bin/bash
# Pull and run the MoveNet Lightning demo container with webcam and X11 access.
# get L4T version: getVersion.sh exports IMAGE_TAG; source it from this
# script's own directory so the script works from any CWD.
source "$(dirname "${BASH_SOURCE[0]}")/getVersion.sh"
# pull docker image (the original had a stray trailing '"' after the tag,
# which broke the shell syntax)
docker pull feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}
docker run \
    -e DISPLAY=$DISPLAY \
    --runtime=nvidia \
    --rm \
    --device /dev/video0 \
    -v /tmp/.X11-unix:/tmp/.X11-unix \
    feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNet-Thunder/clean.sh
================================================
#!/bin/bash
# Remove the MoveNet Thunder docker image.
# getVersion.sh exports IMAGE_TAG; source it relative to this script's own
# directory so the cleanup works regardless of the caller's CWD.
source "$(dirname "${BASH_SOURCE[0]}")/getVersion.sh"
# remove docker image — the original referenced the undefined ${TAG_IMAGE},
# which expanded empty and produced an invalid image reference.
# TODO(review): confirm the repository name matches MoveNet-Thunder/run.sh
# (likely feiticeir0/movenet-thunder, as Lightning uses movenet-lightning).
sudo docker rmi feiticeir0/movenet:tf2-${IMAGE_TAG}
================================================
FILE: reComputer/scripts/MoveNet-Thunder/getVersion.sh
================================================
#!/bin/bash
# Detect the L4T (Jetson Linux) release and export IMAGE_TAG for the demo images.
# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh
# and llama-factory init script
# we only have images for these - 36.2.0 works on 36.3.0
# NOTE: bash array elements are whitespace-separated; the original comma-separated
# list made every element except the last carry a trailing ",", so the
# allowed-version check below failed for 35.3.1 / 35.4.1 / 36.2.0.
L4T_VERSIONS=("35.3.1" "35.4.1" "36.2.0" "36.3.0")
ARCH=$(uname -i)
if [ "$ARCH" = "aarch64" ]; then
    L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
    if [ -z "$L4T_VERSION_STRING" ]; then
        # /etc/nv_tegra_release missing or empty: read the version from dpkg
        L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
        L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
        L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
        L4T_REVISION=${L4T_VERSION_ARRAY[1]}
    else
        # parse "# R35 (release), REVISION: 4.1, ..." from /etc/nv_tegra_release
        L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
        L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
    fi
    L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
    L4T_REVISION_MINOR=${L4T_REVISION:2:1}
    L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
    IMAGE_TAG=$L4T_VERSION
elif [ "$ARCH" != "x86_64" ]; then
    echo "unsupported architecture: $ARCH"
    exit 1
fi
# abort when the detected version has no matching docker image
if [[ ! " ${L4T_VERSIONS[@]} " =~ " ${L4T_VERSION} " ]]; then
    echo "L4T_VERSION is not in the allowed versions list. Exiting."
    exit 1
fi
# all r36.x devices share the 36.2.0 image tag
if [ "${L4T_RELEASE}" -eq "36" ]; then
    # image tag will be 2.0
    IMAGE_TAG="36.2.0"
fi
================================================
FILE: reComputer/scripts/MoveNet-Thunder/init.sh
================================================
#!/bin/bash
# Grant local docker containers access to the host X server so the demo
# container can open its preview window on the desktop.
xhost +local:docker
================================================
FILE: reComputer/scripts/MoveNet-Thunder/readme.md
================================================
# MoveNet
MoveNet is an ultra-fast and accurate pose detection model.
We're demonstrating here using the reComputer J402 with the MoveNet Thunder version.

You can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Webcam connected to reComputer
* Graphical desktop
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
1. Type the following command in a terminal
```bash
reComputer run MoveNet-Thunder
```
2. Start moving in front of the camera
================================================
FILE: reComputer/scripts/MoveNet-Thunder/run.sh
================================================
#!/bin/bash
# Launch the MoveNet Thunder demo container with webcam and X11 access.
# Sourcing getVersion.sh sets IMAGE_TAG for the current L4T release.
source ./getVersion.sh
IMAGE="feiticeir0/movenet-thunder:tf2-${IMAGE_TAG}"
# Fetch the image, then run it with the NVIDIA runtime, the default webcam
# device, and the X11 socket so the pose window appears on the host desktop.
docker pull "$IMAGE"
docker run \
    -e DISPLAY=$DISPLAY \
    --runtime=nvidia \
    --rm \
    --device /dev/video0 \
    -v /tmp/.X11-unix:/tmp/.X11-unix \
    "$IMAGE"
================================================
FILE: reComputer/scripts/MoveNetJS/clean.sh
================================================
#!/bin/bash
# Delete the MoveNetJS image from the local docker cache.
IMAGE="feiticeir0/movenetjs:latest"
sudo docker rmi "$IMAGE"
================================================
FILE: reComputer/scripts/MoveNetJS/readme.md
================================================
# MoveNet
MoveNet is an ultra-fast and accurate pose detection model.
We're demonstrating here using the reComputer J402.

You can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)
## Getting started
#### Prerequisites
* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
* Webcam connected (one or the other)
* to the reComputer
* the computer you're using (remotely connected to the reComputer)
* Docker installed
## Installation
PyPI (best)
```bash
pip install jetson-examples
```
## Usage
### Method 1
##### If you're running inside your reComputer
1. Type the following command in a terminal
```bash
reComputer run MoveNetJS
```
2. Open a web browser and go to [http://localhost:5000](http://localhost:5000)
3. Give permission to access webcam and wait a few seconds:
1. First will appear the webcam feed
2. Next will appear the lines estimating the pose
4. Start dancing
### Method 2
##### If you want to connect remotely with ssh to the reComputer
1. Connect using SSH but redirecting the 5000 port
```bash
ssh -L 5000:localhost:5000 <username>@<reComputer_IP>
```
2. Type the following command in a terminal
```bash
reComputer run MoveNetJS
```
2. Open a web browser (on your machine) and go to [http://localhost:5000](http://localhost:5000)
3. Give permission to access webcam and wait a few seconds:
1. First will appear the webcam feed
2. Next will appear the lines estimating the pose
4. Start dancing
**note** Firefox may fail showing webcam feed or pose estimation
================================================
FILE: reComputer/scripts/MoveNetJS/run.sh
================================================
#!/bin/bash
# Launch the MoveNetJS web demo on port 5000.
# pull docker image
# Fix: the original ran `docker push`, which uploads the image (and fails
# without registry credentials) instead of downloading it.
docker pull feiticeir0/movenetjs:latest
docker run \
    --rm \
    -p 5000:5000 \
    feiticeir0/movenetjs:latest
================================================
FILE: reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/init.sh
================================================
#!/bin/bash
# Check the runtime environment (L4T version, disk/memory, docker) against
# config.yaml, then install dusty-nv's jetson-containers if it is missing.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    # Guard the cd calls: the original fell through on failure and would have
    # cloned / run install.sh from whatever the current directory happened to be.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 $BASE_JETSON_LAB_GIT
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh
================================================
#!/bin/bash
# Start an interactive chat with Sheared-LLaMA-2.7B-ShareGPT using the
# jetson-containers runner and the MLC backend.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Abort if jetson-containers is not installed, instead of executing ./run.sh
# from an unrelated current directory.
cd "$JETSON_REPO_PATH" || exit 1
./run.sh $(./autotag local_llm) \
    python3 -m local_llm.chat --api=mlc \
    --model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT
================================================
FILE: reComputer/scripts/audiocraft/README.md
================================================
# AudioCraft Deployment on Jetson in One Line
## Hello
💡 In this demo, we refer to jetson-container to deploy audiocraft on Jetson devices. And generate music using a reference example.
🔥 Highlights:
- **Audiocraft** is a tool designed for creating and manipulating audio content. 🎶
- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨
- **Jetson** is powerful AI hardware platform for edge computing.💻
Get your Jetson device ready and customize sounds with me.🚀
## Getting Started
- install **jetson-examples** by pip:
```sh
pip3 install jetson-examples
```
- restart reComputer
```sh
sudo reboot
```
- run audiocraft on jetson in one line:
```sh
reComputer run audiocraft
```
## Reference
- https://github.com/dusty-nv/jetson-containers/tree/master/packages/audio/audiocraft
- https://github.com/facebookresearch/audiocraft
================================================
FILE: reComputer/scripts/audiocraft/clean.sh
================================================
#!/bin/bash
# TODO: clean old container
# Remove the audiocraft image that jetson-containers' autotag resolves
# for this device.
JETSON_REPO_PATH="/home/$USER/reComputer/jetson-containers"
docker rmi $("$JETSON_REPO_PATH/autotag" audiocraft)
================================================
FILE: reComputer/scripts/audiocraft/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/audiocraft/init.sh
================================================
#!/bin/bash
# Check the runtime environment (L4T version, disk/memory, docker) against
# config.yaml, then install dusty-nv's jetson-containers if it is missing.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    # Guard the cd calls: the original fell through on failure and would have
    # cloned / run install.sh from whatever the current directory happened to be.
    cd "$BASE_PATH/" || exit 1
    git clone --depth=1 $BASE_JETSON_LAB_GIT
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/audiocraft/run.sh
================================================
#!/bin/bash
# Launch the audiocraft container via the jetson-containers CLI.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
# NOTE(review): assumes `jetson-containers` and `autotag` are on PATH
# (install.sh run by init.sh presumably sets that up — confirm); sibling
# example scripts invoke ./run.sh and ./autotag from the repo instead.
jetson-containers run $(autotag audiocraft)
================================================
FILE: reComputer/scripts/check.sh
================================================
# Sanity-check the toolchain: each probe must succeed for the next to run.
script_dir=$(dirname "$0")
docker --version &&
    python3 -V &&
    python -V &&
    echo "now we can use more shell in $script_dir"
================================================
FILE: reComputer/scripts/clean.sh
================================================
#!/bin/bash
# Clean an installed example by dispatching to <example>/clean.sh.
# Usage: clean.sh <example-name>

# Refuse to run on hardware that does not look like a Jetson device.
check_is_jetson_or_not() {
    model_file="/proc/device-tree/model"
    if [ -f "$model_file" ]; then
        model=$(tr -d '\0' < "$model_file" | tr '[:upper:]' '[:lower:]')
        if [[ $model =~ jetson|orin|nv|agx ]]; then
            echo "INFO: machine[$model] confirmed..."
        else
            echo "WARNING: machine[$model] maybe not support..."
            exit 1
        fi
    else
        echo "ERROR: machine[$model] not support this..."
        exit 1
    fi
}
check_is_jetson_or_not

echo "clean example:$1"
BASE_PATH=/home/$USER/reComputer
# TODO: ask the user for a second confirmation before cleaning
echo "----clean example start----"
# Fix: the original did `cd $JETSON_REPO_PATH` here, but that variable is never
# set in this script; the cd landed in $HOME and broke the relative
# $script_dir path below whenever the script was invoked via a relative path.
script_dir=$(dirname "$0")
start_script=$script_dir/$1/clean.sh
if [ -f "$start_script" ]; then
    bash "$start_script"
else
    echo "ERROR: Example[$1]/clean.sh Not Found."
fi
echo "----clean example done----"
================================================
FILE: reComputer/scripts/comfyui/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/comfyui/README.md
================================================
# Jetson-Example: Run ComfyUI (Stable Diffusion GUI) on NVIDIA Jetson Orin 🚀
## One-Click Quick Deployment of Plug-and-Play Stable Diffusion GUI
<p align="center">
<img src="images/comfyui.png" alt="comfyui">
</p>
## **Introduction** 📘
[ComfyUI](https://github.com/comfyanonymous/ComfyUI) will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface.
In this project, you can quickly deploy ComfyUI on Nvidia Jetson Orin devices with one click.
## **Key Features**:
- **One-click installation and configuration support for Nvidia Jetson Orin devices.**
- **GPU acceleration to optimize the performance of stable diffusion pipelines.**
- Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/), [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/), [SD3](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) and [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
- [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
- Asynchronous Queue system
- Many optimizations: Only re-executes the parts of the workflow that changes between executions.
- Smart memory management: can automatically run models on GPUs with as low as 1GB vram.
For other features, please refer to the original project [ComfyUI](https://github.com/comfyanonymous/ComfyUI).
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
### Get a Jetson Orin Device 🛒
| Device Model | Description | Link |
|--------------|-------------|------|
| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |
| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
## **Quickstart** ⚡
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### **Installation via PyPI (Recommended)** 🐍
1. Install the package:
```sh
pip install jetson-examples
```
2. Restart your reComputer:
```sh
sudo reboot
```
3. Run ComfyUI with one command:
```sh
reComputer run comfyui
```
- **Input Dir**: Mount the input directory in Docker to the host directory `~/ComfyUI/input`.
- **Output Dir**: Mount the output directory in Docker to the host directory `~/ComfyUI/output`.
- **Models Dir**: Mount the models directory in Docker to the host directory `~/ComfyUI/models`.
## **For more tutorials** 🔧
- [ComfyUI Basic Tutorial VN](https://comfyanonymous.github.io/ComfyUI_tutorial_vn/)
- [ComfyUI](https://github.com/comfyanonymous/ComfyUI)
- [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/)
- [Comfy Org](https://www.comfy.org/)
## **Shortcuts**
| Keybind | Explanation |
|------------------------------------|--------------------------------------------------------------------------------------------------------------------|
| Ctrl + Enter | Queue up current graph for generation |
| Ctrl + Shift + Enter | Queue up current graph as first for generation |
| Ctrl + Z/Ctrl + Y | Undo/Redo |
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
| Alt + C | Collapse/uncollapse selected nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Ctrl + B | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) |
| Delete/Backspace | Delete selected nodes |
| Ctrl + Backspace | Delete the current graph |
| Space | Move the canvas around when held and moving the cursor |
| Ctrl/Shift + Click | Add clicked node to selection |
| Ctrl + C/Ctrl + V | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) |
| Ctrl + C/Ctrl + Shift + V | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |
| Shift + Drag | Move multiple selected nodes at the same time |
| Ctrl + D | Load default graph |
| Alt + `+` | Canvas Zoom in |
| Alt + `-` | Canvas Zoom out |
| Ctrl + Shift + LMB + Vertical drag | Canvas Zoom in/out |
| Q | Toggle visibility of the queue |
| H | Toggle visibility of history |
| R | Refresh graph |
| Double-Click LMB | Open node quick search palette |
## License
This project is licensed under the GNU General Public License v3.0
================================================
FILE: reComputer/scripts/comfyui/clean.sh
================================================
#!/bin/bash
# Stop and remove the ComfyUI container, delete its image, and remove the
# cloned ComfyUI source tree from the user's home directory.
name="comfyui"
image="yaohui1998/comfyui"
sudo docker stop "$name"
sudo docker rm "$name"
sudo docker rmi "$image"
sudo rm -r /home/$USER/reComputer/ComfyUI
================================================
FILE: reComputer/scripts/comfyui/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 30 # in GB
REQUIRED_MEM_SPACE: 15
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/comfyui/init.sh
================================================
#!/bin/bash
# Check the runtime environment (L4T version, disk/memory, docker) against
# config.yaml, then create the working folder used by run.sh.
# Fix: quote the command substitutions so paths containing spaces do not
# word-split when passed to `source` and `mkdir`.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# create folder.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
================================================
FILE: reComputer/scripts/comfyui/run.sh
================================================
#!/bin/bash
# Launch the ComfyUI container, reusing an existing container when present.
CONTAINER_NAME="comfyui"
IMAGE_NAME="yaohui1998/comfyui"
# Pull the latest image
docker pull $IMAGE_NAME
cd /home/$USER/reComputer/ || exit 1
# Fix: clone only on first run; the unconditional `git clone` errored out on
# every subsequent run once the ComfyUI directory already existed.
if [ ! -d ComfyUI ]; then
    git clone https://github.com/comfyanonymous/ComfyUI.git
fi
# Check if the container with the specified name already exists
if [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    docker start $CONTAINER_NAME
    docker exec -it $CONTAINER_NAME /bin/bash
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    # NOTE(review): --rm removes the container on exit, so the "already exists"
    # branch above can only match a container that is still running — confirm
    # whether --rm or the restart logic is the intended behavior.
    docker run -it --rm \
        --name $CONTAINER_NAME \
        --privileged \
        --network host \
        -v /home/$USER/reComputer/ComfyUI:/usr/src/ComfyUI-Seeed \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev/*:/dev/* \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        $IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/deep-live-cam/Dockerfile
================================================
# Base image already bundles the Deep-Live-Cam application and its dependencies.
FROM yaohui1998/deep-live-cam:0.1
WORKDIR /usr/src/Deep-Live-Cam
# Start the app with the CUDA execution provider for GPU inference.
CMD ["python3", "run.py", "--execution-provider", "cuda"]
================================================
FILE: reComputer/scripts/deep-live-cam/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/deep-live-cam/README.md
================================================
# Jetson-Example: Run Deep Live Cam on Seeed Studio NVIDIA AGX Orin Developer Kit 🚀
This project provides a one-click deployment of the Deep Live Cam AI face-swapping project on the [Seeed Studio Jetson AGX Orin Developer Kit](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html), retaining all the features of the [original project](https://github.com/hacksider/Deep-Live-Cam) and supporting functionalities such as image-to-image, image-to-video, and image-to-webcam.
<p align="center">
<img src="images/WebUI.png" alt="WebUI">
</p>
All models and inference engine implemented in this project are from the official [Deep-Live-Cam](https://github.com/hacksider/Deep-Live-Cam).
## Get a Jetson Orin Device 🛒
| Device Model | Link |
|--------------|------|
| Jetson AGX Orin Dev Kit 32G | [Buy Here](https://www.seeedstudio.com/NVIDIA-Jetson-AGX-Orin-Developer-Kit-p-5314.html) |
| Jetson AGX Orin Dev Kit 64G | [Buy Here](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html) |
## New Features 🔥
### Resizable Preview Window
Dynamically improve the performance by using the --resizable parameter

### Face Mapping
Track faces and change it on the fly

source video

Tick this switch

Map the faces

And see the magic!
> The images in the "New Features" section are sourced from the [github community](https://github.com/hacksider/Deep-Live-Cam).
## 🥳Getting Started
### 📜Prerequisites
- AGX Orin Developer Kit [(🛒Buy Here)](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html)
- Jetpack 6.0
- USB Camera (optional)
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### 🚀Installation
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
### 📋Usage
1. Run code:
```sh
reComputer run deep-live-cam
```
2. An `image` folder will be created in the user's home directory, where templates and the face images or videos that need to be swapped can be placed.
3. Click `Select a face` to choose an image of a face.
4. Click the `Select a target` button to choose a target face image.
5. Click `Preview` to display the transformed result, and click `Start` to save the result to the specified directory without displaying it.
6. Click `Preview` to display the transformed result, and click `Start` to save the result to the specified directory without displaying it.
7. You can choose the `Face enhancer` to enhance facial details and features.
8. Click `Live` to open the webcam for real-time conversion. Please connect a USB camera before starting the program.
> ⚠️ **Note**: The first time you convert an image, it may take approximately two minutes.
## 🙏🏻Thanks
[Deep-Live-Cam](https://github.com/hacksider/Deep-Live-Cam)
## 💨Contributing
We welcome contributions from the community. Please fork the repository and create a pull request with your changes.
## 🙅Disclaimer
This software is meant to be a productive contribution to the rapidly growing AI-generated media industry. It will help artists with tasks such as animating a custom character or using the character as a model for clothing etc.
The developers of this software are aware of its possible unethical applications and are committed to take preventative measures against them. It has a built-in check which prevents the program from working on inappropriate media including but not limited to nudity, graphic content, sensitive material such as war footage etc. We will continue to develop this project in the positive direction while adhering to law and ethics. This project may be shut down or include watermarks on the output if requested by law.
Users of this software are expected to use this software responsibly while abiding by local laws. If the face of a real person is being used, users are required to get consent from the concerned person and clearly mention that it is a deepfake when posting content online. Developers of this software will not be responsible for actions of end-users.
## ✅License
This project is licensed under the AGPL-3.0 License.
================================================
FILE: reComputer/scripts/deep-live-cam/clean.sh
================================================
#!/bin/bash
# Stop and remove the Deep-Live-Cam container, delete its image, and remove
# the ~/images working directory created by run.sh.
CONTAINER_NAME="deep-live-cam"
IMAGE_NAME="yaohui1998/deep-live-cam:1.0"
sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
# Fix: the original expanded the undefined variable $IMAGE_NAMEs (trailing "s"
# typo), so `docker rmi` received an empty argument and the image was never removed.
sudo docker rmi $IMAGE_NAME
sudo rm -r ~/images
================================================
FILE: reComputer/scripts/deep-live-cam/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 36.3.0
REQUIRED_DISK_SPACE: 40 # in GB
REQUIRED_MEM_SPACE: 20
PACKAGES:
- nvidia-jetpack
- x11-xserver-utils
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/deep-live-cam/init.sh
================================================
#!/bin/bash
# Check the runtime environment (L4T version, disk/memory, docker) against config.yaml.
# Fix: quote the command substitution so a path containing spaces does not
# word-split when passed to `source`.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/deep-live-cam/run.sh
================================================
#!/bin/bash
# Launch the Deep-Live-Cam container with X11 access and the ~/images mount.
CONTAINER_NAME="deep-live-cam"
IMAGE_NAME="yaohui1998/deep-live-cam:1.0"
# Pull the latest image
docker pull $IMAGE_NAME
# Allow local docker containers to use the host X server, then target display :0
xhost +local:docker
export DISPLAY=:0
# Fix: use -p so re-runs do not error out when ~/images already exists.
mkdir -p ~/images
echo $DISPLAY
# Check if the container with the specified name already exists
if [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    docker start $CONTAINER_NAME
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    # NOTE(review): --rm removes the container on exit, so the "already exists"
    # branch above can only match a container that is still running — confirm intent.
    docker run -it --rm \
        --name $CONTAINER_NAME \
        --privileged \
        --network host \
        -v ~/images:/usr/src/Deep-Live-Cam/images \
        -e DISPLAY=$DISPLAY \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev/*:/dev/* \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        $IMAGE_NAME
fi
================================================
FILE: reComputer/scripts/depth-anything/Dockerfile
================================================
# Depth Anything WebUI image based on NVIDIA's L4T PyTorch runtime.
FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3
# WORKDIR creates the directory if it does not exist, so no separate mkdir.
WORKDIR /usr/src/DepthAnything-on-Jetson-Orin
COPY . /usr/src/DepthAnything-on-Jetson-Orin
# Fix: 'flask' was listed twice in the original install line.
RUN pip install flask onnx flask_socketio huggingface_hub
CMD ["python3", "app.py"]
================================================
FILE: reComputer/scripts/depth-anything/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/depth-anything/README.md
================================================
# Jetson-Example: Run Depth Anything on NVIDIA Jetson Orin 🚀
This project provides a one-click deployment of the Depth Anything monocular depth estimation model developed by Hong Kong University and ByteDance. The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.
<p align="center">
<img src="images/WebUI.png" alt="WebUI">
</p>
All models and inference engine implemented in this project are from the official [Depth Anything](https://depth-anything.github.io/).
## 🔥Features
- One-click deployment for Depth Anything models.
- WebUI for model conversion and depth estimation.
- Support for uploading videos/images or using the local camera
- Supports S, B, L models of Depth Anything with input sizes of 308, 364, 406, and 518.
### 🗝️WebUI Features
- **Choose model**: Select from depth_anything_vits14 models. (S, B, L)
- **Choose input size**: Select the desired input size. (308, 364, 406, 518)
- **Grayscale option**: Option to use grayscale.
- **Choose source**: Select the input source (Video, Image, Camera).
- **Export Model**: Automatically download and convert the model from PyTorch (.pth) to TensorRT format.
- **Start Estimation**: Begin depth estimation using the selected model and input source.
- **Stop Estimation**: Stop the ongoing depth estimation process.
<p align="center">
<img src="images/Opr.png" alt="Depthanything" width="320" height="360">
</p>
## 🥳Getting Started
### 📜Prerequisites
- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
- Docker installed on reComputer
- USB Camera (optional)
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### 🚀Installation
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
### 📋Usage
1. Run code:
```sh
reComputer run depth-anything
```
2. Open a web browser and input **http://{reComputer ip}:5000**. Use the WebUI to select the model, input size, and source.
3. Click on **Export Model** to download and convert the model.
4. Click on **Start Estimation** to begin the depth estimation process.
5. View the real-time depth estimation results on the WebUI.
## ⛏️Applications
- **Security**: Enhance surveillance systems with depth perception.
<p align="center">
<img src="images/Security.png" alt="Security" width="500" height="150">
</p>
- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.
<p align="center">
<img src="images/Autonomous Driving.png" alt="Autonomous Driving" width="500" height="150">
</p>
- **Underwater Scenes**: Apply depth estimation in underwater exploration.
<p align="center">
<img src="images/Underwater Scenes.png" alt="Underwater Scenes" width="500" height="150">
</p>
- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.
<p align="center">
<img src="images/Indoor Scenes.png" alt="Indoor Scenes" width="500" height="150">
</p>
## Further Development 🔧
- [Depth Anything Official](https://depth-anything.github.io/)
- [Depth Anything TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)
- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)
- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)
- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)
## 🙏🏻Contributing
We welcome contributions from the community. Please fork the repository and create a pull request with your changes.
## ✅License
This project is licensed under the MIT License.
## 🏷️Acknowledgements
- Depth Anything [project](https://depth-anything.github.io/) by Hong Kong University and ByteDance.
- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).
================================================
FILE: reComputer/scripts/depth-anything/clean.sh
================================================
#!/bin/bash
# Remove the depth-anything container and its Docker image.
CONTAINER_NAME="depth-anything"
IMAGE_NAME="yaohui1998/depthanything-on-jetson-orin:latest"

# Stop and remove the container (docker prints a warning if it is absent).
sudo docker stop "$CONTAINER_NAME"
sudo docker rm "$CONTAINER_NAME"

# Fix: the original ran `docker rmi $IMAGE_NAMEs` — an undefined variable
# (stray trailing 's') that expands empty, so the image was never removed.
sudo docker rmi "$IMAGE_NAME"
================================================
FILE: reComputer/scripts/depth-anything/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 20 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/depth-anything/init.sh
================================================
#!/bin/bash
# check the runtime environment.
# Quote the command substitutions so paths containing spaces do not word-split.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/depth-anything/run.sh
================================================
#!/bin/bash
# Start the Depth Anything WebUI container (serves on port 5000).
CONTAINER_NAME="depth-anything"
IMAGE_NAME="yaohui1998/depthanything-on-jetson-orin:latest"

# Pull the latest image
docker pull "$IMAGE_NAME"

# Check if the container with the specified name already exists
if [ "$(docker ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    docker start "$CONTAINER_NAME"
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    # Fix: '-v /dev/*:/dev/*' is not a valid bind mount — the glob pattern
    # (containing ':') matches no path and is passed through literally.
    # Mount /dev itself so cameras and other devices are visible.
    docker run -it \
        --name "$CONTAINER_NAME" \
        --privileged \
        --network host \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev:/dev \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        "$IMAGE_NAME"
fi
================================================
FILE: reComputer/scripts/depth-anything-v2/Dockerfile
================================================
# Depth Anything V2 WebUI image based on NVIDIA's L4T PyTorch runtime.
FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3
# WORKDIR creates the directory if it does not exist, so no separate mkdir.
WORKDIR /usr/src/DepthAnything-on-Jetson-Orin
COPY . /usr/src/DepthAnything-on-Jetson-Orin
# Fix: 'flask' was listed twice in the original install line.
RUN pip install flask onnx flask_socketio huggingface_hub
CMD ["python3", "app.py"]
================================================
FILE: reComputer/scripts/depth-anything-v2/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/depth-anything-v2/README.md
================================================
# Jetson-Example: Run Depth Anything V2 on NVIDIA Jetson Orin 🚀
This project provides a one-click deployment of the Depth Anything V2 monocular depth estimation model developed by Hong Kong University and ByteDance. The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.
<p align="center">
<img src="images/WebUI.png" alt="WebUI">
</p>
All models and inference engine implemented in this project are from the official [Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2).
## 🔥Features
- One-click deployment for Depth Anything V2 models.
- WebUI for model conversion and depth estimation.
- Support for uploading videos/images or using the local camera
- Supports S, B, L models of Depth Anything V2 with input sizes 518.
### 🗝️WebUI Features
- **Choose model**: Select from Depth Anything V2 models. (S, B, L)
- **Grayscale option**: Option to use grayscale.
- **Choose source**: Select the input source (Video, Image, Camera).
- **Export Model**: Automatically download and convert the model from ONNX to TensorRT format.
- **Start Estimation**: Begin depth estimation using the selected model and input source.
- **Stop Estimation**: Stop the ongoing depth estimation process.
<p align="center">
<img src="images/Opr.png" alt="Depthanything" width="320" height="360">
</p>
## 🥳Getting Started
### 📜Prerequisites
- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
- Docker installed on reComputer
- USB Camera (optional)
### Modify Docker Daemon Configuration (Optional)
To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
```json
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
```
After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
```sh
sudo systemctl restart docker
```
### 🚀Installation
PyPI(recommend)
```sh
pip install jetson-examples
```
Linux (github trick)
```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```
Github (for Developer)
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
### 📋Usage
1. Run code:
```sh
reComputer run depth-anything-v2
```
2. Open a web browser and input **http://{reComputer ip}:5000**. Use the WebUI to select the model and source.
3. Click on **Export Model** to download and convert the model.
4. Click on **Start Estimation** to begin the depth estimation process.
5. View the real-time depth estimation results on the WebUI.
## ⛏️Applications
- **Security**: Enhance surveillance systems with depth perception.
<p align="center">
<img src="images/Security.png" alt="Security" width="500" height="150">
</p>
- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.
<p align="center">
<img src="images/Autonomous Driving.png" alt="Autonomous Driving" width="500" height="150">
</p>
- **Underwater Scenes**: Apply depth estimation in underwater exploration.
<p align="center">
<img src="images/Underwater Scenes.png" alt="Underwater Scenes" width="500" height="150">
</p>
- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.
<p align="center">
<img src="images/Indoor Scenes.png" alt="Indoor Scenes" width="500" height="150">
</p>
## Further Development 🔧
- [Depth Anything V2 Official](https://github.com/DepthAnything/Depth-Anything-V2)
- [Depth Anything V2 TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)
- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)
- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)
- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)
## 🙏🏻Contributing
We welcome contributions from the community. Please fork the repository and create a pull request with your changes.
## ✅License
This project is licensed under the MIT License.
## 🏷️Acknowledgements
- Depth Anything V2 Official [project](https://github.com/DepthAnything/Depth-Anything-V2) by Hong Kong University and ByteDance.
- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).
================================================
FILE: reComputer/scripts/depth-anything-v2/clean.sh
================================================
#!/bin/bash
# Remove the depth-anything-v2 container and its Docker image.
CONTAINER_NAME="depth-anything-v2"
IMAGE_NAME="yaohui1998/depthanything-v2-on-jetson-orin:latest"

# Stop and remove the container (docker prints a warning if it is absent).
sudo docker stop "$CONTAINER_NAME"
sudo docker rm "$CONTAINER_NAME"

# Fix: the original ran `docker rmi $IMAGE_NAMEs` — an undefined variable
# (stray trailing 's') that expands empty, so the image was never removed.
sudo docker rmi "$IMAGE_NAME"
================================================
FILE: reComputer/scripts/depth-anything-v2/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/depth-anything-v2/init.sh
================================================
#!/bin/bash
# check the runtime environment.
# Quote the command substitutions so paths containing spaces do not word-split.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/depth-anything-v2/run.sh
================================================
#!/bin/bash
# Start the Depth Anything V2 WebUI container (serves on port 5000).
CONTAINER_NAME="depth-anything-v2"
IMAGE_NAME="yaohui1998/depthanything-v2-on-jetson-orin:latest"

# Pull the latest image
docker pull "$IMAGE_NAME"

# Check if the container with the specified name already exists
if [ "$(docker ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container $CONTAINER_NAME already exists. Starting and attaching..."
    docker start "$CONTAINER_NAME"
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    # Fix: '-v /dev/*:/dev/*' is not a valid bind mount — the glob pattern
    # (containing ':') matches no path and is passed through literally.
    # Mount /dev itself so cameras and other devices are visible.
    docker run -it \
        --name "$CONTAINER_NAME" \
        --privileged \
        --network host \
        -v /tmp/.X11-unix:/tmp/.X11-unix \
        -v /dev:/dev \
        -v /etc/localtime:/etc/localtime:ro \
        --runtime nvidia \
        "$IMAGE_NAME"
fi
================================================
FILE: reComputer/scripts/depth-anything-v3/Dockerfile
================================================
# This demo uses a prebuilt Docker image from Docker Hub.
# NOTE(review): run.sh and the README reference "chenduola6/depth-anything-v3:jp6.2"
# (hyphens) while this FROM uses underscores — confirm which repository name
# actually exists on Docker Hub and make the three references consistent.
FROM chenduola6/depth_anything_v3:jp6.2
================================================
FILE: reComputer/scripts/depth-anything-v3/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/depth-anything-v3/README.md
================================================
# Jetson-Example: Run Depth Anything V3 on NVIDIA Jetson
This project provides one-click deployment for **Depth Anything V3** on NVIDIA Jetson devices.
It uses the prebuilt Docker image:
```sh
chenduola6/depth-anything-v3:jp6.2
```
Image size: **7.6 GB**
Supported JetPack/L4T versions:
- JetPack 6.1 -> L4T 36.4.0
- JetPack 6.2 -> L4T 36.4.3
- JetPack 6.2.1 -> L4T 36.4.4
<p align="center">
<img src="images/da3.png" alt="Depth Anything V3">
</p>
## Getting Started
### Prerequisites
- NVIDIA Jetson device with a supported L4T version
- Docker installed and available
- USB camera (for camera inference)
### Installation
PyPI (recommended):
```sh
pip install jetson-examples
```
GitHub (developer):
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Usage
1. Start the demo container with `reComputer`:
```sh
reComputer run depth-anything-v3
```
2. Enter the running container:
```bash
xhost +local:docker
docker run -it --rm \
--gpus all \
--network host \
--ipc host \
--privileged \
-e DISPLAY=$DISPLAY \
-e QT_X11_NO_MITSHM=1 \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev:/dev \
chenduola6/depth-anything-v3:jp6.2
```
3. Run USB camera inference inside the container:
```sh
cd workspace/ros2-depth-anything-v3-trt
#build the engine file
source install/setup.bash
ros2 run depth_anything_v3 generate_engines onnx
```
<p align="center">
<img src="images/engine.png" alt="generate engine">
</p>
> **Note**: If the Jetson swap space is insufficient, it may cause the engine export process to fail.
>
> ```bash
> #add swap space
> sudo mkdir -p /mnt/nvme
> sudo fallocate -l 16G /mnt/nvme/swapfile
> sudo chmod 600 /mnt/nvme/swapfile
> sudo mkswap /mnt/nvme/swapfile
> sudo swapon /mnt/nvme/swapfile
> ```
```bash
#Run a USB camera demo
USB_SIMPLE=1 ./run_camera_depth.sh
```
## Cleanup
Only remove the container (keep image cache):
```sh
reComputer clean depth-anything-v3
```
## References
- [Depth Anything v3 project](https://github.com/ByteDance-Seed/Depth-Anything-3)
- [ros2-depth-anything-v3-trt](https://github.com/ika-rwth-aachen/ros2-depth-anything-v3-trt)
- [Seeed jetson-examples](https://github.com/Seeed-Projects/jetson-examples)
================================================
FILE: reComputer/scripts/depth-anything-v3/clean.sh
================================================
#!/bin/bash
# Remove the depth-anything-v3 container; the image is kept as local cache.
# Fix: run.sh names the container "depth-anything-v3" (hyphens); the original
# used "depth_anything_v3" (underscores) here, so cleanup never matched the
# container that run.sh actually creates.
CONTAINER_NAME="depth-anything-v3"

# Prefer plain docker, fallback to sudo docker when user has no docker group permission
if docker info >/dev/null 2>&1; then
    DOCKER_CMD=(docker)
else
    DOCKER_CMD=(sudo docker)
fi

# Stop the container only when it is currently running.
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
    "${DOCKER_CMD[@]}" stop "$CONTAINER_NAME"
fi

# Remove the container if it exists at all (running or stopped).
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    "${DOCKER_CMD[@]}" rm "$CONTAINER_NAME"
    echo "Container $CONTAINER_NAME removed."
else
    echo "Container $CONTAINER_NAME does not exist."
fi
================================================
FILE: reComputer/scripts/depth-anything-v3/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 12 # in GB
REQUIRED_MEM_SPACE: 4
PACKAGES:
- nvidia-jetpack
- x11-xserver-utils
DOCKER:
ENABLE: false
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/depth-anything-v3/init.sh
================================================
#!/bin/bash
# check the runtime environment.
# Quote the command substitutions so paths containing spaces do not word-split.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/depth-anything-v3/run.sh
================================================
#!/bin/bash
# Launch the Depth Anything V3 demo container with GUI (X11) and device access.
CONTAINER_NAME="depth-anything-v3"
IMAGE_NAME="chenduola6/depth-anything-v3:jp6.2"
# Prefer plain docker, fallback to sudo docker when user has no docker group permission
if docker info >/dev/null 2>&1; then
DOCKER_CMD=(docker)
else
echo "Current user has no docker permission."
echo "Please enter sudo password once for this run."
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
# Keep sudo timestamp alive during long pulls/runs to avoid repeated prompts.
# Background loop refreshes the cached sudo credentials every 60s and exits
# on its own once this script's PID ($$) is gone.
while true; do
sudo -n true
sleep 60
kill -0 "$$" || exit
done 2>/dev/null &
SUDO_KEEPALIVE_PID=$!
# Ensure the keepalive loop is killed when the script exits for any reason.
trap 'kill $SUDO_KEEPALIVE_PID >/dev/null 2>&1 || true' EXIT
DOCKER_CMD=(sudo docker)
fi
# Pull the latest image
"${DOCKER_CMD[@]}" pull $IMAGE_NAME
# Enable local X11 access for docker GUI apps
xhost +local:docker
# Use default display when DISPLAY is not set
if [ -z "$DISPLAY" ]; then
export DISPLAY=:0
fi
# Check if the container with the specified name already exists
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
echo "Container $CONTAINER_NAME already exists. Starting..."
"${DOCKER_CMD[@]}" start $CONTAINER_NAME
else
echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
# --privileged plus the /dev mount expose USB cameras to the container;
# the X11 socket mount and DISPLAY/QT vars enable GUI windows on the host.
"${DOCKER_CMD[@]}" run -it \
--name $CONTAINER_NAME \
--gpus all \
--network host \
--ipc host \
--privileged \
-e DISPLAY=$DISPLAY \
-e QT_X11_NO_MITSHM=1 \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev:/dev \
-v /etc/localtime:/etc/localtime:ro \
$IMAGE_NAME
fi
# Print follow-up instructions for running the camera demo inside the container.
echo "To run USB camera inference inside container:"
echo "1) ${DOCKER_CMD[*]} exec -it $CONTAINER_NAME /bin/bash"
echo "2) cd workspace/ros2-depth-anything-v3-trt"
echo "3) USB_SIMPLE=1 ./run_camera_depth.sh"
================================================
FILE: reComputer/scripts/gpt-oss/Dockerfile
================================================
# Prebuilt llama.cpp + GPT-OSS 20B image for JetPack 6.
# NOTE(review): "got-oss" looks like a typo for "gpt-oss", but it matches the
# image name used by run.sh and the README, so it is presumably the actual
# Docker Hub repository name — confirm before renaming.
FROM chenduola6/got-oss-20b:jp6
================================================
FILE: reComputer/scripts/gpt-oss/LICENSE
================================================
MIT License
Copyright (c) [2024] [Seeed Studio]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: reComputer/scripts/gpt-oss/README.md
================================================
# Jetson-Example: Run GPT-OSS 20B on NVIDIA Jetson
This project provides one-click deployment for **GPT-OSS 20B** on NVIDIA Jetson devices.
It uses the prebuilt Docker image:
```sh
chenduola6/got-oss-20b:jp6
```
Docker image size: **31.28 GB**
## Hardware Requirements
- NVIDIA Jetson device with at least **16GB VRAM**
- At least **50GB** available disk space
Supported JetPack/L4T versions:
- JetPack 6.1 -> L4T 36.4.0
- JetPack 6.2 -> L4T 36.4.3
- JetPack 6.2.1 -> L4T 36.4.4
<p align="center">
<img src="images/gpt-oss.gif" alt="GPT-OSS demo">
</p>
## Getting Started
### Installation
PyPI (recommended):
```sh
pip install jetson-examples
```
GitHub (developer):
```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
## Usage
### One-line deployment
```sh
reComputer run gpt-oss
```
This command pulls the image and starts `llama-server` in a detached container.
The script waits for `/v1/models` to become ready before exiting.
> **Note**: The script auto-detects the available GPU run mode on your Jetson (`--runtime nvidia` or `--gpus all`).
>
> **Note**: If prompted by the script, allow adding your user to the `docker` group so future runs do not require `sudo docker`. After adding the group, log out and log back in once.
>
> **Note**: If `curl /v1/models` returns `503 {"message":"Loading model"}`, the model is still loading. First startup can take several minutes.
>
> **Note**: If startup fails because of memory pressure, add swap space and try again:
>
> ```sh
> sudo fallocate -l 16G /swapfile
> sudo chmod 600 /swapfile
> sudo mkswap /swapfile
> sudo swapon /swapfile
> ```
You can lower memory usage when launching:
```sh
LLAMA_CTX=512 LLAMA_NGL=16 reComputer run gpt-oss
```
### Verify service
```sh
curl http://127.0.0.1:8080/v1/models
```
### Check logs
```sh
docker logs -f gpt-oss
```
## Manual Deployment (inside Docker)
```sh
docker pull chenduola6/got-oss-20b:jp6
docker run -it --rm \
--runtime nvidia \
--network host \
--ipc=host \
chenduola6/got-oss-20b:jp6
# inside the container
cd /root/gpt-oss/llama.cpp
./build/bin/llama-server \
-m /root/gpt-oss/gguf/gpt-oss-20b-Q4_K.gguf \
-ngl 20 -c 1024 \
--host 0.0.0.0 --port 8080
```
## Cleanup
Only remove the container (keep image cache):
```sh
reComputer clean gpt-oss
```
## References
- [llama.cpp](https://github.com/ggml-org/llama.cpp)
- [Seeed jetson-examples](https://github.com/Seeed-Projects/jetson-examples)
- [Setup step by step](https://wiki.seeedstudio.com/deploy_gptoss_on_jetson/)
================================================
FILE: reComputer/scripts/gpt-oss/clean.sh
================================================
#!/bin/bash
# Cleanup script for the gpt-oss demo: stops and removes the container while
# keeping the (large) image cached locally for faster subsequent startups.
CONTAINER_NAME="gpt-oss"
# Verify docker is installed and usable by the current user. If the user
# lacks permission, interactively offer to add them to the docker group and
# exit so the new group membership can take effect after re-login.
ensure_docker_access() {
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found."
echo "Please install Docker first, then rerun this command."
exit 1
fi
if docker info >/dev/null 2>&1; then
return 0
fi
# User is already in the docker group but the daemon is unreachable:
# most likely the Docker service itself is not running.
if id -nG "$USER" | grep -qw docker; then
echo "Current user is already in docker group, but docker is still unavailable."
echo "Please make sure Docker daemon is running, for example:"
echo "sudo systemctl enable --now docker"
exit 1
fi
echo "Current user has no docker permission."
read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
case "$choice" in
y|Y)
if ! sudo -v; then
echo "Failed to authenticate sudo. Exiting."
exit 1
fi
# Create the docker group first if the distro does not have it yet.
if ! getent group docker >/dev/null 2>&1; then
sudo groupadd docker
fi
sudo usermod -aG docker "$USER"
echo "Added $USER to docker group."
echo "Please log out and log back in (or reboot), then rerun:"
echo "reComputer clean gpt-oss"
exit 1
;;
*)
echo "Skipped docker group setup."
echo "You can run this manually:"
echo "sudo usermod -aG docker $USER"
exit 1
;;
esac
}
ensure_docker_access
DOCKER_CMD=(docker)
# Stop the container only if it is currently running.
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" stop "$CONTAINER_NAME"
fi
# Remove the container if it exists at all (running or stopped).
if [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
"${DOCKER_CMD[@]}" rm "$CONTAINER_NAME"
echo "Container $CONTAINER_NAME removed."
else
echo "Container $CONTAINER_NAME does not exist."
fi
echo "Image is kept locally for faster next startup."
================================================
FILE: reComputer/scripts/gpt-oss/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 50 # in GB
REQUIRED_MEM_SPACE: 14
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: false
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/gpt-oss/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
================================================
FILE: reComputer/scripts/gpt-oss/run.sh
================================================
#!/bin/bash
# Launch the GPT-OSS 20B llama.cpp server in a prebuilt Docker image.
CONTAINER_NAME="gpt-oss"
# NOTE(review): the image name spells "got-oss" — presumably the actual
# Docker Hub repository name; confirm before "fixing" the spelling.
IMAGE_NAME="chenduola6/got-oss-20b:jp6"
# Model path inside the container image.
MODEL_PATH="/root/gpt-oss/gguf/gpt-oss-20b-Q4_K.gguf"
HOST="0.0.0.0"
# Tunables, overridable via environment variables.
PORT="${LLAMA_PORT:-8080}"                        # server listen port
NGL="${LLAMA_NGL:-20}"                            # layers offloaded to GPU
CTX="${LLAMA_CTX:-1024}"                          # context length
STARTUP_TIMEOUT="${LLAMA_STARTUP_TIMEOUT:-600}"   # readiness wait in seconds
# Command executed inside the container to start llama-server.
SERVER_CMD="cd /root/gpt-oss/llama.cpp && ./build/bin/llama-server -m ${MODEL_PATH} -ngl ${NGL} -c ${CTX} --host ${HOST} --port ${PORT}"
# Filled in later by probe_gpu_mode (--runtime nvidia or --gpus all).
GPU_FLAGS=()
ensure_docker_access() {
    # Ensure the docker CLI exists and the current user can reach the daemon;
    # otherwise offer to add the user to the docker group and exit.
    if ! command -v docker >/dev/null 2>&1; then
        echo "docker command not found."
        echo "Please install Docker first, then rerun this command."
        exit 1
    fi
    # Daemon reachable with current permissions: nothing to do.
    if docker info >/dev/null 2>&1; then
        return 0
    fi
    # Already in the docker group but daemon unreachable: the problem is the
    # service itself, not permissions.
    if id -nG "$USER" | grep -qw docker; then
        echo "Current user is already in docker group, but docker is still unavailable."
        echo "Please make sure Docker daemon is running, for example:"
        echo "sudo systemctl enable --now docker"
        exit 1
    fi
    echo "Current user has no docker permission."
    read -r -p "Add current user ($USER) to docker group now? (y/n): " choice
    case "$choice" in
    y|Y)
        if ! sudo -v; then
            echo "Failed to authenticate sudo. Exiting."
            exit 1
        fi
        # Create the group first if the distro does not have it yet.
        if ! getent group docker >/dev/null 2>&1; then
            sudo groupadd docker
        fi
        sudo usermod -aG docker "$USER"
        echo "Added $USER to docker group."
        # Group membership only takes effect in a new login session.
        echo "Please log out and log back in (or reboot), then rerun:"
        echo "reComputer run gpt-oss"
        exit 1
        ;;
    *)
        echo "Skipped docker group setup."
        echo "You can run this manually:"
        echo "sudo usermod -aG docker $USER"
        exit 1
        ;;
    esac
}
ensure_docker_access
DOCKER_CMD=(docker)
ensure_image() {
    # Pull the image; when the pull fails, fall back to a cached local copy
    # and only abort if no local copy exists either.
    if ! "${DOCKER_CMD[@]}" pull "$IMAGE_NAME"; then
        echo "Warning: failed to pull image from Docker Hub."
        if ! "${DOCKER_CMD[@]}" image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
            echo "No local image cache found. Please check network and retry."
            exit 1
        fi
        echo "Found local image cache: $IMAGE_NAME"
        echo "Continue with local image."
    fi
    return 0
}
create_container() {
    # Start the llama-server container detached, with host networking so the
    # chosen PORT is reachable directly, using the probed GPU flags.
    "${DOCKER_CMD[@]}" run -d \
        --name "$CONTAINER_NAME" \
        "${GPU_FLAGS[@]}" \
        --network host \
        --ipc=host \
        "$IMAGE_NAME" \
        /bin/bash -lc "$SERVER_CMD"
}
probe_gpu_mode() {
    # Detect a working Docker GPU flag by launching a no-op container:
    # prefer --runtime nvidia, then fall back to --gpus all.
    if "${DOCKER_CMD[@]}" run --rm --runtime nvidia --network host --ipc=host "$IMAGE_NAME" /bin/sh -lc "exit 0" >/dev/null 2>&1; then
        GPU_FLAGS=(--runtime nvidia)
        echo "Using GPU mode: --runtime nvidia"
        return 0
    fi
    if "${DOCKER_CMD[@]}" run --rm --gpus all --network host --ipc=host "$IMAGE_NAME" /bin/sh -lc "exit 0" >/dev/null 2>&1; then
        GPU_FLAGS=(--gpus all)
        echo "Using GPU mode: --gpus all"
        return 0
    fi
    echo "Failed to detect a working Docker GPU mode."
    echo "Tried: --runtime nvidia and --gpus all"
    echo "Please check Docker + NVIDIA Container Runtime on this device."
    exit 1
}
ensure_image
probe_gpu_mode
# Check if the container with the specified name already exists
if [ "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container $CONTAINER_NAME is already running."
elif [ "$("${DOCKER_CMD[@]}" ps -a -q -f name=^/${CONTAINER_NAME}$)" ]; then
    # A stopped container may hold stale GPU flags, so recreate it rather
    # than simply restarting it.
    echo "Container $CONTAINER_NAME already exists but is not running."
    echo "Recreating with current runtime settings..."
    "${DOCKER_CMD[@]}" rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true
    if ! create_container >/dev/null; then
        echo "Failed to create container."
        exit 1
    fi
else
    echo "Container $CONTAINER_NAME does not exist. Creating and starting..."
    if ! create_container >/dev/null; then
        echo "Failed to create container."
        exit 1
    fi
fi
# Confirm the container actually stayed up after creation.
if [ -z "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
    echo "Container failed to reach running state."
    echo "Inspect logs with: ${DOCKER_CMD[*]} logs $CONTAINER_NAME"
    exit 1
fi
wait_for_server_ready() {
    # Poll the OpenAI-compatible /v1/models endpoint until the server returns
    # a model list, the container exits, or STARTUP_TIMEOUT seconds elapse.
    # Returns 0 on ready, 1 on failure (with diagnostics printed).
    local endpoint="http://127.0.0.1:${PORT}/v1/models"
    local elapsed=0
    local interval=5
    local raw_response=""
    local response_body=""
    local http_code="000"
    local last_code="000"
    local last_body=""
    # Without curl we cannot probe readiness; assume success.
    if ! command -v curl >/dev/null 2>&1; then
        echo "curl not found, skip readiness probing."
        return 0
    fi
    echo "Waiting for GPT-OSS to be ready at ${endpoint} (timeout: ${STARTUP_TIMEOUT}s)..."
    while [ "$elapsed" -lt "$STARTUP_TIMEOUT" ]; do
        # Bail out early if the container stopped while loading.
        if [ -z "$("${DOCKER_CMD[@]}" ps -q -f name=^/${CONTAINER_NAME}$)" ]; then
            echo "Container exited before model became ready."
            echo "Recent logs:"
            "${DOCKER_CMD[@]}" logs --tail 80 "$CONTAINER_NAME"
            return 1
        fi
        # curl -w appends the HTTP status on its own line; split body/status.
        raw_response="$(curl -s --max-time 3 -w "\n%{http_code}" "$endpoint" 2>/dev/null || true)"
        http_code="$(printf '%s' "$raw_response" | tail -n 1)"
        response_body="$(printf '%s' "$raw_response" | sed '$d')"
        last_code="$http_code"
        last_body="$response_body"
        # Ready when endpoint returns model list payload.
        if [ "$http_code" = "200" ] && echo "$response_body" | grep -q "\"data\""; then
            return 0
        fi
        # Typical warm-up response from llama-server while loading weights.
        if [ "$http_code" = "503" ] && echo "$response_body" | grep -q "Loading model"; then
            # Print progress roughly every 30 seconds.
            if [ $((elapsed % 30)) -eq 0 ]; then
                echo "Model is still loading... (${elapsed}s)"
            fi
            sleep "$interval"
            elapsed=$((elapsed + interval))
            continue
        fi
        if [ $((elapsed % 30)) -eq 0 ]; then
            echo "Waiting model readiness... (${elapsed}s, http=${http_code})"
        fi
        sleep "$interval"
        elapsed=$((elapsed + interval))
    done
    echo "Model is still not ready after ${STARTUP_TIMEOUT}s."
    echo "Last endpoint status: ${last_code}"
    if [ -n "$last_body" ]; then
        echo "Last endpoint response: $last_body"
    fi
    echo "Recent logs:"
    "${DOCKER_CMD[@]}" logs --tail 80 "$CONTAINER_NAME"
    echo "You can try lower memory settings:"
    echo "LLAMA_CTX=512 LLAMA_NGL=16 reComputer run gpt-oss"
    return 1
}
if ! wait_for_server_ready; then
    exit 1
fi
# Print follow-up commands for the user.
echo "GPT-OSS server is ready at: http://127.0.0.1:${PORT}"
echo "Check models:"
echo "curl http://127.0.0.1:${PORT}/v1/models"
echo "Follow server logs:"
echo "${DOCKER_CMD[*]} logs -f $CONTAINER_NAME"
================================================
FILE: reComputer/scripts/live-llava/init.sh
================================================
#!/bin/bash
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/live-llava/run.sh
================================================
#!/bin/bash
# Live LLaVA demo launcher: checks the L4T version, ensures a browser and a
# TLS key, patches local_llm sources, then runs the demo container.
SUPPORT_L4T_LIST="35.3.1"
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
get_l4t_version() {
    # Detect the L4T (Jetson Linux) version into $L4T_VERSION. Only runs the
    # detection on aarch64; exits for anything that is neither aarch64 nor
    # x86_64 (on x86_64 it silently does nothing).
    ARCH=$(uname -i)
    echo "ARCH: $ARCH"
    if [ $ARCH = "aarch64" ]; then
        L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)
        if [ -z "$L4T_VERSION_STRING" ]; then
            # Fall back to the package version when the release file is empty.
            echo "reading L4T version from \"dpkg-query --show nvidia-l4t-core\""
            L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)
            L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })
            L4T_RELEASE=${L4T_VERSION_ARRAY[0]}
            L4T_REVISION=${L4T_VERSION_ARRAY[1]}
        else
            echo "reading L4T version from /etc/nv_tegra_release"
            L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')
            L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')
        fi
        # NOTE(review): MAJOR/MINOR are derived but never used in this script;
        # the substring indexing also assumes a single-digit revision major.
        L4T_REVISION_MAJOR=${L4T_REVISION:0:1}
        L4T_REVISION_MINOR=${L4T_REVISION:2:1}
        L4T_VERSION="$L4T_RELEASE.$L4T_REVISION"
        echo "L4T_VERSION: $L4T_VERSION"
    elif [ $ARCH != "x86_64" ]; then
        echo "unsupported architecture: $ARCH" # show in red color
        exit 1
    fi
}
# 1. Check L4T version against the supported list.
get_l4t_version
CHECK_L4T_VERSION=0
for item in $SUPPORT_L4T_LIST; do
    if [ "$item" = "$L4T_VERSION" ]; then
        CHECK_L4T_VERSION=1
        break
    fi
done
if [ $CHECK_L4T_VERSION -eq 1 ]; then
    echo "pass the version check"
else
    echo "currently supported versions of jetpack are $SUPPORT_L4T_LIST" # show in red color
    exit 1
fi
# 2. Check the Chromium browser; install it when missing.
if dpkg -s chromium-browser &>/dev/null; then
    echo "Chrome is installed."
else
    echo "install Google Chrome ..." # show in red color
    sudo apt install chromium-browser
    echo "Google Chrome installed successfully" # show in red color
fi
# 3. Generate a self-signed TLS key/cert for the browser's HTTPS session.
FILE_NAME="key.pem"
FILE_PATH="$JETSON_REPO_PATH/data"
if [ -f "$FILE_PATH/$FILE_NAME" ]; then
    echo "key file '$FILE_PATH/$FILE_NAME' exists."
else
    # Abort if the data dir is missing so openssl does not write key.pem
    # into whatever the current directory happens to be.
    cd "$FILE_PATH" || exit 1
    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes -subj '/CN=localhost'
    cd ..
fi
# 4. Overwrite video_query.py inside jetson-containers with a patched agent
#    that adds a Flask control API (prompt / on-off switch / sampling params)
#    listening on port 5555 alongside the video pipeline.
cat >"$JETSON_REPO_PATH/packages/llm/local_llm/agents/video_query.py" <<'EOF'
#!/usr/bin/env python3
import time
import logging
import threading

from local_llm import Agent
from local_llm.plugins import (
    VideoSource,
    VideoOutput,
    ChatQuery,
    PrintStream,
    ProcessProxy,
)
from local_llm.utils import ArgParser, print_table

from termcolor import cprint
from jetson_utils import cudaFont, cudaMemcpy, cudaToNumpy, cudaDeviceSynchronize
from flask import Flask, request


class VideoQuery(Agent):
    """
    Perpetual always-on closed-loop visual agent that applies prompts to a video stream.
    """

    def __init__(self, model="liuhaotian/llava-v1.5-7b", **kwargs):
        super().__init__()
        self.lock = threading.Lock()

        # load model in another process for smooth streaming
        # self.llm = ProcessProxy((lambda **kwargs: ChatQuery(model, drop_inputs=True, **kwargs)), **kwargs)
        self.llm = ChatQuery(model, drop_inputs=True, **kwargs)
        self.llm.add(PrintStream(color="green", relay=True).add(self.on_text))
        self.llm.start()

        # test / warm-up query
        self.warmup = True
        self.text = ""
        self.eos = False

        self.llm("What is 2+2?")

        while self.warmup:
            time.sleep(0.25)

        # create video streams
        self.video_source = VideoSource(**kwargs)
        self.video_output = VideoOutput(**kwargs)

        self.video_source.add(self.on_video, threaded=False)
        self.video_output.start()

        self.font = cudaFont()

        # setup prompts
        self.prompt = "Describe the image concisely and briefly."

        # entry node
        self.pipeline = [self.video_source]

    def on_video(self, image):
        np_image = cudaToNumpy(image)
        cudaDeviceSynchronize()

        self.llm(
            [
                "reset",
                np_image,
                self.prompt,
            ]
        )

        text = self.text.replace("\n", "").replace("</s>", "").strip()

        # overlay the response ten words per line
        if text:
            worlds = text.split()
            line_counter = len(worlds) // 10
            if len(worlds) % 10 != 0:
                line_counter += 1
            for l in range(line_counter):
                line_text = " ".join(worlds[l * 10 : (l + 1) * 10])
                self.font.OverlayText(
                    image,
                    text=line_text,
                    x=5,
                    y=int(79 + l * 37),
                    color=self.font.White,
                    background=self.font.Gray40,
                )
        self.font.OverlayText(
            image,
            text="Prompt: " + self.prompt,
            x=5,
            y=42,
            color=(120, 215, 21),
            background=self.font.Gray40,
        )
        self.video_output(image)

    def on_text(self, text):
        if self.eos:
            self.text = text  # new query response
            self.eos = False
        elif not self.warmup:  # don't view warmup response
            self.text = self.text + text

        if text.endswith("</s>") or text.endswith("###") or text.endswith("<|im_end|>"):
            self.print_stats()
            self.warmup = False
            self.eos = True

    def update_switch(self, on_off):
        self.video_source.switch(on_off)

    def update_prompts(self, new_prompt):
        with self.lock:
            if new_prompt:
                self.prompt = new_prompt

    def print_stats(self):
        # print_table(self.llm.model.stats)
        curr_time = time.perf_counter()
        if not hasattr(self, "start_time"):
            self.start_time = curr_time
        else:
            frame_time = curr_time - self.start_time
            self.start_time = curr_time
            logging.info(
                f"refresh rate: {1.0 / frame_time:.2f} FPS ({frame_time*1000:.1f} ms)"
            )


if __name__ == "__main__":
    parser = ArgParser(extras=ArgParser.Defaults + ["video_input", "video_output"])
    args = parser.parse_args()

    # run the video agent in its own thread
    agent = VideoQuery(**vars(args))

    def run_video_query():
        agent.run()

    video_query_thread = threading.Thread(target=run_video_query)
    video_query_thread.start()

    # start the web control service
    app = Flask(__name__)

    @app.route("/update_prompt", methods=["POST"])
    def update_prompts():
        prompt = request.json.get("prompt")
        if prompt:
            agent.update_prompts(prompt)
            return "Prompts updated successfully."
        else:
            return "Invalid prompts data."

    @app.route("/update_switch", methods=["POST"])
    def update_switch():
        infer_or_not = True if request.json.get("switch") == "on" else False
        agent.update_switch(infer_or_not)
        return "stop" if not infer_or_not else "start"

    @app.route("/update_params", methods=["POST"])
    def update_params():
        try:
            agent.llm.max_new_tokens = request.json.get("max_new_tokens") or 128
            agent.llm.min_new_tokens = request.json.get("min_new_tokens") or -1
            agent.llm.do_sample = request.json.get("do_sample") or False
            agent.llm.repetition_penalty = request.json.get("repetition_penalty") or 1.0
            agent.llm.temperature = request.json.get("temperature") or 0.7
            agent.llm.top_p = request.json.get("top_p") or 0.95
            if request.json.get("system_prompt"):
                agent.llm.chat_history.template["system_prompt"] = request.json.get(
                    "system_prompt"
                )
            return "params updated."
        except Exception as e:
            print(e)
            return "update failure"

    app.run(host="0.0.0.0", port=5555)
EOF
# Comment out optional imports (siglip, audio, nanodb, onnxruntime) so the
# patched local_llm sources load without those dependencies in the container.
sed -i 's/from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, SiglipImageProcessor, SiglipVisionModel/from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection # , SiglipImageProcessor, SiglipVisionModel/' "$JETSON_REPO_PATH/packages/llm/local_llm/vision/clip_hf.py"
sed -i "s/'siglip': dict(preprocessor=SiglipImageProcessor, model=SiglipVisionModel),/# 'siglip': dict(preprocessor=SiglipImageProcessor, model=SiglipVisionModel),/" "$JETSON_REPO_PATH/packages/llm/local_llm/vision/clip_hf.py"
sed -i 's/from .audio import */# from .audio import */' "$JETSON_REPO_PATH/packages/llm/local_llm/plugins/__init__.py"
sed -i 's/from .nanodb import NanoDB/# from .nanodb import NanoDB/' "$JETSON_REPO_PATH/packages/llm/local_llm/plugins/__init__.py"
sed -i 's/import onnxruntime as ort/# import onnxruntime as ort/' "$JETSON_REPO_PATH/packages/llm/local_llm/utils/model.py"
echo "The script has been modified."
# Open the WebRTC viewer in a browser window, then launch the video_query
# agent inside the dustynv/local_llm container (cameras, X11 and the patched
# local_llm sources are all mounted in).
gnome-terminal -- /bin/bash -c "chromium-browser --disable-features=WebRtcHideLocalIpsWithMdns https://localhost:8554/; exec /bin/bash"
cd $JETSON_REPO_PATH
sudo docker run --runtime nvidia -it --rm --network host --volume /tmp/argus_socket:/tmp/argus_socket --volume /etc/enctune.conf:/etc/enctune.conf --volume /etc/nv_tegra_release:/etc/nv_tegra_release --volume /proc/device-tree/model:/tmp/nv_jetson_model --volume /var/run/dbus:/var/run/dbus --volume /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket --volume /var/run/docker.sock:/var/run/docker.sock --volume $JETSON_REPO_PATH/data:/data --device /dev/snd --device /dev/bus/usb -e DISPLAY=:0 -v /tmp/.X11-unix/:/tmp/.X11-unix -v /tmp/.docker.xauth:/tmp/.docker.xauth -e XAUTHORITY=/tmp/.docker.xauth --device /dev/video0 --device /dev/video1 -v $JETSON_REPO_PATH/packages/llm/local_llm:/opt/local_llm/local_llm -e SSL_KEY=/data/key.pem -e SSL_CERT=/data/cert.pem dustynv/local_llm:r35.3.1 python3 -m local_llm.agents.video_query --api=mlc --verbose --model liuhaotian/llava-v1.5-7b --max-new-tokens 32 --video-input /dev/video0 --video-output webrtc://@:8554/output
================================================
FILE: reComputer/scripts/llama-factory/README.md
================================================
# Finetune LLM by Llama-Factory on Jetson
## Hello
Now you can tailor a custom private local LLM to meet your requirements.
💡 Here's an example of quickly deploying [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) on Jetson device.
🔥 Highlights:
- **Llama-Factory** is an efficient tool to unify efficient Fine-Tuning of 100+ LLMs. 🚀🔍
- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨
- **Jetson** is powerful AI hardware platform for edge computing.💻
🛠️ Follow the tutorial below to quickly experience the performance of Llama-Factory on edge computing devices.
<div align="center">
<img alt="training" width="1200px" src="./assets/training.gif">
</div>
## Get a Jetson Orin Device 🛒
| Device Model | Description | Link |
|--------------|-------------|------|
| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
| NVIDIA® Jetson AGX Orin™ 64GB Developer Kit | Smallest and most powerful AI edge computer | [Buy Here](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html) |
## Getting Started
- install **jetson-examples** by pip:
```sh
pip3 install jetson-examples
```
- restart reComputer
```sh
sudo reboot
```
- run Llama-Factory webui on jetson in one line:
```sh
reComputer run llama-factory
```
- Please visit http://127.0.0.1:7860
<div align="center">
<img alt="yolov10" width="1200px" src="./assets/webui.png">
</div>
## Run Training Script
> **Note:** Some models and datasets require confirmation before using them, so we recommend logging in with your Hugging Face account by:
> `sudo docker exec -it llama-factory huggingface-cli login`
There are a lot of parameters to choose from in the webui; you can refer to the [LLaMA-Factory documentation](https://github.com/hiyouga/LLaMA-Factory) for more information.
For demonstration purposes, set `Model name: Phi-1.5-1.3B`, `Dataset: alpaca_zh`, leave the other parameters unchanged, and then click the `Start` button
<div align="center">
<img alt="yolov10" width="1200px" src="./assets/llama-factory-Jetson.png">
</div>
## Build Docker Image
We highly recommend that you use `jetson-containers` to compile the docker container, as you can see [here](https://github.com/dusty-nv/jetson-containers/pull/566).
## Reference
- https://github.com/hiyouga/LLaMA-Factory
- https://github.com/dusty-nv/jetson-containers
================================================
FILE: reComputer/scripts/llama-factory/clean.sh
================================================
#!/bin/bash
# Remove the llama-factory image and wipe the cloned LLaMA-Factory work tree.
sudo docker rmi youjiang9977/llama-factory:r35.4.1
sudo rm -rf /home/$USER/reComputer/jetson-containers/LLaMA-Factory/*
================================================
FILE: reComputer/scripts/llama-factory/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llama-factory/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llama-factory/run.sh
================================================
#!/bin/bash
# Start the LLaMA-Factory webui container with the jetson-containers data
# directory mounted at /data (the README documents http://127.0.0.1:7860).
DATA_PATH="/home/$USER/reComputer/jetson-containers/data"
sudo docker run -it --rm --network host --runtime nvidia \
    --volume $DATA_PATH:/data \
    --name llama-factory \
    youjiang9977/llama-factory:r35.4.1
================================================
FILE: reComputer/scripts/llama3/clean.sh
================================================
#!/bin/bash
# Remove the locally-tagged ollama image and optionally its downloaded data.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# search local image
img_tag=$($JETSON_REPO_PATH/autotag -p local ollama)
# Capture autotag's exit status immediately: $? would otherwise be
# overwritten by the [ ] test before the failure message is printed.
autotag_rc=$?
if [ $autotag_rc -eq 0 ]; then
    echo "Found Image successfully."
    sudo docker rmi $img_tag
else
    echo "[warn] Found Image failed with error code $autotag_rc. skip delete Image."
fi
# Optionally wipe the downloaded ollama model data.
read -p "Delete all data for ollama? (y/n): " choice
if [[ $choice == "y" || $choice == "Y" ]]; then
    echo "Delete=> $JETSON_REPO_PATH/data/models/ollama/"
    sudo rm -rf $JETSON_REPO_PATH/data/models/ollama/
    echo "Clean Data Done."
else
    echo "[warn] Skip Clean Data."
fi
================================================
FILE: reComputer/scripts/llama3/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llama3/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
BASE_PATH=/home/$USER/reComputer
mkdir -p $BASE_PATH/
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d $JETSON_REPO_PATH ]; then
echo "jetson-ai-lab existed."
else
echo "jetson-ai-lab does not installed. start init..."
cd $BASE_PATH/
git clone --depth=1 $BASE_JETSON_LAB_GIT
cd $JETSON_REPO_PATH
bash install.sh
fi
================================================
FILE: reComputer/scripts/llama3/run.sh
================================================
#!/bin/bash
# Run llama3 via ollama inside jetson-containers: start a detached server,
# attach an interactive client, then remove the server container.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
# try stop old server
docker rm -f ollama
# start new server
./run.sh -d --name ollama $(./autotag ollama)
# run a client
./run.sh $(./autotag ollama) /bin/ollama run llama3
# clean new server
docker rm -f ollama
================================================
FILE: reComputer/scripts/llama3.2/clean.sh
================================================
#!/bin/bash
get_l4t_version() {
    # Extract "major.revision" (e.g. 36.4.0) from the first line of
    # /etc/nv_tegra_release; prints an empty string when parsing fails.
    local version=""
    local first_line
    first_line=$(head -n 1 /etc/nv_tegra_release)
    if [[ $first_line =~ R([0-9]+)\ *\(release\),\ REVISION:\ ([0-9]+\.[0-9]+) ]]; then
        version="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
    fi
    echo "$version"
}
L4T_VERSION=$(get_l4t_version)
echo "Detected L4T version: $L4T_VERSION"
# Determine the Docker image based on L4T version
if [[ "$L4T_VERSION" == "35.3.1" || "$L4T_VERSION" == "35.4.1" || "$L4T_VERSION" == "35.5.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r35.3.1"
elif [[ "$L4T_VERSION" == "36.3.0" || "$L4T_VERSION" == "36.4.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r36.3.0"
else
    echo "Error: L4T version $L4T_VERSION is not supported."
    exit 1
fi
# Delete the image only when it is actually present locally.
if [ "$(docker images -q "$IMAGE_NAME")" ]; then
    echo "Deleting $IMAGE_NAME..."
    docker rmi "$IMAGE_NAME"
    echo "Image $IMAGE_NAME has been successfully deleted."
else
    echo "No image named $IMAGE_NAME was found."
fi
================================================
FILE: reComputer/scripts/llama3.2/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
- 36.4.0
REQUIRED_DISK_SPACE: 15
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llama3.2/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llama3.2/run.sh
================================================
#!/bin/bash
# Run llama3.2 via ollama: pick the image matching the detected L4T release,
# start a detached server, attach an interactive client, then clean up.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
get_l4t_version() {
    # Parse "major.revision" from the first line of /etc/nv_tegra_release;
    # prints an empty string when parsing fails.
    local l4t_version=""
    local release_line=$(head -n 1 /etc/nv_tegra_release)
    if [[ $release_line =~ R([0-9]+)\ *\(release\),\ REVISION:\ ([0-9]+\.[0-9]+) ]]; then
        local major="${BASH_REMATCH[1]}"
        local revision="${BASH_REMATCH[2]}"
        l4t_version="${major}.${revision}"
    fi
    echo "$l4t_version"
}
L4T_VERSION=$(get_l4t_version)
echo "Detected L4T version: $L4T_VERSION"
# Determine the Docker image based on L4T version
if [[ "$L4T_VERSION" == "35.3.1" || "$L4T_VERSION" == "35.4.1" || "$L4T_VERSION" == "35.5.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r35.3.1"
elif [[ "$L4T_VERSION" == "36.3.0" || "$L4T_VERSION" == "36.4.0" ]]; then
    IMAGE_NAME="youjiang9977/ollama:r36.3.0"
else
    echo "Error: L4T version $L4T_VERSION is not supported."
    exit 1
fi
# Remove any stale server, start a fresh one, attach a client, then tear
# the server down again.
docker rm -f ollama
./run.sh -d --name ollama $IMAGE_NAME
./run.sh $IMAGE_NAME /bin/ollama run llama3.2
docker rm -f ollama
================================================
FILE: reComputer/scripts/llava/clean.sh
================================================
#!/bin/bash
# Resolve the llava image tag via jetson-containers' autotag and remove it.
docker rmi $(/home/$USER/reComputer/jetson-containers/autotag llava)
================================================
FILE: reComputer/scripts/llava/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 15 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llava/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llava/run.sh
================================================
#!/bin/bash
# Start the llava CLI demo from jetson-containers with a bundled sample image.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
./run.sh $(./autotag llava) \
    python3 -m llava.serve.cli \
    --model-path liuhaotian/llava-v1.5-7b \
    --image-file /data/images/hoover.jpg
================================================
FILE: reComputer/scripts/llava-v1.5-7b/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llava-v1.5-7b/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llava-v1.5-7b/run.sh
================================================
#!/bin/bash
# Start the llava-v1.5-7b CLI demo from jetson-containers with a sample image.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
./run.sh $(./autotag llava) \
    python3 -m llava.serve.cli \
    --model-path liuhaotian/llava-v1.5-7b \
    --image-file /data/images/hoover.jpg
================================================
FILE: reComputer/scripts/llava-v1.6-vicuna-7b/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/llava-v1.6-vicuna-7b/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/llava-v1.6-vicuna-7b/run.sh
================================================
#!/bin/bash
# Run llava-v1.6-vicuna-7b through the local_llm MLC backend.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
cd $JETSON_REPO_PATH
./run.sh $(./autotag local_llm) \
    python3 -m local_llm --api=mlc \
    --model liuhaotian/llava-v1.6-vicuna-7b \
    --max-context-len 768 \
    --max-new-tokens 128
================================================
FILE: reComputer/scripts/nanodb/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 80 # in GB
REQUIRED_MEM_SPACE: 15
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/nanodb/init.sh
================================================
#!/bin/bash
# check the runtime environment.
source $(dirname "$(realpath "$0")")/../utils.sh
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
# Clone dusty-nv/jetson-containers under ~/reComputer when it is missing.
BASE_PATH=/home/$USER/reComputer
mkdir -p "$BASE_PATH/"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    cd "$BASE_PATH/" || exit 1
    # Abort on clone failure so install.sh is not run from the wrong directory.
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/nanodb/readme.md
================================================
# NanoDB
## ref
- <https://www.jetson-ai-lab.com/tutorial_nanodb.html>
## access
- When using it on the Jetson itself, open `http://127.0.0.1:7860` in a browser.
- When accessing from another PC, find the Jetson's IP address and open `http://<Jetson's IP>:7860` in a browser.
================================================
FILE: reComputer/scripts/nanodb/run.sh
================================================
#!/bin/bash
# Download the COCO 2017 dataset and a prebuilt NanoDB index (when missing),
# then launch the NanoDB web server inside a jetson-containers container.
BASE_PATH=/home/$USER/reComputer
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
check_disk_space() {
    # Abort the whole script unless the filesystem holding $1 has at least
    # $2 GB of free space.
    #   $1 - directory to check
    #   $2 - required free space, in GB
    # Fix: declare everything local so repeated calls do not leak/clobber
    # script-level variables.
    local directory="$1"
    local required_space_gb="$2"
    local device free_space free_space_gb
    # Resolve the block device backing the directory (POSIX df output).
    device=$(df -P "$directory" | awk 'NR==2 {print $1}')
    echo "$device"
    # Free space on that device, in KB.
    free_space=$(df -P "$device" | awk 'NR==2 {print $4}')
    echo "$free_space"
    # Convert KB -> GB with two decimals.
    free_space_gb=$(echo "scale=2; $free_space / 1024 / 1024" | bc)
    echo "$free_space_gb"
    # Fast-fail when there is not enough room.
    if (( $(echo "$free_space_gb >= $required_space_gb" | bc -l) )); then
        echo "disk space ($1) enough, keep going."
    else
        echo "disk space ($1) not enough!! we need $2 GB!!"
        exit 1
    fi
}
# Make sure the COCO 2017 image sets are unpacked under the container data dir.
# TODO: support params to force download
DATA_PATH="$JETSON_REPO_PATH/data/datasets/coco/2017"
if [ ! -d $DATA_PATH ]; then
    mkdir -p $DATA_PATH
fi
cd $DATA_PATH
# val2017: download the archive unless the folder or zip already exists,
# then unpack and delete the zip. (1 GB check before download, 19 GB before
# unzip — presumably sized for the worst-case set; TODO confirm.)
if [ ! -d "$DATA_PATH/val2017" ]; then
    if [ ! -f "val2017.zip" ]; then
        check_disk_space $DATA_PATH 1
        wget http://images.cocodataset.org/zips/val2017.zip
    else
        echo "val2017.zip existed."
    fi
    check_disk_space $DATA_PATH 19
    unzip val2017.zip && rm val2017.zip
else
    echo "val2017/ existed."
fi
# train2017: same download/unpack flow.
if [ ! -d "$DATA_PATH/train2017" ]; then
    if [ ! -f "train2017.zip" ]; then
        check_disk_space $DATA_PATH 19
        wget http://images.cocodataset.org/zips/train2017.zip
    else
        echo "train2017.zip existed."
    fi
    check_disk_space $DATA_PATH 19
    unzip train2017.zip && rm train2017.zip
else
    echo "train2017/ existed."
fi
if [ ! -d "$DATA_PATH/unlabeled2017" ]; then
    # unlabeled2017: same download/unpack flow.
    if [ ! -f "unlabeled2017.zip" ]; then
        check_disk_space $DATA_PATH 19
        wget http://images.cocodataset.org/zips/unlabeled2017.zip
    else
        echo "unlabeled2017.zip existed."
    fi
    check_disk_space $DATA_PATH 19
    unzip unlabeled2017.zip && rm unlabeled2017.zip
else
    echo "unlabeled2017/ existed."
fi
# Fetch the prebuilt NanoDB index for COCO 2017 if it is not present yet.
INDEX_PATH="$JETSON_REPO_PATH/data/nanodb/coco/2017"
if [ ! -d $INDEX_PATH ]; then
    cd $JETSON_REPO_PATH/data/
    check_disk_space $JETSON_REPO_PATH 1
    wget https://nvidia.box.com/shared/static/icw8qhgioyj4qsk832r4nj2p9olsxoci.gz -O nanodb_coco_2017.tar.gz
    tar -xzvf nanodb_coco_2017.tar.gz
fi
# RUN: start the NanoDB server (web UI on port 7860) inside the container.
cd $JETSON_REPO_PATH
./run.sh $(./autotag nanodb) \
    python3 -m nanodb \
        --path /data/nanodb/coco/2017 \
        --server --port=7860
================================================
FILE: reComputer/scripts/nanoowl/config.yaml
================================================
# The tested JetPack versions.
ALLOWED_L4T_VERSIONS:
- 35.3.1
- 35.4.1
- 35.5.0
- 36.3.0
REQUIRED_DISK_SPACE: 25 # in GB
REQUIRED_MEM_SPACE: 7 # in GB
PACKAGES:
- nvidia-jetpack
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/nanoowl/init.sh
================================================
#!/bin/bash
# Check the runtime environment, then make sure a jetson-containers checkout
# exists under ~/reComputer, cloning and installing it when absent.
source "$(dirname "$(realpath "$0")")/../utils.sh"
check_base_env "$(dirname "$(realpath "$0")")/config.yaml"

BASE_PATH="/home/$USER/reComputer"
mkdir -p "$BASE_PATH"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"

if [ -d "$JETSON_REPO_PATH" ]; then
    echo "jetson-ai-lab existed."
else
    echo "jetson-ai-lab does not installed. start init..."
    # Abort on any failure so we never run install.sh in a half-cloned tree.
    cd "$BASE_PATH" || exit 1
    git clone --depth=1 "$BASE_JETSON_LAB_GIT" || exit 1
    cd "$JETSON_REPO_PATH" || exit 1
    bash install.sh
fi
================================================
FILE: reComputer/scripts/nanoowl/run.sh
================================================
#!/bin/bash
# Launch the NanoOWL tree-detection demo container (requires a V4L2 camera
# at /dev/video* and the prebuilt OWL image-encoder engine in data/).
BASE_PATH="/home/$USER/reComputer"
JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
# Fail fast if the jetson-containers checkout is missing (init.sh not run).
cd "$JETSON_REPO_PATH" || { echo "jetson-containers not found at $JETSON_REPO_PATH. Run init.sh first."; exit 1; }
./run.sh $(./autotag nanoowl) bash -c "ls /dev/video* && cd examples/tree_demo && python3 tree_demo.py ../../data/owl_image_encoder_patch32.engine"
================================================
FILE: reComputer/scripts/nvblox/README.md
================================================
# Jetson Example: Run NVBlox Mapping on NVIDIA Jetson

[Isaac ROS NVBlox](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox) is a high-performance GPU-accelerated 3D mapping framework developed by NVIDIA for real-time robotic perception. Unlike monocular depth estimation models, NVBlox consumes true depth input from RGB-D cameras or stereo cameras to construct accurate 3D scene representations. This case enables you to **quickly deploy the necessary environment for nvblox to run on your reComputer with just one click.**
Detailed instructions for environment configuration can be found at: [Deploy NVBlox with Orbbec Camera](https://wiki.seeedstudio.com/deploy_nvblox_jetson_agx_orin/)
Running the main process will:
1. Download `nvblox_images.tar` from the built-in OneDrive share link into `~/.cache/jetson-examples/nvblox`
2. Run `docker load -i` on that archive
3. Build the derived image and prepared host/container workspaces
4. Launch the static Gemini2 NVBlox demo
## Requirements
- NVIDIA Jetson Orin
- Ubuntu 22.04
- JetPack 6.x
- Docker with NVIDIA Container Runtime
- Orbbec Gemini2 or another Orbbec camera that provides `/camera/color/*` and `/camera/depth/*`
- Roughly 60GB free disk space for the cached archive, derived image, and managed workspace
## Usage
Run the full prepare + demo flow:
```sh
cd jetson-examples/
pip install .
reComputer run nvblox
```
**Prepare only:**
```bash
NVBLOX_MODE=prepare reComputer run nvblox
```
Run only after preparation:
```sh
NVBLOX_MODE=run reComputer run nvblox
```
Force a rebuild of the prepared host/container workspaces:
```sh
NVBLOX_FORCE_REBUILD=1 reComputer run nvblox
```
Run headless:
```sh
NVBLOX_HEADLESS=1 reComputer run nvblox
```
Override the managed workspace root:
```sh
MANAGED_ROOT=/path/to/nvblox_demo reComputer run nvblox
```
Override the built-in OneDrive archive settings:
```sh
NVBLOX_IMAGE_SHARE_URL='https://...'
NVBLOX_IMAGE_ARCHIVE_NAME='nvblox_images.tar'
NVBLOX_IMAGE_CACHE_DIR="$HOME/.cache/jetson-examples/nvblox"
reComputer run nvblox
```
## Cleanup
```sh
reComputer clean nvblox
```
This removes the managed workspace, logs, partial downloads, the derived image `local/isaac_ros_nvblox_orbbec:jp6-humble`, and the running demo container if it exists.
It keeps:
- the cached base archive in `~/.cache/jetson-examples/nvblox`
- the loaded base image imported from `nvblox_images.tar`
## Troubleshooting
- The default path checks ordinary Gemini2 color/depth readiness, not stereo IR capability.
- Host readiness now requires only:
- `/camera/color/camera_info`
- `/camera/depth/camera_info`
- `/camera/color/image_raw`
- `/camera/depth/image_raw`
- Container readiness now checks host camera discovery through `/camera/color/camera_info` and `/camera/depth/camera_info`.
- The runtime success criterion is static map output from `/nvblox_node/static_esdf_pointcloud` or `/nvblox_node/static_map_slice`.
- `usb speed: 5000 Mbps` is not treated as proof that the full demo is healthy. The final authority is whether host color/depth, container visibility, static TF, and static map output all succeed.
- If the host driver exits and Gemini2 falls back to `usb_present_no_video`, the run path still attempts automatic recovery with udev refresh and USB rebind so you can usually retry without unplugging the camera.
- If the run still fails, use the built-in connectivity debugger:
```sh
bash reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh
```
That debug path follows the same stages as the default runtime:
1. Gemini2 device state
2. Host ROS discovery environment
3. Container ROS discovery environment
4. Host color/depth readiness
5. Container camera visibility
6. Managed static TF availability
7. Static NVBlox output
## Notes
- This example does not use `docker pull` for the base image path.
- The OneDrive downloader resolves the anonymous `download.aspx?...tempauth=...` URL from the preview page before downloading.
- `NVBLOX_MODE=run` expects an already prepared `MANAGED_ROOT`.
- The host camera is launched with `ros2 launch orbbec_camera gemini2.launch.py publish_tf:=false tf_publish_rate:=0.0`.
- The container workspace now centers on `nvblox_examples_bringup` static Orbbec launches and removes the old default dependence on Visual SLAM.
- The managed static TF chain is generated inside the prepared container workspace rather than relying on device-published TF.
- Headless mode switches the default launch file to `orbbec_debug.launch.py`, while GUI mode uses `orbbec_example.launch.py`.
================================================
FILE: reComputer/scripts/nvblox/clean.sh
================================================
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/lib/common.sh"
MANAGED_ROOT="${MANAGED_ROOT:-${MANAGED_ROOT_DEFAULT}}"
CACHE_DIR="$(resolve_nvblox_image_cache_dir)"
maybe_enable_docker_access() {
    # Work out how to reach the Docker daemon: directly, via sudo, or not
    # at all. On success sets DOCKER_PREFIX (empty or "sudo") and returns 0;
    # on failure warns and returns 1 so callers can skip Docker cleanup.
    if ! command -v docker >/dev/null 2>&1; then
        warn "docker command not found. Skipping container and image cleanup."
        return 1
    fi
    if docker info >/dev/null 2>&1; then
        DOCKER_PREFIX=()
        return 0
    elif sudo docker info >/dev/null 2>&1; then
        DOCKER_PREFIX=(sudo)
        return 0
    fi
    warn "Cannot access the Docker daemon. Skipping container and image cleanup."
    return 1
}
remove_managed_root() {
    # Delete MANAGED_ROOT, but only when it carries this example's sentinel
    # file — never remove a directory the NVBlox example does not own.
    local sentinel="${MANAGED_ROOT}/${MANAGED_SENTINEL_NAME}"
    if [[ ! -e "${MANAGED_ROOT}" ]]; then
        info "Managed root ${MANAGED_ROOT} does not exist."
        return 0
    fi
    [[ -f "${sentinel}" ]] || die "Managed root ${MANAGED_ROOT} exists but is not owned by the NVBlox example. Refusing to remove it."
    run_sudo rm -rf "${MANAGED_ROOT}"
    info "Removed managed root ${MANAGED_ROOT}"
}
# ---- main ----
# Refuse unsupported user contexts; re-exec as the setup user when required.
ensure_supported_user_context
if should_reexec_as_setup_user; then
    printf '[reComputer][nvblox] Re-entering as %s.\n' "${SETUP_USER_NAME}" >&2
    reexec_as_setup_user "${SCRIPT_DIR}/clean.sh"
fi
# Kill any leftover Gemini2 camera processes before touching Docker state.
cleanup_residual_gemini2_processes "nvblox clean" || true
# Remove the demo container and the derived image when Docker is reachable.
if maybe_enable_docker_access; then
    if docker_cmd ps -a --format '{{.Names}}' | grep -Fxq "${CONTAINER_NAME_DEFAULT}"; then
        info "Removing container ${CONTAINER_NAME_DEFAULT}"
        docker_cmd rm -f "${CONTAINER_NAME_DEFAULT}" >/dev/null
    else
        info "Container ${CONTAINER_NAME_DEFAULT} does not exist."
    fi
    if docker_cmd image inspect "${DERIVED_IMAGE_TAG}" >/dev/null 2>&1; then
        info "Removing derived image ${DERIVED_IMAGE_TAG}"
        docker_cmd image rm -f "${DERIVED_IMAGE_TAG}" >/dev/null
    else
        info "Derived image ${DERIVED_IMAGE_TAG} does not exist."
    fi
fi
# Remove the managed workspace and partial downloads; keep the cached archive.
remove_managed_root
cleanup_nvblox_partial_downloads "${CACHE_DIR}"
info "NVBlox clean complete. Cached base archive is kept in ${CACHE_DIR}"
================================================
FILE: reComputer/scripts/nvblox/config/orbbec_stereo_capability_probe.yaml
================================================
depth_registration: false
enable_point_cloud: false
enable_colored_point_cloud: false
device_preset: "High Accuracy"
laser_on_off_mode: 1
time_domain: "device"
enable_sync_host_time: true
align_mode: "SW"
camera_name: "camera"
enable_3d_reconstruction_mode: false
enable_color: false
color_width: 640
color_height: 480
color_fps: 5
color_format: "RGB"
color_qos: "SENSOR_DATA"
depth_width: 640
depth_height: 400
depth_fps: 15
depth_format: "Y16"
depth_qos: "SENSOR_DATA"
point_cloud_qos: "SENSOR_DATA"
enable_ir_auto_exposure: false
ir_exposure: 5000
ir_gain: 40
enable_left_ir: true
left_ir_width: 640
left_ir_height: 400
left_ir_fps: 15
left_ir_format: "Y8"
left_ir_qos: "SENSOR_DATA"
enable_right_ir: true
right_ir_width: 640
right_ir_height: 400
right_ir_fps: 15
right_ir_format: "Y8"
right_ir_qos: "SENSOR_DATA"
enable_sync_output_accel_gyro: false
enable_accel: false
accel_rate: "200hz"
accel_range: "4g"
enable_gyro: false
gyro_rate: "200hz"
gyro_range: "1000dps"
liner_accel_cov: "0.01"
angular_vel_cov: "0.01"
================================================
FILE: reComputer/scripts/nvblox/config/orbbec_vslam_mobile.yaml
================================================
depth_registration: true
enable_point_cloud: true
enable_colored_point_cloud: true
device_preset: "High Accuracy"
laser_on_off_mode: 1
time_domain: "device"
enable_sync_host_time: true
align_mode: "SW"
camera_name: "camera"
enable_3d_reconstruction_mode: true
enable_color: true
color_width: 640
color_height: 480
color_fps: 30
color_format: "RGB"
enable_color_auto_exposure: false
color_exposure: 50
color_gain: -1
color_qos: "SENSOR_DATA"
depth_width: 640
depth_height: 480
depth_fps: 30
depth_format: "Y16"
depth_qos: "SENSOR_DATA"
point_cloud_qos: "SENSOR_DATA"
enable_ir_auto_exposure: false
ir_exposure: 5000
ir_gain: 40
enable_left_ir: true
left_ir_width: 640
left_ir_height: 480
left_ir_fps: 30
left_ir_format: "Y8"
left_ir_qos: "SENSOR_DATA"
enable_right_ir: true
right_ir_width: 640
right_ir_height: 480
right_ir_fps: 30
right_ir_format: "Y8"
right_ir_qos: "SENSOR_DATA"
enable_sync_output_accel_gyro: false
enable_accel: false
accel_rate: "200hz"
accel_range: "4g"
enable_gyro: false
gyro_rate: "200hz"
gyro_range: "1000dps"
liner_accel_cov: "0.01"
angular_vel_cov: "0.01"
================================================
FILE: reComputer/scripts/nvblox/config.yaml
================================================
ALLOWED_L4T_VERSIONS:
- 36.4.0
- 36.4.3
- 36.4.4
REQUIRED_DISK_SPACE: 60
REQUIRED_MEM_SPACE: 14
PACKAGES:
- nvidia-jetpack
- x11-xserver-utils
DOCKER:
ENABLE: true
DAEMON: |
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"storage-driver": "overlay2",
"data-root": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"no-new-privileges": true,
"experimental": false
}
================================================
FILE: reComputer/scripts/nvblox/docker/Dockerfile.nvblox_orbbec
================================================
# Derived image: extends BASE_IMAGE with the toolchain and ROS packages
# needed to build and run the NVBlox + Orbbec workspace.
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
ARG ROS_DISTRO=humble
ENV DEBIAN_FRONTEND=noninteractive
ENV ROS_DISTRO=${ROS_DISTRO}
# Use a bash login shell so every RUN step sees the profile environment.
SHELL ["/bin/bash", "-lc"]
# Build tooling and the ROS packages the workspace build depends on.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    git \
    curl \
    ca-certificates \
    python3-rosdep \
    python3-vcstool \
    python3-colcon-common-extensions \
    build-essential \
    libgflags-dev \
    nlohmann-json3-dev \
    libdw-dev \
    libssl-dev \
    mesa-utils \
    libgl1 \
    libgoogle-glog-dev \
    ros-${ROS_DISTRO}-image-transport \
    ros-${ROS_DISTRO}-image-transport-plugins \
    ros-${ROS_DISTRO}-compressed-image-transport \
    ros-${ROS_DISTRO}-image-publisher \
    ros-${ROS_DISTRO}-camera-info-manager \
    ros-${ROS_DISTRO}-diagnostic-updater \
    ros-${ROS_DISTRO}-diagnostic-msgs \
    ros-${ROS_DISTRO}-statistics-msgs \
    ros-${ROS_DISTRO}-xacro \
    ros-${ROS_DISTRO}-backward-ros \
    ros-${ROS_DISTRO}-magic-enum \
    ros-${ROS_DISTRO}-foxglove-msgs && \
    rm -rf /var/lib/apt/lists/*
# Header-layout fixups: expose magic_enum.hpp system-wide and flatten the
# nested foxglove_msgs include path when present.
RUN if [[ -f /opt/ros/${ROS_DISTRO}/include/magic_enum.hpp ]]; then \
    ln -sf /opt/ros/${ROS_DISTRO}/include/magic_enum.hpp /usr/include/magic_enum.hpp; \
    fi && \
    if [[ -d /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/foxglove_msgs/msg ]]; then \
    mkdir -p /opt/ros/${ROS_DISTRO}/include/foxglove_msgs && \
    ln -sfn /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/foxglove_msgs/msg /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/msg; \
    fi
# Bake in the helper scripts that prepare the workspace and launch the demo.
COPY docker/prepare_container_workspace.sh /opt/nvblox/bin/prepare_container_workspace.sh
COPY docker/launch_nvblox.sh /opt/nvblox/bin/launch_nvblox.sh
RUN chmod +x /opt/nvblox/bin/prepare_container_workspace.sh /opt/nvblox/bin/launch_nvblox.sh
WORKDIR /workspaces/isaac_ros-dev
================================================
FILE: reComputer/scripts/nvblox/docker/launch_nvblox.sh
================================================
#!/usr/bin/env bash
# Container-side entry point: validate the prepared workspace, then launch
# the static NVBlox demo together with a runtime-output probe.
set -euo pipefail
# Overridable knobs; defaults target the GUI static demo.
ROS_DISTRO="${ROS_DISTRO:-humble}"
NVBLOX_LAUNCH_FILE="${NVBLOX_LAUNCH_FILE:-orbbec_example.launch.py}"
EXPECTED_WORKSPACE_SPEC_VERSION="${EXPECTED_WORKSPACE_SPEC_VERSION:-}"
NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC="${NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC:-45}"
ISAAC_WS="/workspaces/isaac_ros-dev"
# Stamp written by prepare_container_workspace.sh; its presence proves the
# workspace was prepared by this example's tooling.
STAMP_PATH="${ISAAC_WS}/.setup-nvbox/container_workspace.env"
LAUNCH_PID=""
OUTPUT_PROBE_PID=""
# Discovery-related env vars echoed below for debugging ROS traffic issues.
ROS_DISCOVERY_ENV_VARS=(
    "ROS_DOMAIN_ID"
    "ROS_LOCALHOST_ONLY"
    "RMW_IMPLEMENTATION"
    "ROS_AUTOMATIC_DISCOVERY_RANGE"
    "ROS_STATIC_PEERS"
    "CYCLONEDDS_URI"
    "CYCLONEDDS_HOME"
    "FASTDDS_DEFAULT_PROFILES_FILE"
    "FASTRTPS_DEFAULT_PROFILES_FILE"
)
# Fail fast when any prerequisite file is missing.
[[ -f "/opt/ros/${ROS_DISTRO}/setup.bash" ]] || {
    printf '[container][ERROR] Missing ROS setup at /opt/ros/%s/setup.bash\n' "${ROS_DISTRO}" >&2
    exit 1
}
[[ -f "${ISAAC_WS}/install/setup.bash" ]] || {
    printf '[container][ERROR] Missing workspace setup at %s/install/setup.bash\n' "${ISAAC_WS}" >&2
    exit 1
}
[[ -f "${STAMP_PATH}" ]] || {
    printf '[container][ERROR] Missing workspace stamp at %s\n' "${STAMP_PATH}" >&2
    exit 1
}
# ROS setup scripts reference unset variables, so temporarily drop `set -u`
# while sourcing, restoring it afterwards if it was active.
restore_nounset=0
if [[ $- == *u* ]]; then
    restore_nounset=1
    set +u
fi
# shellcheck disable=SC1091
source "/opt/ros/${ROS_DISTRO}/setup.bash"
# shellcheck disable=SC1090
source "${ISAAC_WS}/install/setup.bash"
# shellcheck disable=SC1090
source "${STAMP_PATH}"
if (( restore_nounset )); then
    set -u
fi
# Refuse to run against a workspace built from a different spec version.
if [[ -n "${EXPECTED_WORKSPACE_SPEC_VERSION}" ]] && \
    [[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" != "${EXPECTED_WORKSPACE_SPEC_VERSION}" ]]; then
    printf '[container][ERROR] Workspace spec mismatch. Expected %s, found %s\n' \
        "${EXPECTED_WORKSPACE_SPEC_VERSION}" "${STAMP_WORKSPACE_SPEC_VERSION:-unknown}" >&2
    exit 1
fi
# Resolve the requested launch file inside the prepared bringup package.
PACKAGE_PREFIX="$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)"
[[ -n "${PACKAGE_PREFIX}" ]] || {
    printf '[container][ERROR] Cannot resolve nvblox_examples_bringup in the prepared workspace.\n' >&2
    exit 1
}
LAUNCH_PATH="${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}"
[[ -f "${LAUNCH_PATH}" ]] || {
    printf '[container][ERROR] Prepared launch file is missing: %s\n' "${LAUNCH_PATH}" >&2
    exit 1
}
format_ros_discovery_env() {
    # Render every ROS discovery-related variable as NAME=value (or
    # NAME=<unset>) joined on one line. Note: "${rendered[*]}" joins with
    # the FIRST character of IFS, i.e. a comma.
    local rendered=()
    local name
    for name in "${ROS_DISCOVERY_ENV_VARS[@]}"; do
        if [[ -n "${!name-}" ]]; then
            rendered+=("${name}=${!name-}")
        else
            rendered+=("${name}=<unset>")
        fi
    done
    # `local IFS` restores the caller's IFS automatically on return.
    local IFS=', '
    printf '%s\n' "${rendered[*]}"
}
printf '[container][INFO] Workspace spec: %s\n' "${STAMP_WORKSPACE_SPEC_VERSION:-unknown}"
printf '[container][INFO] Workspace stamped at: %s\n' "${STAMPED_AT:-unknown}"
printf '[container][INFO] Launching static demo file: %s\n' "${NVBLOX_LAUNCH_FILE}"
printf '[container][INFO] Managed static TF chain: odom -> base_link -> camera_link -> camera_color_optical_frame\n'
printf '[container][INFO] Expected camera info frame_id: camera_color_optical_frame\n'
printf '[container][INFO] Container ROS discovery env: %s\n' "$(format_ros_discovery_env)"
probe_nvblox_runtime_output() {
python3 - "${NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC}" <<'PY'
import sys
import time
import rclpy
from nav_msgs.msg import OccupancyGrid
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import PointCloud2
timeout_seconds = float(sys.argv[1])
class NvbloxOutputProbe(Node):
def __init__(self):
super().__init__('nvblox_runtime_output_probe')
self.result = None
self.create_subscription(
PointCloud2,
'/nvblox_node/static_esdf_pointcloud',
self._pointcloud_callback,
qos_profile_sensor_data)
self.create_subscription(
OccupancyGrid,
'/nvblox_node/static_map_slice',
self._map_slice_callback,
10)
def _pointcloud_callback(self, msg: PointCloud2):
self.result = (
'/nvblox_node/static_esdf_pointcloud',
f'frame_id={msg.header.frame_id or "<empty>"} width={msg.width} height={msg.height}')
def _map_slice_callback(self, msg: OccupancyGrid):
self.result = (
'/nvblox_node/static_map_slice',
f'frame_id={msg.header.frame_id or "<empty>"} width={msg.info.width} '
f'height={msg.info.height} resolution={msg.info.resolution:.3f}')
def main() -> int:
print(
'[container][INFO] Starting runtime output probe for '
'/nvblox_node/static_esdf_pointcloud and /nvblox_node/static_map_slice '
f'({timeout_seconds:.0f}s timeout)',
flush=True)
rclpy.init(args=None)
node = NvbloxOutputProbe()
executor = SingleThreadedExecutor()
executor.add_node(node)
deadline = time.monotonic() + timeout_seconds
try:
while time.monotonic() < deadline and node.result is None:
executor.spin_once(timeout_sec=0.2)
if node.result is None:
print(
'[container][WARN] Runtime output probe timed out waiting for '
'/nvblox_node/static_esdf_pointcloud or /nvblox_node/static_map_slice. '
'Readiness probes passed, but no runtime map output was observed yet.',
flush=True)
return 1
topic_name, details = node.result
print(f'[container][INFO] Runtime output probe received {topic_name}: {details}', flush=True)
return 0
finally:
executor.remove_node(node)
node.destroy_node()
rclpy.shutdown()
sys.exit(main())
PY
}
forward_signal() {
    # Relay a received signal to the launch and probe children, when running.
    local signal="$1"
    local pid
    for pid in "${LAUNCH_PID}" "${OUTPUT_PROBE_PID}"; do
        [[ -n "${pid}" ]] && kill "-${signal}" "${pid}" 2>/dev/null || true
    done
}
# Relay INT/TERM to the children so Ctrl-C tears the demo down cleanly.
trap 'forward_signal INT' INT
trap 'forward_signal TERM' TERM
# Start the ROS launch and the output probe concurrently.
ros2 launch nvblox_examples_bringup "${NVBLOX_LAUNCH_FILE}" &
LAUNCH_PID=$!
probe_nvblox_runtime_output &
OUTPUT_PROBE_PID=$!
# Wait for the launch; capture its status without tripping `set -e`.
set +e
wait "${LAUNCH_PID}"
launch_status=$?
set -e
# Stop the probe if still running, then reap it.
if [[ -n "${OUTPUT_PROBE_PID}" ]] && kill -0 "${OUTPUT_PROBE_PID}" 2>/dev/null; then
    kill -TERM "${OUTPUT_PROBE_PID}" 2>/dev/null || true
fi
wait "${OUTPUT_PROBE_PID}" 2>/dev/null || true
# The launch's exit status is the script's result.
exit "${launch_status}"
================================================
FILE: reComputer/scripts/nvblox/docker/prepare_container_workspace.sh
================================================
#!/usr/bin/env bash
# Prepares the in-container colcon workspace for the static NVBlox + Orbbec
# demo: clones/refreshes the source repos, assembles a curated src/ tree,
# builds it, and stamps the result for later validation.
set -euo pipefail

# ---- Tunables (all overridable via environment) ----
ROS_DISTRO="${ROS_DISTRO:-humble}"
FORCE_REBUILD="${FORCE_REBUILD:-0}"
SETUP_IMAGE_ID="${SETUP_IMAGE_ID:-}"
SETUP_IMAGE_CONTEXT_HASH="${SETUP_IMAGE_CONTEXT_HASH:-}"
COMMUNITY_REPO_URL="${COMMUNITY_REPO_URL:-https://github.com/jjjadand/isaac-NVblox-Orbbec.git}"
COMMUNITY_REPO_BRANCH="${COMMUNITY_REPO_BRANCH:-main}"
OFFICIAL_NVBLOX_REPO_URL="${OFFICIAL_NVBLOX_REPO_URL:-https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox.git}"
OFFICIAL_NVBLOX_REPO_BRANCH="${OFFICIAL_NVBLOX_REPO_BRANCH:-release-3.2}"
WORKSPACE_SPEC_VERSION="${EXPECTED_WORKSPACE_SPEC_VERSION:-static-demo-final-v3}"

# ---- Workspace layout ----
ISAAC_WS="/workspaces/isaac_ros-dev"
SRC_DIR="${ISAAC_WS}/src"
SETUP_DIR="${ISAAC_WS}/.setup-nvbox"
STAMP_PATH="${SETUP_DIR}/container_workspace.env"
COMMUNITY_REPO_PATH="${SETUP_DIR}/isaac-NVblox-Orbbec"
OFFICIAL_NVBLOX_REPO_PATH="${SETUP_DIR}/isaac_ros_nvblox"
COMMUNITY_COMMON_ROOT="${COMMUNITY_REPO_PATH}/src/isaac_ros_common"
COMMUNITY_NITROS_ROOT="${COMMUNITY_REPO_PATH}/src/isaac_ros_nitros"
COMMUNITY_NVBLOX_ROOT="${COMMUNITY_REPO_PATH}/src/isaac_ros_nvblox"
OFFICIAL_NVBLOX_ROOT="${OFFICIAL_NVBLOX_REPO_PATH}"

# Packages taken from the community repo's isaac_ros_common tree.
COMMUNITY_COMMON_PACKAGE_PATHS=(
    "isaac_common"
    "isaac_ros_common"
    "isaac_ros_launch_utils"
    "isaac_ros_tensor_list_interfaces"
)
# Packages taken from the community repo's isaac_ros_nitros tree.
COMMUNITY_NITROS_PACKAGE_PATHS=(
    "isaac_ros_gxf"
    "isaac_ros_nitros"
    "isaac_ros_managed_nitros"
    "isaac_ros_nitros_type/isaac_ros_nitros_camera_info_type"
    "isaac_ros_nitros_type/isaac_ros_nitros_image_type"
    "isaac_ros_nitros_type/isaac_ros_nitros_tensor_list_type"
    "isaac_ros_gxf_extensions/gxf_isaac_message_compositor"
    "isaac_ros_gxf_extensions/gxf_isaac_optimizer"
    "isaac_ros_gxf_extensions/gxf_isaac_gxf_helpers"
    "isaac_ros_gxf_extensions/gxf_isaac_sight"
    "isaac_ros_gxf_extensions/gxf_isaac_atlas"
    "isaac_ros_gxf_extensions/gxf_isaac_gems"
)
# Packages taken from the official isaac_ros_nvblox repo.
OFFICIAL_NVBLOX_PACKAGE_PATHS=(
    "nvblox_msgs"
    "nvblox_ros_common"
    "nvblox_ros_python_utils"
    "nvblox_ros"
    "nvblox_rviz_plugin"
    "nvblox_examples/nvblox_examples_bringup"
)
# Files overlaid from the static-demo assets.
STATIC_DEMO_OVERLAY_FILE_PATHS=(
    "nvblox_examples/nvblox_examples_bringup/config/visualization/orbbec_example.rviz"
)
# Launch files generated by this prepare step.
GENERATED_LAUNCH_FILE_PATHS=(
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py"
)
# Config files generated by this prepare step.
GENERATED_CONFIG_FILE_PATHS=(
    "nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
# Every path that must exist under src/ after assembly.
REQUIRED_SRC_PATHS=(
    "isaac_common"
    "isaac_ros_common"
    "isaac_ros_launch_utils"
    "isaac_ros_tensor_list_interfaces"
    "isaac_ros_gxf"
    "isaac_ros_nitros"
    "isaac_ros_managed_nitros"
    "isaac_ros_nitros_type/isaac_ros_nitros_camera_info_type"
    "isaac_ros_nitros_type/isaac_ros_nitros_image_type"
    "isaac_ros_nitros_type/isaac_ros_nitros_tensor_list_type"
    "isaac_ros_gxf_extensions/gxf_isaac_message_compositor"
    "isaac_ros_gxf_extensions/gxf_isaac_optimizer"
    "isaac_ros_gxf_extensions/gxf_isaac_gxf_helpers"
    "isaac_ros_gxf_extensions/gxf_isaac_sight"
    "isaac_ros_gxf_extensions/gxf_isaac_atlas"
    "isaac_ros_gxf_extensions/gxf_isaac_gems"
    "nvblox_msgs"
    "nvblox_ros_common"
    "nvblox_ros_python_utils"
    "nvblox_ros"
    "nvblox_rviz_plugin"
    "nvblox_examples/nvblox_examples_bringup"
)
# Individual files that must exist under src/ after assembly.
REQUIRED_SRC_FILE_PATHS=(
    "nvblox_ros/CMakeLists.txt"
    "nvblox_ros/nvblox_core/CMakeLists.txt"
    "nvblox_ros/nvblox_core/cmake/cuda/setup_compute_capability.cmake"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py"
    "nvblox_examples/nvblox_examples_bringup/config/visualization/orbbec_example.rviz"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py"
    "nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py"
    "nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
# Paths deliberately excluded from src/ (VSLAM, nav2, extras not needed here).
EXCLUDED_SRC_PATHS=(
    "isaac_ros_pynitros"
    "isaac_ros_managed_nitros_examples"
    "isaac_ros_nitros_bridge"
    "isaac_ros_nitros_topic_tools"
    "isaac_ros_visual_slam"
    "isaac_ros_visual_slam_interfaces"
    "nvblox_nav2"
    "nvblox_examples/nvblox_image_padding"
    "nvblox_examples/semantic_label_conversion"
)
# Dependencies stripped from package manifests for the static demo.
STATIC_DEMO_REMOVED_DEPENDENCIES=(
    "nova_carter_navigation"
    "isaac_ros_visual_slam"
    "isaac_ros_visual_slam_interfaces"
    "isaac_ros_peoplenet_models_install"
    "isaac_ros_detectnet"
    "isaac_ros_peoplesemseg_models_install"
    "isaac_ros_dnn_image_encoder"
    "isaac_ros_triton"
    "isaac_ros_unet"
    "semantic_label_conversion"
    "nvblox_image_padding"
)
# Keys rosdep should not try to resolve.
ROSDEP_SKIP_KEYS=(
    "isaac_ros_peoplenet_models_install"
    "isaac_ros_detectnet"
    "isaac_ros_image_proc"
)
# colcon build target(s); dependencies are pulled in transitively.
COLCON_TARGETS=(
    "nvblox_examples_bringup"
)
# Packages that must resolve via `ros2 pkg prefix` after the build.
RUNTIME_REQUIRED_PACKAGES=(
    "nvblox_examples_bringup"
    "nvblox_ros"
)
# Installed files that must exist for the runtime to be considered ready.
INSTALL_REQUIRED_FILE_PATHS=(
    "install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_transforms.launch.py"
    "install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_example.launch.py"
    "install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_debug.launch.py"
    "install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py"
    "install/nvblox_examples_bringup/share/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml"
)
log() {
    # Informational logging with a `[container][timestamp]` prefix.
    local stamp
    stamp="$(date '+%Y-%m-%d %H:%M:%S')"
    printf '[container][%s] %s\n' "${stamp}" "$*"
}
die() {
    # Print an error to stderr, then abort the whole script with status 1.
    printf '[container][ERROR] %s\n' "$*" 1>&2
    exit 1
}
source_ros() {
    # Source the ROS distro environment plus, when already built, the
    # workspace overlay. ROS setup scripts reference unset variables, so
    # `set -u` is temporarily dropped and restored if it was active.
    local restore_nounset=0
    if [[ $- == *u* ]]; then
        restore_nounset=1
        set +u
    fi
    # shellcheck disable=SC1091
    source "/opt/ros/${ROS_DISTRO}/setup.bash"
    if [[ -f "${ISAAC_WS}/install/setup.bash" ]]; then
        # shellcheck disable=SC1090
        source "${ISAAC_WS}/install/setup.bash"
    fi
    if (( restore_nounset )); then
        set -u
    fi
}
ensure_rosdep_ready() {
    # Initialize rosdep on first use (best effort), then refresh its index.
    local default_list="/etc/ros/rosdep/sources.list.d/20-default.list"
    if [[ ! -f "${default_list}" ]]; then
        log "Initializing rosdep."
        rosdep init || true
    fi
    log "Updating rosdep."
    rosdep update
}
ensure_git_safe_directory() {
    # Idempotently register an existing path in Git's global safe.directory
    # list; silently skips empty or nonexistent paths.
    local path="$1"
    [[ -n "${path}" && -e "${path}" ]] || return 0
    if ! git config --global --get-all safe.directory 2>/dev/null | grep -Fqx "${path}"; then
        git config --global --add safe.directory "${path}"
    fi
}
resolve_gitdir_path() {
    # Print the absolute path of a repo's git directory. Handles both a real
    # .git directory and a ".git" file containing a "gitdir: ..." pointer
    # (worktrees/submodules). Returns nonzero when neither form is usable.
    local repo="$1"
    local marker="${repo}/.git"
    if [[ -d "${marker}" ]]; then
        printf '%s\n' "${marker}"
        return 0
    fi
    [[ -f "${marker}" ]] || return 1
    local target
    target="$(sed -n 's/^gitdir: //p' "${marker}" | head -n 1)"
    [[ -n "${target}" ]] || return 1
    case "${target}" in
        /*) printf '%s\n' "${target}" ;;
        *) printf '%s\n' "$(cd "${repo}" && cd "${target}" && pwd)" ;;
    esac
}
ensure_repo_safe_directories() {
    # Whitelist both a repo's work tree and, when resolvable, its git dir.
    local repo="$1"
    local gitdir
    ensure_git_safe_directory "${repo}"
    if gitdir="$(resolve_gitdir_path "${repo}" 2>/dev/null)"; then
        ensure_git_safe_directory "${gitdir}"
    fi
}
extract_dubious_ownership_paths() {
    # Pull the unique repository paths out of Git "dubious ownership"
    # errors captured in the given log file.
    local captured_log="$1"
    sed -n "s/.*detected dubious ownership in repository at '\(.*\)'/\1/p" "${captured_log}" | sort -u
}
ensure_paths_from_ownership_log() {
    # Register safe.directory entries for every repo named in a captured
    # dubious-ownership error log.
    local source_log="$1"
    local line
    while IFS= read -r line; do
        [[ -z "${line}" ]] || ensure_repo_safe_directories "${line}"
    done < <(extract_dubious_ownership_paths "${source_log}")
}
assert_git_repo_metadata() {
    # Accept a missing cache or one with .git metadata; abort on a cache
    # directory that exists without any Git metadata.
    local repo="$1"
    local label="$2"
    if [[ -e "${repo}" && ! -e "${repo}/.git" ]]; then
        die "Managed ${label} cache at ${repo} is missing Git metadata. Delete ${repo} and rerun prepare."
    fi
    return 0
}
assert_git_repo_accessible() {
    # Verify a managed repo cache is a usable Git work tree, retrying once
    # after fixing any "dubious ownership" complaints; dies when unusable.
    #   $1 - repo path (missing path is accepted)
    #   $2 - human-readable label for error messages
    local repo_path="$1"
    local label="$2"
    local git_log
    [[ -e "${repo_path}" ]] || return 0
    assert_git_repo_metadata "${repo_path}" "${label}"
    ensure_repo_safe_directories "${repo_path}"
    git_log="$(mktemp)"
    if git -C "${repo_path}" rev-parse --is-inside-work-tree >/dev/null 2>"${git_log}"; then
        rm -f "${git_log}"
        return 0
    fi
    # First attempt failed; when it was an ownership complaint, whitelist the
    # offending paths and try once more.
    if grep -q "detected dubious ownership" "${git_log}"; then
        ensure_paths_from_ownership_log "${git_log}"
        if git -C "${repo_path}" rev-parse --is-inside-work-tree >/dev/null 2>"${git_log}"; then
            rm -f "${git_log}"
            return 0
        fi
    fi
    # Surface the captured git error before aborting.
    cat "${git_log}" >&2 || true
    rm -f "${git_log}"
    die "Managed ${label} cache at ${repo_path} is not usable. Delete ${repo_path} and rerun prepare."
}
initialize_managed_git_access() {
    # Best-effort bootstrap of ~/.gitconfig, then whitelist every managed
    # repo path (including the nvblox_core submodule) for Git access.
    mkdir -p "${HOME}" >/dev/null 2>&1 || true
    touch "${HOME}/.gitconfig" >/dev/null 2>&1 || true
    local repo
    for repo in \
        "${COMMUNITY_REPO_PATH}" \
        "${OFFICIAL_NVBLOX_REPO_PATH}" \
        "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core"; do
        ensure_repo_safe_directories "${repo}"
    done
}
verify_managed_git_cache_state() {
    # Fail early when any managed Git cache (community repo, official
    # nvblox repo, or its nvblox_core submodule) is present but unusable.
    assert_git_repo_accessible "${COMMUNITY_REPO_PATH}" "community repo"
    assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}" "official Isaac ROS Nvblox repo"
    assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core" "official Isaac ROS Nvblox submodule"
}
clone_or_update_repo() {
    # Clone a repo into the managed cache, or refresh an existing clone to
    # the tip of the requested branch. Refuses to touch a dirty work tree.
    #   $1 - repo URL, $2 - branch, $3 - destination path, $4 - display name
    local repo_url="$1"
    local repo_branch="$2"
    local repo_path="$3"
    local repo_name="$4"
    mkdir -p "${SRC_DIR}" "${SETUP_DIR}"
    if [[ ! -d "${repo_path}/.git" ]]; then
        log "Cloning ${repo_name} from ${repo_url}."
        git clone --branch "${repo_branch}" --depth 1 "${repo_url}" "${repo_path}"
        ensure_repo_safe_directories "${repo_path}"
        return 0
    fi
    assert_git_repo_accessible "${repo_path}" "${repo_name}"
    # Never discard local edits silently.
    if [[ -n "$(git -C "${repo_path}" status --porcelain)" ]]; then
        die "Managed repo has local changes at ${repo_path}."
    fi
    log "Refreshing ${repo_name}."
    # Shallow-fetch the branch and hard-reset the local branch onto it.
    git -C "${repo_path}" fetch --depth 1 origin "${repo_branch}"
    git -C "${repo_path}" checkout -B "${repo_branch}" "origin/${repo_branch}"
}
# Initialize/update one submodule of a managed repo. Mirrors the dubious-
# ownership recovery in assert_git_repo_accessible: when the first
# `submodule update` fails with that error, safe.directory entries are
# refreshed and the update retried exactly once before giving up.
sync_git_submodule() {
  local repo_path="$1"        # superproject root
  local submodule_path="$2"   # submodule path relative to the superproject
  local label="$3"            # human-readable name for logs/errors
  local submodule_repo_path="${repo_path}/${submodule_path}"
  local git_log=""
  assert_git_repo_accessible "${repo_path}" "${label}"
  ensure_repo_safe_directories "${submodule_repo_path}"
  log "Syncing ${label} submodule ${submodule_path}."
  git -C "${repo_path}" submodule sync -- "${submodule_path}"
  # Capture stderr so the ownership error can be detected and surfaced.
  git_log="$(mktemp)"
  if ! git -C "${repo_path}" submodule update --init --depth 1 -- "${submodule_path}" 2>"${git_log}"; then
    if grep -q "detected dubious ownership" "${git_log}"; then
      ensure_paths_from_ownership_log "${git_log}"
      ensure_repo_safe_directories "${repo_path}"
      ensure_repo_safe_directories "${submodule_repo_path}"
      # Truncate the log so the retry's diagnostics are not mixed with the
      # first attempt's output.
      : > "${git_log}"
      if ! git -C "${repo_path}" submodule update --init --depth 1 -- "${submodule_path}" 2>"${git_log}"; then
        cat "${git_log}" >&2 || true
        rm -f "${git_log}"
        die "Failed to sync ${label} submodule ${submodule_path} after refreshing Git safe.directory entries."
      fi
    else
      cat "${git_log}" >&2 || true
      rm -f "${git_log}"
      die "Failed to sync ${label} submodule ${submodule_path}."
    fi
  fi
  rm -f "${git_log}"
  assert_git_repo_accessible "${submodule_repo_path}" "${label} submodule"
}
# Succeed only when the colcon install space exists and every required
# runtime package and installed file is present; any gap returns non-zero.
verify_workspace_install() {
  local pkg=""
  local rel=""
  [[ -f "${ISAAC_WS}/install/setup.bash" ]] || return 1
  source_ros
  for pkg in "${RUNTIME_REQUIRED_PACKAGES[@]}"; do
    ros2 pkg prefix "${pkg}" >/dev/null 2>&1 || return 1
  done
  for rel in "${INSTALL_REQUIRED_FILE_PATHS[@]}"; do
    [[ -f "${ISAAC_WS}/${rel}" ]] || return 1
  done
}
# Return 0 when the recorded build stamp matches every current input (image
# id and context hash, both repo commits, the submodule commit, and the
# workspace spec version) AND the synced layout plus installed workspace
# still verify. Any mismatch or missing stamp means a rebuild is required.
stamp_current() {
  [[ -f "${STAMP_PATH}" ]] || return 1
  # shellcheck disable=SC1090
  source "${STAMP_PATH}"  # defines the STAMP_* variables compared below
  [[ "${STAMP_IMAGE_ID:-}" == "${SETUP_IMAGE_ID}" ]] || return 1
  [[ "${STAMP_IMAGE_CONTEXT_HASH:-}" == "${SETUP_IMAGE_CONTEXT_HASH}" ]] || return 1
  [[ "${STAMP_COMMUNITY_COMMIT:-}" == "${COMMUNITY_COMMIT}" ]] || return 1
  [[ "${STAMP_OFFICIAL_NVBLOX_COMMIT:-}" == "${OFFICIAL_NVBLOX_COMMIT}" ]] || return 1
  [[ "${STAMP_OFFICIAL_NVBLOX_CORE_COMMIT:-}" == "${OFFICIAL_NVBLOX_CORE_COMMIT}" ]] || return 1
  [[ "${STAMP_WORKSPACE_SPEC_VERSION:-}" == "${WORKSPACE_SPEC_VERSION}" ]] || return 1
  # The stamp alone is not enough: the workspace on disk must still be intact.
  verify_synced_workspace_layout
  verify_workspace_install
}
# Record the inputs of the current successful build so stamp_current can
# later skip unnecessary rebuilds. Every value is %q-quoted so the stamp
# file can be `source`d safely.
write_stamp() {
  local -a stamp_lines=(
    "STAMP_IMAGE_ID=$(printf '%q' "${SETUP_IMAGE_ID}")"
    "STAMP_IMAGE_CONTEXT_HASH=$(printf '%q' "${SETUP_IMAGE_CONTEXT_HASH}")"
    "STAMP_COMMUNITY_COMMIT=$(printf '%q' "${COMMUNITY_COMMIT}")"
    "STAMP_OFFICIAL_NVBLOX_COMMIT=$(printf '%q' "${OFFICIAL_NVBLOX_COMMIT}")"
    "STAMP_OFFICIAL_NVBLOX_CORE_COMMIT=$(printf '%q' "${OFFICIAL_NVBLOX_CORE_COMMIT}")"
    "STAMP_WORKSPACE_SPEC_VERSION=$(printf '%q' "${WORKSPACE_SPEC_VERSION}")"
    "STAMPED_AT=$(printf '%q' "$(date -Is 2>/dev/null || date)")"
  )
  printf '%s\n' "${stamp_lines[@]}" > "${STAMP_PATH}"
}
# Recreate SRC_DIR if needed and delete every direct child (files, dirs and
# dotfiles alike) so each sync starts from an empty tree.
clear_managed_src_dir() {
  mkdir -p "${SRC_DIR}"
  find "${SRC_DIR}" -mindepth 1 -maxdepth 1 -print0 \
    | xargs -0 --no-run-if-empty rm -rf
}
# Mirror one package directory from source_root into the managed src tree,
# replacing any stale copy already present at the destination.
copy_package_path() {
  local source_root="$1"
  local package_path="$2"
  local from="${source_root}/${package_path}"
  local to="${SRC_DIR}/${package_path}"
  if [[ ! -d "${from}" ]]; then
    die "Expected package path ${package_path} is missing from ${source_root}."
  fi
  mkdir -p "$(dirname "${to}")"
  rm -rf "${to}"
  cp -a "${from}" "${to}"
}
# Copy an entire package rooted at source_root (identified by a top-level
# package.xml) into SRC_DIR/<package_name>, excluding its .git directory.
# Dies when source_root is not a package root.
copy_package_root() {
  local source_root="$1"
  local package_name="$2"
  local dest_path="${SRC_DIR}/${package_name}"
  [[ -f "${source_root}/package.xml" ]] || die "Expected root package.xml is missing from ${source_root}."
  # Fix: the original created dest_path, immediately removed it with rm -rf,
  # then created it again. Clear once, create once.
  rm -rf "${dest_path}"
  mkdir -p "${dest_path}"
  # Copy every top-level entry except .git, preserving attributes.
  find "${source_root}" -mindepth 1 -maxdepth 1 ! -name '.git' -exec cp -a {} "${dest_path}/" \;
}
# Copy every listed package path from source_root into the managed src tree.
sync_package_group() {
  local source_root="$1"
  shift
  local pkg=""
  for pkg in "$@"; do
    copy_package_path "${source_root}" "${pkg}"
  done
}
# Overlay individual files from source_root onto the synced workspace,
# creating parent directories as needed. Dies when an expected file is gone.
apply_overlay_files() {
  local source_root="$1"
  shift
  local rel=""
  local from=""
  local to=""
  for rel in "$@"; do
    from="${source_root}/${rel}"
    to="${SRC_DIR}/${rel}"
    if [[ ! -f "${from}" ]]; then
      die "Expected overlay file ${rel} is missing from ${source_root}."
    fi
    mkdir -p "$(dirname "${to}")"
    cp -a "${from}" "${to}"
  done
}
# Generate the static-TF launch file for the Orbbec static demo. It publishes
# a fixed odom->base_link->camera frame chain (plus optical-frame rotations)
# so nvblox can run without a live pose source. The heredoc is quoted, so the
# Python content is written verbatim.
write_orbbec_transforms_launch() {
  cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu
def static_tf(parent: str, child: str, xyz: tuple[float, float, float], rpy: tuple[float, float, float]) -> Node:
    return Node(
        package='tf2_ros',
        executable='static_transform_publisher',
        arguments=[
            '--x', str(xyz[0]),
            '--y', str(xyz[1]),
            '--z', str(xyz[2]),
            '--roll', str(rpy[0]),
            '--pitch', str(rpy[1]),
            '--yaw', str(rpy[2]),
            '--frame-id', parent,
            '--child-frame-id', child,
        ],
        output='screen')
def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    actions = args.get_launch_actions()
    actions.append(static_tf('odom', 'base_link', (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))
    actions.append(static_tf('base_link', 'camera_link', (0.1, 0.0, 0.2), (0.0, 0.0, 0.0)))
    actions.append(static_tf('camera_link', 'camera0_link', (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))
    actions.append(static_tf(
        'camera_link',
        'camera_color_optical_frame',
        (0.0, 0.0, 0.0),
        (-1.57079632679, 0.0, -1.57079632679)))
    actions.append(static_tf(
        'camera_color_optical_frame',
        'camera_depth_optical_frame',
        (0.0, 0.0, 0.0),
        (0.0, 0.0, 0.0)))
    return LaunchDescription(actions)
EOF
}
# Generate the nvblox specialization for a statically mounted Orbbec camera:
# no lidar, sensor-data QoS, and ESDF slice bounds anchored to base_link.
# Layered on top of the base + realsense configs by the generated launches.
write_orbbec_static_config() {
  cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml" <<'EOF'
/**:
  ros__parameters:
    use_lidar: false
    input_qos: "SENSOR_DATA"
    map_clearing_frame_id: "base_link"
    esdf_slice_bounds_visualization_attachment_frame_id: "base_link"
    static_mapper:
      esdf_slice_height: 0.0
      esdf_slice_min_height: -0.1
      esdf_slice_max_height: 0.3
EOF
}
# Generate the full static demo launch: static TFs, an nvblox component
# container, the nvblox node remapped to the Orbbec camera topics, and RViz
# with the demo visualization config.
write_orbbec_example_launch() {
  cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu
from nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME
def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    args.add_arg('log_level', 'info', choices=['debug', 'info', 'warn'], cli=True)
    actions = args.get_launch_actions()
    actions.append(
        lu.include(
            'nvblox_examples_bringup',
            'launch/orbbec_transforms.launch.py'))
    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))
    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')
    realsense_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_realsense.yaml')
    orbbec_static_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_orbbec_static.yaml')
    nvblox_node = ComposableNode(
        name='nvblox_node',
        package='nvblox_ros',
        plugin='nvblox::NvbloxNode',
        remappings=[
            ('camera_0/depth/image', '/camera/depth/image_raw'),
            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),
            ('camera_0/color/image', '/camera/color/image_raw'),
            ('camera_0/color/camera_info', '/camera/color/camera_info'),
        ],
        parameters=[
            base_config,
            realsense_config,
            orbbec_static_config,
            {'num_cameras': 1},
            {'use_lidar': False},
        ],
    )
    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))
    rviz_config_path = lu.get_path(
        'nvblox_examples_bringup',
        'config/visualization/orbbec_example.rviz')
    actions.append(
        Node(
            package='rviz2',
            executable='rviz2',
            arguments=['-d', str(rviz_config_path)],
            output='screen'))
    return LaunchDescription(actions)
EOF
}
# Generate the debug variant of the static demo launch: identical to
# orbbec_example.launch.py but defaults log_level to 'debug' and starts no
# RViz, for headless troubleshooting.
write_orbbec_debug_launch() {
  cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu
from nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME
def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    args.add_arg('log_level', 'debug', choices=['debug', 'info', 'warn'], cli=True)
    actions = args.get_launch_actions()
    actions.append(
        lu.include(
            'nvblox_examples_bringup',
            'launch/orbbec_transforms.launch.py'))
    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))
    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')
    realsense_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_realsense.yaml')
    orbbec_static_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_orbbec_static.yaml')
    nvblox_node = ComposableNode(
        name='nvblox_node',
        package='nvblox_ros',
        plugin='nvblox::NvbloxNode',
        remappings=[
            ('camera_0/depth/image', '/camera/depth/image_raw'),
            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),
            ('camera_0/color/image', '/camera/color/image_raw'),
            ('camera_0/color/camera_info', '/camera/color/camera_info'),
        ],
        parameters=[
            base_config,
            realsense_config,
            orbbec_static_config,
            {'num_cameras': 1},
            {'use_lidar': False},
        ],
    )
    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))
    return LaunchDescription(actions)
EOF
}
# Generate the standalone (no-RViz) variant of the static demo launch:
# same nodes as orbbec_example.launch.py with the default 'info' log level,
# intended for running the pipeline without visualization.
write_orbbec_standalone_launch() {
  cat > "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py" <<'EOF'
from isaac_ros_launch_utils.all_types import *
import isaac_ros_launch_utils as lu
from nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME
def generate_launch_description() -> LaunchDescription:
    args = lu.ArgumentContainer()
    args.add_arg('log_level', 'info', choices=['debug', 'info', 'warn'], cli=True)
    actions = args.get_launch_actions()
    actions.append(
        lu.include(
            'nvblox_examples_bringup',
            'launch/orbbec_transforms.launch.py'))
    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))
    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')
    realsense_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_realsense.yaml')
    orbbec_static_config = lu.get_path(
        'nvblox_examples_bringup',
        'config/nvblox/specializations/nvblox_orbbec_static.yaml')
    nvblox_node = ComposableNode(
        name='nvblox_node',
        package='nvblox_ros',
        plugin='nvblox::NvbloxNode',
        remappings=[
            ('camera_0/depth/image', '/camera/depth/image_raw'),
            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),
            ('camera_0/color/image', '/camera/color/image_raw'),
            ('camera_0/color/camera_info', '/camera/color/camera_info'),
        ],
        parameters=[
            base_config,
            realsense_config,
            orbbec_static_config,
            {'num_cameras': 1},
            {'use_lidar': False},
        ],
    )
    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))
    return LaunchDescription(actions)
EOF
}
# Write all managed launch/config files for the static Orbbec demo after
# creating their target directories.
generate_static_demo_launches() {
  local writer=""
  log "Generating managed static demo launch files."
  mkdir -p \
    "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch" \
    "${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations"
  for writer in \
    write_orbbec_transforms_launch \
    write_orbbec_static_config \
    write_orbbec_example_launch \
    write_orbbec_debug_launch \
    write_orbbec_standalone_launch; do
    "${writer}"
  done
}
# Delete every manifest line declaring one of the given dependencies, by
# matching the ">name<" that surrounds the package name in package.xml tags.
# NOTE(review): dependency_name is interpolated into a sed pattern unescaped;
# this assumes plain ROS package names (letters/digits/underscores) — confirm
# if values with regex metacharacters are ever passed.
patch_manifest_remove_dependencies() {
  local manifest_path="$1"  # package.xml to edit in place
  shift                      # remaining args: dependency names to strip
  local dependency_name=""
  [[ -f "${manifest_path}" ]] || die "Expected manifest does not exist: ${manifest_path}"
  for dependency_name in "$@"; do
    sed -i "/>${dependency_name}</d" "${manifest_path}"
  done
}
# Strip the dependencies excluded from the static demo (listed in
# STATIC_DEMO_REMOVED_DEPENDENCIES) from the synced bringup manifest so
# rosdep/colcon do not try to resolve packages that were never synced.
patch_static_demo_manifests() {
  local bringup_manifest="${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/package.xml"
  log "Patching synced manifests for the static demo workspace."
  patch_manifest_remove_dependencies "${bringup_manifest}" "${STATIC_DEMO_REMOVED_DEPENDENCIES[@]}"
}
# Validate the synced workspace against the static demo's expectations:
# required package dirs and files present, excluded paths absent, and no
# excluded dependency left behind in the bringup manifest. Dies on any gap.
verify_synced_workspace_layout() {
  local bringup_manifest="${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/package.xml"
  local item=""
  for item in "${REQUIRED_SRC_PATHS[@]}"; do
    if [[ ! -d "${SRC_DIR}/${item}" ]]; then
      die "Required synced package path is missing: ${SRC_DIR}/${item}"
    fi
  done
  for item in "${EXCLUDED_SRC_PATHS[@]}"; do
    if [[ -e "${SRC_DIR}/${item}" ]]; then
      die "Excluded package path should not exist in the managed workspace: ${SRC_DIR}/${item}"
    fi
  done
  for item in "${REQUIRED_SRC_FILE_PATHS[@]}"; do
    if [[ ! -f "${SRC_DIR}/${item}" ]]; then
      die "Required synced file is missing: ${SRC_DIR}/${item}"
    fi
  done
  if [[ ! -f "${bringup_manifest}" ]]; then
    die "Expected bringup manifest is missing: ${bringup_manifest}"
  fi
  for item in "${STATIC_DEMO_REMOVED_DEPENDENCIES[@]}"; do
    if grep -q ">${item}<" "${bringup_manifest}"; then
      die "Static demo manifest still declares excluded dependency ${item}."
    fi
  done
}
# Rebuild the managed src tree from scratch: clear it, copy the whitelisted
# package groups from the three cached repos, apply community overlay files,
# generate the demo launch/config files, patch manifests, then verify the
# resulting layout. Order matters: overlays and generated files must land
# after the package copies so they are not clobbered.
sync_static_demo_workspace() {
  log "Syncing package whitelist into the managed workspace."
  clear_managed_src_dir
  sync_package_group "${COMMUNITY_COMMON_ROOT}" "${COMMUNITY_COMMON_PACKAGE_PATHS[@]}"
  sync_package_group "${COMMUNITY_NITROS_ROOT}" "${COMMUNITY_NITROS_PACKAGE_PATHS[@]}"
  sync_package_group "${OFFICIAL_NVBLOX_ROOT}" "${OFFICIAL_NVBLOX_PACKAGE_PATHS[@]}"
  apply_overlay_files "${COMMUNITY_NVBLOX_ROOT}" "${STATIC_DEMO_OVERLAY_FILE_PATHS[@]}"
  generate_static_demo_launches
  patch_static_demo_manifests
  verify_synced_workspace_layout
}
# Resolve workspace dependencies with rosdep, then build the colcon
# workspace from a clean slate. If the first build fails, apply the stdgpu
# CUDA 12.6 source-compatibility patches and retry exactly once.
rebuild_workspace() {
  local rosdep_dependency_args=(
    --dependency-types buildtool
    --dependency-types buildtool_export
    --dependency-types build
    --dependency-types build_export
    --dependency-types exec
  )
  local rosdep_skip_args=()
  local skip_key=""
  source_ros
  ensure_rosdep_ready
  # Translate the skip-key list into repeated --skip-keys flags.
  for skip_key in "${ROSDEP_SKIP_KEYS[@]}"; do
    rosdep_skip_args+=(--skip-keys "${skip_key}")
  done
  log "Installing workspace dependencies with rosdep."
  # Subshell so the cd does not leak into the caller.
  (
    cd "${ISAAC_WS}"
    rosdep install \
      --from-paths src \
      --ignore-src \
      -r \
      -y \
      --rosdistro "${ROS_DISTRO}" \
      "${rosdep_dependency_args[@]}" \
      "${rosdep_skip_args[@]}"
  )
  # Nested helper: one clean colcon invocation, run in a subshell.
  run_colcon_build() {
    (
      cd "${ISAAC_WS}"
      colcon build \
        --packages-up-to "${COLCON_TARGETS[@]}" \
        --symlink-install \
        --event-handlers console_direct+ \
        --cmake-args -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=OFF
    )
  }
  # Nested helper: patch the vendored stdgpu sources that CMake fetched into
  # the build tree so they compile under CUDA 12.6 (unqualified calls become
  # ambiguous there). Returns 0 only when at least one file was modified,
  # i.e. when a retry build is worthwhile.
  patch_ext_stdgpu_cuda_compat() {
    local ext_stdgpu_root=""
    local memory_detail_path=""
    local unordered_base_path=""
    local patched_any=1
    while IFS= read -r ext_stdgpu_root; do
      memory_detail_path="${ext_stdgpu_root}/src/stdgpu/impl/memory_detail.h"
      unordered_base_path="${ext_stdgpu_root}/src/stdgpu/impl/unordered_base_detail.cuh"
      if [[ -f "${memory_detail_path}" ]]; then
        # The embedded Python exits 0 only when it changed the file.
        if python3 - "${memory_detail_path}" <<'PY'
from pathlib import Path
import sys
path = Path(sys.argv[1])
text = path.read_text()
replacements = {
    "construct_at(p, forward<Args>(args)...);": "stdgpu::construct_at(p, stdgpu::forward<Args>(args)...);",
    "destroy_at(p);": "stdgpu::destroy_at(p);",
    "return to_address(pointer_traits<Ptr>::to_address(p));": "return stdgpu::to_address(pointer_traits<Ptr>::to_address(p));",
}
changed = False
for old, new in replacements.items():
    if old in text:
        text = text.replace(old, new)
        changed = True
if changed:
    path.write_text(text)
sys.exit(0 if changed else 1)
PY
        then
          log "Applied CUDA 12.6 stdgpu compatibility patch to ${memory_detail_path}."
          patched_any=0
        fi
      fi
      if [[ -f "${unordered_base_path}" ]]; then
        if python3 - "${unordered_base_path}" <<'PY'
from pathlib import Path
import sys
path = Path(sys.argv[1])
text = path.read_text()
replacements = {
    "_base.insert(*to_address(_begin + i));": "_base.insert(*stdgpu::to_address(_begin + i));",
}
changed = False
for old, new in replacements.items():
    if old in text:
        text = text.replace(old, new)
        changed = True
if changed:
    path.write_text(text)
sys.exit(0 if changed else 1)
PY
        then
          log "Applied CUDA 12.6 stdgpu compatibility patch to ${unordered_base_path}."
          patched_any=0
        fi
      fi
    done < <(find "${ISAAC_WS}/build" -type d -path '*/_deps/ext_stdgpu-src' 2>/dev/null | sort)
    return "${patched_any}"
  }
  log "Building container workspace."
  rm -rf "${ISAAC_WS}/build" "${ISAAC_WS}/install" "${ISAAC_WS}/log"
  if run_colcon_build; then
    return 0
  fi
  if patch_ext_stdgpu_cuda_compat; then
    log "Retrying container workspace build after applying stdgpu CUDA compatibility patches."
    # Fix: check the retry result explicitly. The original returned 0 right
    # after the retry, so without `set -e` a failed retry reported success.
    if run_colcon_build; then
      return 0
    fi
    die "Container workspace build failed even after applying stdgpu CUDA compatibility patches."
  fi
  die "Container workspace build failed before the compatibility patch could be applied."
}
# --- Main flow ---------------------------------------------------------------
# Make the managed caches usable by Git, ensure both upstream repos (plus the
# nvblox_core submodule) are cloned and up to date, then record their commits.
initialize_managed_git_access
verify_managed_git_cache_state
clone_or_update_repo "${COMMUNITY_REPO_URL}" "${COMMUNITY_REPO_BRANCH}" "${COMMUNITY_REPO_PATH}" "community repo"
clone_or_update_repo "${OFFICIAL_NVBLOX_REPO_URL}" "${OFFICIAL_NVBLOX_REPO_BRANCH}" "${OFFICIAL_NVBLOX_REPO_PATH}" "official Isaac ROS Nvblox repo"
sync_git_submodule "${OFFICIAL_NVBLOX_REPO_PATH}" "nvblox_ros/nvblox_core" "official Isaac ROS Nvblox"
# Re-assert accessibility after clone/refresh so the commit lookups below
# cannot fail on ownership issues.
assert_git_repo_accessible "${COMMUNITY_REPO_PATH}" "community repo"
assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}" "official Isaac ROS Nvblox repo"
assert_git_repo_accessible "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core" "official Isaac ROS Nvblox submodule"
COMMUNITY_COMMIT="$(git -C "${COMMUNITY_REPO_PATH}" rev-parse HEAD)"
OFFICIAL_NVBLOX_COMMIT="$(git -C "${OFFICIAL_NVBLOX_REPO_PATH}" rev-parse HEAD)"
OFFICIAL_NVBLOX_CORE_COMMIT="$(git -C "${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core" rev-parse HEAD)"
# Skip the expensive sync + build when the stamp matches all current inputs.
if [[ "${FORCE_REBUILD}" != "1" ]] && stamp_current; then
  log "Container workspace is already current. Skipping rebuild."
  exit 0
fi
sync_static_demo_workspace
rebuild_workspace
verify_synced_workspace_layout
verify_workspace_install || die "Container workspace verification failed."
write_stamp
log "Container workspace preparation complete."
================================================
FILE: reComputer/scripts/nvblox/host/orbbec_mobile_host.launch.py
================================================
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import LoadComposableNodes, Node
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
    """Launch the Orbbec camera driver inside a multithreaded component container.

    Exposes two launch arguments: the mandatory ``config_file_path`` (camera
    parameter file) and an optional ``component_container_name``.
    """
    config_file_path = LaunchConfiguration('config_file_path')
    container_name = LaunchConfiguration(
        'component_container_name', default='orbbec_host_container')

    # Multithreaded container that will host the camera composable node.
    camera_container = Node(
        name=container_name,
        package='rclcpp_components',
        executable='component_container_mt',
        output='screen')

    orbbec_camera_node = ComposableNode(
        namespace='camera',
        name='orbbec_camera_node',
        package='orbbec_camera',
        plugin='orbbec_camera::OBCameraNodeDriver',
        parameters=[config_file_path],
        remappings=[
            ('/camera/left_ir/image_raw', '~/output/infra_1'),
            ('/camera/right_ir/image_raw', '~/output/infra_2'),
            ('/camera/depth/image_raw', '~/output/depth'),
            ('/camera/depth_registered/points', '~/output/pointcloud'),
        ],
    )
    load_orbbec_node = LoadComposableNodes(
        target_container=container_name,
        composable_node_descriptions=[orbbec_camera_node])

    return LaunchDescription([
        DeclareLaunchArgument('config_file_path'),
        DeclareLaunchArgument('component_container_name',
                              default_value='orbbec_host_container'),
        camera_container,
        load_orbbec_node,
    ])
================================================
FILE: reComputer/scripts/nvblox/init.sh
================================================
#!/bin/bash
# Intentional no-op init step: all real NVBlox setup (preflight, image
# download, docker load, demo preparation) happens in `reComputer run nvblox`.
echo "NVBlox preflight, image download, docker load, and demo setup are handled by 'reComputer run nvblox'."
================================================
FILE: reComputer/scripts/nvblox/lib/common.sh
================================================
#!/usr/bin/env bash
# Shared helpers for the nvblox setup scripts. This file is meant to be
# sourced; the include guard below makes repeated sourcing a no-op.
if [[ "${SETUP_NVBOX_COMMON_SH:-0}" == "1" ]]; then
  return 0
fi
readonly SETUP_NVBOX_COMMON_SH=1
# Absolute path of the nvblox script directory (the parent of lib/).
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
readonly PROJECT_ROOT
# Print an error tagged with the setup-nvbox prefix to stderr, then abort the
# whole script with status 1. Used before the log()/die() helpers exist.
common_fatal() {
  local message="$*"
  printf '[setup-nvbox][ERROR] %s\n' "${message}" >&2
  exit 1
}
# Resolve the "real" user this setup acts for: the sudo caller when invoked
# via sudo from a normal account, otherwise the current user.
resolve_setup_user_name() {
  case "${SUDO_USER:-}" in
    ''|root)
      id -un
      ;;
    *)
      printf '%s\n' "${SUDO_USER}"
      ;;
  esac
}
# Print the first passwd database entry for user_name (empty output when the
# user is unknown; getent's non-zero status is absorbed by the pipeline).
lookup_user_passwd_entry() {
  local user_name="$1"
  getent passwd "${user_name}" 2>/dev/null | head -n 1
}
# Print the home directory (6th passwd field) for user_name; aborts via
# common_fatal when the user has no passwd entry.
resolve_user_home() {
  local user_name="$1"
  local passwd_entry=""
  local home_dir=""
  passwd_entry="$(lookup_user_passwd_entry "${user_name}")"
  [[ -n "${passwd_entry}" ]] || common_fatal "Cannot resolve passwd entry for user ${user_name}."
  # passwd format: name:passwd:uid:gid:gecos:home:shell
  IFS=':' read -r _ _ _ _ _ home_dir _ <<<"${passwd_entry}"
  printf '%s\n' "${home_dir}"
}
# --- Resolved setup-user identity (computed once at source time) ------------
readonly SETUP_USER_NAME="$(resolve_setup_user_name)"
readonly SETUP_USER_HOME="$(resolve_user_home "${SETUP_USER_NAME}")"
readonly SETUP_USER_UID="$(id -u "${SETUP_USER_NAME}")"
readonly SETUP_USER_GID="$(id -g "${SETUP_USER_NAME}")"
# --- Managed root layout -----------------------------------------------------
readonly MANAGED_ROOT_DEFAULT="${SETUP_USER_HOME}/nvblox_demo"
# Sentinel file marking a directory as created/owned by this project.
readonly MANAGED_SENTINEL_NAME=".managed-by-setup-nvbox"
readonly ROS_DISTRO_DEFAULT="humble"
# --- Orbbec camera (Gemini2) -------------------------------------------------
readonly ORBBEC_VERSION="v2.3.4"
readonly ORBBEC_REPO_URL="https://github.com/orbbec/OrbbecSDK_ROS2.git"
# USB vendor/product ids used to find the Gemini2 in sysfs.
readonly GEMINI2_USB_VENDOR_ID="2bc5"
readonly GEMINI2_USB_PRODUCT_ID="0670"
readonly GEMINI2_READY_TIMEOUT_SECONDS=15
readonly GEMINI2_SIGNAL_TIMEOUT_SECONDS=5
readonly HOST_CAMERA_LOG_TAIL_LINES=40
# --- Source repos and container images ---------------------------------------
readonly COMMUNITY_REPO_URL_DEFAULT="https://github.com/jjjadand/isaac-NVblox-Orbbec.git"
readonly COMMUNITY_REPO_BRANCH_DEFAULT="main"
readonly BASE_IMAGE_PREFERRED="isaac_ros_dev-aarch64:latest"
readonly DERIVED_IMAGE_TAG="local/isaac_ros_nvblox_orbbec:jp6-humble"
readonly CONTAINER_NAME_DEFAULT="isaac_ros_nvblox_orbbec"
# Bumping this forces a container workspace rebuild (checked by stamp logic).
readonly CONTAINER_WORKSPACE_SPEC_VERSION="static-demo-final-v3"
# --- Prebuilt image download --------------------------------------------------
readonly NVBLOX_IMAGE_SHARE_URL_DEFAULT="https://seeedstudio88-my.sharepoint.com/:u:/g/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/IQCCDToomY6WSaRZdfsTs9vXAengb-SCEvNfSUgq0cipP6w?e=z9axor"
readonly NVBLOX_IMAGE_ARCHIVE_NAME_DEFAULT="nvblox_images.tar"
readonly NVBLOX_IMAGE_CACHE_DIR_DEFAULT="${SETUP_USER_HOME}/.cache/jetson-examples/nvblox"
# --- DDS / ROS discovery ------------------------------------------------------
readonly FASTDDS_RUNTIME_DIR_RELATIVE=".runtime/fastdds"
readonly FASTDDS_UDP_ONLY_PROFILE_FILENAME="udp_only.xml"
# Environment variables that influence ROS 2 discovery and must be forwarded.
readonly ROS_DISCOVERY_ENV_VARS=(
  "ROS_DOMAIN_ID"
  "ROS_LOCALHOST_ONLY"
  "RMW_IMPLEMENTATION"
  "ROS_AUTOMATIC_DISCOVERY_RANGE"
  "ROS_STATIC_PEERS"
  "CYCLONEDDS_URI"
  "CYCLONEDDS_HOME"
  "FASTDDS_DEFAULT_PROFILES_FILE"
  "FASTRTPS_DEFAULT_PROFILES_FILE"
)
# Subset of the above whose values are filesystem paths (need remapping
# between host and container).
readonly ROS_DISCOVERY_PATH_ENV_VARS=(
  "CYCLONEDDS_URI"
  "CYCLONEDDS_HOME"
  "FASTDDS_DEFAULT_PROFILES_FILE"
  "FASTRTPS_DEFAULT_PROFILES_FILE"
)
# Command prefix (e.g. sudo) prepended to docker invocations; set elsewhere.
DOCKER_PREFIX=()
# Current local time as "YYYY-MM-DD HH:MM:SS" for log prefixes.
timestamp() {
  # bash >= 4.2 formats the time in-process; -1 means "now".
  printf '%(%Y-%m-%d %H:%M:%S)T\n' -1
}
# Emit one log line shaped as "[timestamp] [LEVEL] message...".
log() {
  local level="$1"
  shift
  local line="[$(timestamp)] [${level}] $*"
  printf '%s\n' "${line}"
}
# Convenience wrappers around log() for each severity level.
info() {
  log INFO "$@"
}
warn() {
  log WARN "$@"
}
# error() additionally redirects the formatted line to stderr.
error() {
  log ERROR "$@" >&2
}
# die() logs the error and terminates the whole script with status 1.
die() {
  error "$@"
  exit 1
}
# Resolvers for the NVBlox image download settings: each prints the
# environment override when set, otherwise the baked-in default.
resolve_nvblox_image_share_url() {
  printf '%s\n' "${NVBLOX_IMAGE_SHARE_URL:-${NVBLOX_IMAGE_SHARE_URL_DEFAULT}}"
}
resolve_nvblox_image_archive_name() {
  printf '%s\n' "${NVBLOX_IMAGE_ARCHIVE_NAME:-${NVBLOX_IMAGE_ARCHIVE_NAME_DEFAULT}}"
}
resolve_nvblox_image_cache_dir() {
  printf '%s\n' "${NVBLOX_IMAGE_CACHE_DIR:-${NVBLOX_IMAGE_CACHE_DIR_DEFAULT}}"
}
# Full path of the image archive; optional args override cache dir and name.
resolve_nvblox_image_archive_path() {
  local cache_dir="${1:-$(resolve_nvblox_image_cache_dir)}"
  local archive_name="${2:-$(resolve_nvblox_image_archive_name)}"
  # ${cache_dir%/} strips a trailing slash so the join never doubles it.
  printf '%s/%s\n' "${cache_dir%/}" "${archive_name}"
}
# Delete leftover *.part files from interrupted image downloads in the cache
# directory (defaults to the resolved NVBlox cache dir). No-op when the
# directory does not exist.
cleanup_nvblox_partial_downloads() {
  local cache_dir="${1:-$(resolve_nvblox_image_cache_dir)}"
  local -a partials=()
  local partial=""
  [[ -d "${cache_dir}" ]] || return 0
  mapfile -t partials < <(find "${cache_dir}" -maxdepth 1 -type f -name '*.part' 2>/dev/null | sort)
  for partial in "${partials[@]}"; do
    [[ -n "${partial}" ]] || continue
    rm -f "${partial}"
    info "Removed partial NVBlox download ${partial}"
  done
}
# Refuse unsupported invocation contexts: a direct root login (no sudo
# caller) or a sudo context that still resolves to root. The setup needs a
# real non-root user to own the managed files.
ensure_supported_user_context() {
  if [[ "${EUID}" -eq 0 && -z "${SUDO_USER:-}" ]]; then
    die "Running from a root login shell is not supported. Use your normal user account, or invoke this script with sudo from that account."
  fi
  if [[ "${EUID}" -eq 0 && "${SETUP_USER_NAME}" == "root" ]]; then
    die "Cannot determine a non-root setup user from sudo context."
  fi
}
# Decide whether to re-exec as the invoking (non-root) user: we must be
# root, have a real sudo caller, and not already have re-executed once.
should_reexec_as_setup_user() {
  if [[ "${EUID}" -ne 0 ]]; then
    return 1
  fi
  if [[ -z "${SUDO_USER:-}" || "${SUDO_USER}" == "root" ]]; then
    return 1
  fi
  [[ "${SETUP_NVBOX_REEXECED:-0}" != "1" ]]
}
# Replace the current process with the same script re-run as the setup user.
# SETUP_NVBOX_REEXECED=1 prevents a re-exec loop; MANAGED_ROOT is forwarded
# when the caller overrode it.
reexec_as_setup_user() {
  local script_path="$1"  # script to re-run
  shift                    # remaining args passed through to the script
  local env_args=("SETUP_NVBOX_REEXECED=1")
  if [[ -n "${MANAGED_ROOT:-}" ]]; then
    env_args+=("MANAGED_ROOT=${MANAGED_ROOT}")
  fi
  # -H sets HOME to the target user's home; exec never returns on success.
  exec sudo -H -u "${SETUP_USER_NAME}" env "${env_args[@]}" bash "${script_path}" "$@"
}
# Run a command with root privileges, skipping sudo when already root.
# The command's exit status is propagated to the caller.
run_sudo() {
  (( EUID == 0 )) && { "$@"; return; }
  sudo "$@"
}
# Run a command with root privileges without ever prompting for a password:
# executes directly when already root, otherwise uses `sudo -n` (which fails
# fast if a password would be required).
run_sudo_noninteractive() {
  if [[ "${EUID}" -eq 0 ]]; then
    "$@"
    # Fix: propagate the command's exit status. The previous `return 0`
    # masked failures when running as root, unlike run_sudo.
    return
  fi
  sudo -n "$@"
}
# Refuse to touch an existing directory that lacks our sentinel file, so the
# setup never clobbers user data that merely shares the managed-root path.
guard_managed_root_path() {
  local root="$1"
  local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
  [[ -e "${root}" ]] || return 0
  [[ -e "${sentinel}" ]] || die "Managed root ${root} exists but is not owned by this project. Refusing to continue."
}
# Create (or adopt) the managed root: verify any existing directory carries
# our sentinel, create the standard subdirectories, and write the sentinel
# with provenance details on first use.
bootstrap_managed_root() {
  local root="$1"
  local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
  guard_managed_root_path "${root}"
  mkdir -p "${root}/logs" "${root}/.stamps"
  if [[ ! -f "${sentinel}" ]]; then
    {
      printf 'managed_root=%s\n' "${root}"
      printf 'created_at=%s\n' "$(date -Is 2>/dev/null || date)"
      printf 'project_root=%s\n' "${PROJECT_ROOT}"
    } > "${sentinel}"
  fi
}
# Chown the managed root back to the setup user when any file under it is
# owned by a different uid/gid (e.g. after container runs as root). Only
# acts on roots that exist AND carry our sentinel.
repair_managed_root_ownership() {
  local root="$1"
  local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
  [[ -d "${root}" ]] || return 0
  [[ -f "${sentinel}" ]] || return 0
  # -print -quit stops at the first mismatched entry; grep -q turns "found
  # anything" into an exit status.
  if find "${root}" \( ! -uid "${SETUP_USER_UID}" -o ! -gid "${SETUP_USER_GID}" \) -print -quit 2>/dev/null | grep -q .; then
    info "Repairing managed root ownership under ${root}."
    run_sudo chown -R "${SETUP_USER_UID}:${SETUP_USER_GID}" "${root}"
  fi
}
# Abort unless bootstrap_managed_root has already created the sentinel file.
require_bootstrapped_managed_root() {
  local root="$1"
  local sentinel="${root}/${MANAGED_SENTINEL_NAME}"
  [[ -f "${sentinel}" ]] || die "Managed root ${root} is not prepared. Run with --prepare-only or the default mode first."
}
# True when the named apt package is fully installed (dpkg status string
# "install ok installed"); false for unknown, removed or half-configured.
package_installed() {
  local package_name="$1"
  dpkg-query -W -f='${Status}' "${package_name}" 2>/dev/null | grep -q 'install ok installed'
}
# Install only the apt packages from the argument list that are not yet
# present; runs no apt command at all when everything is already installed.
install_packages_if_missing() {
  local pkg=""
  local -a to_install=()
  for pkg in "$@"; do
    package_installed "${pkg}" || to_install+=("${pkg}")
  done
  ((${#to_install[@]})) || return 0
  info "Installing apt packages: ${to_install[*]}"
  run_sudo apt-get update
  run_sudo apt-get install -y --no-install-recommends "${to_install[@]}"
}
# Die unless the named executable/builtin is resolvable via `command -v`.
assert_command() {
  local command_name="$1"
  if ! command -v "${command_name}" >/dev/null 2>&1; then
    die "Required command not found: ${command_name}"
  fi
}
# Read a whole file, lowercase it, and strip ALL whitespace (including the
# trailing newline) — used to normalise sysfs id files before comparison.
read_file_lower_trimmed() {
  local file_path="$1"
  local squeezed=""
  squeezed="$(tr -d '[:space:]' < "${file_path}")"
  # ${var,,} lowercases; printf '%s' keeps the output newline-free, matching
  # the original pipeline where tr deleted the final newline.
  printf '%s' "${squeezed,,}"
}
# Walk up the sysfs ancestry of start_path looking for a USB device node
# whose idVendor/idProduct match the Gemini2 camera. Prints the matching
# sysfs directory and returns 0, or returns 1 when no ancestor matches.
find_usb_device_with_ids() {
  local start_path="$1"
  local current_path=""
  # Resolve symlinks first; sysfs device paths are largely symlink trees.
  current_path="$(readlink -f "${start_path}" 2>/dev/null || true)"
  [[ -n "${current_path}" ]] || return 1
  while [[ "${current_path}" != "/" ]]; do
    if [[ -f "${current_path}/idVendor" && -f "${current_path}/idProduct" ]]; then
      if [[ "$(read_file_lower_trimmed "${current_path}/idVendor")" == "${GEMINI2_USB_VENDOR_ID}" ]] && \
        [[ "$(read_file_lower_trimmed "${current_path}/idProduct")" == "${GEMINI2_USB_PRODUCT_ID}" ]]; then
        printf '%s\n' "${current_path}"
        return 0
      fi
    fi
    current_path="$(dirname "${current_path}")"
  done
  return 1
}
# Print the sysfs directory of every enumerated USB device whose vendor and
# product ids match the Gemini2 camera (one path per line, possibly none).
gemini2_usb_device_dirs() {
  local device_dir
  for device_dir in /sys/bus/usb/devices/*; do
    # Skip interfaces/hubs that do not expose the id files.
    [[ -f "${device_dir}/idVendor" && -f "${device_dir}/idProduct" ]] || continue
    if [[ "$(read_file_lower_trimmed "${device_dir}/idVendor")" == "${GEMINI2_USB_VENDOR_ID}" ]] && \
      [[ "$(read_file_lower_trimmed "${device_dir}/idProduct")" == "${GEMINI2_USB_PRODUCT_ID}" ]]; then
      printf '%s\n' "${device_dir}"
    fi
  done
}
# True when at least one Gemini2 camera is enumerated on the USB bus.
gemini2_usb_present() {
  [[ -n "$(gemini2_usb_device_dirs | head -n 1 || true)" ]]
}
# Print the negotiated USB link speed (Mb/s) of the first Gemini2 device,
# as reported by sysfs. Prints nothing — and still returns 0 — when no
# device is present, the speed file is missing, or its content is not a
# plain integer; callers treat empty output as "unknown".
gemini2_usb_link_speed_mbps() {
  local device_dir=""
  local speed_path=""
  local speed_value=""
  device_dir="$(gemini2_usb_device_dirs | head -n 1 || true)"
  [[ -n "${device_dir}" ]] || return 0
  speed_path="${device_dir}/speed"
  [[ -f "${speed_path}" ]] || return 0
  speed_value="$(tr -d '[:space:]' < "${speed_path}" 2>/dev/null || true)"
  [[ "${speed_value}" =~ ^[0-9]+$ ]] || return 0
  printf '%s\n' "${speed_value}"
}
# Print every /dev/video* node backed by the Gemini2 camera, sorted and
# de-duplicated (the whole loop is piped through `sort -u`). A node is
# included only when its sysfs device ancestry matches the Gemini2 USB ids
# and the /dev entry actually exists.
gemini2_video_nodes() {
  local video_sysfs_path=""
  local video_name=""
  for video_sysfs_path in /sys/class/video4linux/video*; do
    # The glob itself is printed when nothing matches; skip that case.
    [[ -e "${video_sysfs_path}" ]] || continue
    if find_usb_device_with_ids "${video_sysfs_path}/device" >/dev/null 2>&1; then
      video_name="$(basename "${video_sysfs_path}")"
      [[ -e "/dev/${video_name}" ]] || continue
      printf '/dev/%s\n' "${video_name}"
    fi
  done | sort -u
}
# Emit all Gemini2 video nodes as one space-separated line; prints nothing
# (and returns 0) when no node is found.
gemini2_video_nodes_joined() {
  local -a nodes=()
  mapfile -t nodes < <(gemini2_video_nodes)
  ((${#nodes[@]})) || return 0
  printf '%s\n' "${nodes[*]}"
}
# Log the current set of Gemini2-backed /dev/video nodes, or warn with
# "<none>" when the camera exposes no video device.
log_gemini2_video_nodes_snapshot() {
  local prefix="${1:-Gemini2 /dev/video snapshot}"
  local nodes=""
  nodes="$(gemini2_video_nodes_joined)"
  if [[ -z "${nodes}" ]]; then
    warn "${prefix}: <none>"
    return
  fi
  info "${prefix}: ${nodes}"
}
gemini2_device_state() {
local video_nodes=""
if !
gitextract_qsc773b7/ ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── build.sh ├── docs/ │ ├── develop.md │ ├── examples.md │ ├── install.md │ └── publish.md ├── install.sh ├── pyproject.toml ├── reComputer/ │ ├── __init__.py │ ├── main.py │ └── scripts/ │ ├── MoveNet-Lightning/ │ │ ├── clean.sh │ │ ├── getVersion.sh │ │ ├── init.sh │ │ ├── readme.md │ │ └── run.sh │ ├── MoveNet-Thunder/ │ │ ├── clean.sh │ │ ├── getVersion.sh │ │ ├── init.sh │ │ ├── readme.md │ │ └── run.sh │ ├── MoveNetJS/ │ │ ├── clean.sh │ │ ├── readme.md │ │ └── run.sh │ ├── Sheared-LLaMA-2.7B-ShareGPT/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── audiocraft/ │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── check.sh │ ├── clean.sh │ ├── comfyui/ │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── deep-live-cam/ │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── depth-anything/ │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── depth-anything-v2/ │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── depth-anything-v3/ │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── gpt-oss/ │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── live-llava/ │ │ ├── init.sh │ │ └── run.sh │ ├── llama-factory/ │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── llama3/ │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── llama3.2/ │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── llava/ │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ 
└── run.sh │ ├── llava-v1.5-7b/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── llava-v1.6-vicuna-7b/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── nanodb/ │ │ ├── config.yaml │ │ ├── init.sh │ │ ├── readme.md │ │ └── run.sh │ ├── nanoowl/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── nvblox/ │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config/ │ │ │ ├── orbbec_stereo_capability_probe.yaml │ │ │ └── orbbec_vslam_mobile.yaml │ │ ├── config.yaml │ │ ├── docker/ │ │ │ ├── Dockerfile.nvblox_orbbec │ │ │ ├── launch_nvblox.sh │ │ │ └── prepare_container_workspace.sh │ │ ├── host/ │ │ │ └── orbbec_mobile_host.launch.py │ │ ├── init.sh │ │ ├── lib/ │ │ │ └── common.sh │ │ ├── onedrive_downloader.py │ │ ├── run.sh │ │ ├── scripts/ │ │ │ ├── debug_runtime_connectivity.sh │ │ │ ├── preflight.sh │ │ │ ├── prepare_container.sh │ │ │ ├── prepare_host.sh │ │ │ └── run_demo.sh │ │ └── start_nvblox_demo.sh │ ├── ollama/ │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── parler-tts/ │ │ ├── clean.sh │ │ ├── getVersion.sh │ │ ├── init.sh │ │ ├── readme.md │ │ └── run.sh │ ├── qwen3.5-4b/ │ │ ├── Dockerfile.jetson │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── ros1-jp6/ │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── run.sh │ ├── stable-diffusion-webui/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── text-generation-webui/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── ultralytics-yolo/ │ │ ├── LICENSE │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── update.sh │ ├── utils.sh │ ├── whisper/ │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ ├── yolov10/ │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── clean.sh │ │ ├── config.yaml │ │ ├── init.sh │ │ └── run.sh │ └── yolov8-rail-inspection/ │ ├── config.yaml │ ├── init.sh │ ├── readme.md │ └── run.sh └── setup.py
SYMBOL INDEX (22 symbols across 4 files) FILE: reComputer/main.py function scripts_roots (line 7) | def scripts_roots(): function scripts_root (line 29) | def scripts_root(): function path_of_script (line 37) | def path_of_script(name): function list_all_examples (line 45) | def list_all_examples(folder_path): function run_script (line 54) | def run_script(): FILE: reComputer/scripts/nvblox/host/orbbec_mobile_host.launch.py function generate_launch_description (line 8) | def generate_launch_description(): FILE: reComputer/scripts/nvblox/onedrive_downloader.py class DownloadError (line 39) | class DownloadError(Exception): function parse_args (line 43) | def parse_args() -> argparse.Namespace: function is_supported_host (line 83) | def is_supported_host(hostname: str) -> bool: function sanitize_filename (line 91) | def sanitize_filename(value: str | None) -> str | None: function validate_source_url (line 104) | def validate_source_url(raw_url: str) -> str: function needs_download_flag (line 132) | def needs_download_flag(parsed_url) -> bool: function with_download_flag (line 136) | def with_download_flag(url: str) -> str: function looks_like_landing_page (line 150) | def looks_like_landing_page(content_type: str, first_chunk: bytes) -> bool: function filename_from_content_disposition (line 169) | def filename_from_content_disposition(header_value: str | None) -> str |... function filename_from_url (line 192) | def filename_from_url(url: str) -> str | None: function probe_remote_target (line 197) | def probe_remote_target(url: str, filename_override: str | None) -> tupl... function prepare_target_paths (line 237) | def prepare_target_paths( function progress_stream (line 269) | def progress_stream(): function download_file (line 276) | def download_file(url: str, filepath: Path, filename: str) -> None: function main (line 373) | def main() -> int: FILE: setup.py function package_files (line 11) | def package_files(root: Path):
Condensed preview — 177 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (389K chars).
[
{
"path": ".gitignore",
"chars": 3154,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
},
{
"path": "LICENSE",
"chars": 1066,
"preview": "MIT License\n\nCopyright (c) 2024 luozhixin\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\n"
},
{
"path": "MANIFEST.in",
"chars": 139,
"preview": "recursive-include reComputer/scripts *\nglobal-exclude __pycache__\nglobal-exclude *.pyc *.pyo\nglobal-exclude *.png *.jpg "
},
{
"path": "README.md",
"chars": 5948,
"preview": "# jetson-examples\n\n<div align=\"\">\n <img alt=\"jetson\" width=\"1200px\" src=\"https://files.seeedstudio.com/wiki/reComputer-"
},
{
"path": "build.sh",
"chars": 1201,
"preview": "#!/bin/bash\n\n# 1 try clean older version\npip uninstall jetson-examples -y\n\n# 2 clean last build files\nrm -rf build/\n\n# 3"
},
{
"path": "docs/develop.md",
"chars": 4638,
"preview": "# Develop\n\nThis section provides guidance on how to contribute to the `jetson-examples` repository. It is highly recomme"
},
{
"path": "docs/examples.md",
"chars": 4304,
"preview": "# Example list\n\nAll examples that can be run:\n\n| Example | Type "
},
{
"path": "docs/install.md",
"chars": 368,
"preview": "# Install\n\n- use the way you like to install\n\n## PyPI(recommend)\n\n```sh\npip install jetson-examples\n```\n\n## Linux (githu"
},
{
"path": "docs/publish.md",
"chars": 626,
"preview": "# publish\n\n## pypi.org\n\n```sh\n# tools update\npython3 -m pip install --upgrade build\npython3 -m pip install --upgrade twi"
},
{
"path": "install.sh",
"chars": 240,
"preview": "#!/bin/bash\n# TODO: make sure python3 in host is OK\ncd /tmp && \\\ngit clone https://github.com/Seeed-Projects/jetson-exam"
},
{
"path": "pyproject.toml",
"chars": 1135,
"preview": "[build-system]\nrequires = [\"setuptools>=61.0.0\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"jet"
},
{
"path": "reComputer/__init__.py",
"chars": 22,
"preview": "__version__ = \"0.1.3\"\n"
},
{
"path": "reComputer/main.py",
"chars": 2923,
"preview": "import os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\n\ndef scripts_roots():\n pkg_root = Path(__file__).res"
},
{
"path": "reComputer/scripts/MoveNet-Lightning/clean.sh",
"chars": 124,
"preview": "#!/bin/bash\n\n\n# get image\nsource ./getVersion.sh\n\n# remove docker image\nsudo docker rmi feiticeir0/movenet:tf2-${IMAGE_T"
},
{
"path": "reComputer/scripts/MoveNet-Lightning/getVersion.sh",
"chars": 1648,
"preview": "#!/bin/bash\n# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.s"
},
{
"path": "reComputer/scripts/MoveNet-Lightning/init.sh",
"chars": 61,
"preview": "#!/bin/bash\n\n# Let's allow connections\nxhost +local:docker\n\n\n"
},
{
"path": "reComputer/scripts/MoveNet-Lightning/readme.md",
"chars": 724,
"preview": "# MoveNet\n\nMoveNet is a ultra fast and accurate pose detection model.\n\nWe're demonstrating here using reComputer J402 an"
},
{
"path": "reComputer/scripts/MoveNet-Lightning/run.sh",
"chars": 344,
"preview": "#!/bin/bash\n\n# get L4T version\n# it exports a variable IMAGE_TAG\nsource ./getVersion.sh\n\n# pull docker image\n\ndocker pul"
},
{
"path": "reComputer/scripts/MoveNet-Thunder/clean.sh",
"chars": 123,
"preview": "#!/bin/bash\n\n# get image\nsource ./getVersion.sh\n\n# remove docker image\nsudo docker rmi feiticeir0/movenet:tf2-${TAG_IMAG"
},
{
"path": "reComputer/scripts/MoveNet-Thunder/getVersion.sh",
"chars": 1648,
"preview": "#!/bin/bash\n# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.s"
},
{
"path": "reComputer/scripts/MoveNet-Thunder/init.sh",
"chars": 61,
"preview": "#!/bin/bash\n\n# Let's allow connections\nxhost +local:docker\n\n\n"
},
{
"path": "reComputer/scripts/MoveNet-Thunder/readme.md",
"chars": 721,
"preview": "# MoveNet\n\nMoveNet is a ultra fast and accurate pose detection model.\n\nWe're demonstrating here using reComputer J402 an"
},
{
"path": "reComputer/scripts/MoveNet-Thunder/run.sh",
"chars": 337,
"preview": "#!/bin/bash\n\n# get L4T version\n# it exports a variable IMAGE_TAG\nsource ./getVersion.sh\n\n# pull docker image\ndocker pull"
},
{
"path": "reComputer/scripts/MoveNetJS/clean.sh",
"chars": 80,
"preview": "#!/bin/bash\n\n# remove docker image\nsudo docker rmi feiticeir0/movenetjs:latest \n"
},
{
"path": "reComputer/scripts/MoveNetJS/readme.md",
"chars": 1632,
"preview": "# MoveNet\n\nMoveNet is a ultra fast and accurate pose detection model.\n\nWe're demonstrating here using reComputer J402\n\n!"
},
{
"path": "reComputer/scripts/MoveNetJS/run.sh",
"chars": 144,
"preview": "#!/bin/bash\n\n# pull docker image\n\ndocker push feiticeir0/movenetjs:latest\n\ndocker run \\\n\t--rm \\\n\t-p 5000:5000 \\\n\tfeitice"
},
{
"path": "reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh",
"chars": 237,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/audiocraft/README.md",
"chars": 934,
"preview": "# AudioCraft Deployment on Jetson in One Line \n\n## Hello\n\n💡 In this demo, we refer to jetson-container to deploy audiocr"
},
{
"path": "reComputer/scripts/audiocraft/clean.sh",
"chars": 116,
"preview": "#!/bin/bash\n\n# TODO: clean old container\ndocker rmi $(/home/$USER/reComputer/jetson-containers/autotag audiocraft)\n\n"
},
{
"path": "reComputer/scripts/audiocraft/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/audiocraft/init.sh",
"chars": 570,
"preview": "#!/bin/bash\n\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirnam"
},
{
"path": "reComputer/scripts/audiocraft/run.sh",
"chars": 160,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\njets"
},
{
"path": "reComputer/scripts/check.sh",
"chars": 127,
"preview": "script_dir=$(dirname \"$0\")\ndocker --version && \\\npython3 -V && \\\npython -V && \\\necho \"now we can use more shell in $scri"
},
{
"path": "reComputer/scripts/clean.sh",
"chars": 882,
"preview": "#!/bin/bash\n\ncheck_is_jetson_or_not() {\n model_file=\"/proc/device-tree/model\"\n \n if [ -f \"/proc/device-tree/mod"
},
{
"path": "reComputer/scripts/comfyui/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/comfyui/README.md",
"chars": 7851,
"preview": "# Jetson-Example: Run ComfyUI (Stable Diffusion GUI) on NVIDIA Jetson Orin 🚀\n\n## One-Click Quick Deployment of Plug-and-"
},
{
"path": "reComputer/scripts/comfyui/clean.sh",
"chars": 205,
"preview": "#!/bin/bash\nCONTAINER_NAME=\"comfyui\"\nIMAGE_NAME=\"yaohui1998/comfyui\"\n\nsudo docker stop $CONTAINER_NAME\nsudo docker rm $C"
},
{
"path": "reComputer/scripts/comfyui/config.yaml",
"chars": 636,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\nREQUIRED_DISK_SPACE: 30 # in GB\nR"
},
{
"path": "reComputer/scripts/comfyui/init.sh",
"chars": 226,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/comfyui/run.sh",
"chars": 911,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"comfyui\"\nIMAGE_NAME=\"yaohui1998/comfyui\"\n\n# Pull the latest image\ndocker pull $IMAGE_NAME\n\n"
},
{
"path": "reComputer/scripts/deep-live-cam/Dockerfile",
"chars": 125,
"preview": "\nFROM yaohui1998/deep-live-cam:0.1\n\nWORKDIR /usr/src/Deep-Live-Cam\n\nCMD [\"python3\", \"run.py\", \"--execution-provider\", \"c"
},
{
"path": "reComputer/scripts/deep-live-cam/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/deep-live-cam/README.md",
"chars": 5236,
"preview": "# Jetson-Example: Run Deep Live Cam on Seeed Studio NVIDIA AGX Orin Developer Kit 🚀\n\nThis project provides a one-click d"
},
{
"path": "reComputer/scripts/deep-live-cam/clean.sh",
"chars": 199,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"deep-live-cam\"\nIMAGE_NAME=\"yaohui1998/deep-live-cam:1.0\"\n\nsudo docker stop $CONTAINER_NAME\n"
},
{
"path": "reComputer/scripts/deep-live-cam/config.yaml",
"chars": 637,
"preview": "\n# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 36.3.0\nREQUIRED_DISK_SPACE: 40 # in GB\nREQUIRED_MEM_SPACE: 20"
},
{
"path": "reComputer/scripts/deep-live-cam/init.sh",
"chars": 155,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/deep-live-cam/run.sh",
"chars": 899,
"preview": "CONTAINER_NAME=\"deep-live-cam\"\nIMAGE_NAME=\"yaohui1998/deep-live-cam:1.0\"\n\n# Pull the latest image\ndocker pull $IMAGE_NAM"
},
{
"path": "reComputer/scripts/depth-anything/Dockerfile",
"chars": 279,
"preview": "FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3\nRUN mkdir /usr/src/DepthAnything-on-Jetson-Orin\nWORKDIR /usr/src/Dept"
},
{
"path": "reComputer/scripts/depth-anything/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/depth-anything/README.md",
"chars": 4872,
"preview": "# Jetson-Example: Run Depth Anything on NVIDIA Jetson Orin 🚀\nThis project provides an one-click deployment of the Depth "
},
{
"path": "reComputer/scripts/depth-anything/clean.sh",
"chars": 199,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"depth-anything\"\nIMAGE_NAME=\"yaohui1998/depthanything-on-jetson-orin:latest\"\n\nsudo docker st"
},
{
"path": "reComputer/scripts/depth-anything/config.yaml",
"chars": 635,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\nREQUIRED_DISK_SPACE: 20 # in GB\nR"
},
{
"path": "reComputer/scripts/depth-anything/init.sh",
"chars": 154,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/depth-anything/run.sh",
"chars": 730,
"preview": "CONTAINER_NAME=\"depth-anything\"\nIMAGE_NAME=\"yaohui1998/depthanything-on-jetson-orin:latest\"\n\n# Pull the latest image\ndoc"
},
{
"path": "reComputer/scripts/depth-anything-v2/Dockerfile",
"chars": 279,
"preview": "FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3\nRUN mkdir /usr/src/DepthAnything-on-Jetson-Orin\nWORKDIR /usr/src/Dept"
},
{
"path": "reComputer/scripts/depth-anything-v2/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/depth-anything-v2/README.md",
"chars": 4831,
"preview": "# Jetson-Example: Run Depth Anything V2 on NVIDIA Jetson Orin 🚀\nThis project provides an one-click deployment of the Dep"
},
{
"path": "reComputer/scripts/depth-anything-v2/clean.sh",
"chars": 204,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"depth-anything-v2\"\nIMAGE_NAME=\"yaohui1998/depthanything-v2-on-jetson-orin:latest\"\n\nsudo doc"
},
{
"path": "reComputer/scripts/depth-anything-v2/config.yaml",
"chars": 636,
"preview": "\n# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\nREQUIRED_DISK_SPACE: 15 # in GB\n"
},
{
"path": "reComputer/scripts/depth-anything-v2/init.sh",
"chars": 155,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/depth-anything-v2/run.sh",
"chars": 736,
"preview": "CONTAINER_NAME=\"depth-anything-v2\"\nIMAGE_NAME=\"yaohui1998/depthanything-v2-on-jetson-orin:latest\"\n\n# Pull the latest ima"
},
{
"path": "reComputer/scripts/depth-anything-v3/Dockerfile",
"chars": 98,
"preview": "# This demo uses a prebuilt Docker image from Docker Hub.\nFROM chenduola6/depth_anything_v3:jp6.2\n"
},
{
"path": "reComputer/scripts/depth-anything-v3/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/depth-anything-v3/README.md",
"chars": 2244,
"preview": "# Jetson-Example: Run Depth Anything V3 on NVIDIA Jetson\n\nThis project provides one-click deployment for **Depth Anythin"
},
{
"path": "reComputer/scripts/depth-anything-v3/clean.sh",
"chars": 580,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"depth_anything_v3\"\n\n# Prefer plain docker, fallback to sudo docker when user has no docker "
},
{
"path": "reComputer/scripts/depth-anything-v3/config.yaml",
"chars": 658,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 36.4.0\n - 36.4.3\n - 36.4.4\nREQUIRED_DISK_SPACE: 12 # in GB\nR"
},
{
"path": "reComputer/scripts/depth-anything-v3/init.sh",
"chars": 154,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/depth-anything-v3/run.sh",
"chars": 1894,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"depth-anything-v3\"\nIMAGE_NAME=\"chenduola6/depth-anything-v3:jp6.2\"\n\n# Prefer plain docker, "
},
{
"path": "reComputer/scripts/gpt-oss/Dockerfile",
"chars": 32,
"preview": "FROM chenduola6/got-oss-20b:jp6\n"
},
{
"path": "reComputer/scripts/gpt-oss/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/gpt-oss/README.md",
"chars": 2567,
"preview": "# Jetson-Example: Run GPT-OSS 20B on NVIDIA Jetson\n\nThis project provides one-click deployment for **GPT-OSS 20B** on NV"
},
{
"path": "reComputer/scripts/gpt-oss/clean.sh",
"chars": 1884,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"gpt-oss\"\n\nensure_docker_access() {\n if ! command -v docker >/dev/null 2>&1; then\n "
},
{
"path": "reComputer/scripts/gpt-oss/config.yaml",
"chars": 636,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 36.4.0\n - 36.4.3\n - 36.4.4\nREQUIRED_DISK_SPACE: 50 # in GB\nR"
},
{
"path": "reComputer/scripts/gpt-oss/init.sh",
"chars": 154,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/gpt-oss/run.sh",
"chars": 6751,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"gpt-oss\"\nIMAGE_NAME=\"chenduola6/got-oss-20b:jp6\"\nMODEL_PATH=\"/root/gpt-oss/gguf/gpt-oss-20b"
},
{
"path": "reComputer/scripts/live-llava/init.sh",
"chars": 428,
"preview": "#!/bin/bash\n\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE"
},
{
"path": "reComputer/scripts/live-llava/run.sh",
"chars": 10117,
"preview": "#!/bin/bash\n\nSUPPORT_L4T_LIST=\"35.3.1\"\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n"
},
{
"path": "reComputer/scripts/llama-factory/README.md",
"chars": 2474,
"preview": "# Finetune LLM by Llama-Factory on Jetson\n\n\n## Hello\nNow you can tailor a custom private local LLM to meet your requirem"
},
{
"path": "reComputer/scripts/llama-factory/clean.sh",
"chars": 133,
"preview": "#!/bin/bash\n\nsudo docker rmi youjiang9977/llama-factory:r35.4.1\nsudo rm -rf /home/$USER/reComputer/jetson-containers/LLa"
},
{
"path": "reComputer/scripts/llama-factory/config.yaml",
"chars": 635,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\nREQUIRED_DISK_SPACE: 25 # in GB\nR"
},
{
"path": "reComputer/scripts/llama-factory/init.sh",
"chars": 570,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/llama-factory/run.sh",
"chars": 231,
"preview": "#!/bin/bash\n\n\nDATA_PATH=\"/home/$USER/reComputer/jetson-containers/data\"\n\nsudo docker run -it --rm --network host --runti"
},
{
"path": "reComputer/scripts/llama3/clean.sh",
"chars": 648,
"preview": "#!/bin/bash\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n# search local image\nimg_ta"
},
{
"path": "reComputer/scripts/llama3/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 15"
},
{
"path": "reComputer/scripts/llama3/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/llama3/run.sh",
"chars": 328,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n# try"
},
{
"path": "reComputer/scripts/llama3.2/clean.sh",
"chars": 1070,
"preview": "#!/bin/bash\n\nget_l4t_version() {\n local l4t_version=\"\"\n local release_line=$(head -n 1 /etc/nv_tegra_release)\n "
},
{
"path": "reComputer/scripts/llama3.2/config.yaml",
"chars": 648,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\n - 36.4.0\nREQUIRED_DIS"
},
{
"path": "reComputer/scripts/llama3.2/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/llama3.2/run.sh",
"chars": 1067,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\nget_"
},
{
"path": "reComputer/scripts/llava/clean.sh",
"chars": 82,
"preview": "#!/bin/bash\n\ndocker rmi $(/home/$USER/reComputer/jetson-containers/autotag llava)\n"
},
{
"path": "reComputer/scripts/llava/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 15"
},
{
"path": "reComputer/scripts/llava/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/llava/run.sh",
"chars": 251,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/llava-v1.5-7b/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/llava-v1.5-7b/init.sh",
"chars": 570,
"preview": "#!/bin/bash\n\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirnam"
},
{
"path": "reComputer/scripts/llava-v1.5-7b/run.sh",
"chars": 251,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/llava-v1.6-vicuna-7b/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/llava-v1.6-vicuna-7b/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/llava-v1.6-vicuna-7b/run.sh",
"chars": 269,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/nanodb/config.yaml",
"chars": 647,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 80"
},
{
"path": "reComputer/scripts/nanodb/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/nanodb/readme.md",
"chars": 247,
"preview": "# NanoDB\n\n## ref\n\n- <https://www.jetson-ai-lab.com/tutorial_nanodb.html>\n\n## access\n\n- using in machine, try `http://127"
},
{
"path": "reComputer/scripts/nanodb/run.sh",
"chars": 2571,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n\ncheck_disk_space() {\n "
},
{
"path": "reComputer/scripts/nanoowl/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/nanoowl/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/nanoowl/run.sh",
"chars": 264,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/nvblox/README.md",
"chars": 4602,
"preview": "# Jetson Example: Run NVBlox Mapping on NVIDIA Jetson \n\n\n\n[Isaac ROS NVBlox]("
},
{
"path": "reComputer/scripts/nvblox/clean.sh",
"chars": 2132,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/nvblox/config/orbbec_stereo_capability_probe.yaml",
"chars": 1026,
"preview": "depth_registration: false\nenable_point_cloud: false\nenable_colored_point_cloud: false\ndevice_preset: \"High Accuracy\"\nlas"
},
{
"path": "reComputer/scripts/nvblox/config/orbbec_vslam_mobile.yaml",
"chars": 1090,
"preview": "depth_registration: true\nenable_point_cloud: true\nenable_colored_point_cloud: true\ndevice_preset: \"High Accuracy\"\nlaser_"
},
{
"path": "reComputer/scripts/nvblox/config.yaml",
"chars": 617,
"preview": "ALLOWED_L4T_VERSIONS:\n - 36.4.0\n - 36.4.3\n - 36.4.4\nREQUIRED_DISK_SPACE: 60\nREQUIRED_MEM_SPACE: 14\nPACKAGES:\n - nvid"
},
{
"path": "reComputer/scripts/nvblox/docker/Dockerfile.nvblox_orbbec",
"chars": 1818,
"preview": "ARG BASE_IMAGE\nFROM ${BASE_IMAGE}\n\nARG ROS_DISTRO=humble\nENV DEBIAN_FRONTEND=noninteractive\nENV ROS_DISTRO=${ROS_DISTRO}"
},
{
"path": "reComputer/scripts/nvblox/docker/launch_nvblox.sh",
"chars": 6371,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nROS_DISTRO=\"${ROS_DISTRO:-humble}\"\nNVBLOX_LAUNCH_FILE=\"${NVBLOX_LAUNCH_FILE:-orbb"
},
{
"path": "reComputer/scripts/nvblox/docker/prepare_container_workspace.sh",
"chars": 29990,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nROS_DISTRO=\"${ROS_DISTRO:-humble}\"\nFORCE_REBUILD=\"${FORCE_REBUILD:-0}\"\nSETUP_IMAG"
},
{
"path": "reComputer/scripts/nvblox/host/orbbec_mobile_host.launch.py",
"chars": 1584,
"preview": "from launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.substitutions import L"
},
{
"path": "reComputer/scripts/nvblox/init.sh",
"chars": 122,
"preview": "#!/bin/bash\n\necho \"NVBlox preflight, image download, docker load, and demo setup are handled by 'reComputer run nvblox'."
},
{
"path": "reComputer/scripts/nvblox/lib/common.sh",
"chars": 29855,
"preview": "#!/usr/bin/env bash\n\nif [[ \"${SETUP_NVBOX_COMMON_SH:-0}\" == \"1\" ]]; then\n return 0\nfi\nreadonly SETUP_NVBOX_COMMON_SH=1\n"
},
{
"path": "reComputer/scripts/nvblox/onedrive_downloader.py",
"chars": 13225,
"preview": "#!/usr/bin/env python3\n\"\"\"Download public OneDrive/SharePoint share links with resume support.\"\"\"\n\nfrom __future__ impor"
},
{
"path": "reComputer/scripts/nvblox/run.sh",
"chars": 752,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nMODE=\"${NVBLOX_MODE:-a"
},
{
"path": "reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh",
"chars": 28871,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/nvblox/scripts/preflight.sh",
"chars": 2545,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/nvblox/scripts/prepare_container.sh",
"chars": 6559,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/nvblox/scripts/prepare_host.sh",
"chars": 5791,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/nvblox/scripts/run_demo.sh",
"chars": 23720,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/nvblox/start_nvblox_demo.sh",
"chars": 4238,
"preview": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=S"
},
{
"path": "reComputer/scripts/ollama/clean.sh",
"chars": 648,
"preview": "#!/bin/bash\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n# search local image\nimg_ta"
},
{
"path": "reComputer/scripts/ollama/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 15"
},
{
"path": "reComputer/scripts/ollama/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/ollama/run.sh",
"chars": 247,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n# tr"
},
{
"path": "reComputer/scripts/parler-tts/clean.sh",
"chars": 122,
"preview": "#!/bin/bash\n\n# get image\nsource ./getVersion.sh\n\n# remove docker image\nsudo docker rmi feiticeir0/parler-tts:${TAG_IMAGE"
},
{
"path": "reComputer/scripts/parler-tts/getVersion.sh",
"chars": 1648,
"preview": "#!/bin/bash\n# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.s"
},
{
"path": "reComputer/scripts/parler-tts/init.sh",
"chars": 128,
"preview": "#!/bin/bash\n\necho \"Creating models directory at /home/$USER/models\"\n\n# Create Model dir in User home\nmkdir /home/$USER/m"
},
{
"path": "reComputer/scripts/parler-tts/readme.md",
"chars": 2480,
"preview": "# Parler TTS Mini: Expresso\n\n\nParler-TTS Mini: Expresso is a fine-tuned version of Parler-TTS Mini v0.1 on the Expresso "
},
{
"path": "reComputer/scripts/parler-tts/run.sh",
"chars": 313,
"preview": "#!/bin/bash\n\nMODELS_DIR=/home/$USER/models\n\n# get L4T version\n# it exports a variable IMAGE_TAG\nsource ./getVersion.sh\n\n"
},
{
"path": "reComputer/scripts/qwen3.5-4b/Dockerfile.jetson",
"chars": 932,
"preview": "# Jetson Orin (sm_87) llama.cpp inference image\n# Build flow is maintained outside this repo; this file is kept here as "
},
{
"path": "reComputer/scripts/qwen3.5-4b/README.md",
"chars": 1998,
"preview": "# Jetson-Example: Run Qwen3.5-4B on NVIDIA Jetson\n\nThis example runs **Qwen3.5-4B** on Jetson Orin with **llama.cpp** an"
},
{
"path": "reComputer/scripts/qwen3.5-4b/clean.sh",
"chars": 1947,
"preview": "#!/bin/bash\nset -euo pipefail\n\nCONTAINER_NAME=\"qwen3.5-4b\"\n\nensure_docker_access() {\n if ! command -v docker >/dev/nu"
},
{
"path": "reComputer/scripts/qwen3.5-4b/config.yaml",
"chars": 662,
"preview": "# Tested and compatible JetPack/L4T versions.\nALLOWED_L4T_VERSIONS:\n - 36.3.0\n - 36.4.0\n - 36.4.3\n - 36.4.4\nREQUIRED"
},
{
"path": "reComputer/scripts/qwen3.5-4b/init.sh",
"chars": 123,
"preview": "#!/bin/bash\n\nsource \"$(dirname \"$(realpath \"$0\")\")/../utils.sh\"\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yam"
},
{
"path": "reComputer/scripts/qwen3.5-4b/run.sh",
"chars": 8761,
"preview": "#!/bin/bash\nset -euo pipefail\n\nCONTAINER_NAME=\"qwen3.5-4b\"\nIMAGE_NAME=\"${QWEN35_IMAGE_NAME:-llama-jetson}\"\nIMAGE_ARCHIVE"
},
{
"path": "reComputer/scripts/ros1-jp6/README.md",
"chars": 2023,
"preview": "# Jetson-Example: Run ROS 1 Noetic on NVIDIA Jetson\n\nThis example downloads a prebuilt ROS 1 Noetic Docker archive from "
},
{
"path": "reComputer/scripts/ros1-jp6/clean.sh",
"chars": 920,
"preview": "#!/bin/bash\nset -euo pipefail\n\nCONTAINER_NAME=\"${ROS1_JP6_CONTAINER_NAME:-ros1-jp6}\"\n\nensure_docker_access() {\n if ! "
},
{
"path": "reComputer/scripts/ros1-jp6/config.yaml",
"chars": 632,
"preview": "ALLOWED_L4T_VERSIONS:\n - 36.4.0\n - 36.4.3\n - 36.4.4\nREQUIRED_DISK_SPACE: 10\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n - nvidi"
},
{
"path": "reComputer/scripts/ros1-jp6/init.sh",
"chars": 123,
"preview": "#!/bin/bash\n\nsource \"$(dirname \"$(realpath \"$0\")\")/../utils.sh\"\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yam"
},
{
"path": "reComputer/scripts/ros1-jp6/run.sh",
"chars": 5180,
"preview": "#!/bin/bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nDOWNLOADER_SCRIPT=\"${SCRIPT_DI"
},
{
"path": "reComputer/scripts/run.sh",
"chars": 1125,
"preview": "#!/bin/bash\nhandle_error() {\n echo \"An error occurred. Exiting...\"\n exit 1\n}\ntrap 'handle_error' ERR\n\ncheck_is_jet"
},
{
"path": "reComputer/scripts/stable-diffusion-webui/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/stable-diffusion-webui/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/stable-diffusion-webui/run.sh",
"chars": 160,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/text-generation-webui/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/text-generation-webui/init.sh",
"chars": 570,
"preview": "#!/bin/bash\n\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirnam"
},
{
"path": "reComputer/scripts/text-generation-webui/run.sh",
"chars": 407,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n# do"
},
{
"path": "reComputer/scripts/ultralytics-yolo/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "reComputer/scripts/ultralytics-yolo/README.md",
"chars": 8956,
"preview": "# Jetson-Example: Run Ultralytics YOLO Platform Service on NVIDIA Jetson Orin 🚀(**Supported YOLOV11**)\n\n## One-Click Qui"
},
{
"path": "reComputer/scripts/ultralytics-yolo/clean.sh",
"chars": 1129,
"preview": "CONTAINER_NAME=\"ultralytics-yolo\"\n\n# Function to get L4T version\nget_l4t_version() {\n local l4t_version=\"\"\n local "
},
{
"path": "reComputer/scripts/ultralytics-yolo/config.yaml",
"chars": 679,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 32.6.1\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\n - 36.4.0\n "
},
{
"path": "reComputer/scripts/ultralytics-yolo/init.sh",
"chars": 121,
"preview": "#!/bin/bash\n\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\""
},
{
"path": "reComputer/scripts/ultralytics-yolo/run.sh",
"chars": 2082,
"preview": "#!/bin/bash\n\nCONTAINER_NAME=\"ultralytics-yolo\"\n\n# Function to get L4T version\nget_l4t_version() {\n local l4t_version="
},
{
"path": "reComputer/scripts/update.sh",
"chars": 900,
"preview": "#!/bin/bash\necho \"--update jetson-containers repo--\"\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\n\nJETSON_REPO_"
},
{
"path": "reComputer/scripts/utils.sh",
"chars": 7104,
"preview": "#!/bin/bash\n\ncheck_base_env() \n{\n # 1. Set color value\n RED=$(tput setaf 1)\n GREEN=$(tput setaf 2)\n YELLOW=$"
},
{
"path": "reComputer/scripts/whisper/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 25"
},
{
"path": "reComputer/scripts/whisper/init.sh",
"chars": 569,
"preview": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname"
},
{
"path": "reComputer/scripts/whisper/run.sh",
"chars": 146,
"preview": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./ru"
},
{
"path": "reComputer/scripts/yolov10/Dockerfile",
"chars": 466,
"preview": "FROM dustynv/l4t-pytorch:r35.3.1\nWORKDIR /opt\n\nRUN pip3 install --no-cache-dir --verbose gradio==4.31.5\n\nRUN git clone h"
},
{
"path": "reComputer/scripts/yolov10/README.md",
"chars": 3598,
"preview": "# Quickly Experience YOLOv10 on Jetson\n\n\n## Hello\n\n💡 Here's an example of quickly deploying YOLOv10 on a Jetson device.\n"
},
{
"path": "reComputer/scripts/yolov10/clean.sh",
"chars": 106,
"preview": "#!/bin/bash\n\nsudo docker rmi youjiang9977/yolov10-jetson:5.1.1\nsudo rm -rf /home/$USER/reComputer/yolov10\n"
},
{
"path": "reComputer/scripts/yolov10/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 20"
},
{
"path": "reComputer/scripts/yolov10/init.sh",
"chars": 602,
"preview": "#!/bin/bash\n\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\""
},
{
"path": "reComputer/scripts/yolov10/run.sh",
"chars": 289,
"preview": "#!/bin/bash\n\nsudo docker run -it --rm --net=host --runtime nvidia \\\n -v /var/run/docker.sock:/var/run/docker.sock \\\n "
},
{
"path": "reComputer/scripts/yolov8-rail-inspection/config.yaml",
"chars": 646,
"preview": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n - 35.3.1\n - 35.4.1\n - 35.5.0\n - 36.3.0\nREQUIRED_DISK_SPACE: 20"
},
{
"path": "reComputer/scripts/yolov8-rail-inspection/init.sh",
"chars": 122,
"preview": "#!/bin/bash\n\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\""
},
{
"path": "reComputer/scripts/yolov8-rail-inspection/readme.md",
"chars": 2008,
"preview": "# Abstract\nThis project harnesses YOLOv8 technology, specifically tailored for precise identification and counting of bo"
},
{
"path": "reComputer/scripts/yolov8-rail-inspection/run.sh",
"chars": 809,
"preview": "#!/bin/bash\n\ndocker pull yaohui1998/bolt_inspection:1.0\n\nif [ \"$(docker ps -aq -f name=yolov8_rain_inspection)\" ]; then\n"
},
{
"path": "setup.py",
"chars": 1678,
"preview": "from pathlib import Path\n\nfrom setuptools import setup\n\n\nREADME_PATH = Path(__file__).parent / \"README.md\"\nLONG_DESCRIPT"
}
]
About this extraction
This page contains the full source code of the Seeed-Projects/jetson-examples GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 177 files (348.5 KB), approximately 103.5k tokens, and a symbol index with 22 extracted functions, classes, methods, constants, and types. Use this extraction with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.