[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\n.github/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n!reComputer/scripts/nvblox/lib/\n!reComputer/scripts/nvblox/lib/**\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for libraries.\n#   
https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 luozhixin\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "recursive-include reComputer/scripts *\nglobal-exclude __pycache__\nglobal-exclude *.pyc *.pyo\nglobal-exclude *.png *.jpg *.jpeg *.gif *.bmp\n"
  },
  {
    "path": "README.md",
    "content": "# jetson-examples\n\n<div align=\"\">\n  <img alt=\"jetson\" width=\"1200px\" src=\"https://files.seeedstudio.com/wiki/reComputer-Jetson/jetson-examples/Jetson1200x300.png\">\n</dev>\n\n[![Discord](https://dcbadge.vercel.app/api/server/5BQCkty7vN?style=flat&compact=true)](https://discord.gg/5BQCkty7vN)\n\nThis repository provides examples for running AI models and applications on [NVIDIA Jetson devices](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) with a single command.\n\nThis repo builds upon the work of the [jetson-containers](https://github.com/dusty-nv/jetson-containers), [ultralytics](https://github.com/ultralytics/ultralytics) and other excellent projects. \n\n## Features\n- 🚀 **Easy Deployment:** Deploy state-of-the-art AI models on Jetson devices in one line.\n- 🔄 **Versatile Examples:** Supports text generation, image generation, computer vision and so on.\n- ⚡ **Optimized for Jetson:** Leverages Nvidia Jetson hardware for efficient performance.\n\n\n## Install\nTo install the package, run:\n\n```sh\npip3 install jetson-examples\n```\n\n> Notes: \n> - Check [here](./docs/install.md) for more installation methods \n> - To upgrade to the latest version, use:  `pip3 install jetson-examples --upgrade`.\n\n\n\n## Quickstart\nTo run and chat with [LLaVA](https://www.jetson-ai-lab.com/tutorial_llava.html), execute:\n\n```sh\nreComputer run llava\n```\n<div align=\"center\">\n  <img alt=\"jetson\" width=\"1200px\" src=\"./docs/assets/llava.png\">\n</div>\n\n## Example list\n\nHere are some examples that can be run:\n\n| Example                                          | Type                     | Model/Data Size | Docker Image Size | Command                                 | Supported JetPack |\n| ------------------------------------------------ | ------------------------ | --------------- | ---------- | --------------------------------------- | ------------------------------------------------ |\n| 🆕 
[Ultralytics-yolo](/reComputer/scripts/ultralytics-yolo/README.md) | Computer Vision |  | 15.4GB  | `reComputer run  ultralytics-yolo`  | 4.6, 5.1.1, 5.1.2, 5.1.3, 6.0, 6.1, 6.2 |\n| 🆕 [Deep-Live-Cam](/reComputer/scripts/deep-live-cam/README.md) | Face-swapping | 0.5GB | 20GB  | `reComputer run  deep-live-cam`  | 6.0 |\n| 🆕 llama-factory | Finetune LLM |  | 13.5GB  | `reComputer run  llama-factory`  | 5.1.1, 5.1.2, 5.1.3 |\n| 🆕 [ComfyUI](/reComputer/scripts/comfyui/README.md) |Computer Vision |  | 20GB  | `reComputer run comfyui`  | 5.1.1, 5.1.2, 5.1.3 |\n| [Depth-Anything-V2](/reComputer/scripts/depth-anything-v2/README.md) |Computer Vision |  | 15GB  | `reComputer run depth-anything-v2`  | 5.1.1, 5.1.2, 5.1.3 |\n| [Depth-Anything-V3](/reComputer/scripts/depth-anything-v3/README.md) |Computer Vision |  | 7.6GB  | `reComputer run depth-anything-v3`  | 6.1, 6.2, 6.2.1 |\n| 🆕 [Qwen3.5-4B](/reComputer/scripts/qwen3.5-4b/README.md) | Text (LLM) | 2.5GB | 0.2GB | `reComputer run qwen3.5-4b` | 6.1, 6.2, 6.2.1 |\n| [Depth-Anything](/reComputer/scripts/depth-anything/README.md) |Computer Vision |  | 12.9GB  | `reComputer run  depth-anything`  | 5.1.1, 5.1.2, 5.1.3 |\n| [Yolov10](/reComputer/scripts/yolov10/README.md)     | Computer Vision         | 7.2M               | 5.74 GB     | `reComputer run yolov10`                 | 5.1.1, 5.1.2, 5.1.3, 6.0 |\n| Llama3                                         | Text (LLM)               | 4.9GB           | 10.5GB     | `reComputer run llama3`                 | 5.1.1, 5.1.2, 5.1.3, 6.0 |\n| [gpt-oss](/reComputer/scripts/gpt-oss/README.md)     | Text (LLM)               | 39GB | 31.28GB    | `reComputer run gpt-oss`               | 6.1, 6.2, 6.2.1 |\n| [ros1-jp6](/reComputer/scripts/ros1-jp6/README.md)   | Robotics / ROS 1         | *    | 1.27GB     | `reComputer run ros1-jp6`             | 6.1, 6.2, 6.2.1 |\n| [nvblox](/reComputer/scripts/nvblox/README.md)       | Robotics / Mapping       | *    | 20.5GB+    | `reComputer run 
nvblox`                | 6.x |\n\n\n> Note: You should have enough space to run example, like `LLaVA`, at least `27.4GB` totally\n\nMore Examples can be found [examples.md](./docs/examples.md)\n\n## Calling Contributors Join Us!\n\n### How to work with us? \n\nWant to add your own example? Check out the [development guide](./docs/develop.md).\n\nWe welcome contributions to improve jetson-examples! If you have an example you'd like to share, please submit a pull request. Thank you to all of our contributors! 🙏 \n\nThis open call is listed in our [Contributor Project](https://github.com/orgs/Seeed-Studio/projects/6/views/1?filterQuery=jetson&pane=issue&itemId=64891723). If this is your first time joining us, [click here](https://github.com/orgs/Seeed-Studio/projects/6/views/1?pane=issue&itemId=30957479) to learn how the project works. We follow the steps with: \n\n\n- Assignments: We offer a variety of assignments to enhance wiki content, each with a detailed description.\n- Submission: Contributors can submit their content via a Pull Request after completing the assignments.\n- Review: Maintainers will merge the submission and record the contributions.\n\n**Contributors receive a $250 cash bonus as a token of appreciation.**\n\nFor any questions or further information, feel free to reach out via the GitHub issues page or contact edgeai@seeed.cc \n\n\n\n## TODO List\n\n- [ ] detect host environment and install what we need\n- [ ] all type jetson support checking list\n- [ ] try jetpack 6.0\n- [ ] check disk space enough or not before run\n- [ ] allow to setting some configs, such as `BASE_PATH`\n- [ ] support jetson-containers update\n- [ ] better table to show example's difference\n\n### 👥 Contributors\n\n<p align=\"center\"><a href=\"https://github.com/Seeed-Projects/jetson-examples/graphs/contributors\">\n  <img src=\"https://contributors-img.web.app/image?repo=Seeed-Projects/jetson-examples\" />\n</a></p>\n\n\n## License\nThis project is licensed under the MIT 
License. \n\n## Resources\n- https://github.com/dusty-nv/jetson-containers\n- https://www.jetson-ai-lab.com/\n- https://www.ultralytics.com/\n"
  },
  {
    "path": "build.sh",
    "content": "#!/bin/bash\n\n# 1 try clean older version\npip uninstall jetson-examples -y\n\n# 2 clean last build files\nrm -rf build/\n\n# 3 install latest version\npip install .\n\n# 5 build whl\nread -p \"build whl ? (y/n): \" choice\nif [[ $choice == \"y\" || $choice == \"Y\" ]]; then\n    python3 -m pip install --upgrade build\n    echo \"building...\"\n    rm -rf dist/\n    python3 -m build\n    echo \"build done.\"\nelse\n    echo \"skip build.\"\nfi\n\n# 6 publish to Test PyPI\nread -p \"publish to test PyPI ? (y/n): \" choice\nif [[ $choice == \"y\" || $choice == \"Y\" ]]; then\n    python3 -m pip install --upgrade twine\n    keyring --disable # https://github.com/pypa/twine/issues/847\n    echo \"publishing to Test PyPI...\"\n    python3 -m twine upload --repository testpypi dist/*\nelse\n    echo \"skip publish.\"\nfi\n\n\n# 7 publish to PyPI\nread -p \"[Danger!!] publish to PyPI ? (confirm/*): \" choice\nif [[ $choice == \"confirm\" || $choice == \"CONFIRM\" ]]; then\n    python3 -m pip install --upgrade twine\n    keyring --disable # https://twine.readthedocs.io/en/stable/#disabling-keyring\n    echo \"publishing to Prod PyPI...\"\n    python3 -m twine upload --repository pypi dist/*\nelse\n    echo \"skip publish.\"\nfi\n\necho 'clean & build & publish ok.'\n"
  },
  {
    "path": "docs/develop.md",
    "content": "# Develop\n\nThis section provides guidance on how to contribute to the `jetson-examples` repository. It is highly recommended to develop and run your project on a Jetson device for the best experience.\n\n## 0. Preparation\n\nFollow these steps to get started:\n\n```sh\n# Clone the repository\ngit clone https://github.com/Seeed-Projects/jetson-examples.git\n\n# Navigate to the repository\ncd jetson-examples\n\n# Install in 'develop mode'\npip install .\n\n# Test the installed module\nreComputer check\n\n# If everything is okay, you should see the following output:\n# Docker version...\n# Python 3...\n# ...\n```\n\n## 1. Project Structure\n\nThe project is structured as follows:\n\n- `docs/`: This directory contains the project's documents.\n  - `assets/`: This directory contains document assets, such as images.\n- `reComputer/`: This is the main directory of the Python module.\n  - `__init__.py`: This file is the initialization file for the Python module.\n  - `main.py`: This file contains the main logic code for the Python module.\n  - `scripts/`: This directory is used to store examples.\n    - `xxxxx/`: This is an example directory. Everything inside this directory will be installed into the system. You can save files of any type, such as images, Python scripts, executable files, etc.\n      - `init.sh`: **(optional)** This is the example init script. To initialize the project's initial data and environment.\n      - `run.sh`: **(MOST IMPORTANT)** This is the example startup script. It is the only entry point for your project.\n      - `readme.md`: **(optional)** This file provides an introduction to the example.\n    - `check.sh`: This is the checking script **(Not Finished yet)**.\n    - `run.sh`: This is the common startup script for examples.\n- `install.sh`: This script uses `curl` and `github` to install `jetson-examples`.\n- `pyproject.toml`: This file contains information on how to build and install `jetson-examples`.\n\n## 2. 
Create Your Project\n\n<img src=\"assets/lifetime.png\" width=\"500px\">\n\nFollow these steps to create an `example` in this project:\n\n```sh\n# 1 Declare your project name as an environment variable\nmy_project=hello-world\n\n# 2 Create a directory for your project\nmkdir -p reComputer/scripts/$my_project\n\n# 3 [required] Create the run.sh file\necho \"echo 'hello world'\" > reComputer/scripts/$my_project/run.sh\n\n# 4 [option] Create the readme.md file\necho -e \"# hello-world\\n\\n- Print \\`hello-world\\` to show how to add your project to this package\" > reComputer/scripts/$my_project/readme.md\n\n# 5 [option] Create the init.sh file\necho \"echo 'init env'\" > reComputer/scripts/$my_project/init.sh\n\n# 6 [option] Create the clean.sh file\necho \"echo 'clean data'\" > reComputer/scripts/$my_project/clean.sh\n```\n\nAfter completing these steps, you should see the file changes as shown in the image below:\n\n![changes](assets/file-changes.png)\n\nIf you are familiar with creating and editing directories or files, you can use your preferred method.\n\n## 3. Edit `$my_project/run.sh` to Customize Your Project\n\nUse your preferred IDE (e.g., Vim, VS Code) to edit `reComputer/scripts/$my_project/run.sh` and add the desired functionality:\n\n```sh\n# Inside reComputer/scripts/$my_project/run.sh\necho 'hello world'\n# TODO: Add code to achieve your desired functionality\n# ...\n```\n\n## 4. Test Your Project\n\nTo test your project, follow these steps:\n\n```sh\n# Reinstall to make your new project work with `reComputer`\npip install .\n\n# Run your new project with a one-line command\nreComputer run hello-world\n# INFO: Machine [Jetson AGX Orin] confirmed...\n# Running example: hello-world\n# ---- Example initialization ----\n# jetson-ai-lab existed.\n# ---- Example start ----\n# hello world\n# ---- Example done ----\n```\n\n## 5. 
(Optional) Add a `readme.md` File\n\nIf you want to provide additional information about your project, you can add a `readme.md` file. Use your preferred IDE to edit `reComputer/scripts/$my_project/readme.md`:\n\n```sh\n# hello-world\n- Print hello-world to show how to add your project to this package\n```\n\n## 6. (Optional) Submit a New Pull Request\n\nIf you wish to contribute your project to the `jetson-examples` repository, you can follow these steps:\n\n- 5.1 Fork this project.\n- 5.2 Create a new branch in your project.\n- 5.3 Commit the changes you made.\n- 5.4 Push the changes to your project.\n- 5.5 Create a pull request (`origin-git-repo/main <- your-git-repo/newbranch`) at [https://github.com/Seeed-Projects/jetson-examples/pulls](https://github.com/Seeed-Projects/jetson-examples/pulls).\n- 5.6 Wait for a code review.\n- 5.7 Once your code passes the review, it will be merged.\n- 5.8 Thank you for your contribution!\n"
  },
  {
    "path": "docs/examples.md",
    "content": "# Example list\n\nAll examples that can be run:\n\n| Example                                          | Type                     | Model Size | Image Size | Command                                      | Device   |\n| ------------------------------------------------ | ------------------------ | ---------- | ---------- | -------------------------------------------- | -------- |\n| whisper                                          | Audio                    | 1.5GB      | 6.0GB      | `reComputer run whisper`                     | USB-CAM* |\n| [yolov8-rail-inspection](/reComputer/scripts/yolov8-rail-inspection/readme.md) |Computer Vision(CV) | 6M | 13.8GB  | `reComputer run yolov8-rail-inspection`  |  | \n| [ultralytics-yolo](/reComputer/scripts/ultralytics-yolo/README.md) |Computer Vision(CV) | * | 15.4GB  | `reComputer run  ultralytics-yolo`  | |\n| [depth-anything](/reComputer/scripts/depth-anything/README.md) |Computer Vision(CV) | * | 12.9GB  | `reComputer run  depth-anything`  |  | \n| [depth-anything-v3](/reComputer/scripts/depth-anything-v3/README.md) |Computer Vision(CV) | * | 7.6GB  | `reComputer run depth-anything-v3`  |  | \n| [qwen3.5-4b](/reComputer/scripts/qwen3.5-4b/README.md) | Text (LLM) | 2.5GB | * | `reComputer run qwen3.5-4b` | |\n| [yolov10](/reComputer/scripts/yolov10/README.md)     | Computer Vision(CV)         | 7.2M               | 5.74 GB     | `reComputer run yolov10`                 | | \n| text-generation-webui                            | Text (LLM)               | 3.9GB      | 14.8GB     | `reComputer run text-generation-webui`       |          |\n| llama3                                           | Text (LLM)               | 4.9GB      | 10.5GB     | `reComputer run llama3`                      |          |\n| [gpt-oss](/reComputer/scripts/gpt-oss/README.md) | Text (LLM)               | *          | 31.28GB    | `reComputer run gpt-oss`                     |          |\n| 
[ros1-jp6](/reComputer/scripts/ros1-jp6/README.md) | Robotics / ROS 1         | *          | 1.27GB     | `reComputer run ros1-jp6`                    |          |\n| [nvblox](/reComputer/scripts/nvblox/README.md)   | Robotics / Mapping       | *          | 20.5GB+    | `reComputer run nvblox`                      | Gemini2  |\nLLaMA                                            | Text (LLM)               | 1.5GB      | 10.5GB     | `reComputer run Sheared-LLaMA-2.7B-ShareGPT` |          |\n| llava-v1.5                                       | Text + Vision (VLM)      | 13GB       | 14.4GB     | `reComputer run llava-v1.5-7b`               |          |\n| llava-v1.6                                       | Text + Vision (VLM)      | 13GB       | 20.3GB     | `reComputer run llava-v1.6-vicuna-7b`        |          |\n| LLaVA                                            | Text + Vision (VLM)      | 13GB       | 14.4GB     | `reComputer run llava`                       |          |\n| Live LLaVA                                       | Text + Vision (VLM)      | 13GB       | 20.3GB     | `reComputer run live-llava`                  | USB-CAM* |\n| stable-diffusion-webui                           | Image Generation         | 3.97G      | 7.3GB      | `reComputer run stable-diffusion-webui`      |          |\n| nanoowl                                          | Vision Transformers(ViT) | 613MB      | 15.1GB     | `reComputer run nanoowl`                     | USB-CAM* |\n| [nanodb](../reComputer/scripts/nanodb/readme.md) | Vector Database          | 76GB       | 7.0GB      | `reComputer run nanodb`                      |          |\n| [ollama](https://github.com/ollama/ollama)       | Inference Server         | *          | 10.5GB     | `reComputer run ollama`                      |          |\n| [TensorFlow MoveNet Thunder](/reComputer/scripts/MoveNet-Thunder/readme.md) |Computer Vision |  | 7.7GB  | `reComputer run  MoveNet-Thunder`  | USB-CAM*\n| [TensorFlow MoveNet 
Lightning](/reComputer/scripts/MoveNet-Lightning/readme.md) |Computer Vision |  | 7.48GB  | `reComputer run  MoveNet-Lightning`  | USB-CAM*\n| [TensorFlow MoveNet JS](/reComputer/scripts/MoveNetJS/readme.md) |Computer Vision |  | 56.21MB  | `reComputer run  MoveNetJS`  | USB-CAM*\n| [Parler-TTS mini: expresso](/reComputer/scripts/parler-tts/readme.md) |Audio |  | 6.9GB  | `reComputer run  parler-tts`  |\n"
  },
  {
    "path": "docs/install.md",
    "content": "# Install\n\n- use the way you like to install\n\n## PyPI(recommend)\n\n```sh\npip install jetson-examples\n```\n\n## Linux (github trick)\n\n```sh\ncurl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh\n```\n\n## Github (for Developer)\n\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n"
  },
  {
    "path": "docs/publish.md",
    "content": "# publish\n\n## pypi.org\n\n```sh\n# tools update\npython3 -m pip install --upgrade build\npython3 -m pip install --upgrade twine\n```\n\n### Test\n\n```sh\n# 1 build\npython3 -m build\n\n# 2 publish\npython3 -m twine upload --repository testpypi dist/*\n### WARNING: do not share you API token !!\n\n# 3 test\npip install -i https://test.pypi.org/simple/ jetson-examples\n### make sure version number right\n```\n\n### Prod\n\n```sh\n# 1 build\npython3 -m build\n\n# 2 publish\npython3 -m twine upload --repository pypi dist/*\n### WARNING: do not share you API token !!\n\n# 3 test\npip install jetson-examples --upgrade\n### make sure version number right\n```\n"
  },
  {
    "path": "install.sh",
    "content": "#!/bin/bash\n# TODO: make sure python3 in host is OK\ncd /tmp && \\\ngit clone https://github.com/Seeed-Projects/jetson-examples && \\\ncd jetson-examples && \\\npip install . && \\\necho \"reComputer installed. try 'reComputer run whisper' to enjoy!\""
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools>=61.0.0\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"jetson-examples\"\nversion = \"0.2.5\"\nauthors = [{ name = \"luozhixin\", email = \"zhixin.luo@seeed.cc\" }]\ndescription = \"Running Gen AI models and applications on NVIDIA Jetson devices with one-line command\"\nreadme = \"README.md\"\nrequires-python = \">=3.8\"\nclassifiers = [\n  \"Programming Language :: Python :: 3\",\n  \"License :: OSI Approved :: MIT License\",\n  \"Operating System :: OS Independent\",\n]\nkeywords = [\n  \"llama\",\n  \"llava\",\n  \"gpt\",\n  \"llm\",\n  \"nvidia\",\n  \"jetson\",\n  \"multimodal\",\n  \"jetson orin\",\n]\n\n[project.scripts]\nreComputer = \"reComputer.main:run_script\"\n\n[project.urls]\nHomepage = \"https://github.com/Seeed-Projects/jetson-examples\"\nIssues = \"https://github.com/Seeed-Projects/jetson-examples/issues\"\n\n# Tools settings -------------------------------------------------------------------------------------------------------\n\n[tool.setuptools.packages.find]\nwhere = [\".\"]\ninclude = [\"reComputer\"]\n\n[tool.setuptools]\ninclude-package-data = true\n\n[tool.setuptools.package-data]\n\"reComputer\" = [\"scripts/**/*\"]\n"
  },
  {
    "path": "reComputer/__init__.py",
    "content": "__version__ = \"0.1.3\"\n"
  },
  {
    "path": "reComputer/main.py",
    "content": "import os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\n\ndef scripts_roots():\n    pkg_root = Path(__file__).resolve().parent\n    candidates = [\n        Path.cwd() / \"reComputer\" / \"scripts\",\n        pkg_root / \"scripts\",\n    ]\n\n    source_hint = os.environ.get(\"JETSON_EXAMPLES_SOURCE\")\n    if source_hint:\n        candidates.insert(1, Path(source_hint).expanduser().resolve() / \"reComputer\" / \"scripts\")\n\n    # Keep order while removing duplicates\n    dedup = []\n    seen = set()\n    for item in candidates:\n        key = str(item)\n        if key not in seen:\n            seen.add(key)\n            dedup.append(item)\n    return dedup\n\n\ndef scripts_root():\n    for root in scripts_roots():\n        if root.is_dir():\n            return str(root)\n    # fallback to package default path\n    return str(scripts_roots()[0])\n\n\ndef path_of_script(name):\n    for root in scripts_roots():\n        script_path = root / name\n        if script_path.exists():\n            return str(script_path)\n    return str(scripts_roots()[0] / name)\n\n\ndef list_all_examples(folder_path):\n    directory_names = []\n    for item in os.listdir(folder_path):\n        item_path = os.path.join(folder_path, item)\n        if os.path.isdir(item_path):\n            directory_names.append(item)\n    return directory_names\n\n\ndef run_script():\n\n    if len(sys.argv) == 3:\n        if sys.argv[1] == \"run\":\n            example_name = sys.argv[2]\n            # TODO: maybe use python instead of shell is better\n            subprocess.run([\"bash\", path_of_script(\"run.sh\"), example_name])\n        elif sys.argv[1] == \"clean\":\n            example_name = sys.argv[2]\n            subprocess.run([\"bash\", path_of_script(\"clean.sh\"), example_name])\n        else:\n            print(\"Only Support `run` or `clean` for now. 
try `reComputer run llava` .\")\n    elif len(sys.argv) == 2:\n        if sys.argv[1] == \"check\":\n            subprocess.run([\"bash\", path_of_script(\"check.sh\")])\n        elif sys.argv[1] == \"update\":\n            subprocess.run([\"bash\", path_of_script(\"update.sh\")])\n        elif sys.argv[1] == \"list\":\n            example_folder = scripts_root()\n            directories = list_all_examples(example_folder)\n            print(\"example list:\")\n            index = 1\n            for directory in directories:\n                print(\"{:03d}\".format(index), \"|\", directory)\n                index += 1\n            print(\"-end-\")\n        else:\n            print(\"reComputer help:\")\n            print(\"---\")\n            print(\"`reComputer check`   | check system.\")\n            print(\"`reComputer update`  | update jetson-ai-lab.\")\n            print(\"`reComputer list`    | list all examples.\")\n            print(\"`reComputer run xxx` | run an example.\")\n            print(\"`reComputer clean xxx` | clean an example's data.\")\n            print(\"---\")\n    else:\n        print(\"Error Usage! try `reComputer help`.\")\n\n\nif __name__ == \"__main__\":\n    pass\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Lightning/clean.sh",
    "content": "#!/bin/bash\n\n\n# get image\nsource ./getVersion.sh\n\n# remove docker image\nsudo docker rmi feiticeir0/movenet:tf2-${IMAGE_TAG}\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Lightning/getVersion.sh",
    "content": "#!/bin/bash\n# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh\n# and llama-factory init script\n\n# we only have images for these - 36.2.0 works on 36.3.0\nL4T_VERSIONS=(\"35.3.1\", \"35.4.1\", \"36.2.0\", \"36.3.0\")\n\nARCH=$(uname -i)\n# echo \"ARCH:  $ARCH\"\n\nif [ $ARCH = \"aarch64\" ]; then\n\tL4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)\n\n\tif [ -z \"$L4T_VERSION_STRING\" ]; then\n\t\t#echo \"reading L4T version from \\\"dpkg-query --show nvidia-l4t-core\\\"\"\n\n\t\tL4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)\n\t\tL4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })\n\n\t\t#echo ${L4T_VERSION_ARRAY[@]}\n\t\t#echo ${#L4T_VERSION_ARRAY[@]}\n\n\t\tL4T_RELEASE=${L4T_VERSION_ARRAY[0]}\n\t\tL4T_REVISION=${L4T_VERSION_ARRAY[1]}\n\telse\n\t\t#echo \"reading L4T version from /etc/nv_tegra_release\"\n\n\t\tL4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')\n\t\tL4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')\n\tfi\n\n\tL4T_REVISION_MAJOR=${L4T_REVISION:0:1}\n\tL4T_REVISION_MINOR=${L4T_REVISION:2:1}\n\n\tL4T_VERSION=\"$L4T_RELEASE.$L4T_REVISION\"\n\n\tIMAGE_TAG=$L4T_VERSION\n\n\t#echo \"L4T_VERSION :  $L4T_VERSION\"\n\t#echo \"L4T_RELEASE :  $L4T_RELEASE\"\n\t#echo \"L4T_REVISION:  $L4T_REVISION\"\n\nelif [ $ARCH != \"x86_64\" ]; then\n\techo \"unsupported architecture:  $ARCH\"\n\texit 1\nfi\n\n\nif [[ ! \" ${L4T_VERSIONS[@]} \" =~ \" ${L4T_VERSION} \" ]]; then\n    echo \"L4T_VERSION is not in the allowed versions list. Exiting.\"\n    exit 1\nfi\n\n# check if 36 to change IMAGE_TAG\nif [ ${L4T_RELEASE} -eq \"36\" ]; then\n\t# image tag will be 2.0\n\tIMAGE_TAG=\"36.2.0\"\nfi\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Lightning/init.sh",
    "content": "#!/bin/bash\n\n# Let's allow connections\nxhost +local:docker\n\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Lightning/readme.md",
    "content": "# MoveNet\n\nMoveNet is a ultra fast and accurate pose detection model.\n\nWe're demonstrating here using reComputer J402 and with MoveNet Lightning version\n\n![movenet working](images/dance_movenet.gif)\n\nYou can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)\n\n## Getting started\n#### Prerequisites\n* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)\n* Webcam connected to reComputer\n* Graphical desktop\n* Docker installed\n\n## Instalation\nPyPI (best)\n\n```bash\npip install jetson-examples\n```\n\n## Usage\n1. Type the following command in a terminal\n```bash\nreComputer run MoveNet-Lightning\n```\n2. Start moving in front of the camera\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Lightning/run.sh",
    "content": "#!/bin/bash\n\n# get L4T version\n# it exports a variable IMAGE_TAG\nsource ./getVersion.sh\n\n# pull docker image\n\ndocker pull feiticeir0/movenet-lightning:tf2-${IMAGE_TAG}\"\n\ndocker run \\\n\t-e DISPLAY=$DISPLAY \\\n\t--runtime=nvidia \\\n\t--rm \\\n\t--device /dev/video0 \\\n\t-v /tmp/.X11-unix:/tmp/.X11-unix \\\n\tfeiticeir0/movenet-lightning:tf2-${IMAGE_TAG}\"\n\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Thunder/clean.sh",
    "content": "#!/bin/bash\n\n# get image\nsource ./getVersion.sh\n\n# remove docker image\nsudo docker rmi feiticeir0/movenet:tf2-${TAG_IMAGE}\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Thunder/getVersion.sh",
    "content": "#!/bin/bash\n# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh\n# and llama-factory init script\n\n# we only have images for these - 36.2.0 works on 36.3.0\nL4T_VERSIONS=(\"35.3.1\", \"35.4.1\", \"36.2.0\", \"36.3.0\")\n\nARCH=$(uname -i)\n# echo \"ARCH:  $ARCH\"\n\nif [ $ARCH = \"aarch64\" ]; then\n\tL4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)\n\n\tif [ -z \"$L4T_VERSION_STRING\" ]; then\n\t\t#echo \"reading L4T version from \\\"dpkg-query --show nvidia-l4t-core\\\"\"\n\n\t\tL4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)\n\t\tL4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })\n\n\t\t#echo ${L4T_VERSION_ARRAY[@]}\n\t\t#echo ${#L4T_VERSION_ARRAY[@]}\n\n\t\tL4T_RELEASE=${L4T_VERSION_ARRAY[0]}\n\t\tL4T_REVISION=${L4T_VERSION_ARRAY[1]}\n\telse\n\t\t#echo \"reading L4T version from /etc/nv_tegra_release\"\n\n\t\tL4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')\n\t\tL4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')\n\tfi\n\n\tL4T_REVISION_MAJOR=${L4T_REVISION:0:1}\n\tL4T_REVISION_MINOR=${L4T_REVISION:2:1}\n\n\tL4T_VERSION=\"$L4T_RELEASE.$L4T_REVISION\"\n\n\tIMAGE_TAG=$L4T_VERSION\n\n\t#echo \"L4T_VERSION :  $L4T_VERSION\"\n\t#echo \"L4T_RELEASE :  $L4T_RELEASE\"\n\t#echo \"L4T_REVISION:  $L4T_REVISION\"\n\nelif [ $ARCH != \"x86_64\" ]; then\n\techo \"unsupported architecture:  $ARCH\"\n\texit 1\nfi\n\n\nif [[ ! \" ${L4T_VERSIONS[@]} \" =~ \" ${L4T_VERSION} \" ]]; then\n    echo \"L4T_VERSION is not in the allowed versions list. Exiting.\"\n    exit 1\nfi\n\n# check if 36 to change IMAGE_TAG\nif [ ${L4T_RELEASE} -eq \"36\" ]; then\n\t# image tag will be 2.0\n\tIMAGE_TAG=\"36.2.0\"\nfi\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Thunder/init.sh",
    "content": "#!/bin/bash\n\n# Let's allow connections\nxhost +local:docker\n\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Thunder/readme.md",
    "content": "# MoveNet\n\nMoveNet is a ultra fast and accurate pose detection model.\n\nWe're demonstrating here using reComputer J402 and with MoveNet Thunder version\n\n![movenet working](images/dance_movenet.gif)\n\nYou can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)\n\n## Getting started\n#### Prerequisites\n* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)\n* Webcam connected to reComputer\n* Graphical desktop\n* Docker installed\n\n## Instalation\nPyPI (best)\n\n```bash\npip install jetson-examples\n```\n\n## Usage\n1. Type the following command in a terminal\n```bash\nreComputer run MoveNet-Thunder\n```\n2. Start moving in front of the camera\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNet-Thunder/run.sh",
    "content": "#!/bin/bash\n\n# get L4T version\n# it exports a variable IMAGE_TAG\nsource ./getVersion.sh\n\n# pull docker image\ndocker pull feiticeir0/movenet-thunder:tf2-${IMAGE_TAG}\n\ndocker run \\\n\t-e DISPLAY=$DISPLAY \\\n\t--runtime=nvidia \\\n\t--rm \\\n\t--device /dev/video0 \\\n\t-v /tmp/.X11-unix:/tmp/.X11-unix \\\n\tfeiticeir0/movenet-thunder:tf2-${IMAGE_TAG}\n\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNetJS/clean.sh",
    "content": "#!/bin/bash\n\n# remove docker image\nsudo docker rmi feiticeir0/movenetjs:latest \n"
  },
  {
    "path": "reComputer/scripts/MoveNetJS/readme.md",
    "content": "# MoveNet\n\nMoveNet is a ultra fast and accurate pose detection model.\n\nWe're demonstrating here using reComputer J402\n\n![movenetjs working](images/dance.gif)\n\nYou can get more information on MoveNet from [TensorFlow](https://www.tensorflow.org/hub/tutorials/movenet)\n\n## Getting started\n#### Prerequisites\n* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)\n* Webcam connected (one or the other)\n  * to the reComputer\n  * the computer you're using (remotely connected to the reComputer)\n* Docker installed\n\n## Instalation\nPyPI (best)\n\n```bash\npip install jetson-examples\n```\n\n## Usage\n### Method 1\n##### If you're running inside your reComputer\n1. Type the following command in a terminal\n```bash\nreComputer run MoveNetJS\n```\n2. Open a web browser and go to [http://localhost:5000](http://localhost:5000)\n3. Give permission to access webcam and wait a few seconds:\n   1. First will appear the webcam feed\n   2. Next will appear the lines estimating the pose\n4. Start dancing\n\n### Method 2\n##### If you want to connect remotely with ssh to the reComputer\n1. Connect using SSH but redirecting the 5000 port\n```bash\nssh -L 5000:localhost:5000 <username>@<reComputer_IP>\n```\n2. Type the following command in a terminal\n```bash\nreComputer run movenetjs\n```\n2. Open a web browser (on your machine) and go to [http://localhost:5000](http://localhost:5000)\n3. Give permission to access webcam and wait a few seconds:\n   1. First will appear the webcam feed\n   2. Next will appear the lines estimating the pose\n4. Start dancing\n\n**note** Firefox may fail showing webcam feed or pose estimation\n\n"
  },
  {
    "path": "reComputer/scripts/MoveNetJS/run.sh",
    "content": "#!/bin/bash\n\n# pull docker image\n\ndocker push feiticeir0/movenetjs:latest\n\ndocker run \\\n\t--rm \\\n\t-p 5000:5000 \\\n\tfeiticeir0/movenetjs:latest\n\n\n\n"
  },
  {
    "path": "reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag local_llm) \\\npython3 -m local_llm.chat --api=mlc \\\n--model princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT"
  },
  {
    "path": "reComputer/scripts/audiocraft/README.md",
    "content": "# AudioCraft Deployment on Jetson in One Line \n\n## Hello\n\n💡 In this demo, we refer to jetson-container to deploy audiocraft on Jetson devices. And generate music using a reference example. \n\n🔥 Hightlights:\n- **Audiocraft** is a tool designed for creating and manipulating audio content. 🎶\n- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨\n- **Jetson** is powerful AI hardware platform for edge computing.💻\n\nGet your Jetson device ready and customize sounds with me.🚀\n\n\n## Getting Started\n\n- install **jetson-examples** by pip:\n    ```sh\n    pip3 install jetson-examples\n    ```\n- restart reComputer \n    ```sh\n    sudo restart\n    ```\n- run audiocraft on jetson in one line:\n    ```sh\n    reComputer run audiocraft\n    ```\n\n## Reference\n- https://github.com/dusty-nv/jetson-containers/tree/master/packages/audio/audiocraft\n- https://github.com/facebookresearch/audiocraft\n\n\n\n"
  },
  {
    "path": "reComputer/scripts/audiocraft/clean.sh",
    "content": "#!/bin/bash\n\n# TODO: clean old container\ndocker rmi $(/home/$USER/reComputer/jetson-containers/autotag audiocraft)\n\n"
  },
  {
    "path": "reComputer/scripts/audiocraft/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/audiocraft/init.sh",
    "content": "#!/bin/bash\n\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/audiocraft/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\njetson-containers run $(autotag audiocraft)\n"
  },
  {
    "path": "reComputer/scripts/check.sh",
    "content": "script_dir=$(dirname \"$0\")\ndocker --version && \\\npython3 -V && \\\npython -V && \\\necho \"now we can use more shell in $script_dir\""
  },
  {
    "path": "reComputer/scripts/clean.sh",
    "content": "#!/bin/bash\n\ncheck_is_jetson_or_not() {\n    model_file=\"/proc/device-tree/model\"\n    \n    if [ -f \"/proc/device-tree/model\" ]; then\n        model=$(tr -d '\\0' < /proc/device-tree/model | tr '[:upper:]' '[:lower:]')\n        if [[ $model =~ jetson|orin|nv|agx ]]; then\n            echo \"INFO: machine[$model] confirmed...\"\n        else\n            echo \"WARNING: machine[$model] maybe not support...\"\n            exit 1\n        fi\n    else\n        echo \"ERROR: machine[$model] not support this...\"\n        exit 1\n    fi\n}\ncheck_is_jetson_or_not\n\necho \"clean example：$1\"\nBASE_PATH=/home/$USER/reComputer\n# TODO: 要一个二次确认\necho \"----clean example start----\"\ncd $JETSON_REPO_PATH\nscript_dir=$(dirname \"$0\")\nstart_script=$script_dir/$1/clean.sh\nif [ -f $start_script ]; then\n    bash $start_script\nelse\n    echo \"ERROR: Example[$1]/clean.sh Not Found.\"\nfi\necho \"----clean example done----\"\n"
  },
  {
    "path": "reComputer/scripts/comfyui/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/comfyui/README.md",
    "content": "# Jetson-Example: Run ComfyUI (Stable Diffusion GUI) on NVIDIA Jetson Orin 🚀\n\n## One-Click Quick Deployment of Plug-and-Play Stable Diffusion GUI\n<p align=\"center\">\n  <img src=\"images/comfyui.png\" alt=\"comfyui\">\n</p>\n\n## **Introduction** 📘\n[ComfyUI](https://github.com/comfyanonymous/ComfyUI) will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. \nIn this project, you can quickly deploy ComfyUI on Nvidia Jetson Orin devices with one click. \n\n\n\n## **Key Features**:\n- **One-click installation and configuration support for Nvidia Jetson Orin devices.**\n- **GPU acceleration to optimize the performance of stable diffusion pipelines.**\n- Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.\n- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/), [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/), [Stable Cascade](https://comfyanonymous.github.io/ComfyUI_examples/stable_cascade/), [SD3](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) and [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)\n- [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)\n- Asynchronous Queue system\n- Many optimizations: Only re-executes the parts of the workflow that changes between executions.\n- Smart memory management: can automatically run models on GPUs with as low as 1GB vram.\n\n  For other features, please refer to the original project [ComfyUI](https://github.com/comfyanonymous/ComfyUI).\n\n  Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)\n\n\n### Get a Jetson Orin Device 🛒\n| Device Model | Description | Link |\n|--------------|-------------|------|\n| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy 
Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |\n| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |\n\n## **Quickstart** ⚡\n\n### Modify Docker Daemon Configuration (Optional)\nTo enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:\n\n```json\n{\n  \"default-runtime\": \"nvidia\",\n  \"runtimes\": {\n    \"nvidia\": {\n      \"path\": \"nvidia-container-runtime\",\n      \"runtimeArgs\": []\n    }\n  },\n  \"storage-driver\": \"overlay2\",\n  \"data-root\": \"/var/lib/docker\",\n  \"log-driver\": \"json-file\",\n  \"log-opts\": {\n    \"max-size\": \"100m\",\n    \"max-file\": \"3\"\n  },\n  \"no-new-privileges\": true,\n  \"experimental\": false\n}\n```\n\nAfter modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:\n\n```sh\nsudo systemctl restart docker\n```\n\n### **Installation via PyPI (Recommended)** 🐍\n1. Install the package:\n    ```sh\n    pip install jetson-examples\n    ```\n\n2. Restart your reComputer:\n    ```sh\n    sudo reboot\n    ```\n\n3. 
Run ComfyUI with one command:\n    ```sh\n    reComputer run comfyui\n    ```\n- **Input Dir**: Mount the input directory in Docker to the host directory `~/ComfyUI/input`.\n- **Output Dir**: Mount the output directory in Docker to the host directory `~/ComfyUI/output`.\n\n- **Models Dir**: Mount the models directory in Docker to the host directory `~/ComfyUI/models`.\n\n\n## **For more tutorials** 🔧\n- [ComfyUI Basic Tutorial VN](https://comfyanonymous.github.io/ComfyUI_tutorial_vn/)\n- [ComfyUI](https://github.com/comfyanonymous/ComfyUI)\n- [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/)\n- [Comfy Org](https://www.comfy.org/)\n\n\n## **Shortcuts**\n\n| Keybind                            | Explanation                                                                                                        |\n|------------------------------------|--------------------------------------------------------------------------------------------------------------------|\n| Ctrl + Enter                       | Queue up current graph for generation                                                                              |\n| Ctrl + Shift + Enter               | Queue up current graph as first for generation                                                                     |\n| Ctrl + Z/Ctrl + Y                  | Undo/Redo                                                                                                          |\n| Ctrl + S                           | Save workflow                                                                                                      |\n| Ctrl + O                           | Load workflow                                                                                                      |\n| Ctrl + A                           | Select all nodes                                                                                                   |\n| Alt + C                            | Collapse/uncollapse 
selected nodes                                                                                 |\n| Ctrl + M                           | Mute/unmute selected nodes                                                                                         |\n| Ctrl + B                           | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through)            |\n| Delete/Backspace                   | Delete selected nodes                                                                                              |\n| Ctrl + Backspace                   | Delete the current graph                                                                                           |\n| Space                              | Move the canvas around when held and moving the cursor                                                             |\n| Ctrl/Shift + Click                 | Add clicked node to selection                                                                                      |\n| Ctrl + C/Ctrl + V                  | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes)                     |\n| Ctrl + C/Ctrl + Shift + V          | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |\n| Shift + Drag                       | Move multiple selected nodes at the same time                                                                      |\n| Ctrl + D                           | Load default graph                                                                                                 |\n| Alt + `+`                          | Canvas Zoom in                                                                                                     |\n| Alt + `-`                          | Canvas Zoom out                                                                                                    |\n| Ctrl + Shift + 
LMB + Vertical drag | Canvas Zoom in/out                                                                                                 |\n| Q                                  | Toggle visibility of the queue                                                                                     |\n| H                                  | Toggle visibility of history                                                                                       |\n| R                                  | Refresh graph                                                                                                      |\n| Double-Click LMB                   | Open node quick search palette                |\n\n\n## License\n\nThis project is licensed under the GNU General Public License v3.0\n"
  },
  {
    "path": "reComputer/scripts/comfyui/clean.sh",
    "content": "#!/bin/bash\nCONTAINER_NAME=\"comfyui\"\nIMAGE_NAME=\"yaohui1998/comfyui\"\n\nsudo docker stop $CONTAINER_NAME\nsudo docker rm $CONTAINER_NAME\nsudo docker rmi $IMAGE_NAME\n\nsudo rm -r /home/$USER/reComputer/ComfyUI\n"
  },
  {
    "path": "reComputer/scripts/comfyui/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\nREQUIRED_DISK_SPACE: 30  # in GB\nREQUIRED_MEM_SPACE: 15\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/comfyui/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\n# create folder.\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\n"
  },
  {
    "path": "reComputer/scripts/comfyui/run.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"comfyui\"\nIMAGE_NAME=\"yaohui1998/comfyui\"\n\n# Pull the latest image\ndocker pull $IMAGE_NAME\n\ncd /home/$USER/reComputer/\ngit clone https://github.com/comfyanonymous/ComfyUI.git\n\n\n# Check if the container with the specified name already exists\nif [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then\n    echo \"Container $CONTAINER_NAME already exists. Starting and attaching...\"\n    docker start $CONTAINER_NAME\n    docker exec -it $CONTAINER_NAME /bin/bash\nelse\n    echo \"Container $CONTAINER_NAME does not exist. Creating and starting...\"\n    docker run -it --rm \\\n        --name $CONTAINER_NAME \\\n        --privileged \\\n        --network host \\\n        -v /home/$USER/reComputer/ComfyUI:/usr/src/ComfyUI-Seeed \\\n        -v /tmp/.X11-unix:/tmp/.X11-unix \\\n        -v /dev/*:/dev/* \\\n        -v /etc/localtime:/etc/localtime:ro \\\n        --runtime nvidia \\\n        $IMAGE_NAME\nfi\n"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/Dockerfile",
    "content": "\nFROM yaohui1998/deep-live-cam:0.1\n\nWORKDIR /usr/src/Deep-Live-Cam\n\nCMD [\"python3\", \"run.py\", \"--execution-provider\", \"cuda\"]"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/README.md",
    "content": "# Jetson-Example: Run Deep Live Cam on Seeed Studio NVIDIA AGX Orin Developer Kit 🚀\n\nThis project provides a one-click deployment of the Deep Live Cam AI face-swapping project on the [Seeed Studio Jetson AGX Orin Developer Kit](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html), retaining all the features of the [original project](https://github.com/hacksider/Deep-Live-Cam) and supporting functionalities such as image-to-image, image-to-video, and image-to-webcam.\n\n<p align=\"center\">\n  <img src=\"images/WebUI.png\" alt=\"WebUI\">\n</p>\n\nAll models and inference engine implemented in this project are from the official [Deep-Live-Cam](https://github.com/hacksider/Deep-Live-Cam).\n\n## Get a Jetson Orin Device 🛒\n| Device Model  | Link |\n|--------------|------|\n| Jetson AGX Orin Dev Kit 32G | [Buy Here](https://www.seeedstudio.com/NVIDIA-Jetson-AGX-Orin-Developer-Kit-p-5314.html) |\n| Jetson AGX Orin Dev Kit 64G | [Buy Here](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html) |\n\n## New Features 🔥\n### Resizable Preview Window\n\nDynamically improve the performance by using the --resizable parameter\n![resizable-gif](./images/resizable.gif)\n\n### Face Mapping\n\nTrack faces and change it on the fly\n\n![face_mapping_source](./images/face_mapping_source.gif)\n\nsource video\n\n![face-mapping](./images/face_mapping.png)\n\nTick this switch\n\n![face-mapping2](./images/face_mapping2.png)\n\nMap the faces\n\n![face_mapping_result](./images/face_mapping_result.gif)\n\nAnd see the magic!\n\n> The images in the \"New Features\" section are sourced from the [github community](https://github.com/hacksider/Deep-Live-Cam).\n\n## 🥳Getting Started\n### 📜Prerequisites\n- AGX Orin Developer Kit [(🛒Buy Here)](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html)\n- Jetpack 6.0\n- USB Camera (optional)\n\n\n### Modify Docker Daemon Configuration (Optional)\nTo 
enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:\n\n```json\n{\n  \"default-runtime\": \"nvidia\",\n  \"runtimes\": {\n    \"nvidia\": {\n      \"path\": \"nvidia-container-runtime\",\n      \"runtimeArgs\": []\n    }\n  },\n  \"storage-driver\": \"overlay2\",\n  \"data-root\": \"/var/lib/docker\",\n  \"log-driver\": \"json-file\",\n  \"log-opts\": {\n    \"max-size\": \"100m\",\n    \"max-file\": \"3\"\n  },\n  \"no-new-privileges\": true,\n  \"experimental\": false\n}\n```\n\nAfter modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:\n\n```sh\nsudo systemctl restart docker\n```\n\n\n### 🚀Installation\n\n\nPyPI(recommend)\n  ```sh\n  pip install jetson-examples\n  ```\nLinux (github trick)\n```sh\ncurl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh\n```\nGithub (for Developer)\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n### 📋Usage\n1. Run code:\n    ```sh\n    reComputer run deep-live-cam\n    ```\n\n2. An `image` folder will be created in the user's home directory, where templates and the face images or videos that need to be swapped can be placed.\n\n3. Click `Select a face` to choose an image of a face.\n\n4. Click the `Select a target` button to choose a target face image.\n\n5. Click `Preview` to display the transformed result, and click `Start` to save the result to the specified directory without displaying it.\n\n6. Click `Preview` to display the transformed result, and click `Start` to save the result to the specified directory without displaying it.\n\n7. You can choose the `Face enhancer` to enhance facial details and features.\n\n8. Click `Live` to open the webcam for real-time conversion. 
Please connect a USB camera before starting the program.\n\n> ⚠️ **Note**: The first time you convert an image, it may take approximately two minutes.\n\n## 🙏🏻Thanks\n[Deep-Live-Cam](https://github.com/hacksider/Deep-Live-Cam)\n\n## 💨Contributing\n\nWe welcome contributions from the community. Please fork the repository and create a pull request with your changes.\n\n\n## 🙅‍Disclaimer\nThis software is meant to be a productive contribution to the rapidly growing AI-generated media industry. It will help artists with tasks such as animating a custom character or using the character as a model for clothing etc.\n\nThe developers of this software are aware of its possible unethical applications and are committed to take preventative measures against them. It has a built-in check which prevents the program from working on inappropriate media including but not limited to nudity, graphic content, sensitive material such as war footage etc. We will continue to develop this project in the positive direction while adhering to law and ethics. This project may be shut down or include watermarks on the output if requested by law.\n\nUsers of this software are expected to use this software responsibly while abiding by local laws. If the face of a real person is being used, users are required to get consent from the concerned person and clearly mention that it is a deepfake when posting content online. Developers of this software will not be responsible for actions of end-users.\n\n## ✅License\n\nThis project is licensed under the AGPL-3.0 License.\n"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/clean.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"deep-live-cam\"\nIMAGE_NAME=\"yaohui1998/deep-live-cam:1.0\"\n\nsudo docker stop $CONTAINER_NAME\nsudo docker rm $CONTAINER_NAME\nsudo docker rmi $IMAGE_NAMEs\nsudo rm -r ~/images"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/config.yaml",
    "content": "\n# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 36.3.0\nREQUIRED_DISK_SPACE: 40  # in GB\nREQUIRED_MEM_SPACE: 20\nPACKAGES:\n  - nvidia-jetpack\n  - x11-xserver-utils\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\n"
  },
  {
    "path": "reComputer/scripts/deep-live-cam/run.sh",
    "content": "CONTAINER_NAME=\"deep-live-cam\"\nIMAGE_NAME=\"yaohui1998/deep-live-cam:1.0\"\n\n# Pull the latest image\ndocker pull $IMAGE_NAME\n# Set display id\nxhost +local:docker\nexport DISPLAY=:0\n# mkdir image dir\nmkdir ~/images\necho $DISPLAY\n# Check if the container with the specified name already exists\nif [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then\n    echo \"Container $CONTAINER_NAME already exists. Starting and attaching...\"\n    docker start $CONTAINER_NAME\nelse\n    echo \"Container $CONTAINER_NAME does not exist. Creating and starting...\"\n    docker run -it --rm \\\n        --name $CONTAINER_NAME \\\n        --privileged \\\n        --network host \\\n        -v ~/images:/usr/src/Deep-Live-Cam/images \\\n        -e DISPLAY=$DISPLAY \\\n        -v /tmp/.X11-unix:/tmp/.X11-unix \\\n        -v /dev/*:/dev/* \\\n        -v /etc/localtime:/etc/localtime:ro \\\n        --runtime nvidia \\\n        $IMAGE_NAME\nfi\n"
  },
  {
    "path": "reComputer/scripts/depth-anything/Dockerfile",
    "content": "FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3\nRUN mkdir /usr/src/DepthAnything-on-Jetson-Orin\nWORKDIR /usr/src/DepthAnything-on-Jetson-Orin\nCOPY . /usr/src/DepthAnything-on-Jetson-Orin\nRUN pip install flask onnx flask flask_socketio huggingface_hub\nCMD [\"python3\", \"app.py\"]"
  },
  {
    "path": "reComputer/scripts/depth-anything/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/depth-anything/README.md",
    "content": "# Jetson-Example: Run Depth Anything on NVIDIA Jetson Orin 🚀\nThis project provides an one-click deployment of the Depth Anything monocular depth estimation model developed by Hong Kong University and ByteDance.  The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.\n<p align=\"center\">\n  <img src=\"images/WebUI.png\" alt=\"WebUI\">\n</p>\n\nAll models and inference engine implemented in this project are from the official [Depth Anything](https://depth-anything.github.io/).\n\n## 🔥Features\n\n- One-click deployment for Depth Anything models.\n- WebUI for model conversion and depth estimation.\n- Support for uploading videos/images or using the local camera \n- Supports S, B, L models of Depth Anything with input sizes of 308, 384, 406, and 518.\n\n    ### 🗝️WebUI Features\n    - **Choose model**: Select from depth_anything_vits14 models. (S, B, L)\n    - **Choose input size**: Select the desired input size.(308, 384, 406, 518)\n    - **Grayscale option**: Option to use grayscale. 
\n    - **Choose source**: Select the input source (Video, Image, Camera).\n    - **Export Model**: Automatically download and convert the model from PyTorch (.pth) to TensorRT format.\n    - **Start Estimation**: Begin depth estimation using the selected model and input source.\n    - **Stop Estimation**: Stop the ongoing depth estimation process.\n    <p align=\"center\">\n      <img src=\"images/Opr.png\" alt=\"Depthanything\" width=\"320\" height=\"360\">\n    </p>\n\n## 🥳Getting Started\n### 📜Prerequisites\n- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)\n- Docker installed on reComputer\n- USB Camera (optional)\n\n\n### Modify Docker Daemon Configuration (Optional)\nTo enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:\n\n```json\n{\n  \"default-runtime\": \"nvidia\",\n  \"runtimes\": {\n    \"nvidia\": {\n      \"path\": \"nvidia-container-runtime\",\n      \"runtimeArgs\": []\n    }\n  },\n  \"storage-driver\": \"overlay2\",\n  \"data-root\": \"/var/lib/docker\",\n  \"log-driver\": \"json-file\",\n  \"log-opts\": {\n    \"max-size\": \"100m\",\n    \"max-file\": \"3\"\n  },\n  \"no-new-privileges\": true,\n  \"experimental\": false\n}\n```\n\nAfter modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:\n\n```sh\nsudo systemctl restart docker\n```\n\n\n### 🚀Installation\n\n\nPyPI(recommend)\n  ```sh\n  pip install jetson-examples\n  ```\nLinux (github trick)\n```sh\ncurl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh\n```\nGithub (for Developer)\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n### 📋Usage\n1. Run code:\n    ```sh\n    reComputer run depth-anything\n    ```\n2. Open a web browser and input **http://{reComputer ip}:5000**. 
Use the WebUI to select the model, input size, and source.\n\n3. Click on **Export Model** to download and convert the model.\n\n4. Click on **Start Estimation** to begin the depth estimation process.\n\n5. View the real-time depth estimation results on the WebUI.\n\n## ⛏️Applications\n\n- **Security**: Enhance surveillance systems with depth perception.\n  <p align=\"center\">\n    <img src=\"images/Security.png\" alt=\"Security\" width=\"500\" height=\"150\">\n  </p>\n- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.\n  <p align=\"center\">\n    <img src=\"images/Autonomous Driving.png\" alt=\"Autonomous Driving\" width=\"500\" height=\"150\">\n  </p>\n- **Underwater Scenes**: Apply depth estimation in underwater exploration.\n  <p align=\"center\">\n      <img src=\"images/Underwater Scenes.png\" alt=\"Underwater Scenes\" width=\"500\" height=\"150\">\n    </p>\n- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.\n  <p align=\"center\">\n      <img src=\"images/Indoor Scenes.png\" alt=\"Indoor Scenes\" width=\"500\" height=\"150\">\n    </p>\n\n## Further Development 🔧\n- [Depth Anything Official](https://depth-anything.github.io/)\n- [Depth Anything TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)\n- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)\n- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)\n- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)\n\n\n## 🙏🏻Contributing\n\nWe welcome contributions from the community. Please fork the repository and create a pull request with your changes.\n\n## ✅License\n\nThis project is licensed under the MIT License.\n\n## 🏷️Acknowledgements\n\n- Depth Anything [project](https://depth-anything.github.io/) by Hong Kong University and ByteDance.\n- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).\n"
  },
  {
    "path": "reComputer/scripts/depth-anything/clean.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"depth-anything\"\nIMAGE_NAME=\"yaohui1998/depthanything-on-jetson-orin:latest\"\n\nsudo docker stop $CONTAINER_NAME\nsudo docker rm $CONTAINER_NAME\nsudo docker rmi $IMAGE_NAMEs\n"
  },
  {
    "path": "reComputer/scripts/depth-anything/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\nREQUIRED_DISK_SPACE: 20  # in GB\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/depth-anything/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n"
  },
  {
    "path": "reComputer/scripts/depth-anything/run.sh",
    "content": "CONTAINER_NAME=\"depth-anything\"\nIMAGE_NAME=\"yaohui1998/depthanything-on-jetson-orin:latest\"\n\n# Pull the latest image\ndocker pull $IMAGE_NAME\n\n# Check if the container with the specified name already exists\nif [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then\n    echo \"Container $CONTAINER_NAME already exists. Starting and attaching...\"\n    docker start $CONTAINER_NAME\nelse\n    echo \"Container $CONTAINER_NAME does not exist. Creating and starting...\"\n    docker run -it \\\n        --name $CONTAINER_NAME \\\n        --privileged \\\n        --network host \\\n        -v /tmp/.X11-unix:/tmp/.X11-unix \\\n        -v /dev/*:/dev/* \\\n        -v /etc/localtime:/etc/localtime:ro \\\n        --runtime nvidia \\\n        $IMAGE_NAME\nfi\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/Dockerfile",
    "content": "FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3\nRUN mkdir /usr/src/DepthAnything-on-Jetson-Orin\nWORKDIR /usr/src/DepthAnything-on-Jetson-Orin\nCOPY . /usr/src/DepthAnything-on-Jetson-Orin\nRUN pip install flask onnx flask flask_socketio huggingface_hub\nCMD [\"python3\", \"app.py\"]"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/README.md",
    "content": "# Jetson-Example: Run Depth Anything V2 on NVIDIA Jetson Orin 🚀\nThis project provides an one-click deployment of the Depth Anything V2 monocular depth estimation model developed by Hong Kong University and ByteDance.  The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.\n<p align=\"center\">\n  <img src=\"images/WebUI.png\" alt=\"WebUI\">\n</p>\n\nAll models and inference engine implemented in this project are from the official [Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2).\n\n## 🔥Features\n\n- One-click deployment for Depth Anything V2 models.\n- WebUI for model conversion and depth estimation.\n- Support for uploading videos/images or using the local camera \n- Supports S, B, L models of Depth Anything V2 with input sizes 518.\n\n    ### 🗝️WebUI Features\n    - **Choose model**: Select from Depth Anything V2 models. (S, B, L)\n    - **Grayscale option**: Option to use grayscale. 
\n    - **Choose source**: Select the input source (Video, Image, Camera).\n    - **Export Model**: Automatically download and convert the model from ONNX to TensorRT format.\n    - **Start Estimation**: Begin depth estimation using the selected model and input source.\n    - **Stop Estimation**: Stop the ongoing depth estimation process.\n    <p align=\"center\">\n      <img src=\"images/Opr.png\" alt=\"Depthanything\" width=\"320\" height=\"360\">\n    </p>\n\n## 🥳Getting Started\n### 📜Prerequisites\n- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)\n- Docker installed on reComputer\n- USB Camera (optional)\n\n\n### Modify Docker Daemon Configuration (Optional)\nTo enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:\n\n```json\n{\n  \"default-runtime\": \"nvidia\",\n  \"runtimes\": {\n    \"nvidia\": {\n      \"path\": \"nvidia-container-runtime\",\n      \"runtimeArgs\": []\n    }\n  },\n  \"storage-driver\": \"overlay2\",\n  \"data-root\": \"/var/lib/docker\",\n  \"log-driver\": \"json-file\",\n  \"log-opts\": {\n    \"max-size\": \"100m\",\n    \"max-file\": \"3\"\n  },\n  \"no-new-privileges\": true,\n  \"experimental\": false\n}\n```\n\nAfter modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:\n\n```sh\nsudo systemctl restart docker\n```\n\n\n### 🚀Installation\n\n\nPyPI(recommend)\n  ```sh\n  pip install jetson-examples\n  ```\nLinux (github trick)\n```sh\ncurl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh\n```\nGithub (for Developer)\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n### 📋Usage\n1. Run code:\n    ```sh\n    reComputer run depth-anything-v2\n    ```\n\n2. Open a web browser and input **http://{reComputer ip}:5000**. 
Use the WebUI to select the model and source.\n\n3. Click on **Export Model** to download and convert the model.\n\n4. Click on **Start Estimation** to begin the depth estimation process.\n\n5. View the real-time depth estimation results on the WebUI.\n\n## ⛏️Applications\n\n- **Security**: Enhance surveillance systems with depth perception.\n  <p align=\"center\">\n    <img src=\"images/Security.png\" alt=\"Security\" width=\"500\" height=\"150\">\n  </p>\n- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.\n  <p align=\"center\">\n    <img src=\"images/Autonomous Driving.png\" alt=\"Autonomous Driving\" width=\"500\" height=\"150\">\n  </p>\n- **Underwater Scenes**: Apply depth estimation in underwater exploration.\n  <p align=\"center\">\n      <img src=\"images/Underwater Scenes.png\" alt=\"Underwater Scenes\" width=\"500\" height=\"150\">\n    </p>\n- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.\n  <p align=\"center\">\n      <img src=\"images/Indoor Scenes.png\" alt=\"Indoor Scenes\" width=\"500\" height=\"150\">\n    </p>\n\n## Further Development 🔧\n- [Depth Anything V2 Official](https://github.com/DepthAnything/Depth-Anything-V2)\n- [Depth Anything V2 TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)\n- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)\n- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)\n- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)\n\n\n## 🙏🏻Contributing\n\nWe welcome contributions from the community. 
Please fork the repository and create a pull request with your changes.\n\n## ✅License\n\nThis project is licensed under the MIT License.\n\n## 🏷️Acknowledgements\n\n- Depth Anything V2 Official [project](https://github.com/DepthAnything/Depth-Anything-V2) by Hong Kong University and ByteDance.\n- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/clean.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"depth-anything-v2\"\nIMAGE_NAME=\"yaohui1998/depthanything-v2-on-jetson-orin:latest\"\n\nsudo docker stop $CONTAINER_NAME\nsudo docker rm $CONTAINER_NAME\nsudo docker rmi $IMAGE_NAMEs"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/config.yaml",
    "content": "\n# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\nREQUIRED_DISK_SPACE: 15  # in GB\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v2/run.sh",
    "content": "CONTAINER_NAME=\"depth-anything-v2\"\nIMAGE_NAME=\"yaohui1998/depthanything-v2-on-jetson-orin:latest\"\n\n# Pull the latest image\ndocker pull $IMAGE_NAME\n\n# Check if the container with the specified name already exists\nif [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then\n    echo \"Container $CONTAINER_NAME already exists. Starting and attaching...\"\n    docker start $CONTAINER_NAME\nelse\n    echo \"Container $CONTAINER_NAME does not exist. Creating and starting...\"\n    docker run -it \\\n        --name $CONTAINER_NAME \\\n        --privileged \\\n        --network host \\\n        -v /tmp/.X11-unix:/tmp/.X11-unix \\\n        -v /dev/*:/dev/* \\\n        -v /etc/localtime:/etc/localtime:ro \\\n        --runtime nvidia \\\n        $IMAGE_NAME\nfi\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/Dockerfile",
    "content": "# This demo uses a prebuilt Docker image from Docker Hub.\nFROM chenduola6/depth_anything_v3:jp6.2\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/README.md",
    "content": "# Jetson-Example: Run Depth Anything V3 on NVIDIA Jetson\n\nThis project provides one-click deployment for **Depth Anything V3** on NVIDIA Jetson devices.\nIt uses the prebuilt Docker image:\n\n```sh\nchenduola6/depth-anything-v3:jp6.2\n```\n\nImage size: **7.6 GB**\n\nSupported JetPack/L4T versions:\n- JetPack 6.2 -> L4T 36.4.0\n- JetPack 6.2.1 -> L4T 36.4.3\n- JetPack 6.1 -> L4T 36.4.4\n\n<p align=\"center\">\n  <img src=\"images/da3.png\" alt=\"Depth Anything V3\">\n</p>\n\n\n## Getting Started\n\n### Prerequisites\n- NVIDIA Jetson device with a supported L4T version\n- Docker installed and available\n- USB camera (for camera inference)\n\n### Installation\n\nPyPI (recommended):\n```sh\npip install jetson-examples\n```\n\nGitHub (developer):\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n## Usage\n\n1. Start the demo container with `reComputer`:\n\n```sh\nreComputer run depth-anything-v3\n```\n\n2. Enter the running container:\n\n```bash\nxhost +local:docker\n\ndocker run -it --rm \\\n  --gpus all \\\n  --network host \\\n  --ipc host \\\n  --privileged \\\n  -e DISPLAY=$DISPLAY \\\n  -e QT_X11_NO_MITSHM=1 \\bash\n  -v /tmp/.X11-unix:/tmp/.X11-unix \\\n  -v /dev:/dev \\\n  chenduola6/depth-anything-v3\n```\n\n3. 
Run USB camera inference inside the container:\n\n```sh\ncd workspace/ros2-depth-anything-v3-trt\n#build the engine file\nsource install/setup.bash\nros2 run depth_anything_v3 generate_engines onnx\n```\n\n<p align=\"center\">\n  <img src=\"images/engine.png\" alt=\"generate engine\">\n</p>\n\n> **Note**:If the Jetson swap space is insufficient, it may cause the engine export process to fail.\n>\n> ```bash\n> #add swap space\n> sudo mkdir -p /mnt/nvme\n> sudo fallocate -l 16G /mnt/nvme/swapfile\n> sudo chmod 600 /mnt/nvme/swapfile\n> sudo mkswap /mnt/nvme/swapfile\n> sudo swapon /mnt/nvme/swapfile\n> ```\n\n```bash\n#Run a USB camera demo\nUSB_SIMPLE=1 ./run_camera_depth.sh\n```\n\n## Cleanup\n\nOnly remove the container (keep image cache):\n```sh\nreComputer clean depth-anything-v3\n```\n\n## References\n- [Depth Anything v3 project](https://github.com/ByteDance-Seed/Depth-Anything-3)\n- [ros2-depth-anything-v3-trt](https://github.com/ika-rwth-aachen/ros2-depth-anything-v3-trt)\n- [Seeed jetson-examples](https://github.com/Seeed-Projects/jetson-examples)\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/clean.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"depth_anything_v3\"\n\n# Prefer plain docker, fallback to sudo docker when user has no docker group permission\nif docker info >/dev/null 2>&1; then\n    DOCKER_CMD=(docker)\nelse\n    DOCKER_CMD=(sudo docker)\nfi\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    \"${DOCKER_CMD[@]}\" stop $CONTAINER_NAME\nfi\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    \"${DOCKER_CMD[@]}\" rm $CONTAINER_NAME\n    echo \"Container $CONTAINER_NAME removed.\"\nelse\n    echo \"Container $CONTAINER_NAME does not exist.\"\nfi\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 36.4.0\n  - 36.4.3\n  - 36.4.4\nREQUIRED_DISK_SPACE: 12  # in GB\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n  - nvidia-jetpack\n  - x11-xserver-utils\nDOCKER: \n  ENABLE: false\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n"
  },
  {
    "path": "reComputer/scripts/depth-anything-v3/run.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"depth-anything-v3\"\nIMAGE_NAME=\"chenduola6/depth-anything-v3:jp6.2\"\n\n# Prefer plain docker, fallback to sudo docker when user has no docker group permission\nif docker info >/dev/null 2>&1; then\n    DOCKER_CMD=(docker)\nelse\n    echo \"Current user has no docker permission.\"\n    echo \"Please enter sudo password once for this run.\"\n    if ! sudo -v; then\n        echo \"Failed to authenticate sudo. Exiting.\"\n        exit 1\n    fi\n    # Keep sudo timestamp alive during long pulls/runs to avoid repeated prompts.\n    while true; do\n        sudo -n true\n        sleep 60\n        kill -0 \"$$\" || exit\n    done 2>/dev/null &\n    SUDO_KEEPALIVE_PID=$!\n    trap 'kill $SUDO_KEEPALIVE_PID >/dev/null 2>&1 || true' EXIT\n    DOCKER_CMD=(sudo docker)\nfi\n\n# Pull the latest image\n\"${DOCKER_CMD[@]}\" pull $IMAGE_NAME\n\n# Enable local X11 access for docker GUI apps\nxhost +local:docker\n\n# Use default display when DISPLAY is not set\nif [ -z \"$DISPLAY\" ]; then\n    export DISPLAY=:0\nfi\n\n# Check if the container with the specified name already exists\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    echo \"Container $CONTAINER_NAME already exists. Starting...\"\n    \"${DOCKER_CMD[@]}\" start $CONTAINER_NAME\nelse\n    echo \"Container $CONTAINER_NAME does not exist. 
Creating and starting...\"\n    \"${DOCKER_CMD[@]}\" run -it \\\n        --name $CONTAINER_NAME \\\n        --gpus all \\\n        --network host \\\n        --ipc host \\\n        --privileged \\\n        -e DISPLAY=$DISPLAY \\\n        -e QT_X11_NO_MITSHM=1 \\\n        -v /tmp/.X11-unix:/tmp/.X11-unix \\\n        -v /dev:/dev \\\n        -v /etc/localtime:/etc/localtime:ro \\\n        $IMAGE_NAME\nfi\n\necho \"To run USB camera inference inside container:\"\necho \"1) ${DOCKER_CMD[*]} exec -it $CONTAINER_NAME /bin/bash\"\necho \"2) cd workspace/ros2-depth-anything-v3-trt\"\necho \"3) USB_SIMPLE=1 ./run_camera_depth.sh\"\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/Dockerfile",
    "content": "FROM chenduola6/got-oss-20b:jp6\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/README.md",
    "content": "# Jetson-Example: Run GPT-OSS 20B on NVIDIA Jetson\n\nThis project provides one-click deployment for **GPT-OSS 20B** on NVIDIA Jetson devices.\nIt uses the prebuilt Docker image:\n\n```sh\nchenduola6/got-oss-20b:jp6\n```\n\nDocker image size: **31.28 GB**\n\n## Hardware Requirements\n- NVIDIA Jetson device with at least **16GB VRAM**\n- At least **50GB** available disk space\n\nSupported JetPack/L4T versions:\n- JetPack 6.2 -> L4T 36.4.0\n- JetPack 6.2.1 -> L4T 36.4.3\n- JetPack 6.1 -> L4T 36.4.4\n\n<p align=\"center\">\n  <img src=\"images/gpt-oss.gif\" alt=\"GPT-OSS demo\">\n</p>\n\n## Getting Started\n\n### Installation\n\nPyPI (recommended):\n```sh\npip install jetson-examples\n```\n\nGitHub (developer):\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n## Usage\n\n### One-line deployment\n```sh\nreComputer run gpt-oss\n```\n\nThis command pulls the image and starts `llama-server` in a detached container.\nThe script waits for `/v1/models` to become ready before exiting.\n\n> **Note**: The script auto-detects the available GPU run mode on your Jetson (`--runtime nvidia` or `--gpus all`).\n>\n> **Note**: If prompted by the script, allow adding your user to the `docker` group so future runs do not require `sudo docker`. After adding the group, log out and log back in once.\n>\n> **Note**: If `curl /v1/models` returns `503 {\"message\":\"Loading model\"}`, the model is still loading. 
First startup can take several minutes.\n>\n> **Note**: If startup fails because of memory pressure, add swap space and try again:\n>\n> ```sh\n> sudo fallocate -l 16G /swapfile\n> sudo chmod 600 /swapfile\n> sudo mkswap /swapfile\n> sudo swapon /swapfile\n> ```\n\nYou can lower memory usage when launching:\n```sh\nLLAMA_CTX=512 LLAMA_NGL=16 reComputer run gpt-oss\n```\n\n### Verify service\n```sh\ncurl http://127.0.0.1:8080/v1/models\n```\n\n### Check logs\n```sh\ndocker logs -f gpt-oss\n```\n\n## Manual Deployment (inside Docker)\n\n```sh\ndocker pull chenduola6/got-oss-20b:jp6\n\ndocker run -it --rm \\\n  --runtime nvidia \\\n  --network host \\\n  --ipc=host \\\n  chenduola6/got-oss-20b:jp6\n\n# inside the container\ncd /root/gpt-oss/llama.cpp\n\n./build/bin/llama-server \\\n  -m /root/gpt-oss/gguf/gpt-oss-20b-Q4_K.gguf \\\n  -ngl 20 -c 1024 \\\n  --host 0.0.0.0 --port 8080\n```\n\n## Cleanup\n\nOnly remove the container (keep image cache):\n```sh\nreComputer clean gpt-oss\n```\n\n## References\n- [llama.cpp](https://github.com/ggml-org/llama.cpp)\n- [Seeed jetson-examples](https://github.com/Seeed-Projects/jetson-examples)\n- [Setup step by step](https://wiki.seeedstudio.com/deploy_gptoss_on_jetson/)\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/clean.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"gpt-oss\"\n\nensure_docker_access() {\n    if ! command -v docker >/dev/null 2>&1; then\n        echo \"docker command not found.\"\n        echo \"Please install Docker first, then rerun this command.\"\n        exit 1\n    fi\n\n    if docker info >/dev/null 2>&1; then\n        return 0\n    fi\n\n    if id -nG \"$USER\" | grep -qw docker; then\n        echo \"Current user is already in docker group, but docker is still unavailable.\"\n        echo \"Please make sure Docker daemon is running, for example:\"\n        echo \"sudo systemctl enable --now docker\"\n        exit 1\n    fi\n\n    echo \"Current user has no docker permission.\"\n    read -r -p \"Add current user ($USER) to docker group now? (y/n): \" choice\n    case \"$choice\" in\n        y|Y)\n            if ! sudo -v; then\n                echo \"Failed to authenticate sudo. Exiting.\"\n                exit 1\n            fi\n            if ! getent group docker >/dev/null 2>&1; then\n                sudo groupadd docker\n            fi\n            sudo usermod -aG docker \"$USER\"\n            echo \"Added $USER to docker group.\"\n            echo \"Please log out and log back in (or reboot), then rerun:\"\n            echo \"reComputer clean gpt-oss\"\n            exit 1\n            ;;\n        *)\n            echo \"Skipped docker group setup.\"\n            echo \"You can run this manually:\"\n            echo \"sudo usermod -aG docker $USER\"\n            exit 1\n            ;;\n    esac\n}\n\nensure_docker_access\nDOCKER_CMD=(docker)\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    \"${DOCKER_CMD[@]}\" stop \"$CONTAINER_NAME\"\nfi\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    \"${DOCKER_CMD[@]}\" rm \"$CONTAINER_NAME\"\n    echo \"Container $CONTAINER_NAME removed.\"\nelse\n    echo \"Container $CONTAINER_NAME does not exist.\"\nfi\n\necho \"Image is kept locally for 
faster next startup.\"\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 36.4.0\n  - 36.4.3\n  - 36.4.4\nREQUIRED_DISK_SPACE: 50  # in GB\nREQUIRED_MEM_SPACE: 14\nPACKAGES:\n  - nvidia-jetpack\nDOCKER:\n  ENABLE: false\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n"
  },
  {
    "path": "reComputer/scripts/gpt-oss/run.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"gpt-oss\"\nIMAGE_NAME=\"chenduola6/got-oss-20b:jp6\"\nMODEL_PATH=\"/root/gpt-oss/gguf/gpt-oss-20b-Q4_K.gguf\"\nHOST=\"0.0.0.0\"\nPORT=\"${LLAMA_PORT:-8080}\"\nNGL=\"${LLAMA_NGL:-20}\"\nCTX=\"${LLAMA_CTX:-1024}\"\nSTARTUP_TIMEOUT=\"${LLAMA_STARTUP_TIMEOUT:-600}\"\nSERVER_CMD=\"cd /root/gpt-oss/llama.cpp && ./build/bin/llama-server -m ${MODEL_PATH} -ngl ${NGL} -c ${CTX} --host ${HOST} --port ${PORT}\"\nGPU_FLAGS=()\n\nensure_docker_access() {\n    if ! command -v docker >/dev/null 2>&1; then\n        echo \"docker command not found.\"\n        echo \"Please install Docker first, then rerun this command.\"\n        exit 1\n    fi\n\n    if docker info >/dev/null 2>&1; then\n        return 0\n    fi\n\n    if id -nG \"$USER\" | grep -qw docker; then\n        echo \"Current user is already in docker group, but docker is still unavailable.\"\n        echo \"Please make sure Docker daemon is running, for example:\"\n        echo \"sudo systemctl enable --now docker\"\n        exit 1\n    fi\n\n    echo \"Current user has no docker permission.\"\n    read -r -p \"Add current user ($USER) to docker group now? (y/n): \" choice\n    case \"$choice\" in\n        y|Y)\n            if ! sudo -v; then\n                echo \"Failed to authenticate sudo. Exiting.\"\n                exit 1\n            fi\n            if ! 
getent group docker >/dev/null 2>&1; then\n                sudo groupadd docker\n            fi\n            sudo usermod -aG docker \"$USER\"\n            echo \"Added $USER to docker group.\"\n            echo \"Please log out and log back in (or reboot), then rerun:\"\n            echo \"reComputer run gpt-oss\"\n            exit 1\n            ;;\n        *)\n            echo \"Skipped docker group setup.\"\n            echo \"You can run this manually:\"\n            echo \"sudo usermod -aG docker $USER\"\n            exit 1\n            ;;\n    esac\n}\n\nensure_docker_access\nDOCKER_CMD=(docker)\n\nensure_image() {\n    if \"${DOCKER_CMD[@]}\" pull \"$IMAGE_NAME\"; then\n        return 0\n    fi\n\n    echo \"Warning: failed to pull image from Docker Hub.\"\n    if \"${DOCKER_CMD[@]}\" image inspect \"$IMAGE_NAME\" >/dev/null 2>&1; then\n        echo \"Found local image cache: $IMAGE_NAME\"\n        echo \"Continue with local image.\"\n        return 0\n    fi\n\n    echo \"No local image cache found. 
Please check network and retry.\"\n    exit 1\n}\n\ncreate_container() {\n    \"${DOCKER_CMD[@]}\" run -d \\\n        --name \"$CONTAINER_NAME\" \\\n        \"${GPU_FLAGS[@]}\" \\\n        --network host \\\n        --ipc=host \\\n        \"$IMAGE_NAME\" \\\n        /bin/bash -lc \"$SERVER_CMD\"\n}\n\nprobe_gpu_mode() {\n    if \"${DOCKER_CMD[@]}\" run --rm --runtime nvidia --network host --ipc=host \"$IMAGE_NAME\" /bin/sh -lc \"exit 0\" >/dev/null 2>&1; then\n        GPU_FLAGS=(--runtime nvidia)\n        echo \"Using GPU mode: --runtime nvidia\"\n        return 0\n    fi\n\n    if \"${DOCKER_CMD[@]}\" run --rm --gpus all --network host --ipc=host \"$IMAGE_NAME\" /bin/sh -lc \"exit 0\" >/dev/null 2>&1; then\n        GPU_FLAGS=(--gpus all)\n        echo \"Using GPU mode: --gpus all\"\n        return 0\n    fi\n\n    echo \"Failed to detect a working Docker GPU mode.\"\n    echo \"Tried: --runtime nvidia and --gpus all\"\n    echo \"Please check Docker + NVIDIA Container Runtime on this device.\"\n    exit 1\n}\n\nensure_image\nprobe_gpu_mode\n\n# Check if the container with the specified name already exists\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    echo \"Container $CONTAINER_NAME is already running.\"\nelif [ \"$(\"${DOCKER_CMD[@]}\" ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    echo \"Container $CONTAINER_NAME already exists but is not running.\"\n    echo \"Recreating with current runtime settings...\"\n    \"${DOCKER_CMD[@]}\" rm -f \"$CONTAINER_NAME\" >/dev/null 2>&1 || true\n    if ! create_container >/dev/null; then\n        echo \"Failed to create container.\"\n        exit 1\n    fi\nelse\n    echo \"Container $CONTAINER_NAME does not exist. Creating and starting...\"\n    if ! 
create_container >/dev/null; then\n        echo \"Failed to create container.\"\n        exit 1\n    fi\nfi\n\nif [ -z \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    echo \"Container failed to reach running state.\"\n    echo \"Inspect logs with: ${DOCKER_CMD[*]} logs $CONTAINER_NAME\"\n    exit 1\nfi\n\nwait_for_server_ready() {\n    local endpoint=\"http://127.0.0.1:${PORT}/v1/models\"\n    local elapsed=0\n    local interval=5\n    local raw_response=\"\"\n    local response_body=\"\"\n    local http_code=\"000\"\n    local last_code=\"000\"\n    local last_body=\"\"\n\n    if ! command -v curl >/dev/null 2>&1; then\n        echo \"curl not found, skip readiness probing.\"\n        return 0\n    fi\n\n    echo \"Waiting for GPT-OSS to be ready at ${endpoint} (timeout: ${STARTUP_TIMEOUT}s)...\"\n    while [ \"$elapsed\" -lt \"$STARTUP_TIMEOUT\" ]; do\n        if [ -z \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n            echo \"Container exited before model became ready.\"\n            echo \"Recent logs:\"\n            \"${DOCKER_CMD[@]}\" logs --tail 80 \"$CONTAINER_NAME\"\n            return 1\n        fi\n\n        raw_response=\"$(curl -s --max-time 3 -w \"\\n%{http_code}\" \"$endpoint\" 2>/dev/null || true)\"\n        http_code=\"$(printf '%s' \"$raw_response\" | tail -n 1)\"\n        response_body=\"$(printf '%s' \"$raw_response\" | sed '$d')\"\n\n        last_code=\"$http_code\"\n        last_body=\"$response_body\"\n\n        # Ready when endpoint returns model list payload.\n        if [ \"$http_code\" = \"200\" ] && echo \"$response_body\" | grep -q \"\\\"data\\\"\"; then\n            return 0\n        fi\n\n        # Typical warm-up response from llama-server while loading weights.\n        if [ \"$http_code\" = \"503\" ] && echo \"$response_body\" | grep -q \"Loading model\"; then\n            if [ $((elapsed % 30)) -eq 0 ]; then\n                echo \"Model is still loading... 
(${elapsed}s)\"\n            fi\n            sleep \"$interval\"\n            elapsed=$((elapsed + interval))\n            continue\n        fi\n\n        if [ $((elapsed % 30)) -eq 0 ]; then\n            echo \"Waiting model readiness... (${elapsed}s, http=${http_code})\"\n        fi\n        sleep \"$interval\"\n        elapsed=$((elapsed + interval))\n    done\n\n    echo \"Model is still not ready after ${STARTUP_TIMEOUT}s.\"\n    echo \"Last endpoint status: ${last_code}\"\n    if [ -n \"$last_body\" ]; then\n        echo \"Last endpoint response: $last_body\"\n    fi\n    echo \"Recent logs:\"\n    \"${DOCKER_CMD[@]}\" logs --tail 80 \"$CONTAINER_NAME\"\n    echo \"You can try lower memory settings:\"\n    echo \"LLAMA_CTX=512 LLAMA_NGL=16 reComputer run gpt-oss\"\n    return 1\n}\n\nif ! wait_for_server_ready; then\n    exit 1\nfi\n\necho \"GPT-OSS server is ready at: http://127.0.0.1:${PORT}\"\necho \"Check models:\"\necho \"curl http://127.0.0.1:${PORT}/v1/models\"\necho \"Follow server logs:\"\necho \"${DOCKER_CMD[*]} logs -f $CONTAINER_NAME\"\n"
  },
  {
    "path": "reComputer/scripts/live-llava/init.sh",
    "content": "#!/bin/bash\n\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/live-llava/run.sh",
    "content": "#!/bin/bash\n\nSUPPORT_L4T_LIST=\"35.3.1\"\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n\nget_l4t_version() {\n    ARCH=$(uname -i)\n    echo \"ARCH:  $ARCH\"\n    \n    if [ $ARCH = \"aarch64\" ]; then\n        L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)\n        \n        if [ -z \"$L4T_VERSION_STRING\" ]; then\n            echo \"reading L4T version from \\\"dpkg-query --show nvidia-l4t-core\\\"\"\n            L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)\n            L4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })\n            L4T_RELEASE=${L4T_VERSION_ARRAY[0]}\n            L4T_REVISION=${L4T_VERSION_ARRAY[1]}\n        else\n            echo \"reading L4T version from /etc/nv_tegra_release\"\n            L4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')\n            L4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')\n        fi\n        \n        L4T_REVISION_MAJOR=${L4T_REVISION:0:1}\n        L4T_REVISION_MINOR=${L4T_REVISION:2:1}\n        L4T_VERSION=\"$L4T_RELEASE.$L4T_REVISION\"\n        \n        echo \"L4T_VERSION:  $L4T_VERSION\"\n        \n        elif [ $ARCH != \"x86_64\" ]; then\n        echo \"unsupported architecture:  $ARCH\" # show in red color\n        exit 1\n    fi\n}\n\n# 1. Check L4T version\nget_l4t_version\nCHECK_L4T_VERSION=0\nfor item in $SUPPORT_L4T_LIST; do\n    if [ \"$item\" = \"$L4T_VERSION\" ]; then\n        CHECK_L4T_VERSION=1\n        break\n    fi\ndone\n\nif [ $CHECK_L4T_VERSION -eq 1 ]; then\n    echo \"pass the version check\"\nelse\n    echo \"currently supported versions of jetpack are $SUPPORT_L4T_LIST\" # show in red color\n    exit 1\nfi\n\n# 2. 
Check Google Chrome\nif dpkg -s chromium-browser &>/dev/null; then\n    echo \"Chrome is installed.\"\nelse\n    echo \"install Google Chrome ...\" # show in red color\n    sudo apt install chromium-browser\n    echo \"Google Chrome installed successfully\" # show in red color\nfi\n\n# 3. Generate Google browser key\nFILE_NAME=\"key.pem\"\nFILE_PATH=\"$JETSON_REPO_PATH/data\"\nif [ -f \"$FILE_PATH/$FILE_NAME\" ]; then\n    echo \"key file '$FILE_PATH/$FILE_NAME' exists.\"\nelse\n    cd $FILE_PATH\n    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes -subj '/CN=localhost'\n    cd ..\nfi\n\n# 4. edit source code\ncat >\"$JETSON_REPO_PATH/packages/llm/local_llm/agents/video_query.py\" <<'EOF'\n#!/usr/bin/env python3\nimport time\nimport logging\nimport threading\n\nfrom local_llm import Agent\n\nfrom local_llm.plugins import (\n    VideoSource,\n    VideoOutput,\n    ChatQuery,\n    PrintStream,\n    ProcessProxy,\n)\nfrom local_llm.utils import ArgParser, print_table\n\nfrom termcolor import cprint\nfrom jetson_utils import cudaFont, cudaMemcpy, cudaToNumpy, cudaDeviceSynchronize\n\nfrom flask import Flask, request\n\n\nclass VideoQuery(Agent):\n    \"\"\"\n    Perpetual always-on closed-loop visual agent that applies prompts to a video stream.\n    \"\"\"\n\n    def __init__(self, model=\"liuhaotian/llava-v1.5-7b\", **kwargs):\n        super().__init__()\n        self.lock = threading.Lock()\n\n        # load model in another process for smooth streaming\n        # self.llm = ProcessProxy((lambda **kwargs: ChatQuery(model, drop_inputs=True, **kwargs)), **kwargs)\n        self.llm = ChatQuery(model, drop_inputs=True, **kwargs)\n        self.llm.add(PrintStream(color=\"green\", relay=True).add(self.on_text))\n        self.llm.start()\n\n        # test / warm-up query\n        self.warmup = True\n        self.text = \"\"\n        self.eos = False\n\n        self.llm(\"What is 2+2?\")\n\n        while self.warmup:\n            
time.sleep(0.25)\n\n        # create video streams\n        self.video_source = VideoSource(**kwargs)\n        self.video_output = VideoOutput(**kwargs)\n\n        self.video_source.add(self.on_video, threaded=False)\n        self.video_output.start()\n\n        self.font = cudaFont()\n\n        # setup prompts\n        self.prompt = \"Describe the image concisely and briefly.\"\n\n        # entry node\n        self.pipeline = [self.video_source]\n\n    def on_video(self, image):\n        np_image = cudaToNumpy(image)\n        cudaDeviceSynchronize()\n\n        self.llm(\n            [\n                \"reset\",\n                np_image,\n                self.prompt,\n            ]\n        )\n\n        text = self.text.replace(\"\\n\", \"\").replace(\"</s>\", \"\").strip()\n\n        if text:\n            worlds = text.split()\n            line_counter = len(worlds) // 10\n            if len(worlds) % 10 != 0:\n                line_counter += 1\n            for l in range(line_counter):\n                line_text = \" \".join(worlds[l * 10 : (l + 1) * 10])\n                self.font.OverlayText(\n                    image,\n                    text=line_text,\n                    x=5,\n                    y=int(79 + l * 37),\n                    color=self.font.White,\n                    background=self.font.Gray40,\n                )\n        self.font.OverlayText(\n            image,\n            text=\"Prompt: \" + self.prompt,\n            x=5,\n            y=42,\n            color=(120, 215, 21),\n            background=self.font.Gray40,\n        )\n        self.video_output(image)\n\n    def on_text(self, text):\n        if self.eos:\n            self.text = text  # new query response\n            self.eos = False\n        elif not self.warmup:  # don't view warmup response\n            self.text = self.text + text\n\n        if text.endswith(\"</s>\") or text.endswith(\"###\") or text.endswith(\"<|im_end|>\"):\n            self.print_stats()\n            
self.warmup = False\n            self.eos = True\n\n    def update_switch(self, on_off):\n        self.video_source.switch(on_off)\n\n    def update_prompts(self, new_prompt):\n        with self.lock:\n            if new_prompt:\n                self.prompt = new_prompt\n\n    def print_stats(self):\n        # print_table(self.llm.model.stats)\n        curr_time = time.perf_counter()\n\n        if not hasattr(self, \"start_time\"):\n            self.start_time = curr_time\n        else:\n            frame_time = curr_time - self.start_time\n            self.start_time = curr_time\n            logging.info(\n                f\"refresh rate:  {1.0 / frame_time:.2f} FPS  ({frame_time*1000:.1f} ms)\"\n            )\n\n\nif __name__ == \"__main__\":\n    parser = ArgParser(extras=ArgParser.Defaults + [\"video_input\", \"video_output\"])\n    args = parser.parse_args()\n    # 独立线程运行\n    agent = VideoQuery(**vars(args))\n\n    def run_video_query():\n        agent.run()\n\n    video_query_thread = threading.Thread(target=run_video_query)\n    video_query_thread.start()\n\n    # 启动web服务\n    app = Flask(__name__)\n\n    @app.route(\"/update_prompt\", methods=[\"POST\"])\n    def update_prompts():\n        prompt = request.json.get(\"prompt\")\n        if prompt:\n            agent.update_prompts(prompt)\n            return \"Prompts updated successfully.\"\n        else:\n            return \"Invalid prompts data.\"\n\n    @app.route(\"/update_switch\", methods=[\"POST\"])\n    def update_switch():\n        infer_or_not = True if request.json.get(\"switch\") == \"on\" else False\n        agent.update_switch(infer_or_not)\n        return \"stop\" if not infer_or_not else \"start\"\n\n    @app.route(\"/update_params\", methods=[\"POST\"])\n    def update_params():\n        try:\n            agent.llm.max_new_tokens = request.json.get(\"max_new_tokens\") or 128\n            agent.llm.min_new_tokens = request.json.get(\"min_new_tokens\") or -1\n            agent.llm.do_sample 
= request.json.get(\"do_sample\") or False\n            agent.llm.repetition_penalty = request.json.get(\"repetition_penalty\") or 1.0\n            agent.llm.temperature = request.json.get(\"temperature\") or 0.7\n            agent.llm.top_p = request.json.get(\"top_p\") or 0.95\n            if request.json.get(\"system_prompt\"):\n                agent.llm.chat_history.template[\"system_prompt\"] = request.json.get(\n                    \"system_prompt\"\n                )\n            return \"params updated.\"\n        except Exception as e:\n            print(e)\n            return \"update failure\"\n\n    app.run(host=\"0.0.0.0\", port=5555)\n\n\nEOF\n\nsed -i 's/from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, SiglipImageProcessor, SiglipVisionModel/from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection  # , SiglipImageProcessor, SiglipVisionModel/' \"$JETSON_REPO_PATH/packages/llm/local_llm/vision/clip_hf.py\"\nsed -i \"s/'siglip': dict(preprocessor=SiglipImageProcessor, model=SiglipVisionModel),/# 'siglip': dict(preprocessor=SiglipImageProcessor, model=SiglipVisionModel),/\" \"$JETSON_REPO_PATH/packages/llm/local_llm/vision/clip_hf.py\"\n\nsed -i 's/from .audio import */# from .audio import */' \"$JETSON_REPO_PATH/packages/llm/local_llm/plugins/__init__.py\"\nsed -i 's/from .nanodb import NanoDB/# from .nanodb import NanoDB/' \"$JETSON_REPO_PATH/packages/llm/local_llm/plugins/__init__.py\"\n\nsed -i 's/import onnxruntime as ort/# import onnxruntime as ort/' \"$JETSON_REPO_PATH/packages/llm/local_llm/utils/model.py\"\n\necho \"The script has been modified.\"\n\ngnome-terminal -- /bin/bash -c \"chromium-browser --disable-features=WebRtcHideLocalIpsWithMdns https://localhost:8554/; exec /bin/bash\"\n\ncd $JETSON_REPO_PATH\nsudo docker run --runtime nvidia -it --rm --network host --volume /tmp/argus_socket:/tmp/argus_socket --volume /etc/enctune.conf:/etc/enctune.conf --volume 
/etc/nv_tegra_release:/etc/nv_tegra_release --volume /proc/device-tree/model:/tmp/nv_jetson_model --volume /var/run/dbus:/var/run/dbus --volume /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket --volume /var/run/docker.sock:/var/run/docker.sock --volume $JETSON_REPO_PATH/data:/data --device /dev/snd --device /dev/bus/usb -e DISPLAY=:0 -v /tmp/.X11-unix/:/tmp/.X11-unix -v /tmp/.docker.xauth:/tmp/.docker.xauth -e XAUTHORITY=/tmp/.docker.xauth --device /dev/video0 --device /dev/video1 -v $JETSON_REPO_PATH/packages/llm/local_llm:/opt/local_llm/local_llm -e SSL_KEY=/data/key.pem -e SSL_CERT=/data/cert.pem dustynv/local_llm:r35.3.1 python3 -m local_llm.agents.video_query --api=mlc --verbose --model liuhaotian/llava-v1.5-7b --max-new-tokens 32 --video-input /dev/video0 --video-output webrtc://@:8554/output\n"
  },
  {
    "path": "reComputer/scripts/llama-factory/README.md",
    "content": "# Finetune LLM by Llama-Factory on Jetson\n\n\n## Hello\nNow you can tailor a custom private local LLM to meet your requirements.\n\n💡 Here's an example of quickly deploying [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) on Jetson device.\n\n🔥 Hightlights:\n- **Llama-Factory** is an efficient tool to unify efficient Fine-Tuning of 100+ LLMs. 🚀🔍\n- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨\n- **Jetson** is powerful AI hardware platform for edge computing.💻\n\n🛠️ Follow the tutorial below to quickly experience the performance of Llama-Factory on edge computing devices.\n\n<div align=\"center\">\n  <img alt=\"training\" width=\"1200px\" src=\"./assets/training.gif\">\n</div>\n\n## Get a Jetson Orin Device 🛒\n| Device Model | Description | Link |\n|--------------|-------------|------|\n| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |\n| NVIDIA® Jetson AGX Orin™ 64GB Developer Kit | Smallest and most powerful AI edge computer | [Buy Here](https://www.seeedstudio.com/NVIDIArJetson-AGX-Orintm-64GB-Developer-Kit-p-5641.html) |\n\n## Getting Started\n\n- install **jetson-examples** by pip:\n    ```sh\n    pip3 install jetson-examples\n    ```\n- restart reComputer \n    ```sh\n    sudo restart\n    ```\n- run Llama-Factory webui on jetson in one line:\n    ```sh\n    reComputer run llama-factory\n    ```\n- Please visit http://127.0.0.1:7860\n\n<div align=\"center\">\n  <img alt=\"yolov10\" width=\"1200px\" src=\"./assets/webui.png\">\n</div>\n\n\n\n## Run Training Script \n\n> **Note:** Some models and datasets require confirmation before using them, so we recommend logging in with your Hugging Face account by: \n> `sudo docker exec -it llama-factory huggingface-cli login`\n\nThere are a lot of parameters to choose from webui, you can refer to here for more 
information.\n\nFor demonstration purposes, set `Model name: Phi-1.5-1.3B`, `Dataset: alpaca_zh`, leave the other parameters unchanged, and then click the `Start` button\n\n<div align=\"center\">\n  <img alt=\"yolov10\" width=\"1200px\" src=\"./assets/llama-factory-Jetson.png\">\n</div>\n\n\n## Build Docker Image\nWe highly recommend that you use `jetson-containers` to compile the docker container, as you can see [here](https://github.com/dusty-nv/jetson-containers/pull/566).\n\n## Reference\n- https://github.com/hiyouga/LLaMA-Factory\n- https://github.com/dusty-nv/jetson-containers\n\n"
  },
  {
    "path": "reComputer/scripts/llama-factory/clean.sh",
    "content": "#!/bin/bash\n\nsudo docker rmi youjiang9977/llama-factory:r35.4.1\nsudo rm -rf /home/$USER/reComputer/jetson-containers/LLaMA-Factory/*\n"
  },
  {
    "path": "reComputer/scripts/llama-factory/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/llama-factory/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n\n"
  },
  {
    "path": "reComputer/scripts/llama-factory/run.sh",
    "content": "#!/bin/bash\n\n\nDATA_PATH=\"/home/$USER/reComputer/jetson-containers/data\"\n\nsudo docker run -it --rm --network host --runtime nvidia \\\n    --volume $DATA_PATH:/data \\\n    --name llama-factory \\\n    youjiang9977/llama-factory:r35.4.1\n\n"
  },
  {
    "path": "reComputer/scripts/llama3/clean.sh",
    "content": "#!/bin/bash\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n# search local image\nimg_tag=$($JETSON_REPO_PATH/autotag -p local ollama)\n# 检查返回值\nif [ $? -eq 0 ]; then\n    echo \"Found Image successfully.\"\n    sudo docker rmi $img_tag\nelse\n    echo \"[warn] Found Image failed with error code $?. skip delete Image.\"\nfi\n# \n# 4 build whl\nread -p \"Delete all data for ollama? (y/n): \" choice\nif [[ $choice == \"y\" || $choice == \"Y\" ]]; then\n    echo \"Delete=> $JETSON_REPO_PATH/data/models/ollama/\"\n    sudo rm -rf $JETSON_REPO_PATH/data/models/ollama/\n    echo \"Clean Data Done.\"\nelse\n    echo \"[warn] Skip Clean Data.\"\nfi\n"
  },
  {
    "path": "reComputer/scripts/llama3/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 15  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/llama3/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/llama3/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n# try stop old server\ndocker rm -f ollama\n# start new server\n./run.sh -d --name ollama $(./autotag ollama)\n# run a client\n./run.sh $(./autotag ollama) /bin/ollama run llama3\n# clean new server\ndocker rm -f ollama\n"
  },
  {
    "path": "reComputer/scripts/llama3.2/clean.sh",
    "content": "#!/bin/bash\n\nget_l4t_version() {\n    local l4t_version=\"\"\n    local release_line=$(head -n 1 /etc/nv_tegra_release)\n    if [[ $release_line =~ R([0-9]+)\\ *\\(release\\),\\ REVISION:\\ ([0-9]+\\.[0-9]+) ]]; then\n        local major=\"${BASH_REMATCH[1]}\"\n        local revision=\"${BASH_REMATCH[2]}\"\n        l4t_version=\"${major}.${revision}\"\n    fi\n    echo \"$l4t_version\"\n}\n\nL4T_VERSION=$(get_l4t_version)\necho \"Detected L4T version: $L4T_VERSION\"\n\n# Determine the Docker image based on L4T version\nif [[ \"$L4T_VERSION\" == \"35.3.1\" || \"$L4T_VERSION\" == \"35.4.1\" || \"$L4T_VERSION\" == \"35.5.0\" ]]; then\n    IMAGE_NAME=\"youjiang9977/ollama:r35.3.1\"\nelif [[ \"$L4T_VERSION\" == \"36.3.0\" || \"$L4T_VERSION\" == \"36.4.0\" ]]; then\n    IMAGE_NAME=\"youjiang9977/ollama:r36.3.0\"\nelse\n    echo \"Error: L4T version $L4T_VERSION is not supported.\"\n    exit 1\nfi\n\nif [ \"$(docker images -q \"$IMAGE_NAME\")\" ]; then\n    echo \"Deleting $IMAGE_NAME...\"\n    docker rmi \"$IMAGE_NAME\"\n    echo \"Image $IMAGE_NAME has been successfully deleted.\"\nelse\n    echo \"No image named $IMAGE_NAME was found.\"\nfi\n\n"
  },
  {
    "path": "reComputer/scripts/llama3.2/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\n  - 36.4.0\nREQUIRED_DISK_SPACE: 15\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/llama3.2/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/llama3.2/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\nget_l4t_version() {\n    local l4t_version=\"\"\n    local release_line=$(head -n 1 /etc/nv_tegra_release)\n    if [[ $release_line =~ R([0-9]+)\\ *\\(release\\),\\ REVISION:\\ ([0-9]+\\.[0-9]+) ]]; then\n        local major=\"${BASH_REMATCH[1]}\"\n        local revision=\"${BASH_REMATCH[2]}\"\n        l4t_version=\"${major}.${revision}\"\n    fi\n    echo \"$l4t_version\"\n}\n\nL4T_VERSION=$(get_l4t_version)\necho \"Detected L4T version: $L4T_VERSION\"\n\n# Determine the Docker image based on L4T version\nif [[ \"$L4T_VERSION\" == \"35.3.1\" || \"$L4T_VERSION\" == \"35.4.1\" || \"$L4T_VERSION\" == \"35.5.0\" ]]; then\n    IMAGE_NAME=\"youjiang9977/ollama:r35.3.1\"\nelif [[ \"$L4T_VERSION\" == \"36.3.0\" || \"$L4T_VERSION\" == \"36.4.0\" ]]; then\n    IMAGE_NAME=\"youjiang9977/ollama:r36.3.0\"\nelse\n    echo \"Error: L4T version $L4T_VERSION is not supported.\"\n    exit 1\nfi\n\ndocker rm -f ollama\n./run.sh -d --name ollama $IMAGE_NAME\n./run.sh $IMAGE_NAME /bin/ollama run llama3.2\ndocker rm -f ollama\n\n"
  },
  {
    "path": "reComputer/scripts/llava/clean.sh",
    "content": "#!/bin/bash\n\ndocker rmi $(/home/$USER/reComputer/jetson-containers/autotag llava)\n"
  },
  {
    "path": "reComputer/scripts/llava/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 15  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/llava/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/llava/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag llava) \\\npython3 -m llava.serve.cli \\\n--model-path liuhaotian/llava-v1.5-7b \\\n--image-file /data/images/hoover.jpg"
  },
  {
    "path": "reComputer/scripts/llava-v1.5-7b/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/llava-v1.5-7b/init.sh",
    "content": "#!/bin/bash\n\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/llava-v1.5-7b/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag llava) \\\npython3 -m llava.serve.cli \\\n--model-path liuhaotian/llava-v1.5-7b \\\n--image-file /data/images/hoover.jpg"
  },
  {
    "path": "reComputer/scripts/llava-v1.6-vicuna-7b/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/llava-v1.6-vicuna-7b/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/llava-v1.6-vicuna-7b/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag local_llm) \\\npython3 -m local_llm --api=mlc \\\n--model liuhaotian/llava-v1.6-vicuna-7b \\\n--max-context-len 768 \\\n--max-new-tokens 128"
  },
  {
    "path": "reComputer/scripts/nanodb/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 80  # in GB\nREQUIRED_MEM_SPACE: 15\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/nanodb/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/nanodb/readme.md",
    "content": "# NanoDB\n\n## ref\n\n- <https://www.jetson-ai-lab.com/tutorial_nanodb.html>\n\n## access\n\n- using in machine, try `http://127.0.0.1:7860` in browser.\n- using in other pc, make sure you know jetson's IP and try `http://<<Jetson's IP>>:7860` in browser.\n"
  },
  {
    "path": "reComputer/scripts/nanodb/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n\ncheck_disk_space() {\n    directory=\"$1\"  # a directory\n    required_space_gb=\"$2\"  # how many GB we need\n    \n    # get disk of directory\n    device=$(df -P \"$directory\" | awk 'NR==2 {print $1}')\n    echo $device\n    \n    # get free space in KB\n    free_space=$(df -P \"$device\" | awk 'NR==2 {print $4}')\n    echo $free_space\n    \n    # change unit to GB\n    free_space_gb=$(echo \"scale=2; $free_space / 1024 / 1024\" | bc)\n    echo $free_space_gb\n    \n    # check and fast-fail\n    if (( $(echo \"$free_space_gb >= $required_space_gb\" | bc -l) )); then\n        echo \"disk space ($1) enough, keep going.\"\n    else\n        echo \"disk space ($1) not enough!! we need $2 GB!!\"\n        exit 1\n    fi\n}\n\n# check data files TODO: support params to force download\nDATA_PATH=\"$JETSON_REPO_PATH/data/datasets/coco/2017\"\nif [ ! -d $DATA_PATH ]; then\n    mkdir -p $DATA_PATH\nfi\ncd $DATA_PATH\n# check val2017.zip\nif [ ! -d \"$DATA_PATH/val2017\" ]; then\n    if [ ! -f \"val2017.zip\" ]; then\n        check_disk_space $DATA_PATH 1\n        wget http://images.cocodataset.org/zips/val2017.zip\n    else\n        echo \"val2017.zip existed.\"\n    fi\n    check_disk_space $DATA_PATH 19\n    unzip val2017.zip && rm val2017.zip\nelse\n    echo \"val2017/ existed.\"\nfi\n# check train2017.zip\nif [ ! -d \"$DATA_PATH/train2017\" ]; then\n    if [ ! -f \"train2017.zip\" ]; then\n        check_disk_space $DATA_PATH 19\n        wget http://images.cocodataset.org/zips/train2017.zip\n    else\n        echo \"train2017.zip existed.\"\n    fi\n    check_disk_space $DATA_PATH 19\n    unzip train2017.zip && rm train2017.zip\nelse\n    echo \"train2017/ existed.\"\nfi\nif [ ! -d \"$DATA_PATH/unlabeled2017\" ]; then\n    # check unlabeled2017.zip\n    if [ ! 
-f \"unlabeled2017.zip\" ]; then\n        check_disk_space $DATA_PATH 19\n        wget http://images.cocodataset.org/zips/unlabeled2017.zip\n    else\n        echo \"unlabeled2017.zip existed.\"\n    fi\n    check_disk_space $DATA_PATH 19\n    unzip unlabeled2017.zip && rm unlabeled2017.zip\nelse\n    echo \"unlabeled2017/ existed.\"\nfi\n\n# check index files\nINDEX_PATH=\"$JETSON_REPO_PATH/data/nanodb/coco/2017\"\nif [ ! -d $INDEX_PATH ]; then\n    cd $JETSON_REPO_PATH/data/\n    check_disk_space $JETSON_REPO_PATH 1\n    wget https://nvidia.box.com/shared/static/icw8qhgioyj4qsk832r4nj2p9olsxoci.gz -O nanodb_coco_2017.tar.gz\n    tar -xzvf nanodb_coco_2017.tar.gz\nfi\n\n# RUN\ncd $JETSON_REPO_PATH\n./run.sh $(./autotag nanodb) \\\npython3 -m nanodb \\\n--path /data/nanodb/coco/2017 \\\n--server --port=7860"
  },
  {
    "path": "reComputer/scripts/nanoowl/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/nanoowl/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/nanoowl/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag nanoowl) bash -c \"ls /dev/video* && cd examples/tree_demo && python3 tree_demo.py ../../data/owl_image_encoder_patch32.engine\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/README.md",
    "content": "# Jetson Example: Run  NVBlox Mapping on NVIDIA Jetson \n\n![img](images/isaac_sim_nvblox_humans.gif)\n\n[Isaac ROS NVBlox](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox) is a high-performance GPU-accelerated 3D mapping framework developed by NVIDIA for real-time robotic perception. Unlike monocular depth estimation models, NVBlox consumes true depth input from RGB-D cameras or stereo cameras to construct accurate 3D scene representations.This case enables you to **quickly deploy the necessary environment for nvblox to run on your reComputer with just one click.**\n\nDetailed instructions for environment configuration can be found at:[Deploy NVBlox with Orbbec Camera](https://wiki.seeedstudio.com/deploy_nvblox_jetson_agx_orin/)\n\n\n\nMain process run it will:\n\n1. Download `nvblox_images.tar` from the built-in OneDrive share link into `~/.cache/jetson-examples/nvblox`\n2. Run `docker load -i` on that archive\n3. Build the derived image and prepared host/container workspaces\n4. 
Launch the static Gemini2 NVBlox demo\n\n## Requirements\n\n- NVIDIA Jetson Orin \n- Ubuntu 22.04\n- JetPack 6.x\n- Docker with NVIDIA Container Runtime\n- Orbbec Gemini2 or another Orbbec camera that provides `/camera/color/*` and `/camera/depth/*`\n- Roughly 60GB free disk space for the cached archive, derived image, and managed workspace\n\n## Usage\n\nRun the full prepare + demo flow:\n\n```sh\ncd jetson-example/\npip install .\nreComputer run nvblox\n```\n\n**Prepare only:**\n\n```bash\nNVBLOX_MODE=prepare reComputer run nvblox\n```\n\nRun only after preparation:\n\n```sh\nNVBLOX_MODE=run reComputer run nvblox\n```\n\nForce a rebuild of the prepared host/container workspaces:\n\n```sh\nNVBLOX_FORCE_REBUILD=1 reComputer run nvblox\n```\n\nRun headless:\n\n```sh\nNVBLOX_HEADLESS=1 reComputer run nvblox\n```\n\nOverride the managed workspace root:\n\n```sh\nMANAGED_ROOT=/path/to/nvblox_demo reComputer run nvblox\n```\n\nOverride the built-in OneDrive archive settings:\n\n```sh\nNVBLOX_IMAGE_SHARE_URL='https://...'\nNVBLOX_IMAGE_ARCHIVE_NAME='nvblox_images.tar'\nNVBLOX_IMAGE_CACHE_DIR=\"$HOME/.cache/jetson-examples/nvblox\"\nreComputer run nvblox\n```\n\n## Cleanup\n\n```sh\nreComputer clean nvblox\n```\n\nThis removes the managed workspace, logs, partial downloads, the derived image `local/isaac_ros_nvblox_orbbec:jp6-humble`, and the running demo container if it exists.\n\nIt keeps:\n\n- the cached base archive in `~/.cache/jetson-examples/nvblox`\n- the loaded base image imported from `nvblox_images.tar`\n\n## Troubleshooting\n\n- The default path checks ordinary Gemini2 color/depth readiness, not stereo IR capability.\n- Host readiness now requires only:\n  - `/camera/color/camera_info`\n  - `/camera/depth/camera_info`\n  - `/camera/color/image_raw`\n  - `/camera/depth/image_raw`\n- Container readiness now checks host camera discovery through `/camera/color/camera_info` and `/camera/depth/camera_info`.\n- The runtime success criterion is static map output from 
`/nvblox_node/static_esdf_pointcloud` or `/nvblox_node/static_map_slice`.\n- `usb speed: 5000 Mbps` is not treated as proof that the full demo is healthy. The final authority is whether host color/depth, container visibility, static TF, and static map output all succeed.\n- If the host driver exits and Gemini2 falls back to `usb_present_no_video`, the run path still attempts automatic recovery with udev refresh and USB rebind so you can usually retry without unplugging the camera.\n- If the run still fails, use the built-in connectivity debugger:\n\n```sh\nbash reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh\n```\n\nThat debug path follows the same stages as the default runtime:\n\n1. Gemini2 device state\n2. Host ROS discovery environment\n3. Container ROS discovery environment\n4. Host color/depth readiness\n5. Container camera visibility\n6. Managed static TF availability\n7. Static NVBlox output\n\n## Notes\n\n- This example does not use `docker pull` for the base image path.\n- The OneDrive downloader resolves the anonymous `download.aspx?...tempauth=...` URL from the preview page before downloading.\n- `NVBLOX_MODE=run` expects an already prepared `MANAGED_ROOT`.\n- The host camera is launched with `ros2 launch orbbec_camera gemini2.launch.py publish_tf:=false tf_publish_rate:=0.0`.\n- The container workspace now centers on `nvblox_examples_bringup` static Orbbec launches and removes the old default dependence on Visual SLAM.\n- The managed static TF chain is generated inside the prepared container workspace rather than relying on device-published TF.\n- Headless mode switches the default launch file to `orbbec_debug.launch.py`, while GUI mode uses `orbbec_example.launch.py`.\n"
  },
  {
    "path": "reComputer/scripts/nvblox/clean.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/lib/common.sh\"\n\nMANAGED_ROOT=\"${MANAGED_ROOT:-${MANAGED_ROOT_DEFAULT}}\"\nCACHE_DIR=\"$(resolve_nvblox_image_cache_dir)\"\n\nmaybe_enable_docker_access() {\n  if ! command -v docker >/dev/null 2>&1; then\n    warn \"docker command not found. Skipping container and image cleanup.\"\n    return 1\n  fi\n\n  if docker info >/dev/null 2>&1; then\n    DOCKER_PREFIX=()\n    return 0\n  fi\n\n  if sudo docker info >/dev/null 2>&1; then\n    DOCKER_PREFIX=(sudo)\n    return 0\n  fi\n\n  warn \"Cannot access the Docker daemon. Skipping container and image cleanup.\"\n  return 1\n}\n\nremove_managed_root() {\n  local sentinel_path=\"${MANAGED_ROOT}/${MANAGED_SENTINEL_NAME}\"\n\n  if [[ ! -e \"${MANAGED_ROOT}\" ]]; then\n    info \"Managed root ${MANAGED_ROOT} does not exist.\"\n    return 0\n  fi\n\n  if [[ ! -f \"${sentinel_path}\" ]]; then\n    die \"Managed root ${MANAGED_ROOT} exists but is not owned by the NVBlox example. 
Refusing to remove it.\"\n  fi\n\n  run_sudo rm -rf \"${MANAGED_ROOT}\"\n  info \"Removed managed root ${MANAGED_ROOT}\"\n}\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  printf '[reComputer][nvblox] Re-entering as %s.\\n' \"${SETUP_USER_NAME}\" >&2\n  reexec_as_setup_user \"${SCRIPT_DIR}/clean.sh\"\nfi\n\ncleanup_residual_gemini2_processes \"nvblox clean\" || true\n\nif maybe_enable_docker_access; then\n  if docker_cmd ps -a --format '{{.Names}}' | grep -Fxq \"${CONTAINER_NAME_DEFAULT}\"; then\n    info \"Removing container ${CONTAINER_NAME_DEFAULT}\"\n    docker_cmd rm -f \"${CONTAINER_NAME_DEFAULT}\" >/dev/null\n  else\n    info \"Container ${CONTAINER_NAME_DEFAULT} does not exist.\"\n  fi\n\n  if docker_cmd image inspect \"${DERIVED_IMAGE_TAG}\" >/dev/null 2>&1; then\n    info \"Removing derived image ${DERIVED_IMAGE_TAG}\"\n    docker_cmd image rm -f \"${DERIVED_IMAGE_TAG}\" >/dev/null\n  else\n    info \"Derived image ${DERIVED_IMAGE_TAG} does not exist.\"\n  fi\nfi\n\nremove_managed_root\ncleanup_nvblox_partial_downloads \"${CACHE_DIR}\"\n\ninfo \"NVBlox clean complete. Cached base archive is kept in ${CACHE_DIR}\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/config/orbbec_stereo_capability_probe.yaml",
    "content": "depth_registration: false\nenable_point_cloud: false\nenable_colored_point_cloud: false\ndevice_preset: \"High Accuracy\"\nlaser_on_off_mode: 1\ntime_domain: \"device\"\nenable_sync_host_time: true\nalign_mode: \"SW\"\ncamera_name: \"camera\"\nenable_3d_reconstruction_mode: false\n\nenable_color: false\ncolor_width: 640\ncolor_height: 480\ncolor_fps: 5\ncolor_format: \"RGB\"\ncolor_qos: \"SENSOR_DATA\"\n\ndepth_width: 640\ndepth_height: 400\ndepth_fps: 15\ndepth_format: \"Y16\"\ndepth_qos: \"SENSOR_DATA\"\npoint_cloud_qos: \"SENSOR_DATA\"\n\nenable_ir_auto_exposure: false\nir_exposure: 5000\nir_gain: 40\n\nenable_left_ir: true\nleft_ir_width: 640\nleft_ir_height: 400\nleft_ir_fps: 15\nleft_ir_format: \"Y8\"\nleft_ir_qos: \"SENSOR_DATA\"\n\nenable_right_ir: true\nright_ir_width: 640\nright_ir_height: 400\nright_ir_fps: 15\nright_ir_format: \"Y8\"\nright_ir_qos: \"SENSOR_DATA\"\n\nenable_sync_output_accel_gyro: false\nenable_accel: false\naccel_rate: \"200hz\"\naccel_range: \"4g\"\nenable_gyro: false\ngyro_rate: \"200hz\"\ngyro_range: \"1000dps\"\nliner_accel_cov: \"0.01\"\nangular_vel_cov: \"0.01\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/config/orbbec_vslam_mobile.yaml",
    "content": "depth_registration: true\nenable_point_cloud: true\nenable_colored_point_cloud: true\ndevice_preset: \"High Accuracy\"\nlaser_on_off_mode: 1\ntime_domain: \"device\"\nenable_sync_host_time: true\nalign_mode: \"SW\"\ncamera_name: \"camera\"\nenable_3d_reconstruction_mode: true\n\nenable_color: true\ncolor_width: 640\ncolor_height: 480\ncolor_fps: 30\ncolor_format: \"RGB\"\nenable_color_auto_exposure: false\ncolor_exposure: 50\ncolor_gain: -1\ncolor_qos: \"SENSOR_DATA\"\n\ndepth_width: 640\ndepth_height: 480\ndepth_fps: 30\ndepth_format: \"Y16\"\ndepth_qos: \"SENSOR_DATA\"\npoint_cloud_qos: \"SENSOR_DATA\"\n\nenable_ir_auto_exposure: false\nir_exposure: 5000\nir_gain: 40\n\nenable_left_ir: true\nleft_ir_width: 640\nleft_ir_height: 480\nleft_ir_fps: 30\nleft_ir_format: \"Y8\"\nleft_ir_qos: \"SENSOR_DATA\"\n\nenable_right_ir: true\nright_ir_width: 640\nright_ir_height: 480\nright_ir_fps: 30\nright_ir_format: \"Y8\"\nright_ir_qos: \"SENSOR_DATA\"\n\nenable_sync_output_accel_gyro: false\nenable_accel: false\naccel_rate: \"200hz\"\naccel_range: \"4g\"\nenable_gyro: false\ngyro_rate: \"200hz\"\ngyro_range: \"1000dps\"\nliner_accel_cov: \"0.01\"\nangular_vel_cov: \"0.01\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/config.yaml",
    "content": "ALLOWED_L4T_VERSIONS:\n  - 36.4.0\n  - 36.4.3\n  - 36.4.4\nREQUIRED_DISK_SPACE: 60\nREQUIRED_MEM_SPACE: 14\nPACKAGES:\n  - nvidia-jetpack\n  - x11-xserver-utils\nDOCKER:\n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/nvblox/docker/Dockerfile.nvblox_orbbec",
    "content": "ARG BASE_IMAGE\nFROM ${BASE_IMAGE}\n\nARG ROS_DISTRO=humble\nENV DEBIAN_FRONTEND=noninteractive\nENV ROS_DISTRO=${ROS_DISTRO}\n\nSHELL [\"/bin/bash\", \"-lc\"]\n\nRUN apt-get update && \\\n    apt-get install -y --no-install-recommends \\\n      git \\\n      curl \\\n      ca-certificates \\\n      python3-rosdep \\\n      python3-vcstool \\\n      python3-colcon-common-extensions \\\n      build-essential \\\n      libgflags-dev \\\n      nlohmann-json3-dev \\\n      libdw-dev \\\n      libssl-dev \\\n      mesa-utils \\\n      libgl1 \\\n      libgoogle-glog-dev \\\n      ros-${ROS_DISTRO}-image-transport \\\n      ros-${ROS_DISTRO}-image-transport-plugins \\\n      ros-${ROS_DISTRO}-compressed-image-transport \\\n      ros-${ROS_DISTRO}-image-publisher \\\n      ros-${ROS_DISTRO}-camera-info-manager \\\n      ros-${ROS_DISTRO}-diagnostic-updater \\\n      ros-${ROS_DISTRO}-diagnostic-msgs \\\n      ros-${ROS_DISTRO}-statistics-msgs \\\n      ros-${ROS_DISTRO}-xacro \\\n      ros-${ROS_DISTRO}-backward-ros \\\n      ros-${ROS_DISTRO}-magic-enum \\\n      ros-${ROS_DISTRO}-foxglove-msgs && \\\n    rm -rf /var/lib/apt/lists/*\n\nRUN if [[ -f /opt/ros/${ROS_DISTRO}/include/magic_enum.hpp ]]; then \\\n      ln -sf /opt/ros/${ROS_DISTRO}/include/magic_enum.hpp /usr/include/magic_enum.hpp; \\\n    fi && \\\n    if [[ -d /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/foxglove_msgs/msg ]]; then \\\n      mkdir -p /opt/ros/${ROS_DISTRO}/include/foxglove_msgs && \\\n      ln -sfn /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/foxglove_msgs/msg /opt/ros/${ROS_DISTRO}/include/foxglove_msgs/msg; \\\n    fi\n\nCOPY docker/prepare_container_workspace.sh /opt/nvblox/bin/prepare_container_workspace.sh\nCOPY docker/launch_nvblox.sh /opt/nvblox/bin/launch_nvblox.sh\nRUN chmod +x /opt/nvblox/bin/prepare_container_workspace.sh /opt/nvblox/bin/launch_nvblox.sh\n\nWORKDIR /workspaces/isaac_ros-dev\n"
  },
  {
    "path": "reComputer/scripts/nvblox/docker/launch_nvblox.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nROS_DISTRO=\"${ROS_DISTRO:-humble}\"\nNVBLOX_LAUNCH_FILE=\"${NVBLOX_LAUNCH_FILE:-orbbec_example.launch.py}\"\nEXPECTED_WORKSPACE_SPEC_VERSION=\"${EXPECTED_WORKSPACE_SPEC_VERSION:-}\"\nNVBLOX_OUTPUT_PROBE_TIMEOUT_SEC=\"${NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC:-45}\"\nISAAC_WS=\"/workspaces/isaac_ros-dev\"\nSTAMP_PATH=\"${ISAAC_WS}/.setup-nvbox/container_workspace.env\"\nLAUNCH_PID=\"\"\nOUTPUT_PROBE_PID=\"\"\nROS_DISCOVERY_ENV_VARS=(\n  \"ROS_DOMAIN_ID\"\n  \"ROS_LOCALHOST_ONLY\"\n  \"RMW_IMPLEMENTATION\"\n  \"ROS_AUTOMATIC_DISCOVERY_RANGE\"\n  \"ROS_STATIC_PEERS\"\n  \"CYCLONEDDS_URI\"\n  \"CYCLONEDDS_HOME\"\n  \"FASTDDS_DEFAULT_PROFILES_FILE\"\n  \"FASTRTPS_DEFAULT_PROFILES_FILE\"\n)\n\n[[ -f \"/opt/ros/${ROS_DISTRO}/setup.bash\" ]] || {\n  printf '[container][ERROR] Missing ROS setup at /opt/ros/%s/setup.bash\\n' \"${ROS_DISTRO}\" >&2\n  exit 1\n}\n[[ -f \"${ISAAC_WS}/install/setup.bash\" ]] || {\n  printf '[container][ERROR] Missing workspace setup at %s/install/setup.bash\\n' \"${ISAAC_WS}\" >&2\n  exit 1\n}\n[[ -f \"${STAMP_PATH}\" ]] || {\n  printf '[container][ERROR] Missing workspace stamp at %s\\n' \"${STAMP_PATH}\" >&2\n  exit 1\n}\n\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\n\n# shellcheck disable=SC1091\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\n# shellcheck disable=SC1090\nsource \"${ISAAC_WS}/install/setup.bash\"\n# shellcheck disable=SC1090\nsource \"${STAMP_PATH}\"\n\nif (( restore_nounset )); then\n  set -u\nfi\n\nif [[ -n \"${EXPECTED_WORKSPACE_SPEC_VERSION}\" ]] && \\\n   [[ \"${STAMP_WORKSPACE_SPEC_VERSION:-}\" != \"${EXPECTED_WORKSPACE_SPEC_VERSION}\" ]]; then\n  printf '[container][ERROR] Workspace spec mismatch. 
Expected %s, found %s\\n' \\\n    \"${EXPECTED_WORKSPACE_SPEC_VERSION}\" \"${STAMP_WORKSPACE_SPEC_VERSION:-unknown}\" >&2\n  exit 1\nfi\n\nPACKAGE_PREFIX=\"$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)\"\n[[ -n \"${PACKAGE_PREFIX}\" ]] || {\n  printf '[container][ERROR] Cannot resolve nvblox_examples_bringup in the prepared workspace.\\n' >&2\n  exit 1\n}\n\nLAUNCH_PATH=\"${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}\"\n[[ -f \"${LAUNCH_PATH}\" ]] || {\n  printf '[container][ERROR] Prepared launch file is missing: %s\\n' \"${LAUNCH_PATH}\" >&2\n  exit 1\n}\n\nformat_ros_discovery_env() {\n  local parts=()\n  local var_name=\"\"\n  local value=\"\"\n  local old_ifs=\"${IFS}\"\n\n  for var_name in \"${ROS_DISCOVERY_ENV_VARS[@]}\"; do\n    value=\"${!var_name-}\"\n    if [[ -n \"${value}\" ]]; then\n      parts+=(\"${var_name}=${value}\")\n    else\n      parts+=(\"${var_name}=<unset>\")\n    fi\n  done\n\n  IFS=', '\n  printf '%s\\n' \"${parts[*]}\"\n  IFS=\"${old_ifs}\"\n}\n\nprintf '[container][INFO] Workspace spec: %s\\n' \"${STAMP_WORKSPACE_SPEC_VERSION:-unknown}\"\nprintf '[container][INFO] Workspace stamped at: %s\\n' \"${STAMPED_AT:-unknown}\"\nprintf '[container][INFO] Launching static demo file: %s\\n' \"${NVBLOX_LAUNCH_FILE}\"\nprintf '[container][INFO] Managed static TF chain: odom -> base_link -> camera_link -> camera_color_optical_frame\\n'\nprintf '[container][INFO] Expected camera info frame_id: camera_color_optical_frame\\n'\nprintf '[container][INFO] Container ROS discovery env: %s\\n' \"$(format_ros_discovery_env)\"\n\nprobe_nvblox_runtime_output() {\n  python3 - \"${NVBLOX_OUTPUT_PROBE_TIMEOUT_SEC}\" <<'PY'\nimport sys\nimport time\n\nimport rclpy\nfrom nav_msgs.msg import OccupancyGrid\nfrom rclpy.executors import SingleThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import PointCloud2\n\ntimeout_seconds = 
float(sys.argv[1])\n\n\nclass NvbloxOutputProbe(Node):\n    def __init__(self):\n        super().__init__('nvblox_runtime_output_probe')\n        self.result = None\n        self.create_subscription(\n            PointCloud2,\n            '/nvblox_node/static_esdf_pointcloud',\n            self._pointcloud_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            OccupancyGrid,\n            '/nvblox_node/static_map_slice',\n            self._map_slice_callback,\n            10)\n\n    def _pointcloud_callback(self, msg: PointCloud2):\n        self.result = (\n            '/nvblox_node/static_esdf_pointcloud',\n            f'frame_id={msg.header.frame_id or \"<empty>\"} width={msg.width} height={msg.height}')\n\n    def _map_slice_callback(self, msg: OccupancyGrid):\n        self.result = (\n            '/nvblox_node/static_map_slice',\n            f'frame_id={msg.header.frame_id or \"<empty>\"} width={msg.info.width} '\n            f'height={msg.info.height} resolution={msg.info.resolution:.3f}')\n\n\ndef main() -> int:\n    print(\n        '[container][INFO] Starting runtime output probe for '\n        '/nvblox_node/static_esdf_pointcloud and /nvblox_node/static_map_slice '\n        f'({timeout_seconds:.0f}s timeout)',\n        flush=True)\n    rclpy.init(args=None)\n    node = NvbloxOutputProbe()\n    executor = SingleThreadedExecutor()\n    executor.add_node(node)\n    deadline = time.monotonic() + timeout_seconds\n\n    try:\n        while time.monotonic() < deadline and node.result is None:\n            executor.spin_once(timeout_sec=0.2)\n\n        if node.result is None:\n            print(\n                '[container][WARN] Runtime output probe timed out waiting for '\n                '/nvblox_node/static_esdf_pointcloud or /nvblox_node/static_map_slice. 
'\n                'Readiness probes passed, but no runtime map output was observed yet.',\n                flush=True)\n            return 1\n\n        topic_name, details = node.result\n        print(f'[container][INFO] Runtime output probe received {topic_name}: {details}', flush=True)\n        return 0\n    finally:\n        executor.remove_node(node)\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\n}\n\nforward_signal() {\n  local signal=\"$1\"\n\n  [[ -n \"${LAUNCH_PID}\" ]] && kill \"-${signal}\" \"${LAUNCH_PID}\" 2>/dev/null || true\n  [[ -n \"${OUTPUT_PROBE_PID}\" ]] && kill \"-${signal}\" \"${OUTPUT_PROBE_PID}\" 2>/dev/null || true\n}\n\ntrap 'forward_signal INT' INT\ntrap 'forward_signal TERM' TERM\n\nros2 launch nvblox_examples_bringup \"${NVBLOX_LAUNCH_FILE}\" &\nLAUNCH_PID=$!\n\nprobe_nvblox_runtime_output &\nOUTPUT_PROBE_PID=$!\n\nset +e\nwait \"${LAUNCH_PID}\"\nlaunch_status=$?\nset -e\n\nif [[ -n \"${OUTPUT_PROBE_PID}\" ]] && kill -0 \"${OUTPUT_PROBE_PID}\" 2>/dev/null; then\n  kill -TERM \"${OUTPUT_PROBE_PID}\" 2>/dev/null || true\nfi\nwait \"${OUTPUT_PROBE_PID}\" 2>/dev/null || true\n\nexit \"${launch_status}\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/docker/prepare_container_workspace.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nROS_DISTRO=\"${ROS_DISTRO:-humble}\"\nFORCE_REBUILD=\"${FORCE_REBUILD:-0}\"\nSETUP_IMAGE_ID=\"${SETUP_IMAGE_ID:-}\"\nSETUP_IMAGE_CONTEXT_HASH=\"${SETUP_IMAGE_CONTEXT_HASH:-}\"\nCOMMUNITY_REPO_URL=\"${COMMUNITY_REPO_URL:-https://github.com/jjjadand/isaac-NVblox-Orbbec.git}\"\nCOMMUNITY_REPO_BRANCH=\"${COMMUNITY_REPO_BRANCH:-main}\"\nOFFICIAL_NVBLOX_REPO_URL=\"${OFFICIAL_NVBLOX_REPO_URL:-https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox.git}\"\nOFFICIAL_NVBLOX_REPO_BRANCH=\"${OFFICIAL_NVBLOX_REPO_BRANCH:-release-3.2}\"\n\nWORKSPACE_SPEC_VERSION=\"${EXPECTED_WORKSPACE_SPEC_VERSION:-static-demo-final-v3}\"\n\nISAAC_WS=\"/workspaces/isaac_ros-dev\"\nSRC_DIR=\"${ISAAC_WS}/src\"\nSETUP_DIR=\"${ISAAC_WS}/.setup-nvbox\"\nSTAMP_PATH=\"${SETUP_DIR}/container_workspace.env\"\nCOMMUNITY_REPO_PATH=\"${SETUP_DIR}/isaac-NVblox-Orbbec\"\nOFFICIAL_NVBLOX_REPO_PATH=\"${SETUP_DIR}/isaac_ros_nvblox\"\n\nCOMMUNITY_COMMON_ROOT=\"${COMMUNITY_REPO_PATH}/src/isaac_ros_common\"\nCOMMUNITY_NITROS_ROOT=\"${COMMUNITY_REPO_PATH}/src/isaac_ros_nitros\"\nCOMMUNITY_NVBLOX_ROOT=\"${COMMUNITY_REPO_PATH}/src/isaac_ros_nvblox\"\nOFFICIAL_NVBLOX_ROOT=\"${OFFICIAL_NVBLOX_REPO_PATH}\"\n\nCOMMUNITY_COMMON_PACKAGE_PATHS=(\n  \"isaac_common\"\n  \"isaac_ros_common\"\n  \"isaac_ros_launch_utils\"\n  \"isaac_ros_tensor_list_interfaces\"\n)\n\nCOMMUNITY_NITROS_PACKAGE_PATHS=(\n  \"isaac_ros_gxf\"\n  \"isaac_ros_nitros\"\n  \"isaac_ros_managed_nitros\"\n  \"isaac_ros_nitros_type/isaac_ros_nitros_camera_info_type\"\n  \"isaac_ros_nitros_type/isaac_ros_nitros_image_type\"\n  \"isaac_ros_nitros_type/isaac_ros_nitros_tensor_list_type\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_message_compositor\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_optimizer\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_gxf_helpers\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_sight\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_atlas\"\n  
\"isaac_ros_gxf_extensions/gxf_isaac_gems\"\n)\n\nOFFICIAL_NVBLOX_PACKAGE_PATHS=(\n  \"nvblox_msgs\"\n  \"nvblox_ros_common\"\n  \"nvblox_ros_python_utils\"\n  \"nvblox_ros\"\n  \"nvblox_rviz_plugin\"\n  \"nvblox_examples/nvblox_examples_bringup\"\n)\n\nSTATIC_DEMO_OVERLAY_FILE_PATHS=(\n  \"nvblox_examples/nvblox_examples_bringup/config/visualization/orbbec_example.rviz\"\n)\n\nGENERATED_LAUNCH_FILE_PATHS=(\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py\"\n)\n\nGENERATED_CONFIG_FILE_PATHS=(\n  \"nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml\"\n)\n\nREQUIRED_SRC_PATHS=(\n  \"isaac_common\"\n  \"isaac_ros_common\"\n  \"isaac_ros_launch_utils\"\n  \"isaac_ros_tensor_list_interfaces\"\n  \"isaac_ros_gxf\"\n  \"isaac_ros_nitros\"\n  \"isaac_ros_managed_nitros\"\n  \"isaac_ros_nitros_type/isaac_ros_nitros_camera_info_type\"\n  \"isaac_ros_nitros_type/isaac_ros_nitros_image_type\"\n  \"isaac_ros_nitros_type/isaac_ros_nitros_tensor_list_type\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_message_compositor\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_optimizer\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_gxf_helpers\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_sight\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_atlas\"\n  \"isaac_ros_gxf_extensions/gxf_isaac_gems\"\n  \"nvblox_msgs\"\n  \"nvblox_ros_common\"\n  \"nvblox_ros_python_utils\"\n  \"nvblox_ros\"\n  \"nvblox_rviz_plugin\"\n  \"nvblox_examples/nvblox_examples_bringup\"\n)\n\nREQUIRED_SRC_FILE_PATHS=(\n  \"nvblox_ros/CMakeLists.txt\"\n  \"nvblox_ros/nvblox_core/CMakeLists.txt\"\n  \"nvblox_ros/nvblox_core/cmake/cuda/setup_compute_capability.cmake\"\n  
\"nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/config/visualization/orbbec_example.rviz\"\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py\"\n  \"nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml\"\n)\n\nEXCLUDED_SRC_PATHS=(\n  \"isaac_ros_pynitros\"\n  \"isaac_ros_managed_nitros_examples\"\n  \"isaac_ros_nitros_bridge\"\n  \"isaac_ros_nitros_topic_tools\"\n  \"isaac_ros_visual_slam\"\n  \"isaac_ros_visual_slam_interfaces\"\n  \"nvblox_nav2\"\n  \"nvblox_examples/nvblox_image_padding\"\n  \"nvblox_examples/semantic_label_conversion\"\n)\n\nSTATIC_DEMO_REMOVED_DEPENDENCIES=(\n  \"nova_carter_navigation\"\n  \"isaac_ros_visual_slam\"\n  \"isaac_ros_visual_slam_interfaces\"\n  \"isaac_ros_peoplenet_models_install\"\n  \"isaac_ros_detectnet\"\n  \"isaac_ros_peoplesemseg_models_install\"\n  \"isaac_ros_dnn_image_encoder\"\n  \"isaac_ros_triton\"\n  \"isaac_ros_unet\"\n  \"semantic_label_conversion\"\n  \"nvblox_image_padding\"\n)\n\nROSDEP_SKIP_KEYS=(\n  \"isaac_ros_peoplenet_models_install\"\n  \"isaac_ros_detectnet\"\n  \"isaac_ros_image_proc\"\n)\n\nCOLCON_TARGETS=(\n  \"nvblox_examples_bringup\"\n)\n\nRUNTIME_REQUIRED_PACKAGES=(\n  \"nvblox_examples_bringup\"\n  \"nvblox_ros\"\n)\n\nINSTALL_REQUIRED_FILE_PATHS=(\n  \"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_transforms.launch.py\"\n  \"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_example.launch.py\"\n  \"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_debug.launch.py\"\n  \"install/nvblox_examples_bringup/share/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py\"\n  
\"install/nvblox_examples_bringup/share/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml\"\n)\n\nlog() {\n  printf '[container][%s] %s\\n' \"$(date '+%Y-%m-%d %H:%M:%S')\" \"$*\"\n}\n\ndie() {\n  printf '[container][ERROR] %s\\n' \"$*\" >&2\n  exit 1\n}\n\nsource_ros() {\n  local restore_nounset=0\n\n  if [[ $- == *u* ]]; then\n    restore_nounset=1\n    set +u\n  fi\n\n  # shellcheck disable=SC1091\n  source \"/opt/ros/${ROS_DISTRO}/setup.bash\"\n  if [[ -f \"${ISAAC_WS}/install/setup.bash\" ]]; then\n    # shellcheck disable=SC1090\n    source \"${ISAAC_WS}/install/setup.bash\"\n  fi\n\n  if (( restore_nounset )); then\n    set -u\n  fi\n}\n\nensure_rosdep_ready() {\n  if [[ ! -f /etc/ros/rosdep/sources.list.d/20-default.list ]]; then\n    log \"Initializing rosdep.\"\n    rosdep init || true\n  fi\n\n  log \"Updating rosdep.\"\n  rosdep update\n}\n\nensure_git_safe_directory() {\n  local repo_path=\"$1\"\n\n  [[ -n \"${repo_path}\" ]] || return 0\n  [[ -e \"${repo_path}\" ]] || return 0\n\n  if git config --global --get-all safe.directory 2>/dev/null | grep -Fqx \"${repo_path}\"; then\n    return 0\n  fi\n\n  git config --global --add safe.directory \"${repo_path}\"\n}\n\nresolve_gitdir_path() {\n  local repo_path=\"$1\"\n  local dot_git_path=\"${repo_path}/.git\"\n  local gitdir_value=\"\"\n\n  if [[ -d \"${dot_git_path}\" ]]; then\n    printf '%s\\n' \"${dot_git_path}\"\n    return 0\n  fi\n\n  if [[ -f \"${dot_git_path}\" ]]; then\n    gitdir_value=\"$(sed -n 's/^gitdir: //p' \"${dot_git_path}\" | head -n 1)\"\n    [[ -n \"${gitdir_value}\" ]] || return 1\n\n    if [[ \"${gitdir_value}\" = /* ]]; then\n      printf '%s\\n' \"${gitdir_value}\"\n    else\n      printf '%s\\n' \"$(cd \"${repo_path}\" && cd \"${gitdir_value}\" && pwd)\"\n    fi\n    return 0\n  fi\n\n  return 1\n}\n\nensure_repo_safe_directories() {\n  local repo_path=\"$1\"\n  local gitdir_path=\"\"\n\n  ensure_git_safe_directory \"${repo_path}\"\n\n  if 
gitdir_path=\"$(resolve_gitdir_path \"${repo_path}\" 2>/dev/null)\"; then\n    ensure_git_safe_directory \"${gitdir_path}\"\n  fi\n}\n\nextract_dubious_ownership_paths() {\n  local log_path=\"$1\"\n  sed -n \"s/.*detected dubious ownership in repository at '\\(.*\\)'/\\1/p\" \"${log_path}\" | sort -u\n}\n\nensure_paths_from_ownership_log() {\n  local log_path=\"$1\"\n  local repo_path=\"\"\n\n  while IFS= read -r repo_path; do\n    [[ -n \"${repo_path}\" ]] || continue\n    ensure_repo_safe_directories \"${repo_path}\"\n  done < <(extract_dubious_ownership_paths \"${log_path}\")\n}\n\nassert_git_repo_metadata() {\n  local repo_path=\"$1\"\n  local label=\"$2\"\n\n  [[ ! -e \"${repo_path}\" ]] && return 0\n  [[ -e \"${repo_path}/.git\" ]] && return 0\n  die \"Managed ${label} cache at ${repo_path} is missing Git metadata. Delete ${repo_path} and rerun prepare.\"\n}\n\nassert_git_repo_accessible() {\n  local repo_path=\"$1\"\n  local label=\"$2\"\n  local git_log\n\n  [[ -e \"${repo_path}\" ]] || return 0\n  assert_git_repo_metadata \"${repo_path}\" \"${label}\"\n  ensure_repo_safe_directories \"${repo_path}\"\n\n  git_log=\"$(mktemp)\"\n  if git -C \"${repo_path}\" rev-parse --is-inside-work-tree >/dev/null 2>\"${git_log}\"; then\n    rm -f \"${git_log}\"\n    return 0\n  fi\n\n  if grep -q \"detected dubious ownership\" \"${git_log}\"; then\n    ensure_paths_from_ownership_log \"${git_log}\"\n    if git -C \"${repo_path}\" rev-parse --is-inside-work-tree >/dev/null 2>\"${git_log}\"; then\n      rm -f \"${git_log}\"\n      return 0\n    fi\n  fi\n\n  cat \"${git_log}\" >&2 || true\n  rm -f \"${git_log}\"\n  die \"Managed ${label} cache at ${repo_path} is not usable. 
Delete ${repo_path} and rerun prepare.\"\n}\n\ninitialize_managed_git_access() {\n  mkdir -p \"${HOME}\" >/dev/null 2>&1 || true\n  touch \"${HOME}/.gitconfig\" >/dev/null 2>&1 || true\n\n  ensure_repo_safe_directories \"${COMMUNITY_REPO_PATH}\"\n  ensure_repo_safe_directories \"${OFFICIAL_NVBLOX_REPO_PATH}\"\n  ensure_repo_safe_directories \"${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core\"\n}\n\nverify_managed_git_cache_state() {\n  assert_git_repo_accessible \"${COMMUNITY_REPO_PATH}\" \"community repo\"\n  assert_git_repo_accessible \"${OFFICIAL_NVBLOX_REPO_PATH}\" \"official Isaac ROS Nvblox repo\"\n  assert_git_repo_accessible \"${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core\" \"official Isaac ROS Nvblox submodule\"\n}\n\nclone_or_update_repo() {\n  local repo_url=\"$1\"\n  local repo_branch=\"$2\"\n  local repo_path=\"$3\"\n  local repo_name=\"$4\"\n\n  mkdir -p \"${SRC_DIR}\" \"${SETUP_DIR}\"\n\n  if [[ ! -d \"${repo_path}/.git\" ]]; then\n    log \"Cloning ${repo_name} from ${repo_url}.\"\n    git clone --branch \"${repo_branch}\" --depth 1 \"${repo_url}\" \"${repo_path}\"\n    ensure_repo_safe_directories \"${repo_path}\"\n    return 0\n  fi\n\n  assert_git_repo_accessible \"${repo_path}\" \"${repo_name}\"\n  if [[ -n \"$(git -C \"${repo_path}\" status --porcelain)\" ]]; then\n    die \"Managed repo has local changes at ${repo_path}.\"\n  fi\n\n  log \"Refreshing ${repo_name}.\"\n  git -C \"${repo_path}\" fetch --depth 1 origin \"${repo_branch}\"\n  git -C \"${repo_path}\" checkout -B \"${repo_branch}\" \"origin/${repo_branch}\"\n}\n\nsync_git_submodule() {\n  local repo_path=\"$1\"\n  local submodule_path=\"$2\"\n  local label=\"$3\"\n  local submodule_repo_path=\"${repo_path}/${submodule_path}\"\n  local git_log=\"\"\n\n  assert_git_repo_accessible \"${repo_path}\" \"${label}\"\n  ensure_repo_safe_directories \"${submodule_repo_path}\"\n  log \"Syncing ${label} submodule ${submodule_path}.\"\n  git -C \"${repo_path}\" submodule sync -- 
\"${submodule_path}\"\n  git_log=\"$(mktemp)\"\n  if ! git -C \"${repo_path}\" submodule update --init --depth 1 -- \"${submodule_path}\" 2>\"${git_log}\"; then\n    if grep -q \"detected dubious ownership\" \"${git_log}\"; then\n      ensure_paths_from_ownership_log \"${git_log}\"\n      ensure_repo_safe_directories \"${repo_path}\"\n      ensure_repo_safe_directories \"${submodule_repo_path}\"\n      : > \"${git_log}\"\n      if ! git -C \"${repo_path}\" submodule update --init --depth 1 -- \"${submodule_path}\" 2>\"${git_log}\"; then\n        cat \"${git_log}\" >&2 || true\n        rm -f \"${git_log}\"\n        die \"Failed to sync ${label} submodule ${submodule_path} after refreshing Git safe.directory entries.\"\n      fi\n    else\n      cat \"${git_log}\" >&2 || true\n      rm -f \"${git_log}\"\n      die \"Failed to sync ${label} submodule ${submodule_path}.\"\n    fi\n  fi\n\n  rm -f \"${git_log}\"\n  assert_git_repo_accessible \"${submodule_repo_path}\" \"${label} submodule\"\n}\n\nverify_workspace_install() {\n  local package_name=\"\"\n  local file_path=\"\"\n\n  [[ -f \"${ISAAC_WS}/install/setup.bash\" ]] || return 1\n\n  source_ros\n  for package_name in \"${RUNTIME_REQUIRED_PACKAGES[@]}\"; do\n    ros2 pkg prefix \"${package_name}\" >/dev/null 2>&1 || return 1\n  done\n\n  for file_path in \"${INSTALL_REQUIRED_FILE_PATHS[@]}\"; do\n    [[ -f \"${ISAAC_WS}/${file_path}\" ]] || return 1\n  done\n}\n\nstamp_current() {\n  [[ -f \"${STAMP_PATH}\" ]] || return 1\n  # shellcheck disable=SC1090\n  source \"${STAMP_PATH}\"\n  [[ \"${STAMP_IMAGE_ID:-}\" == \"${SETUP_IMAGE_ID}\" ]] || return 1\n  [[ \"${STAMP_IMAGE_CONTEXT_HASH:-}\" == \"${SETUP_IMAGE_CONTEXT_HASH}\" ]] || return 1\n  [[ \"${STAMP_COMMUNITY_COMMIT:-}\" == \"${COMMUNITY_COMMIT}\" ]] || return 1\n  [[ \"${STAMP_OFFICIAL_NVBLOX_COMMIT:-}\" == \"${OFFICIAL_NVBLOX_COMMIT}\" ]] || return 1\n  [[ \"${STAMP_OFFICIAL_NVBLOX_CORE_COMMIT:-}\" == \"${OFFICIAL_NVBLOX_CORE_COMMIT}\" ]] || return 1\n  [[ 
\"${STAMP_WORKSPACE_SPEC_VERSION:-}\" == \"${WORKSPACE_SPEC_VERSION}\" ]] || return 1\n  verify_synced_workspace_layout\n  verify_workspace_install\n}\n\nwrite_stamp() {\n  {\n    printf 'STAMP_IMAGE_ID=%q\\n' \"${SETUP_IMAGE_ID}\"\n    printf 'STAMP_IMAGE_CONTEXT_HASH=%q\\n' \"${SETUP_IMAGE_CONTEXT_HASH}\"\n    printf 'STAMP_COMMUNITY_COMMIT=%q\\n' \"${COMMUNITY_COMMIT}\"\n    printf 'STAMP_OFFICIAL_NVBLOX_COMMIT=%q\\n' \"${OFFICIAL_NVBLOX_COMMIT}\"\n    printf 'STAMP_OFFICIAL_NVBLOX_CORE_COMMIT=%q\\n' \"${OFFICIAL_NVBLOX_CORE_COMMIT}\"\n    printf 'STAMP_WORKSPACE_SPEC_VERSION=%q\\n' \"${WORKSPACE_SPEC_VERSION}\"\n    printf 'STAMPED_AT=%q\\n' \"$(date -Is 2>/dev/null || date)\"\n  } > \"${STAMP_PATH}\"\n}\n\nclear_managed_src_dir() {\n  mkdir -p \"${SRC_DIR}\"\n  find \"${SRC_DIR}\" -mindepth 1 -maxdepth 1 -exec rm -rf {} +\n}\n\ncopy_package_path() {\n  local source_root=\"$1\"\n  local package_path=\"$2\"\n  local src_path=\"${source_root}/${package_path}\"\n  local dest_path=\"${SRC_DIR}/${package_path}\"\n\n  [[ -d \"${src_path}\" ]] || die \"Expected package path ${package_path} is missing from ${source_root}.\"\n  mkdir -p \"$(dirname \"${dest_path}\")\"\n  rm -rf \"${dest_path}\"\n  cp -a \"${src_path}\" \"${dest_path}\"\n}\n\ncopy_package_root() {\n  local source_root=\"$1\"\n  local package_name=\"$2\"\n  local dest_path=\"${SRC_DIR}/${package_name}\"\n\n  [[ -f \"${source_root}/package.xml\" ]] || die \"Expected root package.xml is missing from ${source_root}.\"\n  mkdir -p \"${dest_path}\"\n  rm -rf \"${dest_path}\"\n  mkdir -p \"${dest_path}\"\n  find \"${source_root}\" -mindepth 1 -maxdepth 1 ! 
-name '.git' -exec cp -a {} \"${dest_path}/\" \\;\n}\n\nsync_package_group() {\n  local source_root=\"$1\"\n  shift\n  local package_path=\"\"\n\n  for package_path in \"$@\"; do\n    copy_package_path \"${source_root}\" \"${package_path}\"\n  done\n}\n\napply_overlay_files() {\n  local source_root=\"$1\"\n  shift\n  local relative_path=\"\"\n  local source_path=\"\"\n  local dest_path=\"\"\n\n  for relative_path in \"$@\"; do\n    source_path=\"${source_root}/${relative_path}\"\n    dest_path=\"${SRC_DIR}/${relative_path}\"\n    [[ -f \"${source_path}\" ]] || die \"Expected overlay file ${relative_path} is missing from ${source_root}.\"\n    mkdir -p \"$(dirname \"${dest_path}\")\"\n    cp -a \"${source_path}\" \"${dest_path}\"\n  done\n}\n\nwrite_orbbec_transforms_launch() {\n  cat > \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_transforms.launch.py\" <<'EOF'\nfrom isaac_ros_launch_utils.all_types import *\nimport isaac_ros_launch_utils as lu\n\n\ndef static_tf(parent: str, child: str, xyz: tuple[float, float, float], rpy: tuple[float, float, float]) -> Node:\n    return Node(\n        package='tf2_ros',\n        executable='static_transform_publisher',\n        arguments=[\n            '--x', str(xyz[0]),\n            '--y', str(xyz[1]),\n            '--z', str(xyz[2]),\n            '--roll', str(rpy[0]),\n            '--pitch', str(rpy[1]),\n            '--yaw', str(rpy[2]),\n            '--frame-id', parent,\n            '--child-frame-id', child,\n        ],\n        output='screen')\n\n\ndef generate_launch_description() -> LaunchDescription:\n    args = lu.ArgumentContainer()\n    actions = args.get_launch_actions()\n\n    actions.append(static_tf('odom', 'base_link', (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))\n    actions.append(static_tf('base_link', 'camera_link', (0.1, 0.0, 0.2), (0.0, 0.0, 0.0)))\n    actions.append(static_tf('camera_link', 'camera0_link', (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))\n    actions.append(static_tf(\n        
'camera_link',\n        'camera_color_optical_frame',\n        (0.0, 0.0, 0.0),\n        (-1.57079632679, 0.0, -1.57079632679)))\n    actions.append(static_tf(\n        'camera_color_optical_frame',\n        'camera_depth_optical_frame',\n        (0.0, 0.0, 0.0),\n        (0.0, 0.0, 0.0)))\n\n    return LaunchDescription(actions)\nEOF\n}\n\nwrite_orbbec_static_config() {\n  cat > \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations/nvblox_orbbec_static.yaml\" <<'EOF'\n/**:\n  ros__parameters:\n    use_lidar: false\n    input_qos: \"SENSOR_DATA\"\n    map_clearing_frame_id: \"base_link\"\n    esdf_slice_bounds_visualization_attachment_frame_id: \"base_link\"\n    static_mapper:\n      esdf_slice_height: 0.0\n      esdf_slice_min_height: -0.1\n      esdf_slice_max_height: 0.3\nEOF\n}\n\nwrite_orbbec_example_launch() {\n  cat > \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_example.launch.py\" <<'EOF'\nfrom isaac_ros_launch_utils.all_types import *\nimport isaac_ros_launch_utils as lu\n\nfrom nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME\n\n\ndef generate_launch_description() -> LaunchDescription:\n    args = lu.ArgumentContainer()\n    args.add_arg('log_level', 'info', choices=['debug', 'info', 'warn'], cli=True)\n    actions = args.get_launch_actions()\n\n    actions.append(\n        lu.include(\n            'nvblox_examples_bringup',\n            'launch/orbbec_transforms.launch.py'))\n\n    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))\n\n    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')\n    realsense_config = lu.get_path(\n        'nvblox_examples_bringup',\n        'config/nvblox/specializations/nvblox_realsense.yaml')\n    orbbec_static_config = lu.get_path(\n        'nvblox_examples_bringup',\n        'config/nvblox/specializations/nvblox_orbbec_static.yaml')\n\n    nvblox_node = ComposableNode(\n        
name='nvblox_node',\n        package='nvblox_ros',\n        plugin='nvblox::NvbloxNode',\n        remappings=[\n            ('camera_0/depth/image', '/camera/depth/image_raw'),\n            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),\n            ('camera_0/color/image', '/camera/color/image_raw'),\n            ('camera_0/color/camera_info', '/camera/color/camera_info'),\n        ],\n        parameters=[\n            base_config,\n            realsense_config,\n            orbbec_static_config,\n            {'num_cameras': 1},\n            {'use_lidar': False},\n        ],\n    )\n\n    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))\n\n    rviz_config_path = lu.get_path(\n        'nvblox_examples_bringup',\n        'config/visualization/orbbec_example.rviz')\n    actions.append(\n        Node(\n            package='rviz2',\n            executable='rviz2',\n            arguments=['-d', str(rviz_config_path)],\n            output='screen'))\n\n    return LaunchDescription(actions)\nEOF\n}\n\nwrite_orbbec_debug_launch() {\n  cat > \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_debug.launch.py\" <<'EOF'\nfrom isaac_ros_launch_utils.all_types import *\nimport isaac_ros_launch_utils as lu\n\nfrom nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME\n\n\ndef generate_launch_description() -> LaunchDescription:\n    args = lu.ArgumentContainer()\n    args.add_arg('log_level', 'debug', choices=['debug', 'info', 'warn'], cli=True)\n    actions = args.get_launch_actions()\n\n    actions.append(\n        lu.include(\n            'nvblox_examples_bringup',\n            'launch/orbbec_transforms.launch.py'))\n\n    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))\n\n    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')\n    realsense_config = lu.get_path(\n        'nvblox_examples_bringup',\n        
'config/nvblox/specializations/nvblox_realsense.yaml')\n    orbbec_static_config = lu.get_path(\n        'nvblox_examples_bringup',\n        'config/nvblox/specializations/nvblox_orbbec_static.yaml')\n\n    nvblox_node = ComposableNode(\n        name='nvblox_node',\n        package='nvblox_ros',\n        plugin='nvblox::NvbloxNode',\n        remappings=[\n            ('camera_0/depth/image', '/camera/depth/image_raw'),\n            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),\n            ('camera_0/color/image', '/camera/color/image_raw'),\n            ('camera_0/color/camera_info', '/camera/color/camera_info'),\n        ],\n        parameters=[\n            base_config,\n            realsense_config,\n            orbbec_static_config,\n            {'num_cameras': 1},\n            {'use_lidar': False},\n        ],\n    )\n\n    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))\n    return LaunchDescription(actions)\nEOF\n}\n\nwrite_orbbec_standalone_launch() {\n  cat > \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch/orbbec_nvblox_standalone.launch.py\" <<'EOF'\nfrom isaac_ros_launch_utils.all_types import *\nimport isaac_ros_launch_utils as lu\n\nfrom nvblox_ros_python_utils.nvblox_constants import NVBLOX_CONTAINER_NAME\n\n\ndef generate_launch_description() -> LaunchDescription:\n    args = lu.ArgumentContainer()\n    args.add_arg('log_level', 'info', choices=['debug', 'info', 'warn'], cli=True)\n    actions = args.get_launch_actions()\n\n    actions.append(\n        lu.include(\n            'nvblox_examples_bringup',\n            'launch/orbbec_transforms.launch.py'))\n\n    actions.append(lu.component_container(NVBLOX_CONTAINER_NAME, log_level=args.log_level))\n\n    base_config = lu.get_path('nvblox_examples_bringup', 'config/nvblox/nvblox_base.yaml')\n    realsense_config = lu.get_path(\n        'nvblox_examples_bringup',\n        'config/nvblox/specializations/nvblox_realsense.yaml')\n    
orbbec_static_config = lu.get_path(\n        'nvblox_examples_bringup',\n        'config/nvblox/specializations/nvblox_orbbec_static.yaml')\n\n    nvblox_node = ComposableNode(\n        name='nvblox_node',\n        package='nvblox_ros',\n        plugin='nvblox::NvbloxNode',\n        remappings=[\n            ('camera_0/depth/image', '/camera/depth/image_raw'),\n            ('camera_0/depth/camera_info', '/camera/depth/camera_info'),\n            ('camera_0/color/image', '/camera/color/image_raw'),\n            ('camera_0/color/camera_info', '/camera/color/camera_info'),\n        ],\n        parameters=[\n            base_config,\n            realsense_config,\n            orbbec_static_config,\n            {'num_cameras': 1},\n            {'use_lidar': False},\n        ],\n    )\n\n    actions.append(lu.load_composable_nodes(NVBLOX_CONTAINER_NAME, [nvblox_node]))\n    return LaunchDescription(actions)\nEOF\n}\n\ngenerate_static_demo_launches() {\n  log \"Generating managed static demo launch files.\"\n  mkdir -p \\\n    \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/launch\" \\\n    \"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/config/nvblox/specializations\"\n  write_orbbec_transforms_launch\n  write_orbbec_static_config\n  write_orbbec_example_launch\n  write_orbbec_debug_launch\n  write_orbbec_standalone_launch\n}\n\npatch_manifest_remove_dependencies() {\n  local manifest_path=\"$1\"\n  shift\n  local dependency_name=\"\"\n\n  [[ -f \"${manifest_path}\" ]] || die \"Expected manifest does not exist: ${manifest_path}\"\n\n  for dependency_name in \"$@\"; do\n    sed -i \"/>${dependency_name}</d\" \"${manifest_path}\"\n  done\n}\n\npatch_static_demo_manifests() {\n  local bringup_manifest=\"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/package.xml\"\n\n  log \"Patching synced manifests for the static demo workspace.\"\n  patch_manifest_remove_dependencies \"${bringup_manifest}\" 
\"${STATIC_DEMO_REMOVED_DEPENDENCIES[@]}\"\n}\n\nverify_synced_workspace_layout() {\n  local path_name=\"\"\n  local file_path=\"\"\n  local bringup_manifest=\"${SRC_DIR}/nvblox_examples/nvblox_examples_bringup/package.xml\"\n  local dependency_name=\"\"\n\n  for path_name in \"${REQUIRED_SRC_PATHS[@]}\"; do\n    [[ -d \"${SRC_DIR}/${path_name}\" ]] || die \"Required synced package path is missing: ${SRC_DIR}/${path_name}\"\n  done\n\n  for path_name in \"${EXCLUDED_SRC_PATHS[@]}\"; do\n    [[ ! -e \"${SRC_DIR}/${path_name}\" ]] || die \"Excluded package path should not exist in the managed workspace: ${SRC_DIR}/${path_name}\"\n  done\n\n  for file_path in \"${REQUIRED_SRC_FILE_PATHS[@]}\"; do\n    [[ -f \"${SRC_DIR}/${file_path}\" ]] || die \"Required synced file is missing: ${SRC_DIR}/${file_path}\"\n  done\n\n  [[ -f \"${bringup_manifest}\" ]] || die \"Expected bringup manifest is missing: ${bringup_manifest}\"\n  for dependency_name in \"${STATIC_DEMO_REMOVED_DEPENDENCIES[@]}\"; do\n    if grep -q \">${dependency_name}<\" \"${bringup_manifest}\"; then\n      die \"Static demo manifest still declares excluded dependency ${dependency_name}.\"\n    fi\n  done\n}\n\nsync_static_demo_workspace() {\n  log \"Syncing package whitelist into the managed workspace.\"\n  clear_managed_src_dir\n  sync_package_group \"${COMMUNITY_COMMON_ROOT}\" \"${COMMUNITY_COMMON_PACKAGE_PATHS[@]}\"\n  sync_package_group \"${COMMUNITY_NITROS_ROOT}\" \"${COMMUNITY_NITROS_PACKAGE_PATHS[@]}\"\n  sync_package_group \"${OFFICIAL_NVBLOX_ROOT}\" \"${OFFICIAL_NVBLOX_PACKAGE_PATHS[@]}\"\n  apply_overlay_files \"${COMMUNITY_NVBLOX_ROOT}\" \"${STATIC_DEMO_OVERLAY_FILE_PATHS[@]}\"\n  generate_static_demo_launches\n  patch_static_demo_manifests\n  verify_synced_workspace_layout\n}\n\nrebuild_workspace() {\n  local rosdep_dependency_args=(\n    --dependency-types buildtool\n    --dependency-types buildtool_export\n    --dependency-types build\n    --dependency-types build_export\n    --dependency-types 
exec\n  )\n  local rosdep_skip_args=()\n  local skip_key=\"\"\n\n  source_ros\n  ensure_rosdep_ready\n\n  for skip_key in \"${ROSDEP_SKIP_KEYS[@]}\"; do\n    rosdep_skip_args+=(--skip-keys \"${skip_key}\")\n  done\n\n  log \"Installing workspace dependencies with rosdep.\"\n  (\n    cd \"${ISAAC_WS}\"\n    rosdep install \\\n      --from-paths src \\\n      --ignore-src \\\n      -r \\\n      -y \\\n      --rosdistro \"${ROS_DISTRO}\" \\\n      \"${rosdep_dependency_args[@]}\" \\\n      \"${rosdep_skip_args[@]}\"\n  )\n\n  run_colcon_build() {\n    (\n      cd \"${ISAAC_WS}\"\n      colcon build \\\n        --packages-up-to \"${COLCON_TARGETS[@]}\" \\\n        --symlink-install \\\n        --event-handlers console_direct+ \\\n        --cmake-args -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=OFF\n    )\n  }\n\n  patch_ext_stdgpu_cuda_compat() {\n    local ext_stdgpu_root=\"\"\n    local memory_detail_path=\"\"\n    local unordered_base_path=\"\"\n    local patched_any=1\n\n    while IFS= read -r ext_stdgpu_root; do\n      memory_detail_path=\"${ext_stdgpu_root}/src/stdgpu/impl/memory_detail.h\"\n      unordered_base_path=\"${ext_stdgpu_root}/src/stdgpu/impl/unordered_base_detail.cuh\"\n\n      if [[ -f \"${memory_detail_path}\" ]]; then\n        if python3 - \"${memory_detail_path}\" <<'PY'\nfrom pathlib import Path\nimport sys\n\npath = Path(sys.argv[1])\ntext = path.read_text()\nreplacements = {\n    \"construct_at(p, forward<Args>(args)...);\": \"stdgpu::construct_at(p, stdgpu::forward<Args>(args)...);\",\n    \"destroy_at(p);\": \"stdgpu::destroy_at(p);\",\n    \"return to_address(pointer_traits<Ptr>::to_address(p));\": \"return stdgpu::to_address(pointer_traits<Ptr>::to_address(p));\",\n}\nchanged = False\nfor old, new in replacements.items():\n    if old in text:\n        text = text.replace(old, new)\n        changed = True\n\nif changed:\n    path.write_text(text)\n\nsys.exit(0 if changed else 1)\nPY\n        then\n          log \"Applied CUDA 12.6 stdgpu 
compatibility patch to ${memory_detail_path}.\"\n          patched_any=0\n        fi\n      fi\n\n      if [[ -f \"${unordered_base_path}\" ]]; then\n        if python3 - \"${unordered_base_path}\" <<'PY'\nfrom pathlib import Path\nimport sys\n\npath = Path(sys.argv[1])\ntext = path.read_text()\nreplacements = {\n    \"_base.insert(*to_address(_begin + i));\": \"_base.insert(*stdgpu::to_address(_begin + i));\",\n}\nchanged = False\nfor old, new in replacements.items():\n    if old in text:\n        text = text.replace(old, new)\n        changed = True\n\nif changed:\n    path.write_text(text)\n\nsys.exit(0 if changed else 1)\nPY\n        then\n          log \"Applied CUDA 12.6 stdgpu compatibility patch to ${unordered_base_path}.\"\n          patched_any=0\n        fi\n      fi\n    done < <(find \"${ISAAC_WS}/build\" -type d -path '*/_deps/ext_stdgpu-src' 2>/dev/null | sort)\n\n    return \"${patched_any}\"\n  }\n\n  log \"Building container workspace.\"\n  rm -rf \"${ISAAC_WS}/build\" \"${ISAAC_WS}/install\" \"${ISAAC_WS}/log\"\n\n  if run_colcon_build; then\n    return 0\n  fi\n\n  if patch_ext_stdgpu_cuda_compat; then\n    log \"Retrying container workspace build after applying stdgpu CUDA compatibility patches.\"\n    run_colcon_build\n    return 0\n  fi\n\n  die \"Container workspace build failed before the compatibility patch could be applied.\"\n}\n\ninitialize_managed_git_access\nverify_managed_git_cache_state\n\nclone_or_update_repo \"${COMMUNITY_REPO_URL}\" \"${COMMUNITY_REPO_BRANCH}\" \"${COMMUNITY_REPO_PATH}\" \"community repo\"\nclone_or_update_repo \"${OFFICIAL_NVBLOX_REPO_URL}\" \"${OFFICIAL_NVBLOX_REPO_BRANCH}\" \"${OFFICIAL_NVBLOX_REPO_PATH}\" \"official Isaac ROS Nvblox repo\"\nsync_git_submodule \"${OFFICIAL_NVBLOX_REPO_PATH}\" \"nvblox_ros/nvblox_core\" \"official Isaac ROS Nvblox\"\nassert_git_repo_accessible \"${COMMUNITY_REPO_PATH}\" \"community repo\"\nassert_git_repo_accessible \"${OFFICIAL_NVBLOX_REPO_PATH}\" \"official Isaac ROS Nvblox 
repo\"\nassert_git_repo_accessible \"${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core\" \"official Isaac ROS Nvblox submodule\"\nCOMMUNITY_COMMIT=\"$(git -C \"${COMMUNITY_REPO_PATH}\" rev-parse HEAD)\"\nOFFICIAL_NVBLOX_COMMIT=\"$(git -C \"${OFFICIAL_NVBLOX_REPO_PATH}\" rev-parse HEAD)\"\nOFFICIAL_NVBLOX_CORE_COMMIT=\"$(git -C \"${OFFICIAL_NVBLOX_REPO_PATH}/nvblox_ros/nvblox_core\" rev-parse HEAD)\"\n\nif [[ \"${FORCE_REBUILD}\" != \"1\" ]] && stamp_current; then\n  log \"Container workspace is already current. Skipping rebuild.\"\n  exit 0\nfi\n\nsync_static_demo_workspace\nrebuild_workspace\nverify_synced_workspace_layout\nverify_workspace_install || die \"Container workspace verification failed.\"\nwrite_stamp\nlog \"Container workspace preparation complete.\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/host/orbbec_mobile_host.launch.py",
    "content": "from launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.actions import LoadComposableNodes, Node\nfrom launch_ros.descriptions import ComposableNode\n\n\ndef generate_launch_description():\n    config_file_path = LaunchConfiguration('config_file_path')\n    container_name = LaunchConfiguration('component_container_name', default='orbbec_host_container')\n\n    container = Node(\n        name=container_name,\n        package='rclcpp_components',\n        executable='component_container_mt',\n        output='screen')\n\n    load_orbbec_node = LoadComposableNodes(\n        target_container=container_name,\n        composable_node_descriptions=[\n            ComposableNode(\n                namespace='camera',\n                name='orbbec_camera_node',\n                package='orbbec_camera',\n                plugin='orbbec_camera::OBCameraNodeDriver',\n                parameters=[config_file_path],\n                remappings=[\n                    ('/camera/left_ir/image_raw', '~/output/infra_1'),\n                    ('/camera/right_ir/image_raw', '~/output/infra_2'),\n                    ('/camera/depth/image_raw', '~/output/depth'),\n                    ('/camera/depth_registered/points', '~/output/pointcloud'),\n                ],\n            )\n        ])\n\n    return LaunchDescription([\n        DeclareLaunchArgument('config_file_path'),\n        DeclareLaunchArgument('component_container_name', default_value='orbbec_host_container'),\n        container,\n        load_orbbec_node,\n    ])\n"
  },
  {
    "path": "reComputer/scripts/nvblox/init.sh",
    "content": "#!/bin/bash\n\necho \"NVBlox preflight, image download, docker load, and demo setup are handled by 'reComputer run nvblox'.\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/lib/common.sh",
    "content": "#!/usr/bin/env bash\n\nif [[ \"${SETUP_NVBOX_COMMON_SH:-0}\" == \"1\" ]]; then\n  return 0\nfi\nreadonly SETUP_NVBOX_COMMON_SH=1\n\nPROJECT_ROOT=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")/..\" && pwd)\"\nreadonly PROJECT_ROOT\n\ncommon_fatal() {\n  printf '[setup-nvbox][ERROR] %s\\n' \"$*\" >&2\n  exit 1\n}\n\nresolve_setup_user_name() {\n  if [[ -n \"${SUDO_USER:-}\" && \"${SUDO_USER}\" != \"root\" ]]; then\n    printf '%s\\n' \"${SUDO_USER}\"\n    return 0\n  fi\n\n  id -un\n}\n\nlookup_user_passwd_entry() {\n  local user_name=\"$1\"\n  getent passwd \"${user_name}\" 2>/dev/null | head -n 1\n}\n\nresolve_user_home() {\n  local user_name=\"$1\"\n  local passwd_entry=\"\"\n\n  passwd_entry=\"$(lookup_user_passwd_entry \"${user_name}\")\"\n  [[ -n \"${passwd_entry}\" ]] || common_fatal \"Cannot resolve passwd entry for user ${user_name}.\"\n  printf '%s\\n' \"$(cut -d: -f6 <<<\"${passwd_entry}\")\"\n}\n\nreadonly SETUP_USER_NAME=\"$(resolve_setup_user_name)\"\nreadonly SETUP_USER_HOME=\"$(resolve_user_home \"${SETUP_USER_NAME}\")\"\nreadonly SETUP_USER_UID=\"$(id -u \"${SETUP_USER_NAME}\")\"\nreadonly SETUP_USER_GID=\"$(id -g \"${SETUP_USER_NAME}\")\"\nreadonly MANAGED_ROOT_DEFAULT=\"${SETUP_USER_HOME}/nvblox_demo\"\nreadonly MANAGED_SENTINEL_NAME=\".managed-by-setup-nvbox\"\nreadonly ROS_DISTRO_DEFAULT=\"humble\"\nreadonly ORBBEC_VERSION=\"v2.3.4\"\nreadonly ORBBEC_REPO_URL=\"https://github.com/orbbec/OrbbecSDK_ROS2.git\"\nreadonly GEMINI2_USB_VENDOR_ID=\"2bc5\"\nreadonly GEMINI2_USB_PRODUCT_ID=\"0670\"\nreadonly GEMINI2_READY_TIMEOUT_SECONDS=15\nreadonly GEMINI2_SIGNAL_TIMEOUT_SECONDS=5\nreadonly HOST_CAMERA_LOG_TAIL_LINES=40\nreadonly COMMUNITY_REPO_URL_DEFAULT=\"https://github.com/jjjadand/isaac-NVblox-Orbbec.git\"\nreadonly COMMUNITY_REPO_BRANCH_DEFAULT=\"main\"\nreadonly BASE_IMAGE_PREFERRED=\"isaac_ros_dev-aarch64:latest\"\nreadonly DERIVED_IMAGE_TAG=\"local/isaac_ros_nvblox_orbbec:jp6-humble\"\nreadonly 
CONTAINER_NAME_DEFAULT=\"isaac_ros_nvblox_orbbec\"\nreadonly CONTAINER_WORKSPACE_SPEC_VERSION=\"static-demo-final-v3\"\nreadonly NVBLOX_IMAGE_SHARE_URL_DEFAULT=\"https://seeedstudio88-my.sharepoint.com/:u:/g/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/IQCCDToomY6WSaRZdfsTs9vXAengb-SCEvNfSUgq0cipP6w?e=z9axor\"\nreadonly NVBLOX_IMAGE_ARCHIVE_NAME_DEFAULT=\"nvblox_images.tar\"\nreadonly NVBLOX_IMAGE_CACHE_DIR_DEFAULT=\"${SETUP_USER_HOME}/.cache/jetson-examples/nvblox\"\nreadonly FASTDDS_RUNTIME_DIR_RELATIVE=\".runtime/fastdds\"\nreadonly FASTDDS_UDP_ONLY_PROFILE_FILENAME=\"udp_only.xml\"\nreadonly ROS_DISCOVERY_ENV_VARS=(\n  \"ROS_DOMAIN_ID\"\n  \"ROS_LOCALHOST_ONLY\"\n  \"RMW_IMPLEMENTATION\"\n  \"ROS_AUTOMATIC_DISCOVERY_RANGE\"\n  \"ROS_STATIC_PEERS\"\n  \"CYCLONEDDS_URI\"\n  \"CYCLONEDDS_HOME\"\n  \"FASTDDS_DEFAULT_PROFILES_FILE\"\n  \"FASTRTPS_DEFAULT_PROFILES_FILE\"\n)\nreadonly ROS_DISCOVERY_PATH_ENV_VARS=(\n  \"CYCLONEDDS_URI\"\n  \"CYCLONEDDS_HOME\"\n  \"FASTDDS_DEFAULT_PROFILES_FILE\"\n  \"FASTRTPS_DEFAULT_PROFILES_FILE\"\n)\n\nDOCKER_PREFIX=()\n\ntimestamp() {\n  date '+%Y-%m-%d %H:%M:%S'\n}\n\nlog() {\n  local level=\"$1\"\n  shift\n  printf '[%s] [%s] %s\\n' \"$(timestamp)\" \"${level}\" \"$*\"\n}\n\ninfo() {\n  log INFO \"$@\"\n}\n\nwarn() {\n  log WARN \"$@\"\n}\n\nerror() {\n  log ERROR \"$@\" >&2\n}\n\ndie() {\n  error \"$@\"\n  exit 1\n}\n\nresolve_nvblox_image_share_url() {\n  printf '%s\\n' \"${NVBLOX_IMAGE_SHARE_URL:-${NVBLOX_IMAGE_SHARE_URL_DEFAULT}}\"\n}\n\nresolve_nvblox_image_archive_name() {\n  printf '%s\\n' \"${NVBLOX_IMAGE_ARCHIVE_NAME:-${NVBLOX_IMAGE_ARCHIVE_NAME_DEFAULT}}\"\n}\n\nresolve_nvblox_image_cache_dir() {\n  printf '%s\\n' \"${NVBLOX_IMAGE_CACHE_DIR:-${NVBLOX_IMAGE_CACHE_DIR_DEFAULT}}\"\n}\n\nresolve_nvblox_image_archive_path() {\n  local cache_dir=\"${1:-$(resolve_nvblox_image_cache_dir)}\"\n  local archive_name=\"${2:-$(resolve_nvblox_image_archive_name)}\"\n\n  printf '%s/%s\\n' \"${cache_dir%/}\" 
\"${archive_name}\"\n}\n\ncleanup_nvblox_partial_downloads() {\n  local cache_dir=\"${1:-$(resolve_nvblox_image_cache_dir)}\"\n  local partial_file=\"\"\n\n  [[ -d \"${cache_dir}\" ]] || return 0\n\n  while IFS= read -r partial_file; do\n    [[ -n \"${partial_file}\" ]] || continue\n    rm -f \"${partial_file}\"\n    info \"Removed partial NVBlox download ${partial_file}\"\n  done < <(find \"${cache_dir}\" -maxdepth 1 -type f -name '*.part' 2>/dev/null | sort)\n}\n\nensure_supported_user_context() {\n  if [[ \"${EUID}\" -eq 0 && -z \"${SUDO_USER:-}\" ]]; then\n    die \"Running from a root login shell is not supported. Use your normal user account, or invoke this script with sudo from that account.\"\n  fi\n\n  if [[ \"${EUID}\" -eq 0 && \"${SETUP_USER_NAME}\" == \"root\" ]]; then\n    die \"Cannot determine a non-root setup user from sudo context.\"\n  fi\n}\n\nshould_reexec_as_setup_user() {\n  [[ \"${EUID}\" -eq 0 ]] || return 1\n  [[ -n \"${SUDO_USER:-}\" && \"${SUDO_USER}\" != \"root\" ]] || return 1\n  [[ \"${SETUP_NVBOX_REEXECED:-0}\" != \"1\" ]]\n}\n\nreexec_as_setup_user() {\n  local script_path=\"$1\"\n  shift\n  local env_args=(\"SETUP_NVBOX_REEXECED=1\")\n\n  if [[ -n \"${MANAGED_ROOT:-}\" ]]; then\n    env_args+=(\"MANAGED_ROOT=${MANAGED_ROOT}\")\n  fi\n\n  exec sudo -H -u \"${SETUP_USER_NAME}\" env \"${env_args[@]}\" bash \"${script_path}\" \"$@\"\n}\n\nrun_sudo() {\n  if [[ \"${EUID}\" -eq 0 ]]; then\n    \"$@\"\n  else\n    sudo \"$@\"\n  fi\n}\n\nrun_sudo_noninteractive() {\n  if [[ \"${EUID}\" -eq 0 ]]; then\n    \"$@\"\n    return 0\n  fi\n\n  sudo -n \"$@\"\n}\n\nguard_managed_root_path() {\n  local root=\"$1\"\n  local sentinel=\"${root}/${MANAGED_SENTINEL_NAME}\"\n\n  if [[ -e \"${root}\" && ! -e \"${sentinel}\" ]]; then\n    die \"Managed root ${root} exists but is not owned by this project. 
Refusing to continue.\"\n  fi\n}\n\nbootstrap_managed_root() {\n  local root=\"$1\"\n  local sentinel=\"${root}/${MANAGED_SENTINEL_NAME}\"\n\n  guard_managed_root_path \"${root}\"\n  mkdir -p \"${root}/logs\" \"${root}/.stamps\"\n  if [[ ! -f \"${sentinel}\" ]]; then\n    {\n      printf 'managed_root=%s\\n' \"${root}\"\n      printf 'created_at=%s\\n' \"$(date -Is 2>/dev/null || date)\"\n      printf 'project_root=%s\\n' \"${PROJECT_ROOT}\"\n    } > \"${sentinel}\"\n  fi\n}\n\nrepair_managed_root_ownership() {\n  local root=\"$1\"\n  local sentinel=\"${root}/${MANAGED_SENTINEL_NAME}\"\n\n  [[ -d \"${root}\" ]] || return 0\n  [[ -f \"${sentinel}\" ]] || return 0\n\n  if find \"${root}\" \\( ! -uid \"${SETUP_USER_UID}\" -o ! -gid \"${SETUP_USER_GID}\" \\) -print -quit 2>/dev/null | grep -q .; then\n    info \"Repairing managed root ownership under ${root}.\"\n    run_sudo chown -R \"${SETUP_USER_UID}:${SETUP_USER_GID}\" \"${root}\"\n  fi\n}\n\nrequire_bootstrapped_managed_root() {\n  local root=\"$1\"\n  local sentinel=\"${root}/${MANAGED_SENTINEL_NAME}\"\n\n  if [[ ! -f \"${sentinel}\" ]]; then\n    die \"Managed root ${root} is not prepared. Run with --prepare-only or the default mode first.\"\n  fi\n}\n\npackage_installed() {\n  local package_name=\"$1\"\n  dpkg-query -W -f='${Status}' \"${package_name}\" 2>/dev/null | grep -q 'install ok installed'\n}\n\ninstall_packages_if_missing() {\n  local missing=()\n  local package_name\n\n  for package_name in \"$@\"; do\n    if ! 
package_installed \"${package_name}\"; then\n      missing+=(\"${package_name}\")\n    fi\n  done\n\n  if ((${#missing[@]} == 0)); then\n    return 0\n  fi\n\n  info \"Installing apt packages: ${missing[*]}\"\n  run_sudo apt-get update\n  run_sudo apt-get install -y --no-install-recommends \"${missing[@]}\"\n}\n\nassert_command() {\n  local command_name=\"$1\"\n  command -v \"${command_name}\" >/dev/null 2>&1 || die \"Required command not found: ${command_name}\"\n}\n\nread_file_lower_trimmed() {\n  local file_path=\"$1\"\n  tr '[:upper:]' '[:lower:]' < \"${file_path}\" | tr -d '[:space:]'\n}\n\nfind_usb_device_with_ids() {\n  local start_path=\"$1\"\n  local current_path=\"\"\n\n  current_path=\"$(readlink -f \"${start_path}\" 2>/dev/null || true)\"\n  [[ -n \"${current_path}\" ]] || return 1\n\n  while [[ \"${current_path}\" != \"/\" ]]; do\n    if [[ -f \"${current_path}/idVendor\" && -f \"${current_path}/idProduct\" ]]; then\n      if [[ \"$(read_file_lower_trimmed \"${current_path}/idVendor\")\" == \"${GEMINI2_USB_VENDOR_ID}\" ]] && \\\n         [[ \"$(read_file_lower_trimmed \"${current_path}/idProduct\")\" == \"${GEMINI2_USB_PRODUCT_ID}\" ]]; then\n        printf '%s\\n' \"${current_path}\"\n        return 0\n      fi\n    fi\n    current_path=\"$(dirname \"${current_path}\")\"\n  done\n\n  return 1\n}\n\ngemini2_usb_device_dirs() {\n  local device_dir\n\n  for device_dir in /sys/bus/usb/devices/*; do\n    [[ -f \"${device_dir}/idVendor\" && -f \"${device_dir}/idProduct\" ]] || continue\n    if [[ \"$(read_file_lower_trimmed \"${device_dir}/idVendor\")\" == \"${GEMINI2_USB_VENDOR_ID}\" ]] && \\\n       [[ \"$(read_file_lower_trimmed \"${device_dir}/idProduct\")\" == \"${GEMINI2_USB_PRODUCT_ID}\" ]]; then\n      printf '%s\\n' \"${device_dir}\"\n    fi\n  done\n}\n\ngemini2_usb_present() {\n  local usb_device=\"\"\n  usb_device=\"$(gemini2_usb_device_dirs | head -n 1 || true)\"\n  [[ -n \"${usb_device}\" ]]\n}\n\ngemini2_usb_link_speed_mbps() {\n  local 
device_dir=\"\"\n  local speed_path=\"\"\n  local speed_value=\"\"\n\n  device_dir=\"$(gemini2_usb_device_dirs | head -n 1 || true)\"\n  [[ -n \"${device_dir}\" ]] || return 0\n\n  speed_path=\"${device_dir}/speed\"\n  [[ -f \"${speed_path}\" ]] || return 0\n  speed_value=\"$(tr -d '[:space:]' < \"${speed_path}\" 2>/dev/null || true)\"\n  [[ \"${speed_value}\" =~ ^[0-9]+$ ]] || return 0\n  printf '%s\\n' \"${speed_value}\"\n}\n\ngemini2_video_nodes() {\n  local video_sysfs_path=\"\"\n  local video_name=\"\"\n\n  for video_sysfs_path in /sys/class/video4linux/video*; do\n    [[ -e \"${video_sysfs_path}\" ]] || continue\n    if find_usb_device_with_ids \"${video_sysfs_path}/device\" >/dev/null 2>&1; then\n      video_name=\"$(basename \"${video_sysfs_path}\")\"\n      [[ -e \"/dev/${video_name}\" ]] || continue\n      printf '/dev/%s\\n' \"${video_name}\"\n    fi\n  done | sort -u\n}\n\ngemini2_video_nodes_joined() {\n  local video_nodes=()\n\n  mapfile -t video_nodes < <(gemini2_video_nodes)\n  if ((${#video_nodes[@]} == 0)); then\n    return 0\n  fi\n\n  printf '%s\\n' \"${video_nodes[*]}\"\n}\n\nlog_gemini2_video_nodes_snapshot() {\n  local prefix=\"${1:-Gemini2 /dev/video snapshot}\"\n  local video_nodes=\"\"\n\n  video_nodes=\"$(gemini2_video_nodes_joined)\"\n  if [[ -n \"${video_nodes}\" ]]; then\n    info \"${prefix}: ${video_nodes}\"\n  else\n    warn \"${prefix}: <none>\"\n  fi\n}\n\ngemini2_device_state() {\n  local video_nodes=\"\"\n\n  if ! 
gemini2_usb_present; then\n    printf 'usb_missing\\n'\n    return 0\n  fi\n\n  video_nodes=\"$(gemini2_video_nodes_joined)\"\n  if [[ -n \"${video_nodes}\" ]]; then\n    printf 'ready\\n'\n  else\n    printf 'usb_present_no_video\\n'\n  fi\n}\n\nlog_gemini2_device_state() {\n  local prefix=\"${1:-Gemini2 device state}\"\n  local state=\"\"\n  local video_nodes=\"\"\n  local speed_mbps=\"\"\n\n  state=\"$(gemini2_device_state)\"\n  video_nodes=\"$(gemini2_video_nodes_joined)\"\n  speed_mbps=\"$(gemini2_usb_link_speed_mbps)\"\n\n  if [[ -n \"${video_nodes}\" ]]; then\n    if [[ -n \"${speed_mbps}\" ]]; then\n      info \"${prefix}: ${state} (video nodes: ${video_nodes}; usb speed: ${speed_mbps} Mbps)\"\n    else\n      info \"${prefix}: ${state} (video nodes: ${video_nodes})\"\n    fi\n  else\n    if [[ -n \"${speed_mbps}\" ]]; then\n      info \"${prefix}: ${state} (usb speed: ${speed_mbps} Mbps)\"\n    else\n      info \"${prefix}: ${state}\"\n    fi\n  fi\n}\n\ngemini2_detected() {\n  [[ \"$(gemini2_device_state)\" == \"ready\" ]]\n}\n\nwait_for_gemini2_ready() {\n  local timeout_seconds=\"${1:-${GEMINI2_READY_TIMEOUT_SECONDS}}\"\n  local deadline=$((SECONDS + timeout_seconds))\n\n  while ((SECONDS < deadline)); do\n    if [[ \"$(gemini2_device_state)\" == \"ready\" ]]; then\n      return 0\n    fi\n    sleep 1\n  done\n\n  return 1\n}\n\ncollect_live_pids() {\n  local pid=\"\"\n\n  for pid in \"$@\"; do\n    if kill -0 \"${pid}\" 2>/dev/null; then\n      printf '%s\\n' \"${pid}\"\n    fi\n  done\n}\n\ncleanup_residual_gemini2_processes() {\n  local context=\"${1:-Gemini2 cleanup}\"\n  local patterns=(\n    'ros2 launch orbbec_camera gemini2.launch.py'\n    'ros2 launch orbbec_camera gemini_330_series.launch.py'\n    'orbbec_mobile_host.launch.py'\n    'camera_container'\n    'orbbec_host_container'\n    'orbbec_camera_node'\n  )\n  local pattern=\"\"\n  local pid=\"\"\n  local signal=\"\"\n  local deadline=0\n  local pids=()\n  local live_pids=()\n  declare -A 
seen_pids=()\n\n  command -v pgrep >/dev/null 2>&1 || return 0\n\n  for pattern in \"${patterns[@]}\"; do\n    while IFS= read -r pid; do\n      [[ -n \"${pid}\" ]] || continue\n      [[ -n \"${seen_pids[${pid}]:-}\" ]] && continue\n      seen_pids[\"${pid}\"]=1\n      pids+=(\"${pid}\")\n    done < <(pgrep -f -- \"${pattern}\" || true)\n  done\n\n  if ((${#pids[@]} == 0)); then\n    return 0\n  fi\n\n  for pid in \"${pids[@]}\"; do\n    info \"${context}: found residual Gemini2 host process ${pid}: $(ps -p \"${pid}\" -o args= 2>/dev/null | sed 's/^[[:space:]]*//' || true)\"\n  done\n\n  for signal in INT TERM KILL; do\n    live_pids=()\n    mapfile -t live_pids < <(collect_live_pids \"${pids[@]}\")\n    ((${#live_pids[@]} == 0)) && return 0\n\n    info \"${context}: sending SIG${signal} to Gemini2 host processes: ${live_pids[*]}\"\n    kill \"-${signal}\" \"${live_pids[@]}\" 2>/dev/null || true\n\n    deadline=$((SECONDS + GEMINI2_SIGNAL_TIMEOUT_SECONDS))\n    while ((SECONDS < deadline)); do\n      mapfile -t live_pids < <(collect_live_pids \"${pids[@]}\")\n      ((${#live_pids[@]} == 0)) && return 0\n      sleep 1\n    done\n  done\n\n  mapfile -t live_pids < <(collect_live_pids \"${pids[@]}\")\n  if ((${#live_pids[@]} != 0)); then\n    warn \"${context}: Gemini2 host processes are still alive after SIGKILL: ${live_pids[*]}\"\n    return 1\n  fi\n\n  return 0\n}\n\ngemini2_refresh_udev() {\n  local interactive_sudo=\"${1:-1}\"\n\n  if ! command -v udevadm >/dev/null 2>&1; then\n    warn \"udevadm is not available; skipping Gemini2 udev refresh.\"\n    return 1\n  fi\n\n  info \"Refreshing udev rules for Gemini2.\"\n  if (( interactive_sudo )); then\n    run_sudo udevadm control --reload-rules\n    run_sudo udevadm trigger\n  else\n    if ! 
run_sudo_noninteractive udevadm control --reload-rules; then\n      warn \"Skipping Gemini2 udev refresh because passwordless sudo is not available.\"\n      return 1\n    fi\n    run_sudo_noninteractive udevadm trigger || return 1\n  fi\n\n  return 0\n}\n\nwrite_sysfs_value_with_sudo() {\n  local file_path=\"$1\"\n  local value=\"$2\"\n  local interactive_sudo=\"${3:-1}\"\n\n  if (( interactive_sudo )); then\n    run_sudo bash -lc \"printf '%s' '${value}' > '${file_path}'\"\n  else\n    if ! run_sudo_noninteractive bash -lc \"printf '%s' '${value}' > '${file_path}'\"; then\n      warn \"Skipping Gemini2 sysfs write to ${file_path} because passwordless sudo is not available.\"\n      return 1\n    fi\n  fi\n}\n\nrebind_gemini2_usb_devices() {\n  local interactive_sudo=\"${1:-1}\"\n  local device_dir=\"\"\n  local device_name=\"\"\n  local found_device=0\n\n  while IFS= read -r device_dir; do\n    [[ -n \"${device_dir}\" ]] || continue\n    found_device=1\n    device_name=\"$(basename \"${device_dir}\")\"\n    info \"Rebinding Gemini2 USB device ${device_name}.\"\n    write_sysfs_value_with_sudo \"/sys/bus/usb/drivers/usb/unbind\" \"${device_name}\" \"${interactive_sudo}\" || return 1\n    sleep 1\n    write_sysfs_value_with_sudo \"/sys/bus/usb/drivers/usb/bind\" \"${device_name}\" \"${interactive_sudo}\" || return 1\n  done < <(gemini2_usb_device_dirs)\n\n  (( found_device )) || return 1\n  return 0\n}\n\nrecover_gemini2_device() {\n  local context=\"${1:-Gemini2 recovery}\"\n  local cleanup_processes=\"${2:-1}\"\n  local allow_usb_rebind=\"${3:-1}\"\n  local interactive_sudo=\"${4:-1}\"\n\n  log_gemini2_device_state \"Gemini2 device state before ${context}\"\n  if [[ \"$(gemini2_device_state)\" == \"ready\" ]]; then\n    return 0\n  fi\n\n  if [[ \"$(gemini2_device_state)\" == \"usb_missing\" ]]; then\n    return 1\n  fi\n\n  if (( cleanup_processes )); then\n    cleanup_residual_gemini2_processes \"${context}\" || true\n  fi\n\n  if gemini2_refresh_udev 
\"${interactive_sudo}\"; then\n    if wait_for_gemini2_ready \"${GEMINI2_READY_TIMEOUT_SECONDS}\"; then\n      info \"Gemini2 recovery succeeded after udev refresh (${context}).\"\n      log_gemini2_device_state \"Gemini2 device state after ${context}\"\n      return 0\n    fi\n  fi\n\n  if (( allow_usb_rebind )) && gemini2_usb_present; then\n    if rebind_gemini2_usb_devices \"${interactive_sudo}\"; then\n      if wait_for_gemini2_ready \"${GEMINI2_READY_TIMEOUT_SECONDS}\"; then\n        info \"Gemini2 recovery succeeded after USB rebind (${context}).\"\n        log_gemini2_device_state \"Gemini2 device state after ${context}\"\n        return 0\n      fi\n    fi\n  fi\n\n  log_gemini2_device_state \"Gemini2 device state after ${context}\"\n  return 1\n}\n\nrecover_gemini2_after_host_camera_failure() {\n  local context=\"${1:-host camera failure}\"\n  local initial_state=\"${2:-}\"\n  local current_state=\"\"\n\n  current_state=\"$(gemini2_device_state)\"\n  if [[ \"${initial_state}\" != \"ready\" || \"${current_state}\" != \"usb_present_no_video\" ]]; then\n    return 1\n  fi\n\n  warn \"Gemini2 lost its /dev/video nodes during ${context}. 
Attempting one full recovery.\"\n  if recover_gemini2_device \"${context}\" 0 1 1; then\n    info \"Gemini2 full recovery succeeded after ${context}.\"\n    return 0\n  fi\n\n  warn \"Gemini2 full recovery did not restore /dev/video nodes after ${context}.\"\n  return 1\n}\n\nlog_host_camera_failure_diagnostics() {\n  local log_path=\"$1\"\n  local readiness_output=\"${2:-}\"\n  local context=\"${3:-Host camera failure}\"\n  local line=\"\"\n  local speed_mbps=\"\"\n\n  warn \"${context}: Gemini2 device state is $(gemini2_device_state).\"\n  speed_mbps=\"$(gemini2_usb_link_speed_mbps)\"\n  if [[ -n \"${speed_mbps}\" ]]; then\n    warn \"${context}: Gemini2 USB link speed is ${speed_mbps} Mbps.\"\n  fi\n  log_gemini2_video_nodes_snapshot \"${context} /dev/video snapshot\"\n\n  if [[ -n \"${readiness_output}\" ]]; then\n    while IFS= read -r line; do\n      [[ -n \"${line}\" ]] || continue\n      warn \"${context}: ${line}\"\n    done <<< \"${readiness_output}\"\n  else\n    warn \"${context}: readiness probe produced no additional output.\"\n  fi\n\n  if [[ -f \"${log_path}\" ]]; then\n    warn \"${context}: host camera log tail (${log_path})\"\n    while IFS= read -r line; do\n      [[ -n \"${line}\" ]] || continue\n      warn \"[host-camera-log] ${line}\"\n    done < <(tail -n \"${HOST_CAMERA_LOG_TAIL_LINES}\" \"${log_path}\" 2>/dev/null || true)\n  else\n    warn \"${context}: host camera log is missing at ${log_path}.\"\n  fi\n}\n\nassert_supported_platform() {\n  local arch=\"\"\n  local model=\"\"\n  local jetpack_version=\"\"\n  local jetpack_major=\"\"\n\n  arch=\"$(dpkg --print-architecture 2>/dev/null || uname -m)\"\n  if [[ \"${arch}\" != \"arm64\" && \"${arch}\" != \"aarch64\" ]]; then\n    die \"Unsupported architecture: ${arch}. 
This script only supports Jetson Orin arm64.\"\n  fi\n\n  [[ -f /etc/os-release ]] || die \"Cannot detect OS version because /etc/os-release is missing.\"\n  # shellcheck disable=SC1091\n  source /etc/os-release\n  [[ \"${ID:-}\" == \"ubuntu\" ]] || die \"Unsupported OS: ${ID:-unknown}. Ubuntu 22.04 is required.\"\n  [[ \"${VERSION_ID:-}\" == \"22.04\" ]] || die \"Unsupported Ubuntu version: ${VERSION_ID:-unknown}. Ubuntu 22.04 is required.\"\n\n  [[ -f /proc/device-tree/model ]] || die \"Cannot detect Jetson model from /proc/device-tree/model.\"\n  model=\"$(tr -d '\\0' < /proc/device-tree/model)\"\n  [[ \"${model}\" == *\"Jetson\"* ]] || die \"Unsupported Jetson model: ${model}. A Jetson Orin device is required.\"\n  [[ \"${model}\" == *\"Orin\"* ]] || die \"Unsupported Jetson model: ${model}. A Jetson Orin device is required.\"\n\n  jetpack_version=\"$(dpkg-query -W -f='${Version}' nvidia-jetpack 2>/dev/null || true)\"\n  [[ -n \"${jetpack_version}\" ]] || die \"nvidia-jetpack is not installed. JetPack 6.x is required.\"\n  if [[ \"${jetpack_version}\" =~ ^([0-9]+) ]]; then\n    jetpack_major=\"${BASH_REMATCH[1]}\"\n  else\n    die \"Unable to parse nvidia-jetpack version: ${jetpack_version}\"\n  fi\n\n  [[ \"${jetpack_major}\" == \"6\" ]] || die \"Unsupported JetPack version: ${jetpack_version}. JetPack 6.x is required.\"\n  info \"Platform OK: ${model}, Ubuntu ${VERSION_ID}, JetPack ${jetpack_version}\"\n}\n\ncheck_apt_locks() {\n  local lock_path\n  local pids\n\n  if ! command -v fuser >/dev/null 2>&1; then\n    warn \"fuser is not available; skipping apt lock inspection.\"\n    return 0\n  fi\n\n  for lock_path in /var/lib/dpkg/lock-frontend /var/lib/dpkg/lock; do\n    pids=\"$(fuser \"${lock_path}\" 2>/dev/null || true)\"\n    if [[ -n \"${pids}\" ]]; then\n      die \"apt/dpkg lock detected on ${lock_path} (pids: ${pids}). 
Resolve it before continuing.\"\n    fi\n  done\n}\n\ncheck_network_endpoints() {\n  local endpoint\n\n  assert_command curl\n  for endpoint in \"$@\"; do\n    if ! curl -fsSI --max-time 10 \"${endpoint}\" >/dev/null 2>&1; then\n      die \"Cannot reach ${endpoint}. Network access is required for prepare mode.\"\n    fi\n  done\n}\n\nwarn_on_unreachable_endpoints() {\n  local endpoint\n\n  assert_command curl\n  for endpoint in \"$@\"; do\n    if curl -fsSI --max-time 10 \"${endpoint}\" >/dev/null 2>&1; then\n      info \"Network probe OK: ${endpoint}\"\n    else\n      warn \"Network probe failed for ${endpoint}. Continuing; the real install steps will fail later if access is actually required.\"\n    fi\n  done\n}\n\nensure_docker_access() {\n  if docker info >/dev/null 2>&1; then\n    DOCKER_PREFIX=()\n    return 0\n  fi\n\n  if sudo docker info >/dev/null 2>&1; then\n    DOCKER_PREFIX=(sudo)\n    return 0\n  fi\n\n  die \"Cannot access the Docker daemon with docker or sudo docker.\"\n}\n\ndocker_cmd() {\n  if ((${#DOCKER_PREFIX[@]})); then\n    \"${DOCKER_PREFIX[@]}\" docker \"$@\"\n  else\n    docker \"$@\"\n  fi\n}\n\nappend_jetson_container_args() {\n  local -n jetson_docker_args_ref=\"$1\"\n\n  jetson_docker_args_ref+=(\n    --runtime=nvidia\n    --privileged\n    --network host\n    --ipc host\n    --pid host\n    --ulimit memlock=-1\n    --ulimit stack=67108864\n    -e \"NVIDIA_VISIBLE_DEVICES=nvidia.com/gpu=all,nvidia.com/pva=all\"\n    -e \"NVIDIA_DRIVER_CAPABILITIES=all\"\n    -e \"ISAAC_ROS_WS=/workspaces/isaac_ros-dev\"\n    -v /etc/localtime:/etc/localtime:ro\n    -v /tmp:/tmp\n  )\n\n  if [[ -f /usr/bin/tegrastats ]]; then\n    jetson_docker_args_ref+=(-v /usr/bin/tegrastats:/usr/bin/tegrastats)\n  fi\n  if [[ -d /usr/lib/aarch64-linux-gnu/tegra ]]; then\n    jetson_docker_args_ref+=(-v /usr/lib/aarch64-linux-gnu/tegra:/usr/lib/aarch64-linux-gnu/tegra)\n  fi\n  if [[ -d /usr/src/jetson_multimedia_api ]]; then\n    jetson_docker_args_ref+=(-v 
/usr/src/jetson_multimedia_api:/usr/src/jetson_multimedia_api)\n  fi\n  if [[ -d /usr/share/vpi3 ]]; then\n    jetson_docker_args_ref+=(-v /usr/share/vpi3:/usr/share/vpi3)\n  fi\n  if [[ -d /dev/input ]]; then\n    jetson_docker_args_ref+=(-v /dev/input:/dev/input)\n  fi\n  if getent group jtop >/dev/null 2>&1 && [[ -S /run/jtop.sock ]]; then\n    jetson_docker_args_ref+=(-v /run/jtop.sock:/run/jtop.sock:ro)\n  fi\n}\n\nresolve_ros_discovery_env_value() {\n  local var_name=\"$1\"\n  local value=\"\"\n\n  case \"${var_name}\" in\n    RMW_IMPLEMENTATION)\n      value=\"${RMW_IMPLEMENTATION:-}\"\n      if [[ -z \"${value}\" ]]; then\n        value=\"rmw_fastrtps_cpp\"\n      fi\n      ;;\n    *)\n      value=\"${!var_name-}\"\n      ;;\n  esac\n\n  printf '%s\\n' \"${value}\"\n}\n\nexport_effective_ros_discovery_env() {\n  local var_name=\"\"\n  local value=\"\"\n\n  for var_name in \"${ROS_DISCOVERY_ENV_VARS[@]}\"; do\n    value=\"$(resolve_ros_discovery_env_value \"${var_name}\")\"\n    if [[ -n \"${value}\" ]]; then\n      export \"${var_name}=${value}\"\n    else\n      unset \"${var_name}\" || true\n    fi\n  done\n}\n\nros_discovery_env_summary() {\n  local parts=()\n  local var_name=\"\"\n  local value=\"\"\n  local old_ifs=\"${IFS}\"\n\n  for var_name in \"${ROS_DISCOVERY_ENV_VARS[@]}\"; do\n    value=\"$(resolve_ros_discovery_env_value \"${var_name}\")\"\n    if [[ -n \"${value}\" ]]; then\n      parts+=(\"${var_name}=${value}\")\n    else\n      parts+=(\"${var_name}=<unset>\")\n    fi\n  done\n\n  IFS=', '\n  printf '%s\\n' \"${parts[*]}\"\n  IFS=\"${old_ifs}\"\n}\n\nlog_ros_discovery_env() {\n  local prefix=\"${1:-ROS discovery env}\"\n  info \"${prefix}: $(ros_discovery_env_summary)\"\n}\n\nemit_ros_discovery_env_shell_exports() {\n  local var_name=\"\"\n  local value=\"\"\n\n  for var_name in \"${ROS_DISCOVERY_ENV_VARS[@]}\"; do\n    value=\"$(resolve_ros_discovery_env_value \"${var_name}\")\"\n    if [[ -n \"${value}\" ]]; then\n      printf 'export 
%s=%q\\n' \"${var_name}\" \"${value}\"\n    else\n      printf 'unset %s\\n' \"${var_name}\"\n    fi\n  done\n}\n\nmanaged_fastdds_profile_path() {\n  local managed_root=\"$1\"\n  printf '%s/%s/%s\\n' \"${managed_root}\" \"${FASTDDS_RUNTIME_DIR_RELATIVE}\" \"${FASTDDS_UDP_ONLY_PROFILE_FILENAME}\"\n}\n\nwrite_managed_fastdds_udp_profile() {\n  local managed_root=\"$1\"\n  local profile_path=\"\"\n  local profile_dir=\"\"\n\n  profile_path=\"$(managed_fastdds_profile_path \"${managed_root}\")\"\n  profile_dir=\"$(dirname \"${profile_path}\")\"\n  mkdir -p \"${profile_dir}\"\n\n  cat > \"${profile_path}\" <<'EOF'\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<dds xmlns=\"http://www.eprosima.com/XMLSchemas/fastRTPS_Profiles\">\n  <profiles>\n    <transport_descriptors>\n      <transport_descriptor>\n        <transport_id>udp_transport</transport_id>\n        <type>UDPv4</type>\n      </transport_descriptor>\n    </transport_descriptors>\n    <participant profile_name=\"udp_only_participant\" is_default_profile=\"true\">\n      <rtps>\n        <useBuiltinTransports>false</useBuiltinTransports>\n        <userTransports>\n          <transport_id>udp_transport</transport_id>\n        </userTransports>\n      </rtps>\n    </participant>\n  </profiles>\n</dds>\nEOF\n\n  printf '%s\\n' \"${profile_path}\"\n}\n\nenable_managed_fastdds_udp_runtime() {\n  local managed_root=\"$1\"\n  local profile_path=\"\"\n\n  profile_path=\"$(write_managed_fastdds_udp_profile \"${managed_root}\")\"\n  export FASTDDS_DEFAULT_PROFILES_FILE=\"${profile_path}\"\n  export FASTRTPS_DEFAULT_PROFILES_FILE=\"${profile_path}\"\n  info \"Managed Fast DDS UDP-only profile: ${profile_path}\"\n}\n\nappend_ros_discovery_env_args() {\n  local -n ros_discovery_env_args_ref=\"$1\"\n  local var_name=\"\"\n  local value=\"\"\n\n  for var_name in \"${ROS_DISCOVERY_ENV_VARS[@]}\"; do\n    value=\"$(resolve_ros_discovery_env_value \"${var_name}\")\"\n    if [[ -n \"${value}\" ]]; then\n      
ros_discovery_env_args_ref+=(-e \"${var_name}=${value}\")\n    fi\n  done\n}\n\nresolve_ros_discovery_mount_source() {\n  local var_name=\"$1\"\n  local value=\"\"\n\n  value=\"$(resolve_ros_discovery_env_value \"${var_name}\")\"\n  [[ -n \"${value}\" ]] || return 1\n\n  case \"${var_name}\" in\n    CYCLONEDDS_URI)\n      if [[ \"${value}\" == file://* ]]; then\n        value=\"${value#file://}\"\n      fi\n      ;;\n  esac\n\n  [[ \"${value}\" = /* ]] || return 1\n  [[ -e \"${value}\" ]] || return 1\n  printf '%s\\n' \"${value}\"\n}\n\nappend_ros_discovery_mount_args() {\n  local -n ros_discovery_mount_args_ref=\"$1\"\n  local var_name=\"\"\n  local mount_source=\"\"\n  local mount_mode=\"ro\"\n  declare -A seen_mounts=()\n\n  for var_name in \"${ROS_DISCOVERY_PATH_ENV_VARS[@]}\"; do\n    mount_source=\"$(resolve_ros_discovery_mount_source \"${var_name}\" || true)\"\n    [[ -n \"${mount_source}\" ]] || continue\n    [[ -z \"${seen_mounts[${mount_source}]:-}\" ]] || continue\n    seen_mounts[\"${mount_source}\"]=1\n\n    if [[ -d \"${mount_source}\" && \"${var_name}\" == \"CYCLONEDDS_HOME\" ]]; then\n      mount_mode=\"rw\"\n    else\n      mount_mode=\"ro\"\n    fi\n\n    ros_discovery_mount_args_ref+=(-v \"${mount_source}:${mount_source}:${mount_mode}\")\n  done\n}\n\nappend_ros_discovery_container_args() {\n  local docker_args_name=\"$1\"\n  append_ros_discovery_env_args \"${docker_args_name}\"\n  append_ros_discovery_mount_args \"${docker_args_name}\"\n}\n\nvalidate_package_install_artifacts() {\n  local workspace_root=\"$1\"\n  local package_name=\"$2\"\n  shift 2\n  local required_paths=(\"$@\")\n  local required_artifact_list=\"\"\n  local validate_cmd=\"\"\n  local validate_args=(\n    run\n    --rm\n    -e \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\"\n    -e \"PACKAGE_NAME=${package_name}\"\n    -v \"${workspace_root}:/workspaces/isaac_ros-dev\"\n  )\n\n  ((${#required_paths[@]} != 0)) || return 0\n  required_artifact_list=\"$(printf '%s\\n' 
\"${required_paths[@]}\")\"\n  append_jetson_container_args validate_args\n  append_ros_discovery_container_args validate_args\n  validate_args+=(-e \"REQUIRED_ARTIFACT_LIST=${required_artifact_list}\")\n\n  validate_cmd=$(\n    cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nif (( restore_nounset )); then\n  set -u\nfi\nPACKAGE_PREFIX=\"$(ros2 pkg prefix \"${PACKAGE_NAME}\" 2>/dev/null || true)\"\n[[ -n \"${PACKAGE_PREFIX}\" ]]\nINSTALL_ROOT=\"${PACKAGE_PREFIX}/share/${PACKAGE_NAME}\"\n\nwhile IFS= read -r relative_path; do\n  [[ -n \"${relative_path}\" ]] || continue\n  [[ -f \"${INSTALL_ROOT}/${relative_path}\" ]] || {\n    printf '%s\\n' \"${INSTALL_ROOT}/${relative_path}\" >&2\n    exit 10\n  }\ndone <<< \"${REQUIRED_ARTIFACT_LIST}\"\nEOF\n  )\n\n  docker_cmd \"${validate_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc \"${validate_cmd}\"\n}\n\nselect_base_image() {\n  local candidate=\"\"\n\n  if docker_cmd image inspect \"${BASE_IMAGE_PREFERRED}\" >/dev/null 2>&1; then\n    printf '%s\\n' \"${BASE_IMAGE_PREFERRED}\"\n    return 0\n  fi\n\n  candidate=\"$(docker_cmd image ls --format '{{.Repository}}:{{.Tag}}' | grep -E '^nvcr\\.io/nvidia/isaac/ros:.*aarch64-ros2_humble' | head -n 1 || true)\"\n  if [[ -n \"${candidate}\" ]]; then\n    printf '%s\\n' \"${candidate}\"\n    return 0\n  fi\n\n  return 1\n}\n\nacceptable_base_image_hint() {\n  printf '%s\\n' \"${BASE_IMAGE_PREFERRED} or nvcr.io/nvidia/isaac/ros:*aarch64-ros2_humble*\"\n}\n\ndocker_image_id() {\n  local image_ref=\"$1\"\n  docker_cmd image inspect --format '{{.Id}}' \"${image_ref}\"\n}\n\ncompute_tree_hash() {\n  local combined=\"\"\n  local file_path\n\n  assert_command sha256sum\n\n  for file_path in \"$@\"; do\n    [[ -f \"${file_path}\" ]] || die \"Cannot hash missing file: ${file_path}\"\n    combined+=$(sha256sum 
\"${file_path}\")\n  done\n\n  printf '%s' \"${combined}\" | sha256sum | awk '{print $1}'\n}\n\ncontainer_image_context_hash() {\n  compute_tree_hash \\\n    \"${PROJECT_ROOT}/docker/Dockerfile.nvblox_orbbec\" \\\n    \"${PROJECT_ROOT}/docker/prepare_container_workspace.sh\" \\\n    \"${PROJECT_ROOT}/docker/launch_nvblox.sh\"\n}\n\nsource_ros_setup() {\n  local workspace_root=\"${1:-}\"\n  local restore_nounset=0\n\n  if [[ $- == *u* ]]; then\n    restore_nounset=1\n    set +u\n  fi\n\n  # shellcheck disable=SC1091\n  source \"/opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash\"\n  if [[ -n \"${workspace_root}\" && -f \"${workspace_root}/install/setup.bash\" ]]; then\n    # shellcheck disable=SC1090\n    source \"${workspace_root}/install/setup.bash\"\n  fi\n\n  if (( restore_nounset )); then\n    set -u\n  fi\n}\n"
  },
  {
    "path": "reComputer/scripts/nvblox/onedrive_downloader.py",
    "content": "#!/usr/bin/env python3\n\"\"\"Download public OneDrive/SharePoint share links with resume support.\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\nfrom urllib.parse import parse_qsl, unquote, urlencode, urlparse, urlunparse\n\nimport requests\nfrom tqdm import tqdm\n\n\nCHUNK_SIZE = 65536\nMIN_VALID_SIZE = 1024 * 1024\nPROBE_CHUNK_SIZE = 4096\nREQUEST_TIMEOUT = (15, 600)\nDEFAULT_SHARE_URL = (\n    \"https://seeedstudio88-my.sharepoint.com/:u:/g/personal/\"\n    \"youjiang_yu_seeedstudio88_onmicrosoft_com/\"\n    \"IQCCDToomY6WSaRZdfsTs9vXAengb-SCEvNfSUgq0cipP6w?e=z9axor\"\n)\nDEFAULT_FILENAME = \"nvblox_images.tar\"\nDEFAULT_OUTPUT_DIR = Path.home() / \".cache\" / \"jetson-examples\" / \"nvblox\"\nSUPPORTED_DOMAINS = (\"sharepoint.com\", \"sharepoint.cn\")\nSHARE_LINK_RE = re.compile(r\"^/:[a-z]:/\", re.IGNORECASE)\nTEXT_ERROR_MARKERS = (\n    \"forbidden\",\n    \"access denied\",\n    \"sign in\",\n    \"login\",\n    \"not found\",\n    \"permission\",\n)\n\n\nclass DownloadError(Exception):\n    \"\"\"Raised when the download cannot proceed safely.\"\"\"\n\n\ndef parse_args() -> argparse.Namespace:\n    parser = argparse.ArgumentParser(\n        description=\"Download a public Microsoft 365 OneDrive/SharePoint share link.\"\n    )\n    parser.add_argument(\n        \"share_url\",\n        nargs=\"?\",\n        default=DEFAULT_SHARE_URL,\n        help=\"Public sharepoint.com/sharepoint.cn share link\",\n    )\n    parser.add_argument(\n        \"legacy_filename\",\n        nargs=\"?\",\n        help=\"Legacy positional filename override\",\n    )\n    parser.add_argument(\n        \"--output-dir\",\n        \"--download-dir\",\n        dest=\"output_dir\",\n        type=Path,\n        default=DEFAULT_OUTPUT_DIR,\n        help=f\"Directory to save the file (default: {DEFAULT_OUTPUT_DIR})\",\n    )\n    parser.add_argument(\n        \"--filename\",\n        help=\"Override the 
detected filename. Only the final path component is used.\",\n    )\n    parser.add_argument(\n        \"--force\",\n        action=\"store_true\",\n        help=\"Redownload even if the target file already exists.\",\n    )\n    parser.add_argument(\n        \"--aria2c\",\n        action=\"store_true\",\n        help=\"Print an aria2c command for the resolved direct download URL\",\n    )\n    return parser.parse_args()\n\n\ndef is_supported_host(hostname: str) -> bool:\n    hostname = hostname.lower()\n    return any(\n        hostname == domain or hostname.endswith(f\".{domain}\")\n        for domain in SUPPORTED_DOMAINS\n    )\n\n\ndef sanitize_filename(value: str | None) -> str | None:\n    if not value:\n        return None\n    candidate = value.strip().strip(\"\\\"'\")\n    if not candidate:\n        return None\n    candidate = candidate.replace(\"\\\\\", \"/\")\n    candidate = Path(candidate).name\n    if candidate in {\"\", \".\", \"..\"}:\n        return None\n    return candidate\n\n\ndef validate_source_url(raw_url: str) -> str:\n    url = raw_url.strip()\n    if not url:\n        raise DownloadError(\"share_url is required.\")\n\n    parsed = urlparse(url)\n    if parsed.scheme not in {\"http\", \"https\"}:\n        raise DownloadError(\"URL must start with http:// or https://.\")\n\n    hostname = parsed.hostname or \"\"\n    if not is_supported_host(hostname):\n        raise DownloadError(\n            \"Only public sharepoint.com/sharepoint.cn links are supported in v1.\"\n        )\n\n    lower_path = (parsed.path or \"\").lower()\n    if \"/_layouts/15/onedrive.aspx\" in lower_path:\n        raise DownloadError(\n            \"Unsupported page-style OneDrive URL. 
Use a public share link instead of \"\n            \"a /_layouts/15/onedrive.aspx page or a login-protected page.\"\n        )\n\n    if not parsed.path:\n        raise DownloadError(\"URL path is empty.\")\n\n    return url\n\n\ndef needs_download_flag(parsed_url) -> bool:\n    return bool(SHARE_LINK_RE.match(parsed_url.path or \"\"))\n\n\ndef with_download_flag(url: str) -> str:\n    parsed = urlparse(url)\n    if not needs_download_flag(parsed):\n        return url\n\n    query_items = [\n        (key, value)\n        for key, value in parse_qsl(parsed.query, keep_blank_values=True)\n        if key.lower() != \"download\"\n    ]\n    query_items.append((\"download\", \"1\"))\n    return urlunparse(parsed._replace(query=urlencode(query_items, doseq=True)))\n\n\ndef looks_like_landing_page(content_type: str, first_chunk: bytes) -> bool:\n    content_type = (content_type or \"\").lower()\n    first = (first_chunk or b\"\").lstrip()\n    first_lower = first.lower()\n\n    if \"text/html\" in content_type or \"application/xhtml\" in content_type:\n        return True\n\n    if first_lower.startswith(b\"<!doctype html\") or first_lower.startswith(b\"<html\"):\n        return True\n\n    if content_type.startswith(\"text/plain\"):\n        snippet = first[:512].decode(\"utf-8\", errors=\"ignore\").lower()\n        if any(marker in snippet for marker in TEXT_ERROR_MARKERS):\n            return True\n\n    return False\n\n\ndef filename_from_content_disposition(header_value: str | None) -> str | None:\n    if not header_value:\n        return None\n\n    match = re.search(\n        r\"filename\\*\\s*=\\s*(?:[A-Za-z0-9!#$&+\\-.^_`|~]+'[^']*')?([^;]+)\",\n        header_value,\n        flags=re.IGNORECASE,\n    )\n    if match:\n        return sanitize_filename(unquote(match.group(1).strip().strip(\"\\\"'\")))\n\n    match = re.search(r'filename\\s*=\\s*\"([^\"]+)\"', header_value, flags=re.IGNORECASE)\n    if match:\n        return sanitize_filename(match.group(1))\n\n    
match = re.search(r\"filename\\s*=\\s*([^;]+)\", header_value, flags=re.IGNORECASE)\n    if match:\n        return sanitize_filename(match.group(1))\n\n    return None\n\n\ndef filename_from_url(url: str) -> str | None:\n    parsed = urlparse(url)\n    return sanitize_filename(unquote(Path(parsed.path or \"\").name))\n\n\ndef probe_remote_target(url: str, filename_override: str | None) -> tuple[str, str]:\n    headers = {\"Range\": \"bytes=0-0\"}\n    try:\n        response = requests.get(\n            url,\n            stream=True,\n            timeout=REQUEST_TIMEOUT,\n            allow_redirects=True,\n            headers=headers,\n        )\n    except requests.RequestException as exc:\n        raise DownloadError(f\"Failed to resolve the download target: {exc}\") from exc\n\n    try:\n        response.raise_for_status()\n        first_chunk = next(response.iter_content(chunk_size=PROBE_CHUNK_SIZE), b\"\")\n        if looks_like_landing_page(response.headers.get(\"content-type\", \"\"), first_chunk):\n            raise DownloadError(\n                \"The link resolved to an HTML/text page instead of a downloadable file.\"\n            )\n\n        filename = (\n            sanitize_filename(filename_override)\n            or filename_from_content_disposition(\n                response.headers.get(\"content-disposition\")\n            )\n            or filename_from_url(response.url)\n        )\n        if not filename:\n            raise DownloadError(\n                \"Could not infer a filename from the response. 
Pass --filename.\"\n            )\n\n        return response.url, filename\n    except requests.RequestException as exc:\n        raise DownloadError(f\"Failed to resolve the download target: {exc}\") from exc\n    finally:\n        response.close()\n\n\ndef prepare_target_paths(\n    output_dir: Path, filename: str, force: bool\n) -> tuple[Path, Path, bool]:\n    output_dir.mkdir(parents=True, exist_ok=True)\n\n    filepath = output_dir / filename\n    tmp_path = filepath.with_suffix(filepath.suffix + \".part\")\n\n    if force:\n        if filepath.exists():\n            print(f\"Removing cached file: {filepath}\")\n            filepath.unlink()\n        if tmp_path.exists():\n            print(f\"Removing partial download: {tmp_path}\")\n            tmp_path.unlink()\n        return filepath, tmp_path, False\n\n    if filepath.exists():\n        size = filepath.stat().st_size\n        if size > MIN_VALID_SIZE:\n            print(f\"File already exists: {filepath}\")\n            return filepath, tmp_path, True\n        print(\n            f\"Existing file is too small ({size} bytes), redownloading: {filepath}\"\n        )\n        filepath.unlink()\n        if tmp_path.exists():\n            tmp_path.unlink()\n\n    return filepath, tmp_path, False\n\n\ndef progress_stream():\n    try:\n        return open(\"/dev/tty\", \"w\", encoding=\"utf-8\", buffering=1)\n    except OSError:\n        return sys.stdout\n\n\ndef download_file(url: str, filepath: Path, filename: str) -> None:\n    tmp_path = filepath.with_suffix(filepath.suffix + \".part\")\n\n    while True:\n        resume_pos = tmp_path.stat().st_size if tmp_path.exists() else 0\n        headers = {}\n        if resume_pos > 0:\n            headers[\"Range\"] = f\"bytes={resume_pos}-\"\n            print(f\"Resuming download from byte {resume_pos}\")\n\n        try:\n            response = requests.get(\n                url,\n                stream=True,\n                timeout=REQUEST_TIMEOUT,\n           
     allow_redirects=True,\n                headers=headers,\n            )\n        except requests.RequestException as exc:\n            raise DownloadError(f\"Failed to start download: {exc}\") from exc\n\n        try:\n            if resume_pos > 0 and response.status_code == 200:\n                print(\"Server ignored the resume request, restarting from byte 0.\")\n                response.close()\n                tmp_path.unlink(missing_ok=True)\n                continue\n\n            response.raise_for_status()\n\n            total_size = int(response.headers.get(\"content-length\", 0) or 0)\n            if total_size and resume_pos:\n                total_size += resume_pos\n\n            chunks = response.iter_content(chunk_size=CHUNK_SIZE)\n            first_chunk = next((chunk for chunk in chunks if chunk), b\"\")\n            if not first_chunk:\n                raise DownloadError(\"Downloaded content is empty.\")\n\n            if resume_pos == 0 and looks_like_landing_page(\n                response.headers.get(\"content-type\", \"\"), first_chunk\n            ):\n                raise DownloadError(\n                    \"The link resolved to an HTML/text page instead of a downloadable file.\"\n                )\n\n            written = resume_pos + len(first_chunk)\n            mode = \"ab\" if resume_pos > 0 else \"wb\"\n\n            progress_file = progress_stream()\n            progress_bar = tqdm(\n                desc=filename,\n                initial=resume_pos,\n                total=total_size if total_size > 0 else None,\n                unit=\"B\",\n                unit_scale=True,\n                unit_divisor=1024,\n                file=progress_file,\n                dynamic_ncols=True,\n                ascii=True,\n                leave=False,\n                mininterval=0.2,\n                smoothing=0.1,\n            )\n            try:\n                with open(tmp_path, mode) as handle:\n                    
handle.write(first_chunk)\n                    progress_bar.update(len(first_chunk))\n\n                    for chunk in chunks:\n                        if not chunk:\n                            continue\n                        handle.write(chunk)\n                        written += len(chunk)\n                        progress_bar.update(len(chunk))\n            finally:\n                progress_bar.close()\n                if progress_file not in (sys.stdout, sys.stderr):\n                    progress_file.write(\"\\n\")\n                    progress_file.close()\n\n            if written < MIN_VALID_SIZE:\n                tmp_path.unlink(missing_ok=True)\n                raise DownloadError(\n                    f\"Downloaded file is unexpectedly small: {written} bytes.\"\n                )\n\n            tmp_path.replace(filepath)\n            return\n        except requests.RequestException as exc:\n            raise DownloadError(\n                f\"Download interrupted by a network/protocol error: {exc}\"\n            ) from exc\n        finally:\n            response.close()\n\n\ndef main() -> int:\n    args = parse_args()\n\n    try:\n        validated_url = validate_source_url(args.share_url)\n        normalized_url = with_download_flag(validated_url)\n        output_dir = args.output_dir.expanduser()\n\n        filename_override = sanitize_filename(args.filename or args.legacy_filename)\n        if (args.filename or args.legacy_filename) and not filename_override:\n            raise DownloadError(\"Invalid filename value.\")\n\n        print(f\"Resolving download target: {normalized_url}\")\n        resolved_url, detected_filename = probe_remote_target(normalized_url, filename_override)\n        filename = filename_override or detected_filename or DEFAULT_FILENAME\n\n        filepath, _tmp_path, already_exists = prepare_target_paths(\n            output_dir, filename, args.force\n        )\n        if already_exists:\n            return 0\n\n        
if resolved_url != normalized_url:\n            print(f\"Resolved file URL: {resolved_url}\")\n\n        print(f\"Download URL: {normalized_url}\")\n        print(f\"Saving to: {filepath}\")\n\n        if args.aria2c:\n            print(f\"aria2c '{resolved_url}' -d '{output_dir}' -o '{filename}'\")\n            return 0\n\n        download_file(normalized_url, filepath, filename)\n        print(f\"Download complete: {filepath}\")\n        return 0\n    except DownloadError as exc:\n        print(f\"Error: {exc}\")\n        return 1\n    except OSError as exc:\n        print(f\"Error: {exc}\")\n        return 1\n\n\nif __name__ == \"__main__\":\n    raise SystemExit(main())\n"
  },
  {
    "path": "reComputer/scripts/nvblox/run.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nMODE=\"${NVBLOX_MODE:-all}\"\nSTART_ARGS=()\n\nis_truthy() {\n  case \"${1:-}\" in\n    1|true|TRUE|True|yes|YES|Yes|on|ON|On)\n      return 0\n      ;;\n    *)\n      return 1\n      ;;\n  esac\n}\n\ncase \"${MODE}\" in\n  \"\"|all)\n    ;;\n  prepare|prepare-only)\n    START_ARGS+=(--prepare-only)\n    ;;\n  run|run-only)\n    START_ARGS+=(--run-only)\n    ;;\n  *)\n    echo \"Invalid NVBLOX_MODE='${MODE}'. Use all, prepare, or run.\" >&2\n    exit 1\n    ;;\nesac\n\nif is_truthy \"${NVBLOX_FORCE_REBUILD:-0}\"; then\n  START_ARGS+=(--force-rebuild)\nfi\n\nif is_truthy \"${NVBLOX_HEADLESS:-0}\"; then\n  START_ARGS+=(--headless)\nfi\n\nbash \"${SCRIPT_DIR}/start_nvblox_demo.sh\" \"${START_ARGS[@]}\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/../lib/common.sh\"\n\nMANAGED_ROOT=\"${MANAGED_ROOT_DEFAULT}\"\nHEADLESS=0\nUSE_GUI=0\nLAUNCH_FILE=\"orbbec_example.launch.py\"\nEXPECTED_CAMERA_INFO_FRAME=\"camera_color_optical_frame\"\nDEBUG_CAMERA_VISIBILITY_TIMEOUT_SEC=20\nDEBUG_STATIC_TF_TIMEOUT_SEC=20\nDEBUG_RUNTIME_OUTPUT_TIMEOUT_SEC=30\nCURRENT_STAGE=\"\"\nXHOST_GRANTED=0\nHOST_CAMERA_PID=\"\"\n\nwhile (($#)); do\n  case \"$1\" in\n    --managed-root)\n      shift\n      MANAGED_ROOT=\"$1\"\n      ;;\n    --headless)\n      HEADLESS=1\n      ;;\n    *)\n      die \"Unknown argument: $1\"\n      ;;\n  esac\n  shift\ndone\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  die \"Do not invoke debug_runtime_connectivity.sh with sudo directly. Run it as the setup user.\"\nfi\nrequire_bootstrapped_managed_root \"${MANAGED_ROOT}\"\nensure_docker_access\n\nHOST_WS=\"${MANAGED_ROOT}/ros2_ws\"\nCONTAINER_WS=\"${MANAGED_ROOT}/isaac_ros-dev\"\nCONTAINER_STAMP=\"${CONTAINER_WS}/.setup-nvbox/container_workspace.env\"\nIMAGE_STAMP=\"${MANAGED_ROOT}/.stamps/derived_image.env\"\nHOST_STAMP=\"${MANAGED_ROOT}/.stamps/host_workspace.env\"\nLOG_DIR=\"${MANAGED_ROOT}/logs\"\nHOST_CAMERA_LOG=\"${LOG_DIR}/host-camera-debug-$(date '+%Y%m%d-%H%M%S').log\"\n\nPREPARED_CONTAINER_REQUIRED_PACKAGE=\"nvblox_examples_bringup\"\nPREPARED_CONTAINER_REQUIRED_PATHS=(\n  \"launch/orbbec_transforms.launch.py\"\n  \"launch/orbbec_example.launch.py\"\n  \"launch/orbbec_debug.launch.py\"\n  \"launch/orbbec_nvblox_standalone.launch.py\"\n  \"config/nvblox/specializations/nvblox_orbbec_static.yaml\"\n)\n\nbegin_stage() {\n  CURRENT_STAGE=\"$1\"\n  info \"Stage ${CURRENT_STAGE}\"\n}\n\npass_stage() {\n  info \"PASS ${CURRENT_STAGE}\"\n}\n\nfail_stage() {\n  local message=\"${1:-failed}\"\n  die \"FAIL ${CURRENT_STAGE}: 
${message}\"\n}\n\nbuild_base_container_args() {\n  local docker_args_name=\"$1\"\n  local -n base_docker_args_ref=\"${docker_args_name}\"\n\n  base_docker_args_ref=(\n    run\n    --rm\n    -e \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\"\n    -v \"${CONTAINER_WS}:/workspaces/isaac_ros-dev\"\n  )\n  append_jetson_container_args \"${docker_args_name}\"\n  append_ros_discovery_container_args \"${docker_args_name}\"\n}\n\nappend_gui_container_args() {\n  local gui_docker_args_name=\"$1\"\n  local -n gui_docker_args_ref=\"${gui_docker_args_name}\"\n\n  if (( USE_GUI )); then\n    gui_docker_args_ref+=(\n      -e \"DISPLAY=${DISPLAY}\"\n      -e \"QT_X11_NO_MITSHM=1\"\n      -v /tmp/.X11-unix:/tmp/.X11-unix:rw\n    )\n  fi\n}\n\nreport_prepared_runtime_state() {\n  local current_context_hash=\"\"\n  local current_image_id=\"\"\n\n  info \"Using existing prepared artifacts only. This debug path does not rebuild the image or workspace.\"\n\n  [[ -f \"${HOST_WS}/install/setup.bash\" ]] || die \"Host workspace is missing at ${HOST_WS}.\"\n  [[ -f \"${CONTAINER_WS}/install/setup.bash\" ]] || die \"Container workspace is missing at ${CONTAINER_WS}.\"\n  [[ -f \"${CONTAINER_STAMP}\" ]] || die \"Container workspace stamp is missing at ${CONTAINER_STAMP}.\"\n  docker_cmd image inspect \"${DERIVED_IMAGE_TAG}\" >/dev/null 2>&1 || die \"Derived image ${DERIVED_IMAGE_TAG} is missing.\"\n\n  current_context_hash=\"$(container_image_context_hash)\"\n  current_image_id=\"$(docker_image_id \"${DERIVED_IMAGE_TAG}\")\"\n\n  if [[ -f \"${IMAGE_STAMP}\" ]]; then\n    # shellcheck disable=SC1090\n    source \"${IMAGE_STAMP}\"\n    info \"Prepared derived image context hash: ${STAMP_CONTEXT_HASH:-unknown}\"\n    info \"Prepared derived image stamped at: ${STAMPED_AT:-unknown}\"\n    if [[ \"${STAMP_CONTEXT_HASH:-}\" != \"${current_context_hash}\" ]]; then\n      warn \"Prepared derived image context hash differs from the current repo state. 
Continuing with the existing image for diagnosis.\"\n    fi\n  else\n    warn \"Derived image stamp is missing at ${IMAGE_STAMP}. Continuing with the existing image for diagnosis.\"\n  fi\n\n  if [[ -f \"${HOST_STAMP}\" ]]; then\n    # shellcheck disable=SC1090\n    source \"${HOST_STAMP}\"\n    info \"Prepared host Orbbec version: ${HOST_ORBBEC_VERSION:-unknown}\"\n    info \"Prepared host workspace stamped at: ${HOST_STAMPED_AT:-unknown}\"\n    if [[ -n \"${HOST_ORBBEC_VERSION:-}\" && \"${HOST_ORBBEC_VERSION:-}\" != \"${ORBBEC_VERSION}\" ]]; then\n      warn \"Prepared host workspace version differs from the current repo target (${ORBBEC_VERSION}). Continuing with the prepared host workspace for diagnosis.\"\n    fi\n  else\n    warn \"Host workspace stamp is missing at ${HOST_STAMP}. Continuing with the prepared host workspace for diagnosis.\"\n  fi\n\n  source_ros_setup \"${HOST_WS}\"\n  ros2 pkg prefix orbbec_camera >/dev/null 2>&1 || \\\n    die \"Prepared host workspace cannot resolve orbbec_camera.\"\n\n  # shellcheck disable=SC1090\n  source \"${CONTAINER_STAMP}\"\n  info \"Prepared container workspace spec: ${STAMP_WORKSPACE_SPEC_VERSION:-unknown}\"\n  info \"Prepared container workspace stamped at: ${STAMPED_AT:-unknown}\"\n  if [[ \"${STAMP_WORKSPACE_SPEC_VERSION:-}\" != \"${CONTAINER_WORKSPACE_SPEC_VERSION}\" ]]; then\n    warn \"Prepared container workspace spec differs from the current repo target (${CONTAINER_WORKSPACE_SPEC_VERSION}). Continuing with the prepared workspace for diagnosis.\"\n  fi\n  if [[ \"${STAMP_IMAGE_CONTEXT_HASH:-}\" != \"${current_context_hash}\" ]]; then\n    warn \"Prepared container workspace context hash differs from the current repo state. Continuing with the prepared workspace for diagnosis.\"\n  fi\n  if [[ \"${STAMP_IMAGE_ID:-}\" != \"${current_image_id}\" ]]; then\n    warn \"Prepared container workspace was built against a different derived image. 
Continuing with the current prepared workspace for diagnosis.\"\n  fi\n\n  if ! validate_package_install_artifacts \"${CONTAINER_WS}\" \"${PREPARED_CONTAINER_REQUIRED_PACKAGE}\" \"${PREPARED_CONTAINER_REQUIRED_PATHS[@]}\"; then\n    die \"Prepared container install artifacts are missing or invalid.\"\n  fi\n\n  info \"Validated prepared container artifacts: ${PREPARED_CONTAINER_REQUIRED_PATHS[*]}\"\n}\n\nensure_gemini2_ready_for_debug() {\n  local gemini2_state=\"\"\n\n  cleanup_residual_gemini2_processes \"pre-debug Gemini2 cleanup\" || true\n  log_gemini2_device_state \"Gemini2 device state before debug\"\n\n  gemini2_state=\"$(gemini2_device_state)\"\n  case \"${gemini2_state}\" in\n    ready)\n      return 0\n      ;;\n    usb_missing)\n      return 1\n      ;;\n    usb_present_no_video)\n      warn \"Gemini2 USB device is present, but no /dev/video nodes were found. Attempting one automatic recovery.\"\n      recover_gemini2_device \"debug preflight\" 0 1 1\n      return $?\n      ;;\n    *)\n      warn \"Unexpected Gemini2 device state during debug preflight: ${gemini2_state}\"\n      return 1\n      ;;\n  esac\n}\n\nstop_host_camera_driver() {\n  local signal=\"\"\n  local deadline=0\n\n  if [[ -n \"${HOST_CAMERA_PID}\" ]] && kill -0 \"${HOST_CAMERA_PID}\" 2>/dev/null; then\n    info \"Stopping host Gemini2 driver (pid=${HOST_CAMERA_PID}).\"\n    for signal in INT TERM KILL; do\n      kill \"-${signal}\" \"${HOST_CAMERA_PID}\" 2>/dev/null || true\n      deadline=$((SECONDS + GEMINI2_SIGNAL_TIMEOUT_SECONDS))\n      while ((SECONDS < deadline)); do\n        if ! 
kill -0 \"${HOST_CAMERA_PID}\" 2>/dev/null; then\n          break 2\n        fi\n        sleep 1\n      done\n    done\n  fi\n\n  HOST_CAMERA_PID=\"\"\n  cleanup_residual_gemini2_processes \"post-debug Gemini2 cleanup\" || true\n  log_gemini2_device_state \"Gemini2 device state after debug cleanup\"\n}\n\ncleanup() {\n  stop_host_camera_driver\n\n  if (( XHOST_GRANTED )); then\n    xhost -si:localuser:root >/dev/null 2>&1 || true\n  fi\n}\ntrap cleanup EXIT INT TERM\n\nlaunch_host_camera() {\n  local launch_cmd=\"\"\n\n  launch_cmd=$(\n    cat <<EOF\nsource /opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash\nsource \"${HOST_WS}/install/setup.bash\"\n$(emit_ros_discovery_env_shell_exports)\nexec ros2 launch orbbec_camera gemini2.launch.py publish_tf:=false tf_publish_rate:=0.0\nEOF\n  )\n\n  info \"Launching Gemini2 driver on the host.\"\n  bash -lc \"${launch_cmd}\" >>\"${HOST_CAMERA_LOG}\" 2>&1 &\n  HOST_CAMERA_PID=$!\n  info \"Host camera log: ${HOST_CAMERA_LOG}\"\n}\n\nwait_for_camera_streams_ready() {\n  local readiness_output=\"\"\n\n  source_ros_setup \"${HOST_WS}\"\n\n  readiness_output=\"$(\n    python3 - \"${EXPECTED_CAMERA_INFO_FRAME}\" <<'PY' 2>&1\nimport sys\nimport time\n\nimport rclpy\nfrom rclpy.executors import SingleThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import CameraInfo, Image\n\nexpected_frame = sys.argv[1]\ntimeout_seconds = 90.0\n\n\nclass CameraReadinessProbe(Node):\n    def __init__(self):\n        super().__init__('orbbec_host_readiness_probe')\n        self.frames = {}\n        self.received = {\n            'color_info': False,\n            'depth_info': False,\n            'color_image': False,\n            'depth_image': False,\n        }\n        self.create_subscription(\n            CameraInfo,\n            '/camera/color/camera_info',\n            self._color_info_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            
CameraInfo,\n            '/camera/depth/camera_info',\n            self._depth_info_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            Image,\n            '/camera/color/image_raw',\n            self._color_image_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            Image,\n            '/camera/depth/image_raw',\n            self._depth_image_callback,\n            qos_profile_sensor_data)\n\n    def _color_info_callback(self, msg: CameraInfo):\n        self.received['color_info'] = True\n        self.frames['color_info'] = msg.header.frame_id\n\n    def _depth_info_callback(self, msg: CameraInfo):\n        self.received['depth_info'] = True\n        self.frames['depth_info'] = msg.header.frame_id\n\n    def _color_image_callback(self, msg: Image):\n        self.received['color_image'] = True\n\n    def _depth_image_callback(self, msg: Image):\n        self.received['depth_image'] = True\n\n\ndef main():\n    rclpy.init(args=None)\n    node = CameraReadinessProbe()\n    executor = SingleThreadedExecutor()\n    executor.add_node(node)\n    deadline = time.monotonic() + timeout_seconds\n\n    try:\n        while time.monotonic() < deadline:\n            executor.spin_once(timeout_sec=0.2)\n            if all(node.received.values()):\n                break\n\n        missing = [name for name, received in node.received.items() if not received]\n        if missing:\n            print(\n                'Host stream readiness probe timed out waiting for: ' + ', '.join(missing),\n                file=sys.stderr)\n            return 1\n\n        color_frame = node.frames.get('color_info', '')\n        depth_frame = node.frames.get('depth_info', '')\n        print(f'/camera/color/camera_info frame_id={color_frame}')\n        print(f'/camera/depth/camera_info frame_id={depth_frame}')\n\n        if color_frame != expected_frame:\n            print(\n                f'Unexpected 
/camera/color/camera_info frame_id: {color_frame} '\n                f'(expected {expected_frame})',\n                file=sys.stderr)\n            return 1\n        if depth_frame != expected_frame:\n            print(\n                f'Unexpected /camera/depth/camera_info frame_id: {depth_frame} '\n                f'(expected {expected_frame})',\n                file=sys.stderr)\n            return 1\n        return 0\n    finally:\n        executor.remove_node(node)\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\n  )\" || {\n    printf '%s\\n' \"${readiness_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r readiness_line; do\n    [[ -n \"${readiness_line}\" ]] || continue\n    info \"${readiness_line}\"\n  done <<< \"${readiness_output}\"\n\n  return 0\n}\n\nprobe_container_camera_visibility() {\n  local probe_output=\"\"\n  local probe_args=()\n\n  build_base_container_args probe_args\n  probe_args+=(\n    -e \"EXPECTED_CAMERA_INFO_FRAME=${EXPECTED_CAMERA_INFO_FRAME}\"\n    -e \"PROBE_TIMEOUT_SECONDS=${DEBUG_CAMERA_VISIBILITY_TIMEOUT_SEC}\"\n  )\n\n  probe_output=\"$(\n    docker_cmd \"${probe_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc \"$(cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nif (( restore_nounset )); then\n  set -u\nfi\n\nprint_discovery_snapshot() {\n  printf '[container-probe] Container ROS discovery env: ROS_DOMAIN_ID=%s, ROS_LOCALHOST_ONLY=%s, RMW_IMPLEMENTATION=%s, ROS_AUTOMATIC_DISCOVERY_RANGE=%s, ROS_STATIC_PEERS=%s, CYCLONEDDS_URI=%s, CYCLONEDDS_HOME=%s, FASTDDS_DEFAULT_PROFILES_FILE=%s, FASTRTPS_DEFAULT_PROFILES_FILE=%s\\n' \\\n    \"${ROS_DOMAIN_ID:-<unset>}\" \\\n    \"${ROS_LOCALHOST_ONLY:-<unset>}\" \\\n    \"${RMW_IMPLEMENTATION:-<unset>}\" \\\n    \"${ROS_AUTOMATIC_DISCOVERY_RANGE:-<unset>}\" \\\n    
\"${ROS_STATIC_PEERS:-<unset>}\" \\\n    \"${CYCLONEDDS_URI:-<unset>}\" \\\n    \"${CYCLONEDDS_HOME:-<unset>}\" \\\n    \"${FASTDDS_DEFAULT_PROFILES_FILE:-<unset>}\" \\\n    \"${FASTRTPS_DEFAULT_PROFILES_FILE:-<unset>}\"\n  printf '[container-probe] ros2 topic list snapshot:\\n'\n  ros2 topic list 2>&1 | sed 's/^/[container-probe][topic] /'\n  printf '[container-probe] ros2 node list snapshot:\\n'\n  ros2 node list 2>&1 | sed 's/^/[container-probe][node] /'\n}\n\nprobe_status=0\nset +e\npython3 - \"${EXPECTED_CAMERA_INFO_FRAME}\" \"${PROBE_TIMEOUT_SECONDS}\" <<'PY'\nimport sys\nimport time\n\nimport rclpy\nfrom rclpy.executors import SingleThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import CameraInfo\n\nexpected_frame = sys.argv[1]\ntimeout_seconds = float(sys.argv[2])\n\n\nclass CameraVisibilityProbe(Node):\n    def __init__(self):\n        super().__init__('orbbec_container_camera_visibility_probe')\n        self.frames = {}\n        self.create_subscription(\n            CameraInfo,\n            '/camera/color/camera_info',\n            self._color_info_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            CameraInfo,\n            '/camera/depth/camera_info',\n            self._depth_info_callback,\n            qos_profile_sensor_data)\n\n    def _color_info_callback(self, msg: CameraInfo):\n        self.frames['color'] = msg.header.frame_id\n\n    def _depth_info_callback(self, msg: CameraInfo):\n        self.frames['depth'] = msg.header.frame_id\n\n\ndef main() -> int:\n    print('[container-probe] Waiting for host camera_info topics inside the container', flush=True)\n    rclpy.init(args=None)\n    node = CameraVisibilityProbe()\n    executor = SingleThreadedExecutor()\n    executor.add_node(node)\n    deadline = time.monotonic() + timeout_seconds\n\n    try:\n        while time.monotonic() < deadline:\n            
executor.spin_once(timeout_sec=0.2)\n            if 'color' in node.frames and 'depth' in node.frames:\n                break\n\n        missing = []\n        if 'color' not in node.frames:\n            missing.append('/camera/color/camera_info')\n        if 'depth' not in node.frames:\n            missing.append('/camera/depth/camera_info')\n        if missing:\n            print(\n                '[container-probe] Timed out waiting for: ' + ', '.join(missing),\n                file=sys.stderr,\n                flush=True)\n            return 1\n\n        print(f'[container-probe] Observed /camera/color/camera_info frame_id: {node.frames[\"color\"]}', flush=True)\n        print(f'[container-probe] Observed /camera/depth/camera_info frame_id: {node.frames[\"depth\"]}', flush=True)\n\n        if node.frames['color'] != expected_frame:\n            print(\n                f'[container-probe] Unexpected /camera/color/camera_info frame_id: {node.frames[\"color\"]} '\n                f'(expected {expected_frame})',\n                file=sys.stderr,\n                flush=True)\n            return 1\n        if node.frames['depth'] != expected_frame:\n            print(\n                f'[container-probe] Unexpected /camera/depth/camera_info frame_id: {node.frames[\"depth\"]} '\n                f'(expected {expected_frame})',\n                file=sys.stderr,\n                flush=True)\n            return 1\n\n        print('[container-probe] Container camera visibility probe passed.', flush=True)\n        return 0\n    finally:\n        executor.remove_node(node)\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\nprobe_status=$?\nset -e\nif (( probe_status != 0 )); then\n  print_discovery_snapshot\n  exit \"${probe_status}\"\nfi\nEOF\n)\" 2>&1\n  )\" || {\n    printf '%s\\n' \"${probe_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r probe_line; do\n    [[ -n \"${probe_line}\" ]] || continue\n    info \"${probe_line}\"\n  done 
<<< \"${probe_output}\"\n\n  return 0\n}\n\nprobe_container_static_tf() {\n  local probe_output=\"\"\n  local probe_args=()\n\n  build_base_container_args probe_args\n  probe_args+=(-e \"PROBE_TIMEOUT_SECONDS=${DEBUG_STATIC_TF_TIMEOUT_SEC}\")\n\n  probe_output=\"$(\n    docker_cmd \"${probe_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc \"$(cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nif (( restore_nounset )); then\n  set -u\nfi\n\nLOG_FILE=\"/tmp/orbbec-tf-probe.log\"\nLAUNCH_PID=\"\"\nLAUNCH_STOP_TIMEOUT=8\n\nterminate_launch() {\n  local signal=\"\"\n  local deadline=0\n\n  if [[ -z \"${LAUNCH_PID}\" ]] || ! kill -0 \"${LAUNCH_PID}\" 2>/dev/null; then\n    LAUNCH_PID=\"\"\n    return 0\n  fi\n\n  for signal in INT TERM KILL; do\n    kill \"-${signal}\" \"${LAUNCH_PID}\" 2>/dev/null || true\n    deadline=$((SECONDS + LAUNCH_STOP_TIMEOUT))\n    while ((SECONDS < deadline)); do\n      if ! 
kill -0 \"${LAUNCH_PID}\" 2>/dev/null; then\n        wait \"${LAUNCH_PID}\" 2>/dev/null || true\n        LAUNCH_PID=\"\"\n        return 0\n      fi\n      sleep 1\n    done\n  done\n\n  wait \"${LAUNCH_PID}\" 2>/dev/null || true\n  LAUNCH_PID=\"\"\n}\n\ncleanup() {\n  terminate_launch\n}\ntrap cleanup EXIT INT TERM\n\nros2 launch nvblox_examples_bringup orbbec_transforms.launch.py >\"${LOG_FILE}\" 2>&1 &\nLAUNCH_PID=$!\n\nstatus=0\npython3 - \"${PROBE_TIMEOUT_SECONDS}\" <<'PY' || status=$?\nimport sys\nimport time\n\nimport rclpy\nfrom rclpy.duration import Duration\nfrom rclpy.time import Time\nfrom tf2_ros import Buffer, TransformListener\n\ntimeout_seconds = float(sys.argv[1])\nrequired_transforms = [\n    ('odom', 'base_link'),\n    ('odom', 'camera_link'),\n    ('odom', 'camera_color_optical_frame'),\n]\n\n\ndef main() -> int:\n    print('[container-tf-probe] Waiting for managed static TF chain inside the container', flush=True)\n    rclpy.init(args=None)\n    node = rclpy.create_node('orbbec_container_tf_probe')\n    tf_buffer = Buffer(cache_time=Duration(seconds=timeout_seconds))\n    tf_listener = TransformListener(tf_buffer, node, spin_thread=False)\n    deadline = time.monotonic() + timeout_seconds\n    last_missing = []\n\n    try:\n        while time.monotonic() < deadline:\n            rclpy.spin_once(node, timeout_sec=0.2)\n            last_missing = []\n            for target_frame, source_frame in required_transforms:\n                if not tf_buffer.can_transform(\n                        target_frame,\n                        source_frame,\n                        Time(),\n                        timeout=Duration(seconds=0.1)):\n                    last_missing.append(f'{target_frame} <- {source_frame}')\n\n            if not last_missing:\n                print(\n                    '[container-tf-probe] TF probe passed for odom <- base_link, '\n                    'odom <- camera_link, odom <- camera_color_optical_frame',\n                    
flush=True)\n                return 0\n\n        print(\n            '[container-tf-probe] TF probe failed. Missing transforms: '\n            + ', '.join(last_missing or ['unknown']),\n            file=sys.stderr,\n            flush=True)\n        return 1\n    finally:\n        del tf_listener\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\n\nif (( status != 0 )); then\n  printf '[container-tf-probe] Relevant launch log tail:\\n'\n  tail -n 40 \"${LOG_FILE}\" 2>/dev/null || true\nfi\n\nterminate_launch\nexit \"${status}\"\nEOF\n)\" 2>&1\n  )\" || {\n    printf '%s\\n' \"${probe_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r probe_line; do\n    [[ -n \"${probe_line}\" ]] || continue\n    info \"${probe_line}\"\n  done <<< \"${probe_output}\"\n\n  return 0\n}\n\nprobe_full_demo_runtime_output() {\n  local probe_output=\"\"\n  local probe_args=()\n\n  build_base_container_args probe_args\n  probe_args+=(\n    -e \"NVBLOX_LAUNCH_FILE=${LAUNCH_FILE}\"\n    -e \"RUNTIME_PROBE_TIMEOUT_SECONDS=${DEBUG_RUNTIME_OUTPUT_TIMEOUT_SEC}\"\n  )\n  append_gui_container_args probe_args\n\n  probe_output=\"$(\n    docker_cmd \"${probe_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc \"$(cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/.setup-nvbox/container_workspace.env\"\nif (( restore_nounset )); then\n  set -u\nfi\n\nPACKAGE_PREFIX=\"$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)\"\n[[ -n \"${PACKAGE_PREFIX}\" ]]\n[[ -f \"${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}\" ]]\n\nprintf '[full-demo-probe] Workspace spec: %s\\n' \"${STAMP_WORKSPACE_SPEC_VERSION:-unknown}\"\nprintf '[full-demo-probe] Launch file: %s\\n' \"${NVBLOX_LAUNCH_FILE}\"\nprintf '[full-demo-probe] Managed static TF chain: odom 
-> base_link -> camera_link -> camera_color_optical_frame\\n'\n\nLAUNCH_LOG=\"/tmp/nvblox-full-demo-probe.log\"\nLAUNCH_PID=\"\"\nPROBE_PID=\"\"\n\ncleanup() {\n  if [[ -n \"${PROBE_PID}\" ]] && kill -0 \"${PROBE_PID}\" 2>/dev/null; then\n    kill -TERM \"${PROBE_PID}\" 2>/dev/null || true\n    wait \"${PROBE_PID}\" 2>/dev/null || true\n  fi\n  if [[ -n \"${LAUNCH_PID}\" ]] && kill -0 \"${LAUNCH_PID}\" 2>/dev/null; then\n    kill -INT \"${LAUNCH_PID}\" 2>/dev/null || true\n    wait \"${LAUNCH_PID}\" 2>/dev/null || true\n  fi\n}\ntrap cleanup EXIT INT TERM\n\nros2 launch nvblox_examples_bringup \"${NVBLOX_LAUNCH_FILE}\" >\"${LAUNCH_LOG}\" 2>&1 &\nLAUNCH_PID=$!\n\npython3 - \"${RUNTIME_PROBE_TIMEOUT_SECONDS}\" <<'PY' &\nimport sys\nimport time\n\nimport rclpy\nfrom nav_msgs.msg import OccupancyGrid\nfrom rclpy.executors import SingleThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import PointCloud2\n\ntimeout_seconds = float(sys.argv[1])\n\n\nclass NvbloxOutputProbe(Node):\n    def __init__(self):\n        super().__init__('nvblox_runtime_output_probe')\n        self.result = None\n        self.create_subscription(\n            PointCloud2,\n            '/nvblox_node/static_esdf_pointcloud',\n            self._pointcloud_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            OccupancyGrid,\n            '/nvblox_node/static_map_slice',\n            self._map_slice_callback,\n            10)\n\n    def _pointcloud_callback(self, msg: PointCloud2):\n        self.result = (\n            '/nvblox_node/static_esdf_pointcloud',\n            f'frame_id={msg.header.frame_id or \"<empty>\"} width={msg.width} height={msg.height}')\n\n    def _map_slice_callback(self, msg: OccupancyGrid):\n        self.result = (\n            '/nvblox_node/static_map_slice',\n            f'frame_id={msg.header.frame_id or \"<empty>\"} width={msg.info.width} '\n            
f'height={msg.info.height} resolution={msg.info.resolution:.3f}')\n\n\ndef main() -> int:\n    print(\n        '[full-demo-probe] Waiting for /nvblox_node/static_esdf_pointcloud or '\n        '/nvblox_node/static_map_slice',\n        flush=True)\n    rclpy.init(args=None)\n    node = NvbloxOutputProbe()\n    executor = SingleThreadedExecutor()\n    executor.add_node(node)\n    deadline = time.monotonic() + timeout_seconds\n\n    try:\n        while time.monotonic() < deadline and node.result is None:\n            executor.spin_once(timeout_sec=0.2)\n\n        if node.result is None:\n            print(\n                '[full-demo-probe] Runtime output probe timed out waiting for '\n                '/nvblox_node/static_esdf_pointcloud or /nvblox_node/static_map_slice.',\n                file=sys.stderr,\n                flush=True)\n            return 2\n\n        topic_name, details = node.result\n        print(f'[full-demo-probe] Runtime output probe received {topic_name}: {details}', flush=True)\n        return 0\n    finally:\n        executor.remove_node(node)\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\nPROBE_PID=$!\n\nwhile true; do\n  if ! kill -0 \"${LAUNCH_PID}\" 2>/dev/null; then\n    wait \"${LAUNCH_PID}\" || launch_status=$?\n    launch_status=\"${launch_status:-0}\"\n    if kill -0 \"${PROBE_PID}\" 2>/dev/null; then\n      kill -TERM \"${PROBE_PID}\" 2>/dev/null || true\n      wait \"${PROBE_PID}\" 2>/dev/null || true\n    fi\n\n    if grep -q 'Camera info readiness probe timed out waiting for:' \"${LAUNCH_LOG}\"; then\n      printf '[full-demo-probe] Launch failed during internal camera readiness probe.\\n'\n      grep 'Camera info readiness probe timed out waiting for:' \"${LAUNCH_LOG}\" | tail -n 1\n      printf '[full-demo-probe] Relevant launch log tail:\\n'\n      tail -n 40 \"${LAUNCH_LOG}\" 2>/dev/null || true\n      exit 1\n    fi\n    if grep -q 'TF readiness probe failed.' 
\"${LAUNCH_LOG}\"; then\n      printf '[full-demo-probe] Launch failed during TF readiness.\\n'\n      grep 'TF readiness probe failed.' \"${LAUNCH_LOG}\" | tail -n 1\n      printf '[full-demo-probe] Relevant launch log tail:\\n'\n      tail -n 40 \"${LAUNCH_LOG}\" 2>/dev/null || true\n      exit 1\n    fi\n\n    printf '[full-demo-probe] Launch exited before runtime output probe succeeded (status=%s).\\n' \"${launch_status}\"\n    printf '[full-demo-probe] Relevant launch log tail:\\n'\n    tail -n 40 \"${LAUNCH_LOG}\" 2>/dev/null || true\n    exit 1\n  fi\n\n  if ! kill -0 \"${PROBE_PID}\" 2>/dev/null; then\n    wait \"${PROBE_PID}\" || probe_status=$?\n    probe_status=\"${probe_status:-0}\"\n    if (( probe_status == 0 )); then\n      printf '[full-demo-probe] Runtime output probe passed. Stopping demo launch.\\n'\n      exit 0\n    fi\n\n    printf '[full-demo-probe] Runtime output probe finished without observing map output.\\n'\n    printf '[full-demo-probe] Relevant launch log tail:\\n'\n    tail -n 40 \"${LAUNCH_LOG}\" 2>/dev/null || true\n    exit 1\n  fi\n\n  sleep 1\ndone\nEOF\n)\" 2>&1\n  )\" || {\n    printf '%s\\n' \"${probe_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r probe_line; do\n    [[ -n \"${probe_line}\" ]] || continue\n    info \"${probe_line}\"\n  done <<< \"${probe_output}\"\n\n  return 0\n}\n\nconfigure_display() {\n  if (( HEADLESS )); then\n    LAUNCH_FILE=\"orbbec_debug.launch.py\"\n    return 0\n  fi\n\n  if [[ -z \"${DISPLAY:-}\" ]]; then\n    warn \"DISPLAY is not set. Falling back to headless launch probing.\"\n    HEADLESS=1\n    LAUNCH_FILE=\"orbbec_debug.launch.py\"\n    return 0\n  fi\n\n  if [[ ! -d /tmp/.X11-unix ]]; then\n    warn \"/tmp/.X11-unix is missing. Falling back to headless launch probing.\"\n    HEADLESS=1\n    LAUNCH_FILE=\"orbbec_debug.launch.py\"\n    return 0\n  fi\n\n  if ! command -v xhost >/dev/null 2>&1; then\n    warn \"xhost is not available. 
Falling back to headless launch probing.\"\n    HEADLESS=1\n    LAUNCH_FILE=\"orbbec_debug.launch.py\"\n    return 0\n  fi\n\n  if xhost +si:localuser:root >/dev/null 2>&1; then\n    XHOST_GRANTED=1\n    USE_GUI=1\n    LAUNCH_FILE=\"orbbec_example.launch.py\"\n    return 0\n  fi\n\n  warn \"Failed to grant X11 access for the container. Falling back to headless launch probing.\"\n  HEADLESS=1\n  LAUNCH_FILE=\"orbbec_debug.launch.py\"\n}\n\nenable_managed_fastdds_udp_runtime \"${MANAGED_ROOT}\"\nexport_effective_ros_discovery_env\nconfigure_display\nreport_prepared_runtime_state\n\nbegin_stage \"1/7 Gemini2 device state\"\nif ensure_gemini2_ready_for_debug; then\n  pass_stage\nelse\n  fail_stage \"Gemini2 is not ready for runtime debugging.\"\nfi\n\nbegin_stage \"2/7 Host ROS discovery env\"\nlog_ros_discovery_env \"Host ROS discovery env\"\npass_stage\n\nbegin_stage \"3/7 Container ROS discovery env\"\ninfo \"Container ROS discovery env: $(ros_discovery_env_summary)\"\npass_stage\n\nbegin_stage \"4/7 Host camera stream readiness\"\nlaunch_host_camera\nif wait_for_camera_streams_ready; then\n  pass_stage\nelse\n  if ! kill -0 \"${HOST_CAMERA_PID}\" 2>/dev/null; then\n    fail_stage \"Host Gemini2 driver exited before camera streams became ready. Check ${HOST_CAMERA_LOG}.\"\n  fi\n  fail_stage \"Camera stream readiness probe failed. 
Check ${HOST_CAMERA_LOG}.\"\nfi\n\nbegin_stage \"5/7 Container camera visibility probe\"\nif probe_container_camera_visibility; then\n  pass_stage\nelse\n  fail_stage \"The container cannot discover host camera_info topics with the current ROS discovery environment.\"\nfi\n\nbegin_stage \"6/7 Container static TF probe\"\nif probe_container_static_tf; then\n  pass_stage\nelse\n  fail_stage \"The container managed static TF chain is not queryable.\"\nfi\n\nbegin_stage \"7/7 Full demo runtime output probe\"\nif probe_full_demo_runtime_output; then\n  pass_stage\nelse\n  fail_stage \"The current prepared launch/runtime path did not reach stable map output.\"\nfi\n\ninfo \"Runtime connectivity debug completed successfully.\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/scripts/preflight.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/../lib/common.sh\"\n\nMANAGED_ROOT=\"${MANAGED_ROOT_DEFAULT}\"\nMODE_PREPARE=0\nMODE_RUN=0\n\nwhile (($#)); do\n  case \"$1\" in\n    --managed-root)\n      shift\n      MANAGED_ROOT=\"$1\"\n      ;;\n    --prepare)\n      MODE_PREPARE=1\n      ;;\n    --run)\n      MODE_RUN=1\n      ;;\n    *)\n      die \"Unknown argument: $1\"\n      ;;\n  esac\n  shift\ndone\n\n(( MODE_PREPARE || MODE_RUN )) || die \"preflight.sh requires --prepare, --run, or both.\"\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  die \"Do not invoke preflight.sh with sudo directly. Run reComputer run nvblox instead.\"\nfi\n\nguard_managed_root_path \"${MANAGED_ROOT}\"\nif (( MODE_PREPARE )); then\n  bootstrap_managed_root \"${MANAGED_ROOT}\"\nelse\n  require_bootstrapped_managed_root \"${MANAGED_ROOT}\"\nfi\n\nassert_command sudo\nassert_command git\nassert_command bash\nassert_supported_platform\ncheck_apt_locks\nensure_docker_access\n\nif (( MODE_PREPARE )); then\n  warn_on_unreachable_endpoints \"https://github.com\" \"https://packages.ros.org\" \"https://raw.githubusercontent.com/ros/rosdistro/master/ros.key\"\n  if ! base_image=\"$(select_base_image)\"; then\n    die \"No supported local base image found. Run reComputer run nvblox to download and load the OneDrive archive, or ensure $(acceptable_base_image_hint) already exists.\"\n  fi\n  info \"Selected base image: ${base_image}\"\nfi\n\nif (( MODE_RUN )); then\n  gemini2_state=\"$(gemini2_device_state)\"\n  log_gemini2_device_state \"Gemini2 device state during preflight\"\n\n  case \"${gemini2_state}\" in\n    ready)\n      ;;\n    usb_missing)\n      die \"Gemini2 is not connected. Current device state: usb_missing.\"\n      ;;\n    usb_present_no_video)\n      warn \"Gemini2 USB device is present, but no /dev/video nodes were found. 
Attempting one automatic recovery.\"\n      if ! recover_gemini2_device \"run preflight\" 1 1 1; then\n        gemini2_state=\"$(gemini2_device_state)\"\n        die \"Gemini2 USB device is present, but video nodes were not recovered. Current device state: ${gemini2_state}. Reconnect the camera if this persists.\"\n      fi\n      ;;\n    *)\n      die \"Unexpected Gemini2 device state during preflight: ${gemini2_state}\"\n      ;;\n  esac\n\n  if (( ! MODE_PREPARE )) && ! docker_cmd image inspect \"${DERIVED_IMAGE_TAG}\" >/dev/null 2>&1; then\n    die \"Derived image ${DERIVED_IMAGE_TAG} does not exist. Run with --prepare-only or the default mode first.\"\n  fi\nfi\n\ninfo \"Preflight checks passed.\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/scripts/prepare_container.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/../lib/common.sh\"\n\nMANAGED_ROOT=\"${MANAGED_ROOT_DEFAULT}\"\nFORCE_REBUILD=0\n\nwhile (($#)); do\n  case \"$1\" in\n    --managed-root)\n      shift\n      MANAGED_ROOT=\"$1\"\n      ;;\n    --force-rebuild)\n      FORCE_REBUILD=1\n      ;;\n    *)\n      die \"Unknown argument: $1\"\n      ;;\n  esac\n  shift\ndone\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  die \"Do not invoke prepare_container.sh with sudo directly. Run reComputer run nvblox instead.\"\nfi\nbootstrap_managed_root \"${MANAGED_ROOT}\"\nensure_docker_access\n\nCONTAINER_WS=\"${MANAGED_ROOT}/isaac_ros-dev\"\nIMAGE_STAMP=\"${MANAGED_ROOT}/.stamps/derived_image.env\"\nDOCKERFILE_PATH=\"${PROJECT_ROOT}/docker/Dockerfile.nvblox_orbbec\"\nCONTEXT_HASH=\"$(container_image_context_hash)\"\nPREPARED_CONTAINER_REQUIRED_PACKAGE=\"nvblox_examples_bringup\"\nPREPARED_CONTAINER_REQUIRED_PATHS=(\n  \"launch/orbbec_transforms.launch.py\"\n  \"launch/orbbec_example.launch.py\"\n  \"launch/orbbec_debug.launch.py\"\n  \"launch/orbbec_nvblox_standalone.launch.py\"\n  \"config/nvblox/specializations/nvblox_orbbec_static.yaml\"\n)\n\nprobe_gpu_runtime() {\n  local image_ref=\"$1\"\n  local log_file=\"${2:-/dev/null}\"\n  local args=(run --rm --entrypoint /bin/bash)\n  append_jetson_container_args args\n  docker_cmd \"${args[@]}\" \"${image_ref}\" -lc 'echo runtime-ok >/dev/null' >\"${log_file}\" 2>&1\n}\n\nensure_nvidia_runtime() {\n  local base_image=\"$1\"\n  local config_file=\"/etc/nvidia-container-runtime/config.toml\"\n  local probe_log\n  probe_log=\"$(mktemp)\"\n  trap 'rm -f \"${probe_log}\"' RETURN\n\n  if probe_gpu_runtime \"${base_image}\" \"${probe_log}\"; then\n    info \"NVIDIA container runtime probe succeeded.\"\n    return 0\n  fi\n\n  warn \"Initial NVIDIA runtime probe output:\"\n  sed 
's/^/[probe] /' \"${probe_log}\" >&2 || true\n  warn \"NVIDIA runtime probe failed. Trying to switch the runtime to csv mode.\"\n  [[ -f \"${config_file}\" ]] || die \"Runtime config file ${config_file} was not found.\"\n\n  run_sudo sed -i -E 's/mode = \"(auto|cdi)\"/mode = \"csv\"/' \"${config_file}\"\n  run_sudo systemctl restart docker\n\n  : > \"${probe_log}\"\n  if ! probe_gpu_runtime \"${base_image}\" \"${probe_log}\"; then\n    sed 's/^/[probe] /' \"${probe_log}\" >&2 || true\n    die \"NVIDIA runtime probe still fails after switching to csv mode.\"\n  fi\n  info \"NVIDIA container runtime is now working.\"\n}\n\nimage_stamp_current() {\n  [[ -f \"${IMAGE_STAMP}\" ]] || return 1\n  # shellcheck disable=SC1090\n  source \"${IMAGE_STAMP}\"\n  [[ \"${STAMP_BASE_IMAGE_REF:-}\" == \"${BASE_IMAGE_REF}\" ]] || return 1\n  [[ \"${STAMP_BASE_IMAGE_ID:-}\" == \"${BASE_IMAGE_ID}\" ]] || return 1\n  [[ \"${STAMP_CONTEXT_HASH:-}\" == \"${CONTEXT_HASH}\" ]] || return 1\n  docker_cmd image inspect \"${DERIVED_IMAGE_TAG}\" >/dev/null 2>&1\n}\n\nwrite_image_stamp() {\n  {\n    printf 'STAMP_BASE_IMAGE_REF=%q\\n' \"${BASE_IMAGE_REF}\"\n    printf 'STAMP_BASE_IMAGE_ID=%q\\n' \"${BASE_IMAGE_ID}\"\n    printf 'STAMP_CONTEXT_HASH=%q\\n' \"${CONTEXT_HASH}\"\n    printf 'STAMPED_AT=%q\\n' \"$(date -Is 2>/dev/null || date)\"\n  } > \"${IMAGE_STAMP}\"\n}\n\nbuild_derived_image() {\n  info \"Building derived image ${DERIVED_IMAGE_TAG} from ${BASE_IMAGE_REF}.\"\n  docker_cmd build \\\n    --network host \\\n    --build-arg \"BASE_IMAGE=${BASE_IMAGE_REF}\" \\\n    --build-arg \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\" \\\n    -t \"${DERIVED_IMAGE_TAG}\" \\\n    -f \"${DOCKERFILE_PATH}\" \\\n    \"${PROJECT_ROOT}\"\n}\n\nprepare_container_workspace() {\n  local args=(run --rm)\n\n  mkdir -p \"${CONTAINER_WS}/src\" \"${CONTAINER_WS}/.setup-nvbox\"\n\n  info \"Preparing container workspace in ${CONTAINER_WS}.\"\n  append_jetson_container_args args\n  args+=(\n    -e 
\"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\" \\\n    -e \"FORCE_REBUILD=${FORCE_REBUILD}\" \\\n    -e \"EXPECTED_WORKSPACE_SPEC_VERSION=${CONTAINER_WORKSPACE_SPEC_VERSION}\" \\\n    -e \"SETUP_IMAGE_ID=${DERIVED_IMAGE_ID}\" \\\n    -e \"SETUP_IMAGE_CONTEXT_HASH=${CONTEXT_HASH}\" \\\n    -e \"COMMUNITY_REPO_URL=${COMMUNITY_REPO_URL_DEFAULT}\" \\\n    -e \"COMMUNITY_REPO_BRANCH=${COMMUNITY_REPO_BRANCH_DEFAULT}\" \\\n    -v \"${CONTAINER_WS}:/workspaces/isaac_ros-dev\" \\\n    \"${DERIVED_IMAGE_TAG}\" \\\n    /opt/nvblox/bin/prepare_container_workspace.sh\n  )\n  docker_cmd \"${args[@]}\"\n}\n\nvalidate_prepared_container_workspace() {\n  local stamp_path=\"${CONTAINER_WS}/.setup-nvbox/container_workspace.env\"\n  local current_image_id=\"\"\n\n  [[ -f \"${CONTAINER_WS}/install/setup.bash\" ]] || die \"Prepared container workspace is missing ${CONTAINER_WS}/install/setup.bash.\"\n  [[ -f \"${stamp_path}\" ]] || die \"Prepared container workspace stamp is missing at ${stamp_path}.\"\n\n  current_image_id=\"$(docker_image_id \"${DERIVED_IMAGE_TAG}\")\"\n\n  # shellcheck disable=SC1090\n  source \"${stamp_path}\"\n\n  [[ \"${STAMP_WORKSPACE_SPEC_VERSION:-}\" == \"${CONTAINER_WORKSPACE_SPEC_VERSION}\" ]] || \\\n    die \"Prepared container workspace spec is ${STAMP_WORKSPACE_SPEC_VERSION:-unknown}, expected ${CONTAINER_WORKSPACE_SPEC_VERSION}.\"\n  [[ \"${STAMP_IMAGE_CONTEXT_HASH:-}\" == \"${CONTEXT_HASH}\" ]] || \\\n    die \"Prepared container workspace context hash is stale. Expected ${CONTEXT_HASH}, got ${STAMP_IMAGE_CONTEXT_HASH:-unknown}.\"\n  [[ \"${STAMP_IMAGE_ID:-}\" == \"${current_image_id}\" ]] || \\\n    die \"Prepared container workspace was built against image ${STAMP_IMAGE_ID:-unknown}, expected ${current_image_id}.\"\n\n  if ! 
validate_package_install_artifacts \"${CONTAINER_WS}\" \"${PREPARED_CONTAINER_REQUIRED_PACKAGE}\" \"${PREPARED_CONTAINER_REQUIRED_PATHS[@]}\"; then\n    die \"Prepared container install artifacts are missing or invalid inside the container workspace.\"\n  fi\n\n  info \"Prepared container workspace spec: ${STAMP_WORKSPACE_SPEC_VERSION}\"\n  info \"Prepared container workspace stamped at: ${STAMPED_AT:-unknown}\"\n  info \"Verified prepared launch artifacts: ${PREPARED_CONTAINER_REQUIRED_PATHS[*]}\"\n}\n\nBASE_IMAGE_REF=\"$(select_base_image || true)\"\n[[ -n \"${BASE_IMAGE_REF}\" ]] || die \"No supported local base image found. Run reComputer run nvblox to download and load the OneDrive archive, or ensure $(acceptable_base_image_hint) already exists.\"\nBASE_IMAGE_ID=\"$(docker_image_id \"${BASE_IMAGE_REF}\")\"\n\nensure_nvidia_runtime \"${BASE_IMAGE_REF}\"\n\nif (( FORCE_REBUILD )) || ! image_stamp_current; then\n  build_derived_image\n  write_image_stamp\nelse\n  info \"Derived image ${DERIVED_IMAGE_TAG} is current. Skipping rebuild.\"\nfi\n\nDERIVED_IMAGE_ID=\"$(docker_image_id \"${DERIVED_IMAGE_TAG}\")\"\nprepare_container_workspace\nrepair_managed_root_ownership \"${MANAGED_ROOT}\"\nvalidate_prepared_container_workspace\ninfo \"Container preparation complete.\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/scripts/prepare_host.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/../lib/common.sh\"\n\nMANAGED_ROOT=\"${MANAGED_ROOT_DEFAULT}\"\nFORCE_REBUILD=0\n\nwhile (($#)); do\n  case \"$1\" in\n    --managed-root)\n      shift\n      MANAGED_ROOT=\"$1\"\n      ;;\n    --force-rebuild)\n      FORCE_REBUILD=1\n      ;;\n    *)\n      die \"Unknown argument: $1\"\n      ;;\n  esac\n  shift\ndone\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  die \"Do not invoke prepare_host.sh with sudo directly. Run reComputer run nvblox instead.\"\nfi\nbootstrap_managed_root \"${MANAGED_ROOT}\"\n\nHOST_WS=\"${MANAGED_ROOT}/ros2_ws\"\nHOST_REPO=\"${HOST_WS}/src/OrbbecSDK_ROS2\"\nHOST_STAMP=\"${MANAGED_ROOT}/.stamps/host_workspace.env\"\n\nensure_locale() {\n  install_packages_if_missing locales\n  if ! locale -a 2>/dev/null | grep -qi '^en_US\\.utf-8$'; then\n    info \"Generating en_US.UTF-8 locale.\"\n    printf 'en_US.UTF-8 UTF-8\\n' | run_sudo tee -a /etc/locale.gen >/dev/null\n    run_sudo locale-gen en_US.UTF-8\n  fi\n\n  run_sudo update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8\n  export LANG=en_US.UTF-8\n  export LC_ALL=en_US.UTF-8\n}\n\nensure_ros2_repository() {\n  local ros_keyring=\"/usr/share/keyrings/ros-archive-keyring.gpg\"\n  local ros_source=\"/etc/apt/sources.list.d/ros2.list\"\n  local repo_line=\"\"\n\n  if [[ -f \"/opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash\" ]]; then\n    return 0\n  fi\n\n  info \"Installing ROS 2 apt repository.\"\n  install_packages_if_missing curl gnupg lsb-release ca-certificates software-properties-common\n  run_sudo add-apt-repository universe -y\n\n  if [[ ! 
-f \"${ros_keyring}\" ]]; then\n    run_sudo curl -fsSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o \"${ros_keyring}\"\n  fi\n\n  # shellcheck disable=SC1091\n  source /etc/os-release\n  repo_line=\"deb [arch=$(dpkg --print-architecture) signed-by=${ros_keyring}] http://packages.ros.org/ros2/ubuntu ${UBUNTU_CODENAME} main\"\n  if [[ ! -f \"${ros_source}\" ]] || ! grep -Fqx \"${repo_line}\" \"${ros_source}\" 2>/dev/null; then\n    printf '%s\\n' \"${repo_line}\" | run_sudo tee \"${ros_source}\" >/dev/null\n  fi\n}\n\nensure_ros2_humble() {\n  if [[ -f \"/opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash\" ]]; then\n    info \"ROS 2 ${ROS_DISTRO_DEFAULT} already installed.\"\n    return 0\n  fi\n\n  ensure_locale\n  ensure_ros2_repository\n  install_packages_if_missing \"ros-${ROS_DISTRO_DEFAULT}-desktop\" python3-rosdep python3-vcstool python3-colcon-common-extensions\n}\n\nensure_rosdep_ready() {\n  install_packages_if_missing python3-rosdep python3-vcstool python3-colcon-common-extensions python3-pip build-essential git curl\n\n  if [[ ! -f /etc/ros/rosdep/sources.list.d/20-default.list ]]; then\n    info \"Initializing rosdep.\"\n    run_sudo rosdep init\n  fi\n\n  info \"Updating rosdep.\"\n  rosdep update\n}\n\nsync_orbbec_repo() {\n  mkdir -p \"${HOST_WS}/src\"\n\n  if [[ ! -d \"${HOST_REPO}/.git\" ]]; then\n    info \"Cloning OrbbecSDK_ROS2 ${ORBBEC_VERSION}.\"\n    git clone --branch \"${ORBBEC_VERSION}\" --depth 1 \"${ORBBEC_REPO_URL}\" \"${HOST_REPO}\"\n    return 0\n  fi\n\n  if [[ -n \"$(git -C \"${HOST_REPO}\" status --porcelain)\" ]]; then\n    die \"Managed Orbbec repo at ${HOST_REPO} has local changes. 
Clean it or remove ${MANAGED_ROOT} before retrying.\"\n  fi\n\n  info \"Refreshing OrbbecSDK_ROS2 checkout.\"\n  git -C \"${HOST_REPO}\" fetch --depth 1 origin \"refs/tags/${ORBBEC_VERSION}:refs/tags/${ORBBEC_VERSION}\"\n  git -C \"${HOST_REPO}\" checkout -f \"${ORBBEC_VERSION}\"\n}\n\ninstall_orbbec_udev_rules() {\n  info \"Installing Orbbec udev rules.\"\n  (\n    cd \"${HOST_REPO}/orbbec_camera/scripts\"\n    run_sudo bash install_udev_rules.sh\n  )\n  run_sudo udevadm control --reload-rules\n  run_sudo udevadm trigger\n}\n\nverify_host_workspace() {\n  local pkg_prefix=\"\"\n\n  [[ -f \"${HOST_WS}/install/setup.bash\" ]] || return 1\n\n  source_ros_setup \"${HOST_WS}\"\n  pkg_prefix=\"$(ros2 pkg prefix orbbec_camera 2>/dev/null || true)\"\n  [[ -n \"${pkg_prefix}\" ]] || return 1\n  [[ -d \"${pkg_prefix}/share/orbbec_camera\" ]] || return 1\n}\n\nhost_stamp_current() {\n  [[ -f \"${HOST_STAMP}\" ]] || return 1\n  # shellcheck disable=SC1090\n  source \"${HOST_STAMP}\"\n  [[ \"${HOST_ORBBEC_VERSION:-}\" == \"${ORBBEC_VERSION}\" ]] || return 1\n  verify_host_workspace\n}\n\nwrite_host_stamp() {\n  {\n    printf 'HOST_ORBBEC_VERSION=%q\\n' \"${ORBBEC_VERSION}\"\n    printf 'HOST_STAMPED_AT=%q\\n' \"$(date -Is 2>/dev/null || date)\"\n  } > \"${HOST_STAMP}\"\n}\n\nensure_locale\nensure_ros2_humble\nensure_rosdep_ready\n\ninstall_packages_if_missing \\\n  libgflags-dev \\\n  nlohmann-json3-dev \\\n  libdw-dev \\\n  libssl-dev \\\n  mesa-utils \\\n  libgl1 \\\n  libgoogle-glog-dev \\\n  \"ros-${ROS_DISTRO_DEFAULT}-image-transport\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-image-transport-plugins\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-compressed-image-transport\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-image-publisher\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-camera-info-manager\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-diagnostic-updater\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-diagnostic-msgs\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-statistics-msgs\" \\\n  \"ros-${ROS_DISTRO_DEFAULT}-xacro\" \\\n  
\"ros-${ROS_DISTRO_DEFAULT}-backward-ros\"\n\nsync_orbbec_repo\ninstall_orbbec_udev_rules\n\nif (( FORCE_REBUILD == 0 )) && host_stamp_current; then\n  info \"Host Orbbec workspace is already prepared. Skipping rebuild.\"\n  exit 0\nfi\n\nsource_ros_setup\n\ninfo \"Installing host workspace rosdep dependencies.\"\n(\n  cd \"${HOST_WS}\"\n  rosdep install --from-paths src --ignore-src -r -y --rosdistro \"${ROS_DISTRO_DEFAULT}\"\n)\n\ninfo \"Building host Orbbec workspace.\"\n(\n  cd \"${HOST_WS}\"\n  if (( FORCE_REBUILD )); then\n    rm -rf build install log\n  fi\n  colcon build --event-handlers console_direct+ --cmake-args -DCMAKE_BUILD_TYPE=Release\n)\n\nverify_host_workspace || die \"Host Orbbec workspace verification failed.\"\nwrite_host_stamp\ninfo \"Host preparation complete.\"\n"
  },
  {
    "path": "reComputer/scripts/nvblox/scripts/run_demo.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/../lib/common.sh\"\n\nMANAGED_ROOT=\"${MANAGED_ROOT_DEFAULT}\"\nHEADLESS=0\n\nwhile (($#)); do\n  case \"$1\" in\n    --managed-root)\n      shift\n      MANAGED_ROOT=\"$1\"\n      ;;\n    --headless)\n      HEADLESS=1\n      ;;\n    *)\n      die \"Unknown argument: $1\"\n      ;;\n  esac\n  shift\ndone\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  die \"Do not invoke run_demo.sh with sudo directly. Run reComputer run nvblox instead.\"\nfi\nrequire_bootstrapped_managed_root \"${MANAGED_ROOT}\"\nensure_docker_access\n\nHOST_WS=\"${MANAGED_ROOT}/ros2_ws\"\nCONTAINER_WS=\"${MANAGED_ROOT}/isaac_ros-dev\"\nCONTAINER_STAMP=\"${CONTAINER_WS}/.setup-nvbox/container_workspace.env\"\nIMAGE_STAMP=\"${MANAGED_ROOT}/.stamps/derived_image.env\"\nHOST_STAMP=\"${MANAGED_ROOT}/.stamps/host_workspace.env\"\nLOG_DIR=\"${MANAGED_ROOT}/logs\"\nCONTAINER_NAME=\"${CONTAINER_NAME_DEFAULT}\"\nHOST_CAMERA_LOG=\"${LOG_DIR}/host-camera-$(date '+%Y%m%d-%H%M%S').log\"\nHOST_CAMERA_PID=\"\"\nXHOST_GRANTED=0\nLAUNCH_FILE=\"orbbec_example.launch.py\"\nUSE_GUI=0\nEXPECTED_CAMERA_INFO_FRAME=\"camera_color_optical_frame\"\nPREPARE_HINT=\"Run NVBLOX_MODE=prepare NVBLOX_FORCE_REBUILD=1 reComputer run nvblox.\"\nCONTAINER_PREPARE_HINT=\"Prepared container workspace is invalid. ${PREPARE_HINT}\"\n\n[[ -f \"${HOST_WS}/install/setup.bash\" ]] || die \"Host workspace is missing at ${HOST_WS}. ${PREPARE_HINT}\"\n[[ -f \"${CONTAINER_WS}/install/setup.bash\" ]] || die \"Container workspace is missing at ${CONTAINER_WS}. ${PREPARE_HINT}\"\n[[ -f \"${CONTAINER_STAMP}\" ]] || die \"Container workspace stamp is missing at ${CONTAINER_STAMP}. ${PREPARE_HINT}\"\ndocker_cmd image inspect \"${DERIVED_IMAGE_TAG}\" >/dev/null 2>&1 || die \"Derived image ${DERIVED_IMAGE_TAG} is missing. 
${PREPARE_HINT}\"\n\nPREPARED_CONTAINER_REQUIRED_PACKAGE=\"nvblox_examples_bringup\"\nPREPARED_CONTAINER_REQUIRED_PATHS=(\n  \"launch/orbbec_transforms.launch.py\"\n  \"launch/orbbec_example.launch.py\"\n  \"launch/orbbec_debug.launch.py\"\n  \"launch/orbbec_nvblox_standalone.launch.py\"\n  \"config/nvblox/specializations/nvblox_orbbec_static.yaml\"\n)\nCONTAINER_STATIC_TF_TIMEOUT_SEC=20\n\nvalidate_prepared_image_state() {\n  local current_context_hash=\"\"\n\n  [[ -f \"${IMAGE_STAMP}\" ]] || die \"Derived image stamp is missing at ${IMAGE_STAMP}. ${PREPARE_HINT}\"\n  docker_cmd image inspect \"${DERIVED_IMAGE_TAG}\" >/dev/null 2>&1 || die \"Derived image ${DERIVED_IMAGE_TAG} is missing. ${PREPARE_HINT}\"\n\n  current_context_hash=\"$(container_image_context_hash)\"\n\n  # shellcheck disable=SC1090\n  source \"${IMAGE_STAMP}\"\n\n  [[ \"${STAMP_CONTEXT_HASH:-}\" == \"${current_context_hash}\" ]] || \\\n    die \"Derived image ${DERIVED_IMAGE_TAG} is stale for the current repo state. ${PREPARE_HINT}\"\n\n  info \"Prepared derived image context hash: ${STAMP_CONTEXT_HASH}\"\n  info \"Prepared derived image stamped at: ${STAMPED_AT:-unknown}\"\n}\n\nvalidate_prepared_host_workspace() {\n  [[ -f \"${HOST_STAMP}\" ]] || die \"Host workspace stamp is missing at ${HOST_STAMP}. ${PREPARE_HINT}\"\n  [[ -f \"${HOST_WS}/install/setup.bash\" ]] || die \"Host workspace is missing at ${HOST_WS}. ${PREPARE_HINT}\"\n\n  # shellcheck disable=SC1090\n  source \"${HOST_STAMP}\"\n\n  [[ \"${HOST_ORBBEC_VERSION:-}\" == \"${ORBBEC_VERSION}\" ]] || \\\n    die \"Prepared host workspace version is ${HOST_ORBBEC_VERSION:-unknown}, expected ${ORBBEC_VERSION}. ${PREPARE_HINT}\"\n\n  source_ros_setup \"${HOST_WS}\"\n  ros2 pkg prefix orbbec_camera >/dev/null 2>&1 || \\\n    die \"Prepared host workspace cannot resolve orbbec_camera. 
${PREPARE_HINT}\"\n\n  info \"Prepared host Orbbec version: ${HOST_ORBBEC_VERSION}\"\n  info \"Prepared host workspace stamped at: ${HOST_STAMPED_AT:-unknown}\"\n}\n\nvalidate_prepared_container_workspace_state() {\n  local current_context_hash=\"\"\n  local current_image_id=\"\"\n\n  [[ -f \"${CONTAINER_WS}/install/setup.bash\" ]] || die \"Container workspace is missing at ${CONTAINER_WS}. ${PREPARE_HINT}\"\n  [[ -f \"${CONTAINER_STAMP}\" ]] || die \"Container workspace stamp is missing at ${CONTAINER_STAMP}. ${PREPARE_HINT}\"\n\n  current_context_hash=\"$(container_image_context_hash)\"\n  current_image_id=\"$(docker_image_id \"${DERIVED_IMAGE_TAG}\")\"\n\n  # shellcheck disable=SC1090\n  source \"${CONTAINER_STAMP}\"\n\n  [[ \"${STAMP_WORKSPACE_SPEC_VERSION:-}\" == \"${CONTAINER_WORKSPACE_SPEC_VERSION}\" ]] || \\\n    die \"Prepared container workspace spec is ${STAMP_WORKSPACE_SPEC_VERSION:-unknown}, expected ${CONTAINER_WORKSPACE_SPEC_VERSION}. ${PREPARE_HINT}\"\n  [[ \"${STAMP_IMAGE_CONTEXT_HASH:-}\" == \"${current_context_hash}\" ]] || \\\n    die \"Prepared container workspace is stale for the current repo state. ${PREPARE_HINT}\"\n  [[ \"${STAMP_IMAGE_ID:-}\" == \"${current_image_id}\" ]] || \\\n    die \"Prepared container workspace was built against image ${STAMP_IMAGE_ID:-unknown}, expected ${current_image_id}. ${PREPARE_HINT}\"\n\n  if ! validate_package_install_artifacts \"${CONTAINER_WS}\" \"${PREPARED_CONTAINER_REQUIRED_PACKAGE}\" \"${PREPARED_CONTAINER_REQUIRED_PATHS[@]}\"; then\n    die \"Prepared container install artifacts are missing or invalid. 
${PREPARE_HINT}\"\n  fi\n\n  info \"Prepared container workspace spec: ${STAMP_WORKSPACE_SPEC_VERSION}\"\n  info \"Prepared container workspace stamped at: ${STAMPED_AT:-unknown}\"\n  info \"Validated prepared container artifacts: ${PREPARED_CONTAINER_REQUIRED_PATHS[*]}\"\n}\n\nprobe_container_camera_visibility() {\n  local probe_output=\"\"\n  local probe_args=(\n    run\n    --rm\n    -e \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\"\n    -e \"EXPECTED_CAMERA_INFO_FRAME=${EXPECTED_CAMERA_INFO_FRAME}\"\n    -e \"PROBE_TIMEOUT_SECONDS=20\"\n    -v \"${CONTAINER_WS}:/workspaces/isaac_ros-dev\"\n  )\n\n  append_jetson_container_args probe_args\n  append_ros_discovery_container_args probe_args\n\n  probe_output=\"$(\n    docker_cmd \"${probe_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc \"$(cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nif (( restore_nounset )); then\n  set -u\nfi\npython3 - \"${EXPECTED_CAMERA_INFO_FRAME}\" \"${PROBE_TIMEOUT_SECONDS}\" <<'PY'\nimport sys\nimport time\n\nimport rclpy\nfrom rclpy.executors import SingleThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import CameraInfo\n\nexpected_frame = sys.argv[1]\ntimeout_seconds = float(sys.argv[2])\n\n\nclass CameraVisibilityProbe(Node):\n    def __init__(self):\n        super().__init__('orbbec_container_camera_visibility_probe')\n        self.frames = {}\n        self.create_subscription(\n            CameraInfo,\n            '/camera/color/camera_info',\n            self._color_info_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            CameraInfo,\n            '/camera/depth/camera_info',\n            self._depth_info_callback,\n            qos_profile_sensor_data)\n\n    def _color_info_callback(self, msg: CameraInfo):\n        
self.frames['color'] = msg.header.frame_id\n\n    def _depth_info_callback(self, msg: CameraInfo):\n        self.frames['depth'] = msg.header.frame_id\n\n\ndef main() -> int:\n    print('[container-probe] Waiting for host camera_info topics inside the container', flush=True)\n    rclpy.init(args=None)\n    node = CameraVisibilityProbe()\n    executor = SingleThreadedExecutor()\n    executor.add_node(node)\n    deadline = time.monotonic() + timeout_seconds\n\n    try:\n        while time.monotonic() < deadline:\n            executor.spin_once(timeout_sec=0.2)\n            if 'color' in node.frames and 'depth' in node.frames:\n                break\n\n        missing = []\n        if 'color' not in node.frames:\n            missing.append('/camera/color/camera_info')\n        if 'depth' not in node.frames:\n            missing.append('/camera/depth/camera_info')\n        if missing:\n            print(\n                '[container-probe] Timed out waiting for: ' + ', '.join(missing),\n                file=sys.stderr,\n                flush=True)\n            return 1\n\n        print(f'[container-probe] Observed /camera/color/camera_info frame_id: {node.frames[\"color\"]}', flush=True)\n        print(f'[container-probe] Observed /camera/depth/camera_info frame_id: {node.frames[\"depth\"]}', flush=True)\n\n        if node.frames['color'] != expected_frame:\n            print(\n                f'[container-probe] Unexpected /camera/color/camera_info frame_id: {node.frames[\"color\"]} '\n                f'(expected {expected_frame})',\n                file=sys.stderr,\n                flush=True)\n            return 1\n        if node.frames['depth'] != expected_frame:\n            print(\n                f'[container-probe] Unexpected /camera/depth/camera_info frame_id: {node.frames[\"depth\"]} '\n                f'(expected {expected_frame})',\n                file=sys.stderr,\n                flush=True)\n            return 1\n\n        print('[container-probe] 
Container camera visibility probe passed.', flush=True)\n        return 0\n    finally:\n        executor.remove_node(node)\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\nEOF\n)\" 2>&1\n  )\" || {\n    printf '%s\\n' \"${probe_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r probe_line; do\n    [[ -n \"${probe_line}\" ]] || continue\n    info \"${probe_line}\"\n  done <<< \"${probe_output}\"\n\n  return 0\n}\n\nprobe_container_static_tf() {\n  local probe_output=\"\"\n  local probe_args=(\n    run\n    --rm\n    -e \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\"\n    -e \"PROBE_TIMEOUT_SECONDS=${CONTAINER_STATIC_TF_TIMEOUT_SEC}\"\n    -v \"${CONTAINER_WS}:/workspaces/isaac_ros-dev\"\n  )\n\n  append_jetson_container_args probe_args\n  append_ros_discovery_container_args probe_args\n\n  probe_output=\"$(\n    docker_cmd \"${probe_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc \"$(cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nif (( restore_nounset )); then\n  set -u\nfi\n\nLOG_FILE=\"/tmp/orbbec-tf-probe.log\"\nLAUNCH_PID=\"\"\nLAUNCH_STOP_TIMEOUT=8\n\nterminate_launch() {\n  local signal=\"\"\n  local deadline=0\n\n  if [[ -z \"${LAUNCH_PID}\" ]] || ! kill -0 \"${LAUNCH_PID}\" 2>/dev/null; then\n    LAUNCH_PID=\"\"\n    return 0\n  fi\n\n  for signal in INT TERM KILL; do\n    kill \"-${signal}\" \"${LAUNCH_PID}\" 2>/dev/null || true\n    deadline=$((SECONDS + LAUNCH_STOP_TIMEOUT))\n    while ((SECONDS < deadline)); do\n      if ! 
kill -0 \"${LAUNCH_PID}\" 2>/dev/null; then\n        wait \"${LAUNCH_PID}\" 2>/dev/null || true\n        LAUNCH_PID=\"\"\n        return 0\n      fi\n      sleep 1\n    done\n  done\n\n  wait \"${LAUNCH_PID}\" 2>/dev/null || true\n  LAUNCH_PID=\"\"\n}\n\ncleanup() {\n  terminate_launch\n}\ntrap cleanup EXIT INT TERM\n\nros2 launch nvblox_examples_bringup orbbec_transforms.launch.py >\"${LOG_FILE}\" 2>&1 &\nLAUNCH_PID=$!\n\nstatus=0\npython3 - \"${PROBE_TIMEOUT_SECONDS}\" <<'PY' || status=$?\nimport sys\nimport time\n\nimport rclpy\nfrom rclpy.duration import Duration\nfrom rclpy.time import Time\nfrom tf2_ros import Buffer, TransformListener\n\ntimeout_seconds = float(sys.argv[1])\nrequired_transforms = [\n    ('odom', 'base_link'),\n    ('odom', 'camera_link'),\n    ('odom', 'camera_color_optical_frame'),\n]\n\n\ndef main() -> int:\n    print('[container-tf-probe] Waiting for managed static TF chain inside the container', flush=True)\n    rclpy.init(args=None)\n    node = rclpy.create_node('orbbec_container_tf_probe')\n    tf_buffer = Buffer(cache_time=Duration(seconds=timeout_seconds))\n    tf_listener = TransformListener(tf_buffer, node, spin_thread=False)\n    deadline = time.monotonic() + timeout_seconds\n    last_missing = []\n\n    try:\n        while time.monotonic() < deadline:\n            rclpy.spin_once(node, timeout_sec=0.2)\n            last_missing = []\n            for target_frame, source_frame in required_transforms:\n                if not tf_buffer.can_transform(\n                        target_frame,\n                        source_frame,\n                        Time(),\n                        timeout=Duration(seconds=0.1)):\n                    last_missing.append(f'{target_frame} <- {source_frame}')\n\n            if not last_missing:\n                print(\n                    '[container-tf-probe] TF probe passed for odom <- base_link, '\n                    'odom <- camera_link, odom <- camera_color_optical_frame',\n                    
flush=True)\n                return 0\n\n        print(\n            '[container-tf-probe] TF probe failed. Missing transforms: '\n            + ', '.join(last_missing or ['unknown']),\n            file=sys.stderr,\n            flush=True)\n        return 1\n    finally:\n        del tf_listener\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\n\nif (( status != 0 )); then\n  printf '[container-tf-probe] Relevant launch log tail:\\n'\n  tail -n 40 \"${LOG_FILE}\" 2>/dev/null || true\nfi\n\nterminate_launch\nexit \"${status}\"\nEOF\n)\" 2>&1\n  )\" || {\n    printf '%s\\n' \"${probe_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r probe_line; do\n    [[ -n \"${probe_line}\" ]] || continue\n    info \"${probe_line}\"\n  done <<< \"${probe_output}\"\n\n  return 0\n}\n\nensure_gemini2_ready_for_run() {\n  local gemini2_state=\"\"\n\n  cleanup_residual_gemini2_processes \"pre-run Gemini2 cleanup\" || true\n  log_gemini2_device_state \"Gemini2 device state before host launch\"\n\n  gemini2_state=\"$(gemini2_device_state)\"\n  case \"${gemini2_state}\" in\n    ready)\n      return 0\n      ;;\n    usb_missing)\n      die \"Gemini2 is not connected. Current device state: usb_missing.\"\n      ;;\n    usb_present_no_video)\n      warn \"Gemini2 USB device is present, but no /dev/video nodes were found before host launch. Attempting one automatic recovery.\"\n      if ! recover_gemini2_device \"pre-run host launch\" 0 1 1; then\n        gemini2_state=\"$(gemini2_device_state)\"\n        die \"Gemini2 USB device is present, but video nodes were not recovered before launch. 
Current device state: ${gemini2_state}.\"\n      fi\n      ;;\n    *)\n      die \"Unexpected Gemini2 device state before host launch: ${gemini2_state}\"\n      ;;\n  esac\n}\n\nstop_host_camera_driver() {\n  local signal=\"\"\n  local deadline=0\n\n  if [[ -n \"${HOST_CAMERA_PID}\" ]] && kill -0 \"${HOST_CAMERA_PID}\" 2>/dev/null; then\n    info \"Stopping host Gemini2 driver (pid=${HOST_CAMERA_PID}).\"\n    for signal in INT TERM KILL; do\n      kill \"-${signal}\" \"${HOST_CAMERA_PID}\" 2>/dev/null || true\n      deadline=$((SECONDS + GEMINI2_SIGNAL_TIMEOUT_SECONDS))\n      while ((SECONDS < deadline)); do\n        if ! kill -0 \"${HOST_CAMERA_PID}\" 2>/dev/null; then\n          break 2\n        fi\n        sleep 1\n      done\n    done\n  fi\n\n  HOST_CAMERA_PID=\"\"\n  cleanup_residual_gemini2_processes \"post-run Gemini2 cleanup\" || true\n\n  if [[ \"$(gemini2_device_state)\" == \"usb_present_no_video\" ]]; then\n    warn \"Gemini2 USB device is still present, but /dev/video nodes are missing after cleanup. Attempting full recovery.\"\n    if ! 
recover_gemini2_device \"post-run cleanup\" 0 1 0; then\n      warn \"Gemini2 full recovery did not restore /dev/video nodes after cleanup.\"\n    fi\n  fi\n\n  log_gemini2_device_state \"Gemini2 device state after cleanup\"\n}\n\ncleanup() {\n  stop_host_camera_driver\n\n  if (( XHOST_GRANTED )); then\n    xhost -si:localuser:root >/dev/null 2>&1 || true\n  fi\n}\ntrap cleanup EXIT INT TERM\n\nlaunch_host_camera() {\n  local launch_cmd\n\n  ensure_gemini2_ready_for_run\n  launch_cmd=$(\n    cat <<EOF\nsource /opt/ros/${ROS_DISTRO_DEFAULT}/setup.bash\nsource \"${HOST_WS}/install/setup.bash\"\n$(emit_ros_discovery_env_shell_exports)\nexec ros2 launch orbbec_camera gemini2.launch.py publish_tf:=false tf_publish_rate:=0.0\nEOF\n  )\n\n  info \"Launching Gemini2 driver on the host.\"\n  bash -lc \"${launch_cmd}\" >>\"${HOST_CAMERA_LOG}\" 2>&1 &\n  HOST_CAMERA_PID=$!\n  info \"Host camera log: ${HOST_CAMERA_LOG}\"\n}\n\nwait_for_camera_streams_ready() {\n  local readiness_output=\"\"\n\n  source_ros_setup \"${HOST_WS}\"\n\n  readiness_output=\"$(\n    python3 - \"${EXPECTED_CAMERA_INFO_FRAME}\" <<'PY' 2>&1\nimport sys\nimport time\n\nimport rclpy\nfrom rclpy.executors import SingleThreadedExecutor\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import CameraInfo, Image\n\nexpected_frame = sys.argv[1]\ntimeout_seconds = 90.0\n\n\nclass CameraReadinessProbe(Node):\n    def __init__(self):\n        super().__init__('orbbec_host_readiness_probe')\n        self.frames = {}\n        self.received = {\n            'color_info': False,\n            'depth_info': False,\n            'color_image': False,\n            'depth_image': False,\n        }\n        self.create_subscription(\n            CameraInfo,\n            '/camera/color/camera_info',\n            self._color_info_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            CameraInfo,\n            '/camera/depth/camera_info',\n   
         self._depth_info_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            Image,\n            '/camera/color/image_raw',\n            self._color_image_callback,\n            qos_profile_sensor_data)\n        self.create_subscription(\n            Image,\n            '/camera/depth/image_raw',\n            self._depth_image_callback,\n            qos_profile_sensor_data)\n\n    def _color_info_callback(self, msg: CameraInfo):\n        self.received['color_info'] = True\n        self.frames['color_info'] = msg.header.frame_id\n\n    def _depth_info_callback(self, msg: CameraInfo):\n        self.received['depth_info'] = True\n        self.frames['depth_info'] = msg.header.frame_id\n\n    def _color_image_callback(self, msg: Image):\n        self.received['color_image'] = True\n\n    def _depth_image_callback(self, msg: Image):\n        self.received['depth_image'] = True\n\n\ndef main():\n    rclpy.init(args=None)\n    node = CameraReadinessProbe()\n    executor = SingleThreadedExecutor()\n    executor.add_node(node)\n    deadline = time.monotonic() + timeout_seconds\n\n    try:\n        while time.monotonic() < deadline:\n            executor.spin_once(timeout_sec=0.2)\n            if all(node.received.values()):\n                break\n\n        missing = [name for name, received in node.received.items() if not received]\n        if missing:\n            print(\n                'Host stream readiness probe timed out waiting for: ' + ', '.join(missing),\n                file=sys.stderr)\n            return 1\n\n        color_frame = node.frames.get('color_info', '')\n        depth_frame = node.frames.get('depth_info', '')\n        print(f'/camera/color/camera_info frame_id={color_frame}')\n        print(f'/camera/depth/camera_info frame_id={depth_frame}')\n\n        if color_frame != expected_frame:\n            print(\n                f'Unexpected /camera/color/camera_info frame_id: {color_frame} '\n                
f'(expected {expected_frame})',\n                file=sys.stderr)\n            return 1\n        if depth_frame != expected_frame:\n            print(\n                f'Unexpected /camera/depth/camera_info frame_id: {depth_frame} '\n                f'(expected {expected_frame})',\n                file=sys.stderr)\n            return 1\n        return 0\n    finally:\n        executor.remove_node(node)\n        node.destroy_node()\n        rclpy.shutdown()\n\n\nsys.exit(main())\nPY\n  )\" || {\n    printf '%s\\n' \"${readiness_output}\" >&2\n    return 1\n  }\n\n  while IFS= read -r readiness_line; do\n    [[ -n \"${readiness_line}\" ]] || continue\n    info \"${readiness_line}\"\n  done <<< \"${readiness_output}\"\n\n  return 0\n}\n\nvalidate_container_launch_artifact() {\n  local validate_cmd=\"\"\n  local validate_args=(\n    run\n    --rm\n    -e \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\"\n    -e \"NVBLOX_LAUNCH_FILE=${LAUNCH_FILE}\"\n    -e \"EXPECTED_WORKSPACE_SPEC_VERSION=${CONTAINER_WORKSPACE_SPEC_VERSION}\"\n    -v \"${CONTAINER_WS}:/workspaces/isaac_ros-dev\"\n  )\n\n  append_jetson_container_args validate_args\n  append_ros_discovery_container_args validate_args\n  validate_cmd=$(\n    cat <<'EOF'\nset -euo pipefail\nrestore_nounset=0\nif [[ $- == *u* ]]; then\n  restore_nounset=1\n  set +u\nfi\nsource \"/opt/ros/${ROS_DISTRO}/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/install/setup.bash\"\nsource \"/workspaces/isaac_ros-dev/.setup-nvbox/container_workspace.env\"\nif (( restore_nounset )); then\n  set -u\nfi\nPACKAGE_PREFIX=\"$(ros2 pkg prefix nvblox_examples_bringup 2>/dev/null || true)\"\n[[ -n \"${PACKAGE_PREFIX}\" ]]\n[[ \"${STAMP_WORKSPACE_SPEC_VERSION:-}\" == \"${EXPECTED_WORKSPACE_SPEC_VERSION}\" ]]\n[[ -f \"${PACKAGE_PREFIX}/share/nvblox_examples_bringup/launch/${NVBLOX_LAUNCH_FILE}\" ]]\nEOF\n  )\n\n  info \"Validating prepared launch artifact inside the container.\"\n  docker_cmd \"${validate_args[@]}\" \"${DERIVED_IMAGE_TAG}\" bash -lc 
\"${validate_cmd}\" >/dev/null 2>&1\n}\n\nconfigure_display() {\n  if (( HEADLESS )); then\n    return 0\n  fi\n\n  if [[ -z \"${DISPLAY:-}\" ]]; then\n    warn \"DISPLAY is not set. Falling back to headless mode.\"\n    HEADLESS=1\n    return 0\n  fi\n\n  if [[ ! -d /tmp/.X11-unix ]]; then\n    warn \"/tmp/.X11-unix is missing. Falling back to headless mode.\"\n    HEADLESS=1\n    return 0\n  fi\n\n  if ! command -v xhost >/dev/null 2>&1; then\n    warn \"xhost is not available. Falling back to headless mode.\"\n    HEADLESS=1\n    return 0\n  fi\n\n  if xhost +si:localuser:root >/dev/null 2>&1; then\n    XHOST_GRANTED=1\n    USE_GUI=1\n    LAUNCH_FILE=\"orbbec_example.launch.py\"\n    return 0\n  fi\n\n  warn \"Failed to grant X11 access for the container. Falling back to headless mode.\"\n  HEADLESS=1\n}\n\nif (( HEADLESS )); then\n  LAUNCH_FILE=\"orbbec_debug.launch.py\"\nfi\n\nconfigure_display\nif (( HEADLESS )); then\n  LAUNCH_FILE=\"orbbec_debug.launch.py\"\nfi\n\nenable_managed_fastdds_udp_runtime \"${MANAGED_ROOT}\"\nexport_effective_ros_discovery_env\nlog_ros_discovery_env \"Host ROS discovery env\"\ninfo \"Container ROS discovery env: $(ros_discovery_env_summary)\"\n\nvalidate_prepared_image_state\nvalidate_prepared_host_workspace\nvalidate_prepared_container_workspace_state\n\nif ! validate_container_launch_artifact; then\n  die \"${CONTAINER_PREPARE_HINT}\"\nfi\n\nlaunch_host_camera\nif ! wait_for_camera_streams_ready; then\n  if ! kill -0 \"${HOST_CAMERA_PID}\" 2>/dev/null; then\n    die \"Host Gemini2 driver exited before camera streams became ready. Check ${HOST_CAMERA_LOG}.\"\n  fi\n  die \"Camera stream readiness probe failed. Check ${HOST_CAMERA_LOG}.\"\nfi\ninfo \"Camera streams and frame IDs are ready.\"\n\nif ! probe_container_camera_visibility; then\n  die \"Host camera streams are ready, but the container cannot discover host camera topics. 
Check the ROS discovery environment shown above, or run bash reComputer/scripts/nvblox/scripts/debug_runtime_connectivity.sh for a discovery snapshot.\"\nfi\n\nif ! probe_container_static_tf; then\n  die \"Host camera streams and container camera visibility are ready, but the managed static TF chain is not queryable inside the container.\"\nfi\n\ndocker_cmd rm -f \"${CONTAINER_NAME}\" >/dev/null 2>&1 || true\n\nDOCKER_ARGS=(\n  run\n  --rm\n  --name \"${CONTAINER_NAME}\"\n  -e \"ROS_DISTRO=${ROS_DISTRO_DEFAULT}\"\n  -e \"NVBLOX_LAUNCH_FILE=${LAUNCH_FILE}\"\n  -e \"EXPECTED_WORKSPACE_SPEC_VERSION=${CONTAINER_WORKSPACE_SPEC_VERSION}\"\n  -v \"${CONTAINER_WS}:/workspaces/isaac_ros-dev\"\n  -v \"${PROJECT_ROOT}/docker/launch_nvblox.sh:/opt/nvblox/bin/launch_nvblox.sh:ro\"\n)\nappend_jetson_container_args DOCKER_ARGS\nappend_ros_discovery_container_args DOCKER_ARGS\n\nif [[ -t 0 && -t 1 ]]; then\n  DOCKER_ARGS+=(-it)\nelse\n  DOCKER_ARGS+=(-i)\nfi\n\nif (( USE_GUI )); then\n  DOCKER_ARGS+=(\n    -e \"DISPLAY=${DISPLAY}\"\n    -e \"QT_X11_NO_MITSHM=1\"\n    -v /tmp/.X11-unix:/tmp/.X11-unix:rw\n  )\nelse\n  info \"Starting in headless mode with ${LAUNCH_FILE}.\"\nfi\n\ninfo \"Launching NVBlox demo in container ${CONTAINER_NAME}.\"\ndocker_cmd \"${DOCKER_ARGS[@]}\" \"${DERIVED_IMAGE_TAG}\" bash /opt/nvblox/bin/launch_nvblox.sh\n"
  },
  {
    "path": "reComputer/scripts/nvblox/start_nvblox_demo.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n# shellcheck disable=SC1091\nsource \"${SCRIPT_DIR}/lib/common.sh\"\n\nMODE_PREPARE=1\nMODE_RUN=1\nFORCE_REBUILD=0\nHEADLESS=0\nMANAGED_ROOT=\"${MANAGED_ROOT:-${MANAGED_ROOT_DEFAULT}}\"\nORIGINAL_ARGS=(\"$@\")\n\nensure_base_image() {\n  local base_image=\"\"\n  local share_url=\"\"\n  local archive_name=\"\"\n  local cache_dir=\"\"\n  local archive_path=\"\"\n\n  assert_command python3\n  ensure_docker_access\n\n  base_image=\"$(select_base_image || true)\"\n  if [[ -n \"${base_image}\" ]]; then\n    info \"Base image already present: ${base_image}. Skipping OneDrive download and docker load.\"\n    return 0\n  fi\n\n  install_packages_if_missing python3-requests python3-tqdm\n\n  share_url=\"$(resolve_nvblox_image_share_url)\"\n  archive_name=\"$(resolve_nvblox_image_archive_name)\"\n  cache_dir=\"$(resolve_nvblox_image_cache_dir)\"\n  archive_path=\"$(resolve_nvblox_image_archive_path \"${cache_dir}\" \"${archive_name}\")\"\n\n  mkdir -p \"${cache_dir}\"\n  cleanup_nvblox_partial_downloads \"${cache_dir}\"\n\n  info \"Ensuring NVBlox base image archive at ${archive_path}\"\n  python3 \"${SCRIPT_DIR}/onedrive_downloader.py\" \"${share_url}\" --filename \"${archive_name}\" --output-dir \"${cache_dir}\"\n  [[ -f \"${archive_path}\" ]] || die \"Base image archive was not created at ${archive_path}.\"\n\n  info \"Loading Docker image archive ${archive_path}\"\n  docker_cmd load -i \"${archive_path}\"\n\n  base_image=\"$(select_base_image || true)\"\n  [[ -n \"${base_image}\" ]] || die \"docker load finished, but no supported local base image was detected. 
Expected $(acceptable_base_image_hint).\"\n  info \"Base image ready: ${base_image}\"\n}\n\nusage() {\n  cat <<'EOF'\nUsage:\n  ./start_nvblox_demo.sh\n  ./start_nvblox_demo.sh --prepare-only\n  ./start_nvblox_demo.sh --run-only\n  ./start_nvblox_demo.sh --force-rebuild\n  ./start_nvblox_demo.sh --headless\n\nEnvironment:\n  MANAGED_ROOT                Override managed workspace root. Default: ~/nvblox_demo\n  NVBLOX_IMAGE_SHARE_URL      Override the default OneDrive share link\n  NVBLOX_IMAGE_ARCHIVE_NAME   Override the downloaded archive filename\n  NVBLOX_IMAGE_CACHE_DIR      Override the Docker archive cache directory\nEOF\n}\n\nwhile (($#)); do\n  case \"$1\" in\n    --prepare-only)\n      MODE_PREPARE=1\n      MODE_RUN=0\n      ;;\n    --run-only)\n      MODE_PREPARE=0\n      MODE_RUN=1\n      ;;\n    --force-rebuild)\n      FORCE_REBUILD=1\n      ;;\n    --headless)\n      HEADLESS=1\n      ;;\n    -h|--help)\n      usage\n      exit 0\n      ;;\n    *)\n      die \"Unknown argument: $1\"\n      ;;\n  esac\n  shift\ndone\n\nif (( MODE_PREPARE == 0 && MODE_RUN == 0 )); then\n  die \"Nothing to do. 
Use the default mode, --prepare-only, or --run-only.\"\nfi\n\nensure_supported_user_context\nif should_reexec_as_setup_user; then\n  printf '[reComputer][nvblox] Re-entering as %s.\\n' \"${SETUP_USER_NAME}\" >&2\n  reexec_as_setup_user \"${SCRIPT_DIR}/start_nvblox_demo.sh\" \"${ORIGINAL_ARGS[@]}\"\nfi\n\nguard_managed_root_path \"${MANAGED_ROOT}\"\nif (( MODE_PREPARE )); then\n  repair_managed_root_ownership \"${MANAGED_ROOT}\"\n  bootstrap_managed_root \"${MANAGED_ROOT}\"\nelse\n  require_bootstrapped_managed_root \"${MANAGED_ROOT}\"\nfi\n\nmkdir -p \"${MANAGED_ROOT}/logs\"\nRUN_LOG=\"${MANAGED_ROOT}/logs/run-$(date '+%Y%m%d-%H%M%S').log\"\nexec > >(tee -a \"${RUN_LOG}\") 2>&1\n\ninfo \"Managed root: ${MANAGED_ROOT}\"\ninfo \"Run log: ${RUN_LOG}\"\ninfo \"Mode: prepare=${MODE_PREPARE} run=${MODE_RUN} force_rebuild=${FORCE_REBUILD} headless=${HEADLESS}\"\n\nif (( MODE_PREPARE )); then\n  ensure_base_image\nfi\n\nPREFLIGHT_ARGS=(--managed-root \"${MANAGED_ROOT}\")\nif (( MODE_PREPARE )); then\n  PREFLIGHT_ARGS+=(--prepare)\nfi\nif (( MODE_RUN )); then\n  PREFLIGHT_ARGS+=(--run)\nfi\nbash \"${SCRIPT_DIR}/scripts/preflight.sh\" \"${PREFLIGHT_ARGS[@]}\"\n\nif (( MODE_PREPARE )); then\n  PREPARE_ARGS=(--managed-root \"${MANAGED_ROOT}\")\n  if (( FORCE_REBUILD )); then\n    PREPARE_ARGS+=(--force-rebuild)\n  fi\n\n  bash \"${SCRIPT_DIR}/scripts/prepare_host.sh\" \"${PREPARE_ARGS[@]}\"\n  bash \"${SCRIPT_DIR}/scripts/prepare_container.sh\" \"${PREPARE_ARGS[@]}\"\nfi\n\nif (( MODE_RUN )); then\n  RUN_ARGS=(--managed-root \"${MANAGED_ROOT}\")\n  if (( HEADLESS )); then\n    RUN_ARGS+=(--headless)\n  fi\n\n  bash \"${SCRIPT_DIR}/scripts/run_demo.sh\" \"${RUN_ARGS[@]}\"\nfi\n\ninfo \"Done.\"\n"
  },
  {
    "path": "reComputer/scripts/ollama/clean.sh",
    "content": "#!/bin/bash\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\n# search local image\nimg_tag=$($JETSON_REPO_PATH/autotag -p local ollama)\n# 检查返回值\nif [ $? -eq 0 ]; then\n    echo \"Found Image successfully.\"\n    sudo docker rmi $img_tag\nelse\n    echo \"[warn] Found Image failed with error code $?. skip delete Image.\"\nfi\n# \n# 4 build whl\nread -p \"Delete all data for ollama? (y/n): \" choice\nif [[ $choice == \"y\" || $choice == \"Y\" ]]; then\n    echo \"Delete=> $JETSON_REPO_PATH/data/models/ollama/\"\n    sudo rm -rf $JETSON_REPO_PATH/data/models/ollama/\n    echo \"Clean Data Done.\"\nelse\n    echo \"[warn] Skip Clean Data.\"\nfi\n"
  },
  {
    "path": "reComputer/scripts/ollama/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 15  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/ollama/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/ollama/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n# try stop old server\ndocker rm -f ollama\n# run Front-end\n./run.sh $(./autotag ollama)\n# user only can access with http://ip:11434\n"
  },
  {
    "path": "reComputer/scripts/parler-tts/clean.sh",
    "content": "#!/bin/bash\n\n# get image\nsource ./getVersion.sh\n\n# remove docker image\nsudo docker rmi feiticeir0/parler-tts:${TAG_IMAGE}\n"
  },
  {
    "path": "reComputer/scripts/parler-tts/getVersion.sh",
    "content": "#!/bin/bash\n# based on dusty - https://github.com/dusty-nv/jetson-containers/blob/master/jetson_containers/l4t_version.sh\n# and llama-factory init script\n\n# we only have images for these - 36.2.0 works on 36.3.0\nL4T_VERSIONS=(\"35.3.1\", \"35.4.1\", \"36.2.0\", \"36.3.0\")\n\nARCH=$(uname -i)\n# echo \"ARCH:  $ARCH\"\n\nif [ $ARCH = \"aarch64\" ]; then\n\tL4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)\n\n\tif [ -z \"$L4T_VERSION_STRING\" ]; then\n\t\t#echo \"reading L4T version from \\\"dpkg-query --show nvidia-l4t-core\\\"\"\n\n\t\tL4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)\n\t\tL4T_VERSION_ARRAY=(${L4T_VERSION_STRING//./ })\n\n\t\t#echo ${L4T_VERSION_ARRAY[@]}\n\t\t#echo ${#L4T_VERSION_ARRAY[@]}\n\n\t\tL4T_RELEASE=${L4T_VERSION_ARRAY[0]}\n\t\tL4T_REVISION=${L4T_VERSION_ARRAY[1]}\n\telse\n\t\t#echo \"reading L4T version from /etc/nv_tegra_release\"\n\n\t\tL4T_RELEASE=$(echo $L4T_VERSION_STRING | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')\n\t\tL4T_REVISION=$(echo $L4T_VERSION_STRING | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')\n\tfi\n\n\tL4T_REVISION_MAJOR=${L4T_REVISION:0:1}\n\tL4T_REVISION_MINOR=${L4T_REVISION:2:1}\n\n\tL4T_VERSION=\"$L4T_RELEASE.$L4T_REVISION\"\n\n\tIMAGE_TAG=$L4T_VERSION\n\n\t#echo \"L4T_VERSION :  $L4T_VERSION\"\n\t#echo \"L4T_RELEASE :  $L4T_RELEASE\"\n\t#echo \"L4T_REVISION:  $L4T_REVISION\"\n\nelif [ $ARCH != \"x86_64\" ]; then\n\techo \"unsupported architecture:  $ARCH\"\n\texit 1\nfi\n\n\nif [[ ! \" ${L4T_VERSIONS[@]} \" =~ \" ${L4T_VERSION} \" ]]; then\n    echo \"L4T_VERSION is not in the allowed versions list. Exiting.\"\n    exit 1\nfi\n\n# check if 36 to change IMAGE_TAG\nif [ ${L4T_RELEASE} -eq \"36\" ]; then\n\t# image tag will be 2.0\n\tIMAGE_TAG=\"36.2.0\"\nfi\n\n"
  },
  {
    "path": "reComputer/scripts/parler-tts/init.sh",
    "content": "#!/bin/bash\n\necho \"Creating models directory at /home/$USER/models\"\n\n# Create Model dir in User home\nmkdir /home/$USER/models\n\n\n"
  },
  {
    "path": "reComputer/scripts/parler-tts/readme.md",
    "content": "# Parler TTS Mini: Expresso\n\n\nParler-TTS Mini: Expresso is a fine-tuned version of Parler-TTS Mini v0.1 on the Expresso dataset. It is a lightweight text-to-speech (TTS) model that can generate high-quality, natural sounding speech. Compared to the original model, Parler-TTS Expresso provides superior control over emotions (happy, confused, laughing, sad) and consistent voices (Jerry, Thomas, Elisabeth, Talia).\n\n[You can get more information on HuggingFace](https://huggingface.co/parler-tts/parler-tts-mini-expresso)\n\n![Gradio Interface](audio1.png)\n![Gradio Interface result](audio2.png)\n\n## Getting started\n#### Prerequisites\n* SeeedStudio reComputer J402 [Buy one](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)\n* Audio Columns\n* Docker installed\n\n## Installation\nPyPI (best)\n\n```bash\npip install jetson-examples\n```\n\n## Usage\n### Method 1\n##### If you're running inside your reComputer\n1. Type the following command in a terminal\n```bash\nreComputer run parler-tts\n```\n2. Open a web browser and go to [http://localhost:7860](http://localhost:7860)\n3. A Gradio interface will appear with two text boxes\n    1. The first for you to write the text that will be converted to audio\n    2. A second one for you to describe the speaker: Male/Female, tone, pitch, mood, etc. See the examples in Parler-tts page. \n4. When you press submit, after a while, the audio will appear on the right box. You can also download the file if you want. \n\n### Method 2\n##### If you want to connect remotely with ssh to the reComputer\n1. Connect using SSH but redirecting the 7860 port\n```bash\nssh -L 7860:localhost:7860 <username>@<reComputer_IP>\n```\n2. Type the following command in a terminal\n```bash\nreComputer run parler-tts\n```\n3. Open a web browser (on your machine) and go to [http://localhost:7860](http://localhost:7860)\n\n4. The same instructions above. 
\n\n## Manual Run\n\nIf you want to run the docker image outside jetson-examples, here's the command:\n\n```bash\ndocker run --rm -p 7860:7860 --runtime=nvidia -v ${MODELS_DIR}:/app feiticeir0/parler_tts:r36.2.0\n```\n\n**MODELS_DIR** is a directory where HuggingFace will place the models downloaded from its hub.  If you want to run the image several times, the code will only download the model once, if that directory stays the same. \n\nThis is controlled by an environment variable called HF_HOME. \n\n[More info about HF environment variables](https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables)\n"
  },
  {
    "path": "reComputer/scripts/parler-tts/run.sh",
    "content": "#!/bin/bash\n\nMODELS_DIR=/home/$USER/models\n\n# get L4T version\n# it exports a variable IMAGE_TAG\nsource ./getVersion.sh\n\n# pull docker image\necho \"docker push feiticeir0/parler_tts:${IMAGE_TAG}\"\n\ndocker run \\\n\t--rm \\\n\t-p 7860:7860 \\\n\t--runtime=nvidia \\\n\t-v $(MODELS_DIR):/app \\\n\tfeiticeir0/parler_tts:${IMAGE_TAG}\n"
  },
  {
    "path": "reComputer/scripts/qwen3.5-4b/Dockerfile.jetson",
    "content": "# Jetson Orin (sm_87) llama.cpp inference image\n# Build flow is maintained outside this repo; this file is kept here as the\n# reference runtime image definition used by the demo script.\n\nFROM ubuntu:22.04\n\nENV DEBIAN_FRONTEND=noninteractive\n\nRUN apt-get update && \\\n    apt-get install -y --no-install-recommends \\\n        libgomp1 curl && \\\n    rm -rf /var/lib/apt/lists/*\n\nCOPY dist/bin/llama-server /usr/local/bin/\nCOPY dist/bin/llama-cli /usr/local/bin/\n\nRUN mkdir -p /usr/local/lib/llama\nCOPY dist/lib/ /usr/local/lib/llama/\nRUN echo \"/usr/local/lib/llama\" > /etc/ld.so.conf.d/llama.conf && ldconfig\n\nENV PATH=/usr/local/cuda/bin:$PATH\nENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/lib/llama\n\nVOLUME [\"/models\"]\nWORKDIR /models\n\nENV LLAMA_ARG_HOST=0.0.0.0\nENV LLAMA_ARG_PORT=8080\n\nEXPOSE 8080\n\nHEALTHCHECK --interval=30s --timeout=5s \\\n    CMD curl -f http://localhost:8080/health || exit 1\n\nENTRYPOINT [\"llama-server\"]\n"
  },
  {
    "path": "reComputer/scripts/qwen3.5-4b/README.md",
    "content": "# Jetson-Example: Run Qwen3.5-4B on NVIDIA Jetson\n\nThis example runs **Qwen3.5-4B** on Jetson Orin with **llama.cpp** and exposes an OpenAI-compatible API server.\n\nIt uses:\n- a prebuilt Docker image archive imported locally on first run\n- the `unsloth/Qwen3.5-4B-GGUF` model in `Q4_K_M` format\n\nSupported JetPack/L4T targets:\n- JetPack 6.1 -> L4T 36.3.0\n- JetPack 6.2 -> L4T 36.4.0\n- JetPack 6.2.1 -> L4T 36.4.3 / 36.4.4\n\nTest status:\n- validated on JetPack 6.2\n- expected to work on JetPack 6.1 to 6.2.1\n\n## Getting Started\n\n### Prerequisites\n- NVIDIA Jetson Orin device\n- Docker installed and available\n- `aria2` installed\n\n### Installation\n\nPyPI:\n```sh\npip install jetson-examples\n```\n\nGitHub:\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n## Usage\n\nStart the demo:\n\n```sh\nreComputer run qwen3.5-4b\n```\n\nThe first run downloads the image archive and model, then starts the server on:\n\n```text\nhttp://127.0.0.1:8080\n```\n\nCheck the model list:\n\n```sh\ncurl http://127.0.0.1:8080/v1/models\n```\n\nChat via OpenAI-compatible API:\n\n```sh\ncurl http://127.0.0.1:8080/v1/chat/completions \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"model\": \"qwen\",\n    \"messages\": [{\"role\": \"user\", \"content\": \"Hello!\"}],\n    \"max_tokens\": 512\n  }'\n```\n\nPython example:\n\n```python\nfrom openai import OpenAI\n\nclient = OpenAI(base_url=\"http://127.0.0.1:8080/v1\", api_key=\"none\")\nresponse = client.chat.completions.create(\n    model=\"qwen\",\n    messages=[{\"role\": \"user\", \"content\": \"Hello!\"}],\n)\nprint(response.choices[0].message.content)\n```\n\n## Environment Variables\n\n- `QWEN35_PORT`: host port, default `8080`\n- `QWEN35_CTX_SIZE`: context length, default `8192`\n- `QWEN35_GPU_LAYERS`: override automatic GPU layer selection\n- `QWEN35_MODELS_DIR`: model cache directory, default `$HOME/models`\n\n## Cleanup\n\nStop 
and remove the container:\n\n```sh\nreComputer clean qwen3.5-4b\n```\n\nThe downloaded image and model cache are kept for faster startup next time.\n"
  },
  {
    "path": "reComputer/scripts/qwen3.5-4b/clean.sh",
    "content": "#!/bin/bash\nset -euo pipefail\n\nCONTAINER_NAME=\"qwen3.5-4b\"\n\nensure_docker_access() {\n    if ! command -v docker >/dev/null 2>&1; then\n        echo \"docker command not found.\"\n        echo \"Please install Docker first, then rerun this command.\"\n        exit 1\n    fi\n\n    if docker info >/dev/null 2>&1; then\n        return 0\n    fi\n\n    if id -nG \"$USER\" | grep -qw docker; then\n        echo \"Current user is already in docker group, but docker is still unavailable.\"\n        echo \"Please make sure Docker daemon is running, for example:\"\n        echo \"sudo systemctl enable --now docker\"\n        exit 1\n    fi\n\n    echo \"Current user has no docker permission.\"\n    read -r -p \"Add current user ($USER) to docker group now? (y/n): \" choice\n    case \"$choice\" in\n        y|Y)\n            if ! sudo -v; then\n                echo \"Failed to authenticate sudo. Exiting.\"\n                exit 1\n            fi\n            if ! getent group docker >/dev/null 2>&1; then\n                sudo groupadd docker\n            fi\n            sudo usermod -aG docker \"$USER\"\n            echo \"Added $USER to docker group.\"\n            echo \"Please log out and log back in (or reboot), then rerun:\"\n            echo \"reComputer clean qwen3.5-4b\"\n            exit 1\n            ;;\n        *)\n            echo \"Skipped docker group setup.\"\n            echo \"You can run this manually:\"\n            echo \"sudo usermod -aG docker $USER\"\n            exit 1\n            ;;\n    esac\n}\n\nensure_docker_access\nDOCKER_CMD=(docker)\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    \"${DOCKER_CMD[@]}\" stop \"$CONTAINER_NAME\" >/dev/null\nfi\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    \"${DOCKER_CMD[@]}\" rm \"$CONTAINER_NAME\" >/dev/null\n    echo \"Container $CONTAINER_NAME removed.\"\nelse\n    echo \"Container $CONTAINER_NAME does not 
exist.\"\nfi\n\necho \"Image and model cache are kept locally for faster next startup.\"\n"
  },
  {
    "path": "reComputer/scripts/qwen3.5-4b/config.yaml",
    "content": "# Tested and compatible JetPack/L4T versions.\nALLOWED_L4T_VERSIONS:\n  - 36.3.0\n  - 36.4.0\n  - 36.4.3\n  - 36.4.4\nREQUIRED_DISK_SPACE: 15\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\n  - aria2\nDOCKER:\n  ENABLE: false\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/qwen3.5-4b/init.sh",
    "content": "#!/bin/bash\n\nsource \"$(dirname \"$(realpath \"$0\")\")/../utils.sh\"\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n"
  },
  {
    "path": "reComputer/scripts/qwen3.5-4b/run.sh",
    "content": "#!/bin/bash\nset -euo pipefail\n\nCONTAINER_NAME=\"qwen3.5-4b\"\nIMAGE_NAME=\"${QWEN35_IMAGE_NAME:-llama-jetson}\"\nIMAGE_ARCHIVE_URL=\"${QWEN35_IMAGE_ARCHIVE_URL:-https://seeedstudio88-my.sharepoint.com/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/_layouts/15/download.aspx?share=IQBA2papRoneTrRhf5DQ_dOnAV3EvVgvJ3LKb1q8qltMlSM}\"\nMODEL_URL=\"${QWEN35_MODEL_URL:-https://huggingface.co/unsloth/Qwen3.5-4B-GGUF/resolve/main/Qwen3.5-4B-Q4_K_M.gguf}\"\nMODELS_DIR=\"${QWEN35_MODELS_DIR:-$HOME/models}\"\nMODEL_FILE=\"${QWEN35_MODEL_FILE:-$MODELS_DIR/Qwen3.5-4B-Q4_K_M.gguf}\"\nHOST_PORT=\"${QWEN35_PORT:-8080}\"\nCONTAINER_PORT=8080\nSTARTUP_TIMEOUT=\"${QWEN35_STARTUP_TIMEOUT:-600}\"\nGPU_FLAGS=()\nLIB_MOUNTS=()\n\nensure_docker_access() {\n    if ! command -v docker >/dev/null 2>&1; then\n        echo \"docker command not found.\"\n        echo \"Please install Docker first, then rerun this command.\"\n        exit 1\n    fi\n\n    if docker info >/dev/null 2>&1; then\n        return 0\n    fi\n\n    if id -nG \"$USER\" | grep -qw docker; then\n        echo \"Current user is already in docker group, but docker is still unavailable.\"\n        echo \"Please make sure Docker daemon is running, for example:\"\n        echo \"sudo systemctl enable --now docker\"\n        exit 1\n    fi\n\n    echo \"Current user has no docker permission.\"\n    read -r -p \"Add current user ($USER) to docker group now? (y/n): \" choice\n    case \"$choice\" in\n        y|Y)\n            if ! sudo -v; then\n                echo \"Failed to authenticate sudo. Exiting.\"\n                exit 1\n            fi\n            if ! 
getent group docker >/dev/null 2>&1; then\n                sudo groupadd docker\n            fi\n            sudo usermod -aG docker \"$USER\"\n            echo \"Added $USER to docker group.\"\n            echo \"Please log out and log back in (or reboot), then rerun:\"\n            echo \"reComputer run qwen3.5-4b\"\n            exit 1\n            ;;\n        *)\n            echo \"Skipped docker group setup.\"\n            echo \"You can run this manually:\"\n            echo \"sudo usermod -aG docker $USER\"\n            exit 1\n            ;;\n    esac\n}\n\nensure_docker_access\nDOCKER_CMD=(docker)\n\nensure_image() {\n    if \"${DOCKER_CMD[@]}\" image inspect \"$IMAGE_NAME\" >/dev/null 2>&1; then\n        echo \"Docker image already exists locally: $IMAGE_NAME\"\n        return 0\n    fi\n\n    local archive_path\n    archive_path=\"$(mktemp /tmp/qwen3.5-4b-image.XXXXXX.tar.gz)\"\n\n    echo \"Downloading Docker image archive...\"\n    aria2c \\\n        --continue=true \\\n        --max-connection-per-server=8 \\\n        --split=8 \\\n        --min-split-size=10M \\\n        --retry-wait=5 \\\n        --max-tries=0 \\\n        --dir=\"$(dirname \"$archive_path\")\" \\\n        --out=\"$(basename \"$archive_path\")\" \\\n        \"$IMAGE_ARCHIVE_URL\"\n\n    echo \"Importing Docker image...\"\n    \"${DOCKER_CMD[@]}\" load -i \"$archive_path\"\n    rm -f \"$archive_path\"\n}\n\nensure_model() {\n    mkdir -p \"$MODELS_DIR\"\n    if [ -f \"$MODEL_FILE\" ]; then\n        echo \"Model already exists locally: $MODEL_FILE\"\n        return 0\n    fi\n\n    echo \"Downloading model...\"\n    aria2c \\\n        --continue=true \\\n        --max-connection-per-server=8 \\\n        --split=8 \\\n        --min-split-size=10M \\\n        --retry-wait=5 \\\n        --max-tries=0 \\\n        --dir=\"$MODELS_DIR\" \\\n        --out=\"$(basename \"$MODEL_FILE\")\" \\\n        \"$MODEL_URL\"\n}\n\nselect_gpu_layers() {\n    local total_mem_mb\n    total_mem_mb=\"$(free -m 
| awk '/^Mem:/{print $2}')\"\n\n    if [ \"$total_mem_mb\" -ge 60000 ]; then\n        echo 99\n    elif [ \"$total_mem_mb\" -ge 14000 ]; then\n        echo 80\n    elif [ \"$total_mem_mb\" -ge 7000 ]; then\n        echo 40\n    else\n        echo 20\n    fi\n}\n\nprobe_gpu_mode() {\n    if \"${DOCKER_CMD[@]}\" run --rm --runtime nvidia \"$IMAGE_NAME\" --help >/dev/null 2>&1; then\n        GPU_FLAGS=(--runtime nvidia)\n        echo \"Using GPU mode: --runtime nvidia\"\n        return 0\n    fi\n\n    if \"${DOCKER_CMD[@]}\" run --rm --gpus all \"$IMAGE_NAME\" --help >/dev/null 2>&1; then\n        GPU_FLAGS=(--gpus all)\n        echo \"Using GPU mode: --gpus all\"\n        return 0\n    fi\n\n    echo \"Failed to detect a working Docker GPU mode.\"\n    echo \"Tried: --runtime nvidia and --gpus all\"\n    echo \"Please check Docker + NVIDIA Container Runtime on this device.\"\n    exit 1\n}\n\ncollect_library_mounts() {\n    local candidate\n    local candidates=(\n        \"/usr/local/cuda/lib64:/usr/local/cuda/lib64:ro\"\n        \"/usr/lib/aarch64-linux-gnu/nvidia:/usr/lib/aarch64-linux-gnu/nvidia:ro\"\n        \"/usr/lib/aarch64-linux-gnu/libcuda.so.1:/usr/lib/aarch64-linux-gnu/libcuda.so.1:ro\"\n    )\n\n    for candidate in \"${candidates[@]}\"; do\n        if [ -e \"${candidate%%:*}\" ]; then\n            LIB_MOUNTS+=(-v \"$candidate\")\n        fi\n    done\n}\n\nwait_for_server_ready() {\n    local endpoint=\"http://127.0.0.1:${HOST_PORT}/v1/models\"\n    local elapsed=0\n    local interval=5\n    local raw_response=\"\"\n    local response_body=\"\"\n    local http_code=\"000\"\n\n    if ! 
command -v curl >/dev/null 2>&1; then\n        echo \"curl not found, skip readiness probing.\"\n        return 0\n    fi\n\n    echo \"Waiting for Qwen server to be ready at ${endpoint} (timeout: ${STARTUP_TIMEOUT}s)...\"\n    while [ \"$elapsed\" -lt \"$STARTUP_TIMEOUT\" ]; do\n        if [ -z \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n            echo \"Container exited before model became ready.\"\n            echo \"Recent logs:\"\n            \"${DOCKER_CMD[@]}\" logs --tail 80 \"$CONTAINER_NAME\"\n            return 1\n        fi\n\n        raw_response=\"$(curl -s --max-time 3 -w \"\\n%{http_code}\" \"$endpoint\" 2>/dev/null || true)\"\n        http_code=\"$(printf '%s' \"$raw_response\" | tail -n 1)\"\n        response_body=\"$(printf '%s' \"$raw_response\" | sed '$d')\"\n\n        if [ \"$http_code\" = \"200\" ] && echo \"$response_body\" | grep -q \"\\\"data\\\"\"; then\n            return 0\n        fi\n\n        if [ \"$http_code\" = \"503\" ] && echo \"$response_body\" | grep -q \"Loading model\"; then\n            if [ $((elapsed % 30)) -eq 0 ]; then\n                echo \"Model is still loading... (${elapsed}s)\"\n            fi\n            sleep \"$interval\"\n            elapsed=$((elapsed + interval))\n            continue\n        fi\n\n        if [ $((elapsed % 30)) -eq 0 ]; then\n            echo \"Waiting model readiness... 
(${elapsed}s, http=${http_code})\"\n        fi\n        sleep \"$interval\"\n        elapsed=$((elapsed + interval))\n    done\n\n    echo \"Model is still not ready after ${STARTUP_TIMEOUT}s.\"\n    echo \"Recent logs:\"\n    \"${DOCKER_CMD[@]}\" logs --tail 80 \"$CONTAINER_NAME\"\n    return 1\n}\n\nensure_image\nensure_model\nprobe_gpu_mode\ncollect_library_mounts\n\nGPU_LAYERS=\"${QWEN35_GPU_LAYERS:-$(select_gpu_layers)}\"\necho \"Using --n-gpu-layers ${GPU_LAYERS}\"\n\nif [ \"$(\"${DOCKER_CMD[@]}\" ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    echo \"Container $CONTAINER_NAME is already running.\"\nelif [ \"$(\"${DOCKER_CMD[@]}\" ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    echo \"Container $CONTAINER_NAME already exists but is not running.\"\n    echo \"Recreating with current settings...\"\n    \"${DOCKER_CMD[@]}\" rm -f \"$CONTAINER_NAME\" >/dev/null\n    \"${DOCKER_CMD[@]}\" run -d \\\n        --name \"$CONTAINER_NAME\" \\\n        \"${GPU_FLAGS[@]}\" \\\n        -p \"${HOST_PORT}:${CONTAINER_PORT}\" \\\n        -v \"$MODELS_DIR\":/models \\\n        \"${LIB_MOUNTS[@]}\" \\\n        -e LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/lib/aarch64-linux-gnu/nvidia:/usr/lib/aarch64-linux-gnu:/usr/local/lib/llama \\\n        \"$IMAGE_NAME\" \\\n        --model \"/models/$(basename \"$MODEL_FILE\")\" \\\n        --ctx-size \"${QWEN35_CTX_SIZE:-8192}\" \\\n        --host 0.0.0.0 \\\n        --port \"${CONTAINER_PORT}\" \\\n        --n-gpu-layers \"${GPU_LAYERS}\" >/dev/null\nelse\n    echo \"Creating and starting container $CONTAINER_NAME...\"\n    \"${DOCKER_CMD[@]}\" run -d \\\n        --name \"$CONTAINER_NAME\" \\\n        \"${GPU_FLAGS[@]}\" \\\n        -p \"${HOST_PORT}:${CONTAINER_PORT}\" \\\n        -v \"$MODELS_DIR\":/models \\\n        \"${LIB_MOUNTS[@]}\" \\\n        -e LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/lib/aarch64-linux-gnu/nvidia:/usr/lib/aarch64-linux-gnu:/usr/local/lib/llama \\\n        \"$IMAGE_NAME\" \\\n        --model 
\"/models/$(basename \"$MODEL_FILE\")\" \\\n        --ctx-size \"${QWEN35_CTX_SIZE:-8192}\" \\\n        --host 0.0.0.0 \\\n        --port \"${CONTAINER_PORT}\" \\\n        --n-gpu-layers \"${GPU_LAYERS}\" >/dev/null\nfi\n\nif ! wait_for_server_ready; then\n    exit 1\nfi\n\necho \"Qwen3.5-4B server is ready at: http://127.0.0.1:${HOST_PORT}\"\necho \"Check models:\"\necho \"curl http://127.0.0.1:${HOST_PORT}/v1/models\"\necho \"Chat API example:\"\necho \"curl http://127.0.0.1:${HOST_PORT}/v1/chat/completions -H 'Content-Type: application/json' -d '{\\\"model\\\":\\\"qwen\\\",\\\"messages\\\":[{\\\"role\\\":\\\"user\\\",\\\"content\\\":\\\"Hello!\\\"}]}'\"\necho \"Follow server logs:\"\necho \"${DOCKER_CMD[*]} logs -f $CONTAINER_NAME\"\n"
  },
  {
    "path": "reComputer/scripts/ros1-jp6/README.md",
    "content": "# Jetson-Example: Run ROS 1 Noetic on NVIDIA Jetson\n\nThis example downloads a prebuilt ROS 1 Noetic Docker archive from a public OneDrive/SharePoint link, loads it into Docker as:\n\n```sh\nros:noetic\n```\n\nArchive size: about **1.27 GB**\n\nSupported JetPack/L4T versions:\n- JetPack 6.1 -> L4T 36.4.0\n- JetPack 6.2 -> L4T 36.4.3\n- JetPack 6.2.1 -> L4T 36.4.4\n\n## Getting Started\n\nPyPI (recommended):\n\n```sh\npip install jetson-examples\n```\n\nGitHub (developer):\n\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n## Usage\n\nLaunch an interactive shell in the container:\n\n```sh\nreComputer run ros1-jp6\n```\n\nThe example will:\n\n1. Download the Docker archive from SharePoint if it is not cached\n2. Run `docker load -i` to import the image\n3. Start the container with Jetson-friendly Docker flags\n\nThe SharePoint share link is a normal `:u:/...` public link. The downloader automatically appends `download=1`, so you do not need to manually rewrite the URL.\n\nCache location:\n\n```sh\n~/.cache/jetson-examples/ros1-jp6/ros-noetic-jp6.tar\n```\n\n## Verify The Image\n\nOnly prepare the image and skip container startup:\n\n```sh\nROS1_JP6_SKIP_RUN=1 reComputer run ros1-jp6\n```\n\nRun a non-interactive ROS smoke test:\n\n```sh\nROS1_JP6_COMMAND='source /opt/ros/noetic/setup.bash && rosversion -d' reComputer run ros1-jp6\n```\n\n## Export With docker save\n\nAfter the image is loaded locally, save it back to a tar archive:\n\n```sh\nROS1_JP6_SKIP_RUN=1 \\\nROS1_JP6_SAVE_PATH=/tmp/ros-noetic-jp6.tar \\\nreComputer run ros1-jp6\n```\n\nThis is equivalent to:\n\n```sh\ndocker save -o /tmp/ros-noetic-jp6.tar ros:noetic\n```\n\n## Environment Variables\n\nYou can override the default behavior with these 
variables:\n\n```sh\nROS1_JP6_SHARE_URL\nROS1_JP6_ARCHIVE_NAME\nROS1_JP6_CACHE_DIR\nROS1_JP6_IMAGE\nROS1_JP6_CONTAINER_NAME\nROS1_JP6_COMMAND\nROS1_JP6_SKIP_RUN\nROS1_JP6_SAVE_PATH\n```\n\n## Cleanup\n\nOnly remove the container:\n\n```sh\nreComputer clean ros1-jp6\n```\n\nThe local image cache and the downloaded archive are kept.\n"
  },
  {
    "path": "reComputer/scripts/ros1-jp6/clean.sh",
    "content": "#!/bin/bash\nset -euo pipefail\n\nCONTAINER_NAME=\"${ROS1_JP6_CONTAINER_NAME:-ros1-jp6}\"\n\nensure_docker_access() {\n    if ! command -v docker >/dev/null 2>&1; then\n        echo \"docker command not found.\"\n        echo \"Please install Docker first, then rerun this command.\"\n        exit 1\n    fi\n\n    if docker info >/dev/null 2>&1; then\n        return 0\n    fi\n\n    echo \"Docker daemon is not available to the current user.\"\n    echo \"Please make sure Docker is running and your user can access /var/run/docker.sock.\"\n    exit 1\n}\n\nensure_docker_access\n\nif [ \"$(docker ps -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    docker stop \"${CONTAINER_NAME}\"\nfi\n\nif [ \"$(docker ps -a -q -f name=^/${CONTAINER_NAME}$)\" ]; then\n    docker rm \"${CONTAINER_NAME}\"\n    echo \"Container ${CONTAINER_NAME} removed.\"\nelse\n    echo \"Container ${CONTAINER_NAME} does not exist.\"\nfi\n\necho \"Image cache and downloaded archive are kept locally.\"\n"
  },
  {
    "path": "reComputer/scripts/ros1-jp6/config.yaml",
    "content": "ALLOWED_L4T_VERSIONS:\n  - 36.4.0\n  - 36.4.3\n  - 36.4.4\nREQUIRED_DISK_SPACE: 10\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n  - nvidia-jetpack\n  - python3-requests\n  - python3-tqdm\nDOCKER:\n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/ros1-jp6/init.sh",
    "content": "#!/bin/bash\n\nsource \"$(dirname \"$(realpath \"$0\")\")/../utils.sh\"\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n"
  },
  {
    "path": "reComputer/scripts/ros1-jp6/run.sh",
    "content": "#!/bin/bash\nset -euo pipefail\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nDOWNLOADER_SCRIPT=\"${SCRIPT_DIR}/../nvblox/onedrive_downloader.py\"\nIMAGE_NAME=\"${ROS1_JP6_IMAGE:-ros:noetic}\"\nCONTAINER_NAME=\"${ROS1_JP6_CONTAINER_NAME:-ros1-jp6}\"\nSHARE_URL=\"${ROS1_JP6_SHARE_URL:-https://seeedstudio88-my.sharepoint.com/:u:/g/personal/youjiang_yu_seeedstudio88_onmicrosoft_com/IQCOgjRBDytqT4jKdktOzhdIAUf97NfnQJ4lk_DAHpLTaRY?e=Nw0RjJ}\"\nCACHE_DIR=\"${ROS1_JP6_CACHE_DIR:-$HOME/.cache/jetson-examples/ros1-jp6}\"\nARCHIVE_NAME=\"${ROS1_JP6_ARCHIVE_NAME:-ros-noetic-jp6.tar}\"\nARCHIVE_PATH=\"${CACHE_DIR%/}/${ARCHIVE_NAME}\"\nSAVE_PATH=\"${ROS1_JP6_SAVE_PATH:-}\"\nSKIP_RUN=\"${ROS1_JP6_SKIP_RUN:-0}\"\nCONTAINER_COMMAND=\"${ROS1_JP6_COMMAND:-bash}\"\nDOCKER_RUN_FLAGS=()\n\nensure_docker_access() {\n    if ! command -v docker >/dev/null 2>&1; then\n        echo \"docker command not found.\"\n        echo \"Please install Docker first, then rerun this command.\"\n        exit 1\n    fi\n\n    if docker info >/dev/null 2>&1; then\n        return 0\n    fi\n\n    if id -nG \"$USER\" | grep -qw docker; then\n        echo \"Current user is already in docker group, but docker is still unavailable.\"\n        echo \"Please make sure Docker daemon is running, for example:\"\n        echo \"sudo systemctl enable --now docker\"\n        exit 1\n    fi\n\n    echo \"Current user has no docker permission.\"\n    read -r -p \"Add current user ($USER) to docker group now? (y/n): \" choice\n    case \"$choice\" in\n        y|Y)\n            if ! sudo -v; then\n                echo \"Failed to authenticate sudo. Exiting.\"\n                exit 1\n            fi\n            if ! 
getent group docker >/dev/null 2>&1; then\n                sudo groupadd docker\n            fi\n            sudo usermod -aG docker \"$USER\"\n            echo \"Added $USER to docker group.\"\n            echo \"Please log out and log back in (or reboot), then rerun:\"\n            echo \"reComputer run ros1-jp6\"\n            exit 1\n            ;;\n        *)\n            echo \"Skipped docker group setup.\"\n            echo \"You can run this manually:\"\n            echo \"sudo usermod -aG docker $USER\"\n            exit 1\n            ;;\n    esac\n}\n\nrequire_downloader() {\n    if [[ ! -f \"${DOWNLOADER_SCRIPT}\" ]]; then\n        echo \"OneDrive downloader not found: ${DOWNLOADER_SCRIPT}\"\n        exit 1\n    fi\n}\n\nensure_archive() {\n    mkdir -p \"${CACHE_DIR}\"\n    if [[ -f \"${ARCHIVE_PATH}\" && -s \"${ARCHIVE_PATH}\" ]]; then\n        echo \"Using cached archive: ${ARCHIVE_PATH}\"\n        return 0\n    fi\n\n    require_downloader\n    echo \"Downloading ROS 1 archive from SharePoint...\"\n    python3 \"${DOWNLOADER_SCRIPT}\" \"${SHARE_URL}\" --filename \"${ARCHIVE_NAME}\" --output-dir \"${CACHE_DIR}\"\n}\n\nensure_image() {\n    if docker image inspect \"${IMAGE_NAME}\" >/dev/null 2>&1; then\n        echo \"Docker image already present: ${IMAGE_NAME}\"\n        return 0\n    fi\n\n    ensure_archive\n    echo \"Loading Docker image archive: ${ARCHIVE_PATH}\"\n    docker load -i \"${ARCHIVE_PATH}\"\n\n    if ! 
docker image inspect \"${IMAGE_NAME}\" >/dev/null 2>&1; then\n        echo \"Expected image not found after docker load: ${IMAGE_NAME}\"\n        exit 1\n    fi\n}\n\nmaybe_save_image() {\n    if [[ -z \"${SAVE_PATH}\" ]]; then\n        return 0\n    fi\n\n    mkdir -p \"$(dirname \"${SAVE_PATH}\")\"\n    echo \"Saving image ${IMAGE_NAME} to ${SAVE_PATH}\"\n    docker save -o \"${SAVE_PATH}\" \"${IMAGE_NAME}\"\n}\n\nprepare_run_flags() {\n    if docker run --rm --runtime nvidia \"${IMAGE_NAME}\" /bin/sh -lc \"exit 0\" >/dev/null 2>&1; then\n        DOCKER_RUN_FLAGS+=(--runtime nvidia)\n        echo \"Using GPU mode: --runtime nvidia\"\n        return 0\n    fi\n\n    if docker run --rm --gpus all \"${IMAGE_NAME}\" /bin/sh -lc \"exit 0\" >/dev/null 2>&1; then\n        DOCKER_RUN_FLAGS+=(--gpus all)\n        echo \"Using GPU mode: --gpus all\"\n        return 0\n    fi\n\n    echo \"Warning: no GPU runtime detected. Falling back to CPU-only container start.\"\n}\n\nrun_container() {\n    local tty_args=()\n    local docker_args=(\n        --rm\n        --name \"${CONTAINER_NAME}\"\n        --network host\n        --ipc host\n        --privileged\n        -v /dev:/dev\n    )\n\n    if [[ -t 0 && -t 1 ]]; then\n        tty_args=(-it)\n    fi\n\n    if [[ -n \"${DISPLAY:-}\" ]]; then\n        docker_args+=(\n            -e \"DISPLAY=${DISPLAY}\"\n            -e QT_X11_NO_MITSHM=1\n            -v /tmp/.X11-unix:/tmp/.X11-unix\n        )\n    fi\n\n    if [[ -n \"${ROS_MASTER_URI:-}\" ]]; then\n        docker_args+=(-e \"ROS_MASTER_URI=${ROS_MASTER_URI}\")\n    fi\n\n    if [[ -n \"${ROS_IP:-}\" ]]; then\n        docker_args+=(-e \"ROS_IP=${ROS_IP}\")\n    fi\n\n    if [[ -n \"${ROS_HOSTNAME:-}\" ]]; then\n        docker_args+=(-e \"ROS_HOSTNAME=${ROS_HOSTNAME}\")\n    fi\n\n    if docker ps -a -q -f name=\"^/${CONTAINER_NAME}$\" | grep -q .; then\n        docker rm -f \"${CONTAINER_NAME}\" >/dev/null 2>&1 || true\n    fi\n\n    echo \"Starting ${IMAGE_NAME}\"\n    docker 
run \"${tty_args[@]}\" \"${DOCKER_RUN_FLAGS[@]}\" \"${docker_args[@]}\" \"${IMAGE_NAME}\" /bin/bash -lc \"${CONTAINER_COMMAND}\"\n}\n\nensure_docker_access\nensure_image\nmaybe_save_image\n\nif [[ \"${SKIP_RUN}\" == \"1\" ]]; then\n    echo \"ROS1_JP6_SKIP_RUN=1, image preparation finished.\"\n    exit 0\nfi\n\nprepare_run_flags\nrun_container\n"
  },
  {
    "path": "reComputer/scripts/run.sh",
    "content": "#!/bin/bash\nhandle_error() {\n    echo \"An error occurred. Exiting...\"\n    exit 1\n}\ntrap 'handle_error' ERR\n\ncheck_is_jetson_or_not() {\n    model_file=\"/proc/device-tree/model\"\n    \n    if [ -f \"/proc/device-tree/model\" ]; then\n        model=$(tr -d '\\0' < /proc/device-tree/model | tr '[:upper:]' '[:lower:]')\n        if [[ $model =~ jetson|orin|nv|agx ]]; then\n            echo \"INFO: machine[$model] confirmed...\"\n        else\n            echo \"WARNING: machine[$model] maybe not support...\"\n            exit 1\n        fi\n    else\n        echo \"ERROR: machine[$model] not support this...\"\n        exit 1\n    fi\n}\ncheck_is_jetson_or_not\n\necho \"run example：$1\"\nBASE_PATH=/home/$USER/reComputer\n\n\ncd $JETSON_REPO_PATH\nscript_dir=$(dirname \"$0\")\n\ninit_script=$script_dir/$1/init.sh\nif [ -f $init_script ]; then\n    echo \"----example init----\"\n    bash $init_script\nelse\n    echo \"WARN: Example[$1] init.sh Not Found.\"\nfi\n\nstart_script=$script_dir/$1/run.sh\nif [ -f $start_script ]; then\n    echo \"----example start----\"\n    bash $start_script\nelse\n    echo \"ERROR: Example[$1] run.sh Not Found.\"\nfi\necho \"----example done----\"\n"
  },
  {
    "path": "reComputer/scripts/stable-diffusion-webui/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/stable-diffusion-webui/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/stable-diffusion-webui/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag stable-diffusion-webui)"
  },
  {
    "path": "reComputer/scripts/text-generation-webui/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/text-generation-webui/init.sh",
    "content": "#!/bin/bash\n\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/text-generation-webui/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n# download llm model\n./run.sh --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \\\n'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'\n\n# run text-generation-webui\n./run.sh $(./autotag text-generation-webui)"
  },
  {
    "path": "reComputer/scripts/ultralytics-yolo/LICENSE",
    "content": "MIT License\n\nCopyright (c) [2024] [Seeed Studio]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "reComputer/scripts/ultralytics-yolo/README.md",
    "content": "# Jetson-Example: Run Ultralytics YOLO Platform Service on NVIDIA Jetson Orin 🚀(**Supported YOLOV11**)\n\n## One-Click Quick Deployment of Plug-and-Play All Ultralytics YOLO for All Task Models with Web UI and HTTP API Interface\n<p align=\"center\">\n  <img src=\"images/Ultralytics-yolo.gif\" alt=\"Ultralytics YOLO\">\n</p>\n\n## Introduction 📘\nIn this project, you can quickly deploy all Ultralytics YOLO task models on Nvidia Jetson Orin devices with one click. This setup enables object detection, segmentation, human pose estimation, and classification. It supports uploading local videos, images, and using a webcam, and also allows one-click TensorRT model conversion. By accessing [http://127.0.0.1:5000](http://127.0.0.1:5000) on your local machine or within the same LAN, you can quickly start using Ultralytics YOLO. Additionally, an HTTP API method has been added at [http://127.0.0.1:5000/results](http://127.0.0.1:5000/results) to display detection data results for any task, and an additional Python script is provided to read YOLO detection data within Docker.\n\n## **Key Features**:\n\n1. **One-Click Deployment and Plug-and-Play**: Quickly deploy all YOLO task models on Nvidia Jetson Orin devices.\n2. **Comprehensive Task Support**: Enables object detection, segmentation, human pose estimation, and classification.\n3. **Versatile Input Options**: Supports uploading local videos, images, and using a webcam.\n4. **TensorRT Model Conversion**: Allows one-click conversion of models to TensorRT.\n5. **Web UI Access**: Easy access via [`http://127.0.0.1:5000`](http://127.0.0.1:5000) on the local machine or within the same LAN.\n6. **HTTP API Interface**: Added HTTP API at [`http://127.0.0.1:5000/results`](http://127.0.0.1:5000/results) to display detection data results.\n7. 
**Python Script Support**: Provides an additional Python script to read YOLO detection data within Docker.\n\n[![My Project](images/tasks.png)](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models)\nAll models implemented in this project are from the official [Ultralytics Yolo](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models).\n\n# Supported Task Models\n\n| Model Type  | Pre-trained Weights / Filenames                                                                                                     | Task                 | Inference | Validation | Training | Export |\n|-------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------|-----------|------------|----------|--------|\n| YOLOv5u     | yolov5nu, yolov5su, yolov5mu, yolov5lu, yolov5xu, yolov5n6u, yolov5s6u, yolov5m6u, yolov5l6u, yolov5x6u                              | Object Detection      | ✅        | ✅          | ✅        | ✅      |\n| YOLOv8      | yolov8n.pt, yolov8s.pt, yolov8m.pt, yolov8l.pt, yolov8x.pt                                                                           | Detection            | ✅        | ✅          | ✅        | ✅      |\n| YOLOv8-seg  | yolov8n-seg.pt, yolov8s-seg.pt, yolov8m-seg.pt, yolov8l-seg.pt, yolov8x-seg.pt                                                       | Instance Segmentation | ✅        | ✅          | ✅        | ✅      |\n| YOLOv8-pose | yolov8n-pose.pt, yolov8s-pose.pt, yolov8m-pose.pt, yolov8l-pose.pt, yolov8x-pose-p6.pt                                               | Pose/Keypoints        | ✅        | ✅          | ✅        | ✅      |\n| YOLOv8-obb  | yolov8n-obb.pt, yolov8s-obb.pt, yolov8m-obb.pt, yolov8l-obb.pt, yolov8x-obb.pt                                                       | Oriented Detection    | ✅        | ✅          | ✅        | ✅      |\n| YOLOv8-cls  | yolov8n-cls.pt, yolov8s-cls.pt, yolov8m-cls.pt, 
yolov8l-cls.pt, yolov8x-cls.pt                                                       | Classification        | ✅        | ✅          | ✅        | ✅      |\n| YOLOv11     | yolov11n.pt, yolov11s.pt, yolov11m.pt, yolov11l.pt, yolov11x.pt                                                                      | Detection            | ✅        | ✅          | ✅        | ✅      |\n| YOLOv11-seg | yolov11n-seg.pt, yolov11s-seg.pt, yolov11m-seg.pt, yolov11l-seg.pt, yolov11x-seg.pt                                                  | Instance Segmentation | ✅        | ✅          | ✅        | ✅      |\n| YOLOv11-pose| yolov11n-pose.pt, yolov11s-pose.pt, yolov11m-pose.pt, yolov11l-pose.pt, yolov11x-pose.pt                                              | Pose/Keypoints        | ✅        | ✅          | ✅        | ✅      |\n| YOLOv11-obb | yolov11n-obb.pt, yolov11s-obb.pt, yolov11m-obb.pt, yolov11l-obb.pt, yolov11x-obb.pt                                                  | Oriented Detection    | ✅        | ✅          | ✅        | ✅      |\n| YOLOv11-cls | yolov11n-cls.pt, yolov11s-cls.pt, yolov11m-cls.pt, yolov11l-cls.pt, yolov11x-cls.pt                                                  | Classification        | ✅        | ✅          | ✅        | ✅      |\n\n\n### Get a Jetson Orin Device 🛒\n| Device Model | Description | Link |\n|--------------|-------------|------|\n| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |\n| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |\n\n## Quickstart ⚡\n\n### Modify Docker Daemon Configuration (Optional)\nTo enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:\n\n```json\n{\n  \"default-runtime\": \"nvidia\",\n  
\"runtimes\": {\n    \"nvidia\": {\n      \"path\": \"nvidia-container-runtime\",\n      \"runtimeArgs\": []\n    }\n  },\n  \"storage-driver\": \"overlay2\",\n  \"data-root\": \"/var/lib/docker\",\n  \"log-driver\": \"json-file\",\n  \"log-opts\": {\n    \"max-size\": \"100m\",\n    \"max-file\": \"3\"\n  },\n  \"no-new-privileges\": true,\n  \"experimental\": false\n}\n```\n\nAfter modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:\n\n```sh\nsudo systemctl restart docker\n```\n\n### Installation via PyPI (Recommended) 🐍\n1. Install the package:\n    ```sh\n    pip install jetson-examples\n    ```\n\n2. Restart your reComputer:\n    ```sh\n    sudo reboot\n    ```\n\n3. Run Ultralytics YOLO on Jetson with one command:\n    ```sh\n    reComputer run ultralytics-yolo\n    ```\n4. \"Enter [`http://127.0.0.1:5001`](http://127.0.0.1:5001) or http://device_IP:5001 in your browser to access the Web UI.\"\n    <p align=\"center\">\n      <img src=\"images/ultralytics_fig1.png\" alt=\"Ultralytics YOLO\">\n    </p>\n\n- **Choose Model**: Select Yolo version and models for various tasks such as object detection, classification, segmentation, human pose estimation, OBB, etc.\n- **Upload Custom Model**: Users can upload their own trained YOLO models.\n- **Choose Input Type**: Users can select to input locally uploaded images, videos, or real-time camera devices.\n- **Enable TensorRT**: Choose whether to convert and use the TensorRT model. The initial conversion may require varying amounts of time.\n\n5. If you want to see the detection result data, you can enter [`http://127.0.0.1:5000/results`](http://127.0.0.1:5000/results) in your browser to view the `JSON` formatted data results. 
These results include `boxes` for object detection, `masks` for segmentation, `keypoints` for human pose estimation, and the `names` corresponding to all numerical categories.\n    <p align=\"center\">\n      <img src=\"images/ultralytics_fig2.png\" alt=\"Ultralytics YOLO\">\n    </p>\n    We also provide a Python script to help users integrate the data into their own programs.\n\n    ```python\n    import requests\n\n    def fetch_results():\n        response = requests.get('http://localhost:5001/results')\n        if response.status_code == 200:\n            results = response.json()\n            return results\n        else:\n            print('Failed to fetch results')\n            return None\n\n    results = fetch_results()\n    print(results)\n    ```\n\n\n## Notes 📝\n- To stop detection at any time, press the Stop button.\n- When accessing the WebUI from other devices within the same LAN, use the URL: `http://{Jetson_IP}:5000`.\n- You can view the JSON formatted detection results by accessing http://{Jetson_IP}:5000/results.\n- The first model conversion may require different amounts of time depending on the hardware and network environment, so please be patient.\n\n\n## Further Development 🔧\n- [Training a YOLO Model](https://wiki.seeedstudio.com/How_to_Train_and_Deploy_YOLOv8_on_reComputer/)\n- [TensorRT Acceleration](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/)\n- [Multistreams using Deepstream](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/#multistream-model-benchmarks) Tutorials.\n\n## License\n\nThis project is licensed under the MIT License.\n"
  },
  {
    "path": "reComputer/scripts/ultralytics-yolo/clean.sh",
    "content": "CONTAINER_NAME=\"ultralytics-yolo\"\n\n# Function to get L4T version\nget_l4t_version() {\n    local l4t_version=\"\"\n    local release_line=$(head -n 1 /etc/nv_tegra_release)\n    if [[ $release_line =~ R([0-9]+)\\ *\\(release\\),\\ REVISION:\\ ([0-9]+\\.[0-9]+) ]]; then\n        local major=\"${BASH_REMATCH[1]}\"\n        local revision=\"${BASH_REMATCH[2]}\"\n        l4t_version=\"${major}.${revision}\"\n    fi\n    echo \"$l4t_version\"\n}\n\nL4T_VERSION=$(get_l4t_version)\necho \"Detected L4T version: $L4T_VERSION\"\n\n# Determine the Docker image based on L4T version\nif [[ \"$L4T_VERSION\" == \"32.6.1\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack4:1.0\"\nelif [[ \"$L4T_VERSION\" == \"35.3.1\" || \"$L4T_VERSION\" == \"35.4.1\" || \"$L4T_VERSION\" == \"35.5.0\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack5:1.0\"\nelif [[ \"$L4T_VERSION\" == \"36.3.0\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack6:1.0\"\nelse\n    echo \"Error: L4T version $L4T_VERSION is not supported.\"\n    exit 1\nfi\n\necho \"Using Docker image: $IMAGE_NAME\"\nsudo rm -r ~/yolo_models\nsudo docker stop $CONTAINER_NAME\nsudo docker rm $CONTAINER_NAME\nsudo docker rmi $IMAGE_NAME"
  },
  {
    "path": "reComputer/scripts/ultralytics-yolo/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 32.6.1\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\n  - 36.4.0\n  - 36.4.3\nREQUIRED_DISK_SPACE: 16  # in GB\nREQUIRED_MEM_SPACE: 2\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/ultralytics-yolo/init.sh",
    "content": "#!/bin/bash\n\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n"
  },
  {
    "path": "reComputer/scripts/ultralytics-yolo/run.sh",
    "content": "#!/bin/bash\n\nCONTAINER_NAME=\"ultralytics-yolo\"\n\n# Function to get L4T version\nget_l4t_version() {\n    local l4t_version=\"\"\n    local release_line=$(head -n 1 /etc/nv_tegra_release)\n    if [[ $release_line =~ R([0-9]+)\\ *\\(release\\),\\ REVISION:\\ ([0-9]+\\.[0-9]+) ]]; then\n        local major=\"${BASH_REMATCH[1]}\"\n        local revision=\"${BASH_REMATCH[2]}\"\n        l4t_version=\"${major}.${revision}\"\n    fi\n    echo \"$l4t_version\"\n}\n\nL4T_VERSION=$(get_l4t_version)\necho \"Detected L4T version: $L4T_VERSION\"\n\n# Determine the Docker image based on L4T version\nif [[ \"$L4T_VERSION\" == \"32.6.1\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack4:1.0\"\nelif [[ \"$L4T_VERSION\" == \"35.3.1\" || \"$L4T_VERSION\" == \"35.4.1\" || \"$L4T_VERSION\" == \"35.5.0\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack5:1.0\"\nelif [[ \"$L4T_VERSION\" == \"36.3.0\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack6:1.0\"\nelif [[ \"$L4T_VERSION\" == \"36.4.0\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack61:v1.0\"\nelif [[ \"$L4T_VERSION\" == \"36.4.3\" ]]; then\n    IMAGE_NAME=\"yaohui1998/ultralytics-jetpack61:v1.0\"    \nelse\n    echo \"Error: L4T version $L4T_VERSION is not supported.\"\n    exit 1\nfi\n\necho \"Using Docker image: $IMAGE_NAME\"\n\n# Pull the Docker image\ndocker pull $IMAGE_NAME\n# make dir for save models\nmkdir ~/yolo_models\n\n# Check if the container with the specified name already exists\nif [ $(docker ps -a -q -f name=^/${CONTAINER_NAME}$) ]; then\n    echo \"Container $CONTAINER_NAME already exists. Starting and attaching...\"\n    echo \"Please open http://127.0.0.1:5000 to access the WebUI.\"\n    docker start $CONTAINER_NAME\n    docker exec -it $CONTAINER_NAME /bin/bash\nelse\n    echo \"Container $CONTAINER_NAME does not exist. 
Creating and starting...\"\n    docker run -it \\\n        --name $CONTAINER_NAME \\\n        --privileged \\\n        --network host \\\n        -v ~/yolo_models/:/usr/src/ultralytics/models/ \\\n        -v /tmp/.X11-unix:/tmp/.X11-unix \\\n        -v /dev/*:/dev/* \\\n        -v /etc/localtime:/etc/localtime:ro \\\n        --runtime nvidia \\\n        $IMAGE_NAME\nfi\n"
  },
  {
    "path": "reComputer/scripts/update.sh",
    "content": "#!/bin/bash\necho \"--update jetson-containers repo--\"\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\n\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers/tree/d1573a3e8d7ba3fef36ebb23a7391e60eaf64db7\"\n\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\n    # 5 publish to Test PyPI\n    read -p \"follow the newest version maybe bring bugs, are you sure about the update? (y/n): \" choice\n    if [[ $choice == \"y\" || $choice == \"Y\" ]]; then\n        cd $JETSON_REPO_PATH\n        git pull\n        pip3 install -r requirements.txt\n    else\n        echo \"skip update.\"\n    fi\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    sudo apt update; sudo apt install -y python3-pip\n    pip3 install -r requirements.txt\nfi"
  },
  {
    "path": "reComputer/scripts/utils.sh",
    "content": "#!/bin/bash\n\ncheck_base_env() \n{\n    # 1. Set color value\n    RED=$(tput setaf 1)\n    GREEN=$(tput setaf 2)\n    YELLOW=$(tput setaf 3)\n    BLUE=$(tput setaf 4)\n    MAGENTA=$(tput setaf 5)\n    CYAN=$(tput setaf 6)\n    RESET=$(tput sgr0)\n\n    # 2. Load config file\n    local CONFIG_FILE=$1\n    echo \"CONFIG_FILE_PATH=$CONFIG_FILE\"\n\n    if [[ ! -f \"$CONFIG_FILE\" ]]; then\n        echo \"Error: YAML file '$CONFIG_FILE' not found.\"\n        exit 1\n    fi\n    # Install yq for parsing YAML file\n    if ! command -v yq &> /dev/null\n    then\n        echo \"yq is not installed. Installing yq with pip3...\"\n        pip3 install yq\n        if command -v yq &> /dev/null\n        then\n            echo \"yq has been successfully installed.\"\n        else\n            echo \"Failed to install yq.\"\n            exit 1\n        fi\n    else\n        echo \"yq is already installed.\"\n    fi\n\n    if ! command -v jq &> /dev/null\n    then\n        echo \"jq is not installed. Installing jq...\"\n        sudo apt-get update\n        sudo apt-get install -y jq\n\n        if command -v jq &> /dev/null\n        then\n            echo \"jq has been successfully installed.\"\n            jq --version\n        else\n            echo \"Failed to install jq.\"\n            exit 1\n        fi\n    else\n        echo \"jq is already installed.\"\n        jq --version\n    fi\n    ALLOWED_L4T_VERSIONS=($(yq -r '.ALLOWED_L4T_VERSIONS[]' $CONFIG_FILE))\n    REQUIRED_DISK_SPACE=$(yq -r '.REQUIRED_DISK_SPACE' $CONFIG_FILE)\n    REQUIRED_MEM_SPACE=$(yq -r '.REQUIRED_MEM_SPACE' $CONFIG_FILE)\n    PACKAGES=($(yq -r '.PACKAGES[]' $CONFIG_FILE))\n    DOCKER=$(yq -r '.DOCKER.ENABLE' $CONFIG_FILE)\n    DESIRED_DAEMON_JSON=$(yq -r '.DOCKER.DAEMON' $CONFIG_FILE)\n    echo \"${ALLOWED_L4T_VERSIONS[@]}\"\n    # 3. 
Check L4T version\n    ARCH=$(uname -i)\n    if [ \"$ARCH\" = \"aarch64\" ]; then\n        # Check for L4T version string\n        L4T_VERSION_STRING=$(head -n 1 /etc/nv_tegra_release)\n\n        if [ -z \"$L4T_VERSION_STRING\" ]; then\n            L4T_VERSION_STRING=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core)\n        fi\n\n        L4T_RELEASE=$(echo \"$L4T_VERSION_STRING\" | cut -f 2 -d ' ' | grep -Po '(?<=R)[^;]+')\n        L4T_REVISION=$(echo \"$L4T_VERSION_STRING\" | cut -f 2 -d ',' | grep -Po '(?<=REVISION: )[^;]+')\n        L4T_VERSION=\"$L4T_RELEASE.$L4T_REVISION\"\n\n    elif [ \"$ARCH\" = \"x86_64\" ]; then\n        echo \"${RED}Unsupported architecture: $ARCH${RESET}\"\n        exit 1\n    fi\n\n    if [[ \" ${ALLOWED_L4T_VERSIONS[@]} \" =~ \" ${L4T_VERSION} \" ]]; then\n        echo \"L4T VERSION ${GREEN}${L4T_VERSION}${RESET} is in the allowed: ${GREEN}OK!${RESET}\"\n    else\n        echo \"${RED}L4T VERSION ${GREEN}${L4T_VERSION}${RESET}${RED} is not in the allowed versions list.${RESET}\"\n        echo \"${RED}The JetPack versions currently supported by this container are: ${GREEN}${ALLOWED_L4T_VERSIONS[@]}${RESET}${RED}. ${RESET}\"\n        echo \"${RED}For more information : https://github.com/Seeed-Projects/jetson-examples ${RESET}\"\n        exit 1\n    fi\n\n    # Install additional apt packages\n    for PACKAGE in $PACKAGES; do\n        if ! dpkg -l | grep -qw \"$PACKAGE\"; then\n            echo \"Installing $PACKAGE...\"\n            sudo apt-get install -y $PACKAGE\n        fi\n        echo \"$PACKAGE is installed: ${GREEN}OK!${RESET}\"\n    done\n\n    # 4. Check disk space\n    CURRENT_DISK_SPACE=$(df -BG --output=avail / | tail -1 | sed 's/[^0-9]*//g')\n    if [ \"$CURRENT_DISK_SPACE\" -lt \"$REQUIRED_DISK_SPACE\" ]; then\n        echo \"${RED}Insufficient disk space. Required: ${REQUIRED_DISK_SPACE}G, Available: ${CURRENT_DISK_SPACE}G. 
${RESET}\"\n        exit 1\n    else\n        echo \"Required ${GREEN}${REQUIRED_DISK_SPACE}GB${RESET}/${GREEN}${CURRENT_DISK_SPACE}GB${RESET} disk space: ${GREEN}OK!${RESET}\"\n    fi\n\n    # 5. Check memory space\n    CURRENT_MEM_SPACE=$(free -g | awk '/^Mem:/{print $2}')\n    if [ \"$CURRENT_MEM_SPACE\" -lt \"$REQUIRED_MEM_SPACE\" ]; then\n        echo \"${RED}Insufficient memory: $CURRENT_MEM_SPACE GB (minimum required: $REQUIRED_MEM_SPACE GB).${RESET}\"\n        exit 1\n    else\n        echo \"Required ${GREEN}${REQUIRED_MEM_SPACE}GB${RESET}/${GREEN}${CURRENT_MEM_SPACE}GB${RESET} memory space: ${GREEN}OK!${RESET}\"\n    fi\n\n    # 6. Prepare Docker env\n    if [ \"$DOCKER\" = \"true\" ]; then\n        # 6.1 Check if Docker is installed\n        if ! command -v docker &> /dev/null; then\n            echo \"${BLUE}Docker is not installed. Installing Docker...${RESET}\"\n            sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common\n            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -\n            sudo add-apt-repository \"deb [arch=arm64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\n\n            sudo apt-get update\n            sudo apt-get install -y docker-ce\n            sudo systemctl enable docker\n            sudo systemctl start docker\n            sudo usermod -aG docker $USER\n            sudo systemctl restart docker\n            echo \"${BLUE}Permissions added. Please rerun the command.${RESET}\"\n            newgrp docker\n\n            echo \"Docker has been installed and configured.\"\n        fi\n        # 6.2 Modify the Docker configuration file\n        DAEMON_JSON_PATH=\"/etc/docker/daemon.json\"\n        NECESSARY_CONTENT=\n        if [ ! 
-f \"$DAEMON_JSON_PATH\" ]; then\n            echo \"${BLUE}Creating $DAEMON_JSON_PATH with the desired content...${RESET}\"\n            echo \"$DESIRED_DAEMON_JSON\" | sudo tee $DAEMON_JSON_PATH > /dev/null\n            sudo systemctl restart docker\n            echo \"${GREEN}$DAEMON_JSON_PATH has been created.${RESET}\"\n        elif [ \"$(jq -e '.[\"default-runtime\"] == \"nvidia\" and .runtimes.nvidia.path == \"nvidia-container-runtime\" and (.runtimes.nvidia.runtimeArgs | length == 0)' \"$DAEMON_JSON_PATH\")\" != \"true\" ]; then\n        # elif [ \"$(cat $DAEMON_JSON_PATH)\" != \"$DESIRED_DAEMON_JSON\" ]; then\n            echo \"${BLUE}Backing up the existing $DAEMON_JSON_PATH to /etc/docker/daemon_backup.json ...${RESET}\"\n            sudo cp \"$DAEMON_JSON_PATH\" \"/etc/docker/daemon_backup.json\"\n            echo \"${GREEN}Backup completed.${RESET}\"\n            echo \"${BLUE}Updating $DAEMON_JSON_PATH with the desired content...${RESET}\"\n            echo \"$DESIRED_DAEMON_JSON\" | sudo tee $DAEMON_JSON_PATH > /dev/null\n            sudo systemctl restart docker\n            echo \"${GREEN}$DAEMON_JSON_PATH has been updated.${RESET}\"\n        else\n            echo \"${GREEN}$DAEMON_JSON_PATH already exists and has the correct content.${RESET}\"\n        fi\n        # 6.3 Check permissions\n        if ! docker info &> /dev/null; then\n            echo \"The current user does not have permissions to use Docker. Adding permissions...\"\n            sudo usermod -aG docker $USER\n            sudo systemctl restart docker\n            echo \"${BLUE}Permissions added. Please rerun the command.${RESET}\"\n            newgrp docker\n        else\n            echo \"${GREEN}Docker is installed and the current user has permissions to use it.${RESET}\"\n        fi\n    else\n        echo \"No need to configure Docker.\"\n    fi\n}\n"
  },
  {
    "path": "reComputer/scripts/whisper/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 25  # in GB\nREQUIRED_MEM_SPACE: 7\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/whisper/init.sh",
    "content": "#!/bin/bash\n\n# check the runtime environment.\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\nBASE_PATH=/home/$USER/reComputer\nmkdir -p $BASE_PATH/\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\nBASE_JETSON_LAB_GIT=\"https://github.com/dusty-nv/jetson-containers\"\nif [ -d $JETSON_REPO_PATH ]; then\n    echo \"jetson-ai-lab existed.\"\nelse\n    echo \"jetson-ai-lab does not installed. start init...\"\n    cd $BASE_PATH/\n    git clone --depth=1 $BASE_JETSON_LAB_GIT\n    cd $JETSON_REPO_PATH\n    bash install.sh\nfi\n"
  },
  {
    "path": "reComputer/scripts/whisper/run.sh",
    "content": "#!/bin/bash\n\nBASE_PATH=/home/$USER/reComputer\nJETSON_REPO_PATH=\"$BASE_PATH/jetson-containers\"\ncd $JETSON_REPO_PATH\n\n./run.sh $(./autotag whisper)\n"
  },
  {
    "path": "reComputer/scripts/yolov10/Dockerfile",
    "content": "FROM dustynv/l4t-pytorch:r35.3.1\nWORKDIR /opt\n\nRUN pip3 install --no-cache-dir --verbose gradio==4.31.5\n\nRUN git clone https://github.com/THU-MIG/yolov10.git && \\\n    cd yolov10 && \\\n    sed -i '/opencv-python>=4.6.0/ s/^/# /' pyproject.toml && \\ \n    sed -i '/torch>=1.8.0/ s/^/# /' pyproject.toml && \\ \n    sed -i '/torchvision>=0.9.0/ s/^/# /' pyproject.toml && \\ \n    pip3 install -e . && \\\n    mkdir weights \n\nCMD cd /opt/yolov10 && ls weights && python3 app.py"
  },
  {
    "path": "reComputer/scripts/yolov10/README.md",
    "content": "# Quickly Experience YOLOv10 on Jetson\n\n\n## Hello\n\n💡 Here's an example of quickly deploying YOLOv10 on a Jetson device.\n\n🔥 Hightlights:\n- **Yolov10** is a state-of-the-art real-time object detection model. 🚀🔍\n- **Jetson-examples** is a toolkit designed to deploy containerized applications on NVIDIA Jetson devices. ✨\n- **Jetson** is powerful AI hardware platform for edge computing.💻\n\n🛠️ Follow the tutorial below to quickly experience the performance of YOLOv10 on edge computing devices.\n\n<div align=\"center\">\n  <img alt=\"yolov10\" width=\"1200px\" src=\"./assets/webui.png\">\n</div>\n\n## Get a Jetson Orin Device 🛒\n| Device Model | Description | Link |\n|--------------|-------------|------|\n| Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |\n| reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |\n\n\n## Getting Started\n\n- install **jetson-examples** by pip:\n    ```sh\n    pip3 install jetson-examples\n    ```\n- restart reComputer \n    ```sh\n    sudo restart\n    ```\n- run yolov10 on jetson in one line:\n    ```sh\n    reComputer run yolov10\n    ```\n- Please visit http://127.0.0.1:7860\n\n## Change Model\n\nThis example will automatically download the YOLOv10s model at startup. 
If you want to try different models, please use the following command to download the model and then select the appropriate model through the WebUI.\n\n\n> **Note:** You can also download the model via a browser and copy the model to `/home/$USER/reComputer/yolov10/weights`.\n\n| Model | Download Command |\n| :------------: | :----------------------: |\n| [YOLOv10-N](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10n.pt) |   `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10n.pt`  |\n| [YOLOv10-S](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10s.pt) |   `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10s.pt`  |\n| [YOLOv10-M](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10m.pt) |   `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10m.pt`  |\n| [YOLOv10-B](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10b.pt) |   `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10b.pt`  |\n| [YOLOv10-L](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10l.pt) |   `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10l.pt`  |\n| [YOLOv10-X](https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10x.pt) |   `sudo wget -P /home/$USER/reComputer/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10x.pt`  | \n\n\n## Build Docker Image\nOur provided container is built based on the `jetson-container`. 
This example provide a Dockerfile, allowing you to build a more suitable container according to your needs.\n\n```sh\nsudo docker build -t yolov10-jetson .\n```\n\n> **Note:**  Additionally, you can train models, test models, and export models within the Docker container environment. For detailed information, please refer to `THU-MIG/yolov10`.\n\n## Reference\n- https://github.com/THU-MIG/yolov10\n- https://github.com/dusty-nv/jetson-containers\n\n"
  },
  {
    "path": "reComputer/scripts/yolov10/clean.sh",
    "content": "#!/bin/bash\n\nsudo docker rmi youjiang9977/yolov10-jetson:5.1.1\nsudo rm -rf /home/$USER/reComputer/yolov10\n"
  },
  {
    "path": "reComputer/scripts/yolov10/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 20  # in GB\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/yolov10/init.sh",
    "content": "#!/bin/bash\n\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\n# make dirs\nBASE_PATH=/home/$USER/reComputer\nsudo mkdir -p $BASE_PATH/yolov10/weights\nsudo mkdir -p $BASE_PATH/yolov10/run\necho \"create workspace at $BASE_PATH/yolov10\"\n\n# download models\necho \"download yolov10 models\"\nWEIGHTS_FILE=$BASE_PATH/yolov10/weights/yolov10s.pt\nif [ ! -f $WEIGHTS_FILE ]; then\n    sudo wget -P $BASE_PATH/yolov10/weights https://github.com/THU-MIG/yolov10/releases/download/v1.1/yolov10s.pt\nelse\n    echo \"Weights file already exists: $WEIGHTS_FILE\"\nfi\n\n"
  },
  {
    "path": "reComputer/scripts/yolov10/run.sh",
    "content": "#!/bin/bash\n\nsudo docker run -it --rm --net=host --runtime nvidia \\\n    -v /var/run/docker.sock:/var/run/docker.sock \\\n    -v /home/$USER/reComputer/yolov10/weights:/opt/yolov10/weights \\\n    -v /home/$USER/reComputer/yolov10/runs:/opt/yolov10/runs \\\n    youjiang9977/yolov10-jetson:5.1.1\n"
  },
  {
    "path": "reComputer/scripts/yolov8-rail-inspection/config.yaml",
    "content": "# The tested JetPack versions.\nALLOWED_L4T_VERSIONS:\n  - 35.3.1\n  - 35.4.1\n  - 35.5.0\n  - 36.3.0\nREQUIRED_DISK_SPACE: 20  # in GB\nREQUIRED_MEM_SPACE: 4\nPACKAGES:\n  - nvidia-jetpack\nDOCKER: \n  ENABLE: true\n  DAEMON: |\n    {\n      \"default-runtime\": \"nvidia\",\n      \"runtimes\": {\n        \"nvidia\": {\n          \"path\": \"nvidia-container-runtime\",\n          \"runtimeArgs\": []\n        }\n      },\n      \"storage-driver\": \"overlay2\",\n      \"data-root\": \"/var/lib/docker\",\n      \"log-driver\": \"json-file\",\n      \"log-opts\": {\n        \"max-size\": \"100m\",\n        \"max-file\": \"3\"\n      },\n      \"no-new-privileges\": true,\n      \"experimental\": false\n    }\n"
  },
  {
    "path": "reComputer/scripts/yolov8-rail-inspection/init.sh",
    "content": "#!/bin/bash\n\nsource $(dirname \"$(realpath \"$0\")\")/../utils.sh\ncheck_base_env \"$(dirname \"$(realpath \"$0\")\")/config.yaml\"\n\n"
  },
  {
    "path": "reComputer/scripts/yolov8-rail-inspection/readme.md",
    "content": "# Abstract\nThis project harnesses YOLOv8 technology, specifically tailored for precise identification and counting of bolts at fixed distances along a designated track, as well as for estimating odometer readings and vehicle speed calculations. It incorporates a test video stored within the ```/video``` directory of a Docker container, with the outcomes of these tests saved in the ```/result``` directory, subsequently relayed to the host machine's home directory via Docker mechanisms. Furthermore, the system offers real-time visualization of these processes through a WebUI accessible at ```http://127.0.0.1:5000``` within the local network.\n\n## Install\n\n\nPyPI(recommend)\n\n```sh\npip install jetson-examples\n```\n\nLinux (github trick)\n```sh\ncurl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh\n```\n\nGithub (for Developer)\n\n```sh\ngit clone https://github.com/Seeed-Projects/jetson-examples\ncd jetson-examples\npip install .\n```\n\n## Quickstart\n```sh\nreComputer run yolov8-rail-inspection\n```\n## Note\nThe display feature of the WebUI is experimental. Opening the WebUI visualization requires waiting for loading time of less than one minute. Optimization for this issue will be addressed in future updates.\n\n## FAQs\n1. The project has been tested on the Jetson Orin platform, and its execution entails the use of Docker; therefore, it is essential to ensure that all necessary Docker components are fully installed and functional.\n2. During program execution, you may encounter an ```ERROR: Could not open requirements file.``` This error message does not impact the normal operation of the program and can be safely ignored.\n3. The ultimate visualization of the results is presented through a web interface. Upon executing the command to run the ```reComputer yolov8-rail-inspection```, the terminal will output the URL for the visualization webpage. 
Upon clicking the link, you may need to wait a few seconds for the program to initialize and commence operation."
  },
  {
    "path": "reComputer/scripts/yolov8-rail-inspection/run.sh",
    "content": "#!/bin/bash\n\ndocker pull yaohui1998/bolt_inspection:1.0\n\nif [ \"$(docker ps -aq -f name=yolov8_rain_inspection)\" ]; then\n    echo \"Found existing container named yolov8_rain_inspection. Executing Python script inside the container...\"\n    docker start yolov8_rain_inspection\n    docker exec yolov8_rain_inspection python3 bolt_inspection.py\n    docker cp yolov8_rain_inspection:/usr/src/ultralytics/Jetson-example/result/ ~/\nelse\n    echo \"No existing container named counter found. Pulling image and running container...\"\n    docker run -it --rm --network host \\\n    --ipc=host \\\n    --runtime=nvidia \\\n    -v /tmp/.X11-unix:/tmp/.X11-unix:ro \\\n    -v /home:/home \\\n    -e DISPLAY=:0 \\\n    --privileged \\\n    --name yolov8_rain_inspection \\\n    --device=/dev/*:/dev/*  \\\n    yaohui1998/bolt_inspection:1.0\nfi "
  },
  {
    "path": "setup.py",
    "content": "from pathlib import Path\n\nfrom setuptools import setup\n\n\nREADME_PATH = Path(__file__).parent / \"README.md\"\nLONG_DESCRIPTION = README_PATH.read_text(encoding=\"utf-8\")\nPACKAGE_ROOT = Path(__file__).parent / \"reComputer\"\n\n\ndef package_files(root: Path):\n    files = []\n    for path in root.rglob(\"*\"):\n        if not path.is_file():\n            continue\n        if \"__pycache__\" in path.parts:\n            continue\n        if path.suffix in {\".pyc\", \".pyo\"}:\n            continue\n        files.append(path.relative_to(PACKAGE_ROOT).as_posix())\n    return sorted(files)\n\n\nsetup(\n    name=\"jetson-examples\",\n    version=\"0.2.5\",\n    author=\"luozhixin\",\n    author_email=\"zhixin.luo@seeed.cc\",\n    description=\"Running Gen AI models and applications on NVIDIA Jetson devices with one-line command\",\n    long_description=LONG_DESCRIPTION,\n    long_description_content_type=\"text/markdown\",\n    python_requires=\">=3.8\",\n    keywords=[\n        \"llama\",\n        \"llava\",\n        \"gpt\",\n        \"llm\",\n        \"nvidia\",\n        \"jetson\",\n        \"multimodal\",\n        \"jetson orin\",\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    packages=[\"reComputer\"],\n    include_package_data=True,\n    package_data={\"reComputer\": package_files(PACKAGE_ROOT / \"scripts\")},\n    entry_points={\n        \"console_scripts\": [\n            \"reComputer=reComputer.main:run_script\",\n        ]\n    },\n    project_urls={\n        \"Homepage\": \"https://github.com/Seeed-Projects/jetson-examples\",\n        \"Issues\": \"https://github.com/Seeed-Projects/jetson-examples/issues\",\n    },\n)\n"
  }
]