[
  {
    "path": ".gitignore",
    "content": "# Logging\nlogs\ntmp\nwandb\n\n# Data\ndata\noutputs\nvideos\npretrained_model\n\n# callibration\n.cache/*\n\n# Apple\n.DS_Store\n\n# VS Code\n.vscode\n\n# HPC\nnautilus/*.yaml\n*.key\n\n# Slurm\nsbatch*.sh\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\n!tests/data\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Ignore .cache except calibration\n.cache/*\n.cache/calibration/\n.cache/calibration/**\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n"
  },
  {
    "path": "LICENSE",
    "content": "                                Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n\nCopyright 2024 SimpleAutomation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "README.md",
    "content": "# Make robots do something useful!\n\nOur goal is to make robots affordable so more people can try them out, discover useful applications, and eventually make money using them to do work.\n\nCurrently, it's a set of helper scripts on top of LeRobot, plus a $300 robot arm compatible with π₀ and other foundational robotics models.\n\n<img src=\"https://github.com/user-attachments/assets/4c30e970-5d89-48ea-86db-72ae8d1ab47a\" width=450/>\n\n# Intro\n\n<table>\n  <tr>\n    <td><video src=\"https://github.com/user-attachments/assets/62472ce6-3084-41ec-8245-32a3c10f4b79\" width=200/></td>\n    <td></td>\n  </tr>\n</table>\n\n\n\n# Installation\n\n1.  If you didn't install LeRobot, install it:\n\n```\ngit clone https://github.com/huggingface/lerobot.git\ncd lerobot\npip install -e .\n```\n\n2 Clone Simple Automation scripts to another folder\n\n```\ngit clone https://github.com/1g0rrr/SimpleAutomation.git\ncd SimpleAutomation\n```\n\n3 Setup ports for your robot in \"core/configs/robot/so100.yaml\".\n\n# Run\n\n### Use multiple ACT models to solve complex robotics tasks.\n\nFor example, in the lamp testing demo, we combined 3 models:\n\n1. For getting the lamp from a random position\n2. For precise insertion into the tester\n3. For sorting working/not working bulbs\n\n![unnamed](https://github.com/user-attachments/assets/d105cf69-1b82-4581-90b7-9a9cd0a4f595)\n\n### Run evaluation\n\n-   Change the config file for using your models \"core/configs/chains/lamp_testing.yaml\"\n-   While evaluating press \"right\" key to move to the next model\n\n```\npython core/models_chain.py evaluate \\\n  --robot-path core/configs/robot/so100.yaml \\\n  --chain-path core/configs/chains/lamp_testing.yaml\n```\n\n### Use LLM agent to run models\n```\npython core/models_chain.py llm_agent \\\n  --robot-path core/configs/robot/so100.yaml\n```\n\n### Run recording\n\n-   The difference from Lerobot's recording is added teleoperation between episodes. This is usefull to be able to switch between models in not \"resting\" position.\n\n```\npython core/models_chain.py record \\\n  --robot-path core/configs/robot/so100.yaml \\\n  --fps 30 \\\n  --root data \\\n  --repo-id 1g0rrr/koch_test21 \\\n  --tags tutorial \\\n  --warmup-time-s 5 \\\n  --episode-time-s 5 \\\n  --reset-time-s 5 \\\n  --num-episodes 2\n```\n\n### Run teleoperation:\n\nUse it for testing if all is working.\n\n```\npython core/models_chain.py teleoperate \\\n --robot-path core/configs/robot/so100.yaml \\\n --robot-overrides '~cameras'\n```\n\n\n\n# Tips\n\n-   Make sure you have all inintial positions in the following model to prevent robot from sudden movements.\n-   \"Pick and place\" task is hard for the model and gripper can grab object not precisely at the center. 
\n### Use LLM agent to run models\n```\npython core/models_chain.py llm_agent \\\n  --robot-path core/configs/robot/so100.yaml\n```\n
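\nThe agent's tool set is currently hardcoded in `core/models_chain.py`; \"core/configs/chains/eat_chips.yaml\" shows the intended config shape for wiring models and prompts to the agent. A minimal sketch with placeholder repo IDs:\n\n```yaml\nllm_agent:\n    model: \"gpt-4o-mini\"\n    system_prompt: \"You're a helpful robot-arm assistant. Answer super concise.\"\n    tools:\n        grab_sponge:\n            repo_id: \"your_hf_username/grab_sponge\" # placeholder repo ID\n            desc: \"Clean the desktop.\"\n```\n\nSet `OPENAI_API_KEY` in your environment or in a `.env` file; the script loads it with `python-dotenv`.\n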
\n### Run recording\n\n-   The difference from LeRobot's recording is the added teleoperation between episodes. This is useful for switching between models in a non-\"resting\" position.\n\n```\npython core/models_chain.py record \\\n  --robot-path core/configs/robot/so100.yaml \\\n  --fps 30 \\\n  --root data \\\n  --repo-id 1g0rrr/koch_test21 \\\n  --tags tutorial \\\n  --warmup-time-s 5 \\\n  --episode-time-s 5 \\\n  --reset-time-s 5 \\\n  --num-episodes 2\n```\n\n### Run teleoperation:\n\nUse it to check that everything is working.\n\n```\npython core/models_chain.py teleoperate \\\n --robot-path core/configs/robot/so100.yaml \\\n --robot-overrides '~cameras'\n```\n\n\n\n# Tips\n\n-   Make sure the following model was trained on all the initial positions it can start from, to prevent sudden robot movements.\n-   The \"pick and place\" task is hard for the model, and the gripper may grab the object off-center. To compensate, re-grab the object at the beginning of the next model.\n\n# Training\n\n### Train a model in Google Colab:\n\nYou can train a model in Google Colab to save time.\nhttps://colab.research.google.com/github/1g0rrr/SimpleAutomation/blob/main/colab/SimpleAutomationTrainModel.ipynb\n\n-   It takes about 2.5 hours and $1.50 to train a typical 80K-step run.\n-   Choose the A100 as the fastest GPU.\n-   Don't disconnect Colab or close the browser, as all session data will be deleted.\n\n# Hardware\n### S.A.M.01\n\n![arm2](https://github.com/user-attachments/assets/35113d53-93b1-4678-af15-463d563cd238)\n\nJoin https://discord.gg/NFsqq4CVhs to get recent information.\n\n### SO-100\n\nFollow [SO-100](https://github.com/TheRobotStudio/SO-ARM100) to build your arm.\n\n<img src=\"./media/so-100.png\" width=500/>\n\n\n# Join the community\n\nSay 👋 in our [public discord channel](https://discord.gg/NFsqq4CVhs). We help each other with assembly, training models, and making robots do something useful.\n\nThank you for contributing to SimpleAutomation!\n\n## Star History\n\n[![Star History Chart](https://api.star-history.com/svg?repos=1g0rrr/simpleautomation&type=Timeline)](https://star-history.com/#1g0rrr/simpleautomation&Timeline)\n"
  },
  {
    "path": "colab/SimpleAutomationTrainModel.ipynb",
    "content": "{\n  \"cells\": [\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"cX-BT9Wte6tV\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"hugging_face_token = ''\\n\",\n        \"dataset_repo_id = 'test/test-dataset'\\n\",\n        \"model_repo_id = 'test/test-model'\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"bV6yVHzTq-dt\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!git clone https://github.com/huggingface/lerobot.git\\n\",\n        \"!cd lerobot && pip install -e . --ignore-installed\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"4vccJQqd_afv\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!pip install huggingface-hub\\n\",\n        \"!huggingface-cli login --token {hugging_face_token} --add-to-git-credential\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"EN9GQ7AurBFo\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!cd lerobot && python lerobot/scripts/train.py \\\\\\n\",\n        \"  dataset_repo_id={dataset_repo_id} \\\\\\n\",\n        \"  policy=act_koch_real \\\\\\n\",\n        \"  env=koch_real \\\\\\n\",\n        \"  hydra.run.dir=outputs/train/mymodel \\\\\\n\",\n        \"  hydra.job.name=mymodel \\\\\\n\",\n        \"  device=cuda \\\\\\n\",\n        \"  wandb.enable=false\\n\",\n        \"\\n\",\n        \"\\n\",\n        \"!huggingface-cli upload {model_repo_id} lerobot/outputs/train/mymodel/checkpoints/last/pretrained_model\\n\",\n        \"\\n\",\n        \"from google.colab import runtime\\n\",\n        \"runtime.unassign()\"\n      ]\n    }\n  ],\n  \"metadata\": {\n    \"accelerator\": \"GPU\",\n    \"colab\": {\n      \"gpuType\": \"A100\",\n      \"machine_shape\": \"hm\",\n      \"provenance\": []\n    },\n    \"kernelspec\": {\n      \"display_name\": \"Python 3\",\n      \"name\": \"python3\"\n    },\n    \"language_info\": {\n      \"name\": \"python\"\n    }\n  },\n  \"nbformat\": 4,\n  \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "core/configs/chains/chip_testing.yaml",
    "content": "models:\n    eat_chips:\n        repo_id: \"1g0rrr/eat_chips\"\n        control_time_s: 120\n"
  },
  {
    "path": "core/configs/chains/eat_chips.yaml",
    "content": "llm_agent:\n    model: \"gpt-4o-mini\"\n    system_prompt: \"You're a helpful robot-arm assistant. Answer simple, fun and super concise.\"\n    tools:\n        eat_chips:\n            repo_id: \"1g0rrr/eat_chips\"\n            desc: \"Grab opne chip and give it to user.\"\n        grab_candy:\n            repo_id: \"1g0rrr/grab_candy\"\n            desc: \"Grab candy and give it to user.\"\n        grab_sponge:\n            repo_id: \"1g0rrr/grab_sponge\"\n            desc: \"Clean the desktop.\"\n        describe_area:\n          desc: \"Describe the area around you.\"\n          prompt: \"Describe in one phrase what objects you see on the table. Not including robot. Start answer with \"I see...\"\"\n"
  },
  {
    "path": "core/configs/chains/glue_stick.yaml",
    "content": "models:\n    grab_stick:\n        repo_id: \"1g0rrr/grab_stick\"\n        control_time_s: 120\n    pause:\n        control_time_s: 120\n    release_stick:\n        repo_id: \"1g0rrr/release_stick\"\n        control_time_s: 120\n"
  },
  {
    "path": "core/configs/chains/lamp_testing.yaml",
    "content": "models:\n    grab_orange:\n        repo_id: \"1g0rrr/grab_sponge\"\n        control_time_s: 9999\n    # grab_candy:\n    #     repo_id: \"1g0rrr/grab_candy\"\n    #     control_time_s: 120\n    # grab_sponge:\n    #     repo_id: \"1g0rrr/grab_sponge\"\n    #     control_time_s: 120\n\n    # insert_lamp:\n    #     repo_id: \"1g0rrr/insert_lamp\"\n    #     control_time_s: 120\n    # testing_lamp:\n    #     repo_id: \"1g0rrr/testing_lamp\"\n    #     control_time_s: 120\n"
  },
  {
    "path": "core/configs/robot/koch.yaml",
    "content": "_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot\nrobot_type: koch\ncalibration_dir: .cache/calibration/koch\n\n# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.\n# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as\n# the number of motors in your follower arms.\nmax_relative_target: null\n\nleader_arms:\n    main:\n        _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus\n        port: /dev/tty.usbmodem58760433151\n        motors:\n            # name: (index, model)\n            shoulder_pan: [1, \"xl330-m077\"]\n            shoulder_lift: [2, \"xl330-m077\"]\n            elbow_flex: [3, \"xl330-m077\"]\n            wrist_flex: [4, \"xl330-m077\"]\n            wrist_roll: [5, \"xl330-m077\"]\n            gripper: [6, \"xl330-m077\"]\n\nfollower_arms:\n    main:\n        _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus\n        port: /dev/tty.usbmodem58760434751\n        motors:\n            # name: (index, model)\n            shoulder_pan: [1, \"xl430-w250\"]\n            shoulder_lift: [2, \"xl430-w250\"]\n            elbow_flex: [3, \"xl330-m288\"]\n            wrist_flex: [4, \"xl330-m288\"]\n            wrist_roll: [5, \"xl330-m288\"]\n            gripper: [6, \"xl330-m288\"]\n\ncameras:\n    laptop:\n        _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera\n        camera_index: 0\n        fps: 30\n        width: 640\n        height: 480\n    phone:\n        _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera\n        camera_index: 1\n        fps: 30\n        width: 640\n        height: 480\n\n# ~ Koch specific settings ~\n# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible\n# to squeeze the gripper and have it spring back to an open position on its own.\ngripper_open_degree: 35.156\n"
  },
  {
    "path": "core/configs/robot/so100.yaml",
    "content": "_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot\nrobot_type: so100\ncalibration_dir: .cache/calibration/so_100\n\n# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.\n# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as\n# the number of motors in your follower arms.\nmax_relative_target: null\n# usbmodem58760434751\n# dev/tty.usbmodem58760434751 - leader with original bus\nleader_arms:\n    main:\n        _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus\n        # _target_: dummy_arm.SimulatedFollower\n        port: /dev/tty.usbmodem58760434771\n        motors:\n            # name: (index, model)\n            shoulder_pan: [1, \"sts3215\"]\n            shoulder_lift: [2, \"sts3215\"]\n            elbow_flex: [3, \"sts3215\"]\n            wrist_flex: [4, \"sts3215\"]\n            wrist_roll: [5, \"sts3215\"]\n            gripper: [6, \"sts3215\"]\n\nfollower_arms:\n    main:\n        _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus\n        # _target_: dummy_arm.SimulatedFollower\n        port: /dev/tty.usbmodem58760429711\n        motors:\n            # name: (index, model)\n            shoulder_pan: [1, \"sts3215\"]\n            shoulder_lift: [2, \"sts3215\"]\n            elbow_flex: [3, \"sts3215\"]\n            wrist_flex: [4, \"sts3215\"]\n            wrist_roll: [5, \"sts3215\"]\n            gripper: [6, \"sts3215\"]\n\ncameras:\n    laptop:\n        _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera\n        camera_index: 0\n        fps: 30\n        width: 640\n        height: 480\n    phone:\n        _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera\n        camera_index: 1\n        fps: 30\n        width: 640\n        height: 480\n# ~ Koch specific settings ~\n# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible\n# to squeeze the gripper and have it spring back to an open position on its own.\n# gripper_open_degree: 35.156\n"
  },
  {
    "path": "core/configs/robot/so100_dummy.yaml",
    "content": "_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot\nrobot_type: so100\ncalibration_dir: .cache/calibration/so_100\n\n# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.\n# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as\n# the number of motors in your follower arms.\nmax_relative_target: null\n# usbmodem58760434751\n# dev/tty.usbmodem58760434751 - leader with original bus\nleader_arms:\n    main:\n        # _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus\n        _target_: dummy_arm.SimulatedFollower\n        port: /dev/tty.usbmodem58760434771\n        motors:\n            # name: (index, model)\n            shoulder_pan: [1, \"sts3215\"]\n            shoulder_lift: [2, \"sts3215\"]\n            elbow_flex: [3, \"sts3215\"]\n            wrist_flex: [4, \"sts3215\"]\n            wrist_roll: [5, \"sts3215\"]\n            gripper: [6, \"sts3215\"]\n\nfollower_arms:\n    main:\n        # _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus\n        _target_: dummy_arm.SimulatedFollower\n        port: /dev/tty.usbmodem58760429711\n        motors:\n            # name: (index, model)\n            shoulder_pan: [1, \"sts3215\"]\n            shoulder_lift: [2, \"sts3215\"]\n            elbow_flex: [3, \"sts3215\"]\n            wrist_flex: [4, \"sts3215\"]\n            wrist_roll: [5, \"sts3215\"]\n            gripper: [6, \"sts3215\"]\n\ncameras:\n    laptop:\n        _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera\n        camera_index: 0\n        fps: 30\n        width: 640\n        height: 480\n    phone:\n        _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera\n        camera_index: 1\n        fps: 30\n        width: 640\n        height: 480\n# ~ Koch specific settings ~\n# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible\n# to squeeze the gripper and have it spring back to an open position on its own.\n# gripper_open_degree: 35.156\n"
  },
  {
    "path": "core/dummy_arm.py",
    "content": "import enum\nimport logging\nimport math\nimport time\nimport traceback\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport numpy as np\nimport tqdm\nimport mujoco\n\n\nfrom lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError\nfrom lerobot.common.utils.utils import capture_timestamp_utc\n\nclass SimulatedFollower:\n\n    def __init__(\n        self,\n        port: str,\n        # configuration,\n        motors: dict[str, tuple[int, str]],\n        extra_model_control_table: dict[str, list[tuple]] | None = None,\n        extra_model_resolution: dict[str, int] | None = None,\n        mock=False,\n    ):\n        # self.configuration = configuration\n        self.old_pos = np.zeros(12)\n        self.port = port\n        self.motors = {\n                    # name: (index, model)\n                    \"shoulder_pan\": (1, \"xl330-m077\"),\n                    \"shoulder_lift\": (2, \"xl330-m077\"),\n                    \"elbow_flex\": (3, \"xl330-m077\"),\n                    \"wrist_flex\": (4, \"xl330-m077\"),\n                    \"wrist_roll\": (5, \"xl330-m077\"),\n                    \"gripper\": (6, \"xl330-m077\"),\n                }\n        pass\n    \n    @property\n    def motor_names(self) -> list[str]:\n        return list(self.motors.keys())\n    \n    def connect(self):\n        self.is_connected = True\n        # self.data = self.configuration.data\n        # self.model = self.configuration.model\n\n        # init_pos_rad = [-1.5708, -1.5708, 1.5708, -1.5708, -1.5708, 0]\n        # self.data.qpos[-6:] = init_pos_rad\n        # self.old_pos = deepcopy(self.data.qpos[-6:])\n        # deep copy\n        # mujoco.mj_forward(self.model, self.data)\n\n        pass\n\n    def read(self, data_name, motor_names: str | list[str] | None = None):\n        values = np.zeros(6)\n        values = values.astype(np.int32)\n        return values\n\n    def set_calibration(self, calibration: dict[str, tuple[int, bool]]):\n        self.calibration = calibration\n\n    def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None):\n\n        if data_name in [\"Torque_Enable\", \"Operating_Mode\", \"Homing_Offset\", \"Drive_Mode\", \"Position_P_Gain\", \"Position_I_Gain\", \"Position_D_Gain\"]:\n            return np.array(None)\n\n        pass\n\n    def disconnect(self):\n        self.is_connected = False\n\n    def __del__(self):\n        if getattr(self, \"is_connected\", False):\n            self.disconnect()\n\nif __name__ == \"__main__\":\n    pass\n"
  },
  {
    "path": "core/models_chain.py",
    "content": "import argparse\nimport logging\nimport time\nfrom pathlib import Path\nfrom typing import List\nimport os\n\nfrom dotenv import load_dotenv, find_dotenv\nimport speech_recognition as sr\nfrom langchain_openai import ChatOpenAI\nimport shutil\nimport tqdm\nfrom langchain_core.tools import tool\nfrom langchain.agents import create_tool_calling_agent, AgentExecutor\n\nfrom lerobot.common.robot_devices.cameras.opencv import OpenCVCamera\n# from safetensors.torch import load_file, save_file\nfrom lerobot.common.datasets.lerobot_dataset import LeRobotDataset\nfrom lerobot.common.datasets.populate_dataset import (\n    create_lerobot_dataset,\n    delete_current_episode,\n    init_dataset,\n    save_current_episode,\n)\nfrom lerobot.common.robot_devices.control_utils import (\n    control_loop,\n    has_method,\n    init_keyboard_listener,\n    init_policy,\n    log_control_info,\n    record_episode,\n    reset_environment,\n    sanity_check_dataset_name,\n    stop_recording,\n    warmup_record,\n)\nfrom lerobot.common.robot_devices.robots.factory import make_robot\nfrom lerobot.common.robot_devices.robots.utils import Robot\nfrom lerobot.common.robot_devices.utils import busy_wait, safe_disconnect\nfrom lerobot.common.utils.utils import init_hydra_config, init_logging, log_say, none_or_int\n\n########################################################################################\n# Control modes\n########################################################################################\n\n\n@safe_disconnect\ndef calibrate(robot: Robot, arms: list[str] | None):\n    # TODO(aliberts): move this code in robots' classes\n    if robot.robot_type.startswith(\"stretch\"):\n        if not robot.is_connected:\n            robot.connect()\n        if not robot.is_homed():\n            robot.home()\n        return\n\n    unknown_arms = [arm_id for arm_id in arms if arm_id not in robot.available_arms]\n    available_arms_str = \" \".join(robot.available_arms)\n    unknown_arms_str = \" \".join(unknown_arms)\n\n    if arms is None or len(arms) == 0:\n        raise ValueError(\n            \"No arm provided. Use `--arms` as argument with one or more available arms.\\n\"\n            f\"For instance, to recalibrate all arms add: `--arms {available_arms_str}`\"\n        )\n\n    if len(unknown_arms) > 0:\n        raise ValueError(\n            f\"Unknown arms provided ('{unknown_arms_str}'). Available arms are `{available_arms_str}`.\"\n        )\n\n    for arm_id in arms:\n        arm_calib_path = robot.calibration_dir / f\"{arm_id}.json\"\n        if arm_calib_path.exists():\n            print(f\"Removing '{arm_calib_path}'\")\n            arm_calib_path.unlink()\n        else:\n            print(f\"Calibration file not found '{arm_calib_path}'\")\n\n    if robot.is_connected:\n        robot.disconnect()\n\n    # Calling `connect` automatically runs calibration\n    # when the calibration file is missing\n    robot.connect()\n    robot.disconnect()\n    print(\"Calibration is done! 
You can now teleoperate and record datasets!\")\n\n@safe_disconnect\ndef teleoperate(\n    robot: Robot, fps: int | None = None, teleop_time_s: float | None = None, display_cameras: bool = False\n):\n    control_loop(\n        robot,\n        control_time_s=teleop_time_s,\n        fps=fps,\n        teleoperate=True,\n        display_cameras=display_cameras,\n    )\n\n@safe_disconnect\ndef llm_agent(\n    robot: Robot, \n    chain_path: str | None = None,\n    fps: int | None = None, \n    teleop_time_s: float | None = None, \n    display_cameras: bool = True\n):\n    import pyttsx3\n    import base64\n    import cv2\n\n    from langchain.schema import SystemMessage\n    from langchain_core.prompts import (\n        ChatPromptTemplate,\n        HumanMessagePromptTemplate,\n        MessagesPlaceholder,\n    )\n\n\n    _ = load_dotenv(find_dotenv())\n    api_key = os.getenv(\"OPENAI_API_KEY\")\n    \n\n    robot.connect()\n    engine = pyttsx3.init()\n\n\n\n    models = {\n        \"grab_sponge\":  {\"repo_id\": \"1g0rrr/grab_sponge\", \"control_time_s\": 32},\n         \"grab_orange\": {\"repo_id\": \"1g0rrr/grab_orange\", \"control_time_s\": 10}, \n         \"grab_candy\":{\"repo_id\": \"1g0rrr/grab_candy\", \"control_time_s\": 10}\n    }\n\n    global policies \n    policies = {}\n\n    for model_name in models:\n        model = models[model_name]\n        policy_overrides = [\"device=cpu\"]\n        policy, policy_fps, device, use_amp = init_policy(model[\"repo_id\"], policy_overrides)\n        policies[model_name] = ({\"policy\": policy, \"policy_fps\": policy_fps, \"device\": device, \"use_amp\": use_amp, \"control_time_s\": model[\"control_time_s\"]})\n\n\n    @tool(return_direct=True)\n    def grab_sponge():\n        \"\"\"Clean the desktop.\n        \"\"\"\n        global policies\n        do_control_loop(policies[\"grab_sponge\"])\n\n        return \"Done\"\n    \n    @tool(return_direct=True)\n    def grab_orange():\n        \"\"\"Grab orange and give it to user.\n        \"\"\"\n        global policies\n        do_control_loop(policies[\"grab_orange\"])\n\n        return \"Done\"\n        \n    @tool(return_direct=True)\n    def grab_candy():\n        \"\"\"Grab candy and give it to user.\n        \"\"\"\n        global policies\n        do_control_loop(policies[\"grab_candy\"])\n\n        return \"Done\"\n\n    @tool(return_direct=True)\n    def describe_area():\n        \"\"\"Describing what I can see.\n        \"\"\"\n\n        llm = ChatOpenAI(temperature=0.1, model=llm_model, api_key=api_key)\n\n        cam1 = OpenCVCamera(camera_index=0, fps=30, width=640, height=480, color_mode=\"bgr\")\n        cam1.connect()\n        img = cam1.read()\n        \n        # cv2.imshow(\"Image\", img)\n        # cv2.waitKey(0)\n        # cv2.destroyAllWindows()\n\n        _, encoded_img = cv2.imencode('.png', img) \n        base64_img = base64.b64encode(encoded_img).decode(\"utf-8\")    \n        \n        mime_type = 'image/png'\n        encoded_image_url = f\"data:{mime_type};base64,{base64_img}\"\n\n\n        chat_prompt_template = ChatPromptTemplate.from_messages(\n            messages=[\n                SystemMessage(content='Describe in one phrase what objects you see on the table. Not including robot. 
Start answer with \"I see...\"'),\n                HumanMessagePromptTemplate.from_template(\n                     [{'image_url': \"{encoded_image_url}\", 'type': 'image_url'}],\n                )\n            ]\n        )\n\n        chain = chat_prompt_template | llm\n        res = chain.invoke({\"encoded_image_url\": encoded_image_url})\n\n        return res.content\n        \n    def do_control_loop(policy_obj):\n        global policies, models\n        control_loop(\n            robot=robot,\n            control_time_s=policy_obj[\"control_time_s\"],\n            display_cameras=display_cameras,\n            policy=policy_obj[\"policy\"],\n            device=policy_obj[\"device\"],\n            use_amp=policy_obj[\"use_amp\"],\n            fps = policy_obj[\"policy_fps\"],\n            teleoperate=False,\n        )\n\n    agent_prompt = ChatPromptTemplate.from_messages([\n        (\"system\", \"You're a helpful robot-arm assistant. Answer super concise.\"), \n        (\"human\", \"{input}\"), \n        (\"placeholder\", \"{agent_scratchpad}\"),\n    ])\n\n    llm_model = \"gpt-4o-mini\"\n\n    llm = ChatOpenAI(temperature=0.1, model=llm_model, api_key=api_key)\n\n    tools = [grab_sponge, grab_orange, grab_candy, describe_area]\n\n    agent = create_tool_calling_agent(llm, tools, agent_prompt)\n    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\n\n    r = sr.Recognizer()    \n\n    def listen():\n        with sr.Microphone() as source:\n            audio = r.listen(source)\n            print(\"Processing...\")\n        try:\n            text = r.recognize_google(audio)\n            return text\n        except Exception as e:\n            print(\"Error: \" + str(e))\n            return None\n\n\n    def generate_response(prompt):\n        completions = agent_executor.invoke({\"input\": prompt, })\n        message = completions[\"output\"]\n        return message\n\n    while True:\n        print(\"Listening...\")\n\n        audio_prompt = listen()\n        if audio_prompt is not None:\n            print(\"You: \" + audio_prompt)\n            response = generate_response(audio_prompt)\n            engine.say(response)\n            engine.runAndWait()\n\n            print(\"Robot: \" + response)\n            # to lower case\n            if audio_prompt.lower() == \"thank you\":\n                # Exit the program\n                exit()\n        else:\n            print(\"Can you repeat?\")\n            continue\n\n\n@safe_disconnect\ndef evaluate(\n    robot: Robot, \n    chain_path: str | None = None,\n    fps: int | None = None, \n    teleop_time_s: float | None = None, \n    display_cameras: bool = True\n):\n    robot_cfg = init_hydra_config(chain_path)\n\n    models = robot_cfg[\"models\"]\n    policies = []\n    for model_name in models:\n        model = models[model_name]\n        policy_overrides = [\"device=cpu\"]\n        policy, policy_fps, device, use_amp = init_policy(model[\"repo_id\"], policy_overrides)\n        policies.append({\"policy\": policy, \"policy_fps\": policy_fps, \"device\": device, \"use_amp\": use_amp, \"control_time_s\": model[\"control_time_s\"]})\n\n    listener, events = init_keyboard_listener()\n    for policy_obj in policies:\n        control_loop(\n            robot=robot,\n            control_time_s=policy_obj[\"control_time_s\"],\n            display_cameras=display_cameras,\n            events=events,\n            policy=policy_obj[\"policy\"],\n            device=policy_obj[\"device\"],\n            
use_amp=policy_obj[\"use_amp\"],\n            fps = policy_obj[\"policy_fps\"],\n            teleoperate=False,\n        )\n        print(\"Model is done!\")\n\n    print(\"Teleoperation is done!\")\n\n\n@safe_disconnect\ndef record(\n    robot: Robot,\n    root: str,\n    repo_id: str,\n    pretrained_policy_name_or_path: str | None = None,\n    policy_overrides: List[str] | None = None,\n    fps: int | None = None,\n    warmup_time_s=2,\n    episode_time_s=10,\n    reset_time_s=5,\n    num_episodes=50,\n    video=True,\n    run_compute_stats=True,\n    push_to_hub=True,\n    tags=None,\n    num_image_writer_processes=1,\n    num_image_writer_threads_per_camera=1,\n    force_override=False,\n    display_cameras=True,\n    play_sounds=True,\n):\n    # TODO(rcadene): Add option to record logs\n    listener = None\n    events = None\n    policy = None\n    device = None\n    use_amp = None\n\n    # Load pretrained policy\n    if pretrained_policy_name_or_path is not None:\n        policy, policy_fps, device, use_amp = init_policy(pretrained_policy_name_or_path, policy_overrides)\n\n        if fps is None:\n            fps = policy_fps\n            logging.warning(f\"No fps provided, so using the fps from policy config ({policy_fps}).\")\n        elif fps != policy_fps:\n            logging.warning(\n                f\"There is a mismatch between the provided fps ({fps}) and the one from policy config ({policy_fps}).\"\n            )\n\n\n    # Create empty dataset or load existing saved episodes\n    sanity_check_dataset_name(repo_id, policy)\n    dataset = init_dataset(\n        repo_id,\n        root,\n        force_override,\n        fps,\n        video,\n        write_images=robot.has_camera,\n        num_image_writer_processes=num_image_writer_processes,\n        num_image_writer_threads=num_image_writer_threads_per_camera * robot.num_cameras,\n    )\n\n    if not robot.is_connected:\n        robot.connect()\n\n    listener, events = init_keyboard_listener()\n\n    # Execute a few seconds without recording to:\n    # 1. teleoperate the robot to move it in starting position if no policy provided,\n    # 2. give times to the robot devices to connect and start synchronizing,\n    # 3. 
place the camera windows on screen\n    enable_teleoperation = policy is None\n    log_say(\"Warmup record\", play_sounds)\n    warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps)\n\n    if has_method(robot, \"teleop_safety_stop\"):\n        robot.teleop_safety_stop()\n\n    while True:\n        if dataset[\"num_episodes\"] >= num_episodes:\n            break\n\n        episode_index = dataset[\"num_episodes\"]\n\n        # Visual sign in the terminal that a recording is starting\n        print(\"============================================\")\n        print(\"============================================\")\n        print(\"===========  START RECORDING  ==============\")\n        print(\"============================================\")\n        print(\"============================================\")\n\n        log_say(f\"Recording episode {episode_index}\", play_sounds)\n        record_episode(\n            dataset=dataset,\n            robot=robot,\n            events=events,\n            episode_time_s=episode_time_s,\n            display_cameras=display_cameras,\n            policy=policy,\n            device=device,\n            use_amp=use_amp,\n            fps=fps,\n        )\n\n        # Execute a few seconds without recording to give time to manually reset the environment.\n        # The current code logic doesn't allow teleoperating during this time.\n        # TODO(rcadene): add an option to enable teleoperation during reset\n        # Skip reset for the last episode to be recorded\n        if not events[\"stop_recording\"] and (\n            (episode_index < num_episodes - 1) or events[\"rerecord_episode\"]\n        ):\n            log_say(\"Reset the environment\", play_sounds)\n            reset_environment(robot, events, reset_time_s)\n            # log_say(\"Prepare position\", play_sounds)\n            # warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps)\n\n        if events[\"rerecord_episode\"]:\n            log_say(\"Re-record episode\", play_sounds)\n            events[\"rerecord_episode\"] = False\n            events[\"exit_early\"] = False\n            delete_current_episode(dataset)\n            continue\n\n        # Increments dataset[\"current_episode_index\"] by one\n        save_current_episode(dataset)\n\n        if events[\"stop_recording\"]:\n            break\n\n    log_say(\"Stop recording\", play_sounds, blocking=True)\n    stop_recording(robot, listener, display_cameras)\n\n    lerobot_dataset = create_lerobot_dataset(dataset, run_compute_stats, push_to_hub, tags, play_sounds)\n\n    # Remove leftover temporary image folders for the cameras defined in the robot config.\n    data_dict = [\"observation.images.laptop\", \"observation.images.phone\"]\n    image_keys = [key for key in data_dict if \"image\" in key]\n    local_dir = Path(root) / repo_id\n    videos_dir = local_dir / \"videos\"\n\n    for episode_index in tqdm.tqdm(range(num_episodes)):\n        for key in image_keys:\n            tmp_imgs_dir = videos_dir / f\"{key}_episode_{episode_index:06d}\"\n            shutil.rmtree(tmp_imgs_dir, ignore_errors=True)\n\n    log_say(\"Exiting\", play_sounds)\n    return lerobot_dataset\n\n\n@safe_disconnect\ndef replay(\n    robot: Robot, episode: int, fps: int | None = None, root=\"data\", repo_id=\"lerobot/debug\", play_sounds=True\n):\n    # TODO(rcadene, aliberts): refactor with control_loop, once `dataset` is an instance of LeRobotDataset\n    # TODO(rcadene): Add 
option to record logs\n    local_dir = Path(root) / repo_id\n    if not local_dir.exists():\n        raise ValueError(f\"Dataset directory not found: {local_dir}\")\n\n    dataset = LeRobotDataset(repo_id, root=root)\n    items = dataset.hf_dataset.select_columns(\"action\")\n    from_idx = dataset.episode_data_index[\"from\"][episode].item()\n    to_idx = dataset.episode_data_index[\"to\"][episode].item()\n\n    if not robot.is_connected:\n        robot.connect()\n\n    log_say(\"Replaying episode\", play_sounds, blocking=True)\n    for idx in range(from_idx, to_idx):\n        start_episode_t = time.perf_counter()\n\n        action = items[idx][\"action\"]\n        robot.send_action(action)\n\n        dt_s = time.perf_counter() - start_episode_t\n        busy_wait(1 / fps - dt_s)\n\n        dt_s = time.perf_counter() - start_episode_t\n        log_control_info(robot, dt_s, fps=fps)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    subparsers = parser.add_subparsers(dest=\"mode\", required=True)\n\n    # Set common options for all the subparsers\n    base_parser = argparse.ArgumentParser(add_help=False)\n    base_parser.add_argument(\n        \"--robot-path\",\n        type=str,\n        default=\"core/configs/robot/so100.yaml\",\n        help=\"Path to robot yaml file used to instantiate the robot using `make_robot` factory function.\",\n    )\n    base_parser.add_argument(\n        \"--robot-overrides\",\n        type=str,\n        nargs=\"*\",\n        help=\"Any key=value arguments to override config values (use dots for nested overrides, e.g. `cameras.laptop.fps=30`)\",\n    )\n\n    parser_calib = subparsers.add_parser(\"calibrate\", parents=[base_parser])\n    parser_calib.add_argument(\n        \"--arms\",\n        type=str,\n        nargs=\"*\",\n        help=\"List of arms to calibrate (e.g. `--arms left_follower right_follower left_leader`)\",\n    )\n\n    parser_teleop = subparsers.add_parser(\"teleoperate\", parents=[base_parser])\n    parser_teleop.add_argument(\n        \"--fps\", type=none_or_int, default=None, help=\"Frames per second (set to None to disable)\"\n    )\n    parser_teleop.add_argument(\n        \"--display-cameras\",\n        type=int,\n        default=1,\n        help=\"Display all cameras on screen (set to 1 to display, 0 to disable).\",\n    )\n\n    parser_evaluate = subparsers.add_parser(\"evaluate\", parents=[base_parser])\n    parser_evaluate.add_argument(\n        \"--fps\", type=none_or_int, default=None, help=\"Frames per second (set to None to disable)\"\n    )\n    parser_evaluate.add_argument(\n        \"--chain-path\",\n        type=Path,\n        default=\"core/configs/chains/lamp_testing.yaml\",\n        help=\"Path to the chain configuration yaml file.\",\n    )\n\n    parser_llm_agent = subparsers.add_parser(\"llm_agent\", parents=[base_parser])\n    parser_llm_agent.add_argument(\n        \"--chain-path\",\n        type=Path,\n        default=\"core/configs/chains/eat_chips.yaml\",\n        help=\"Path to the chain configuration yaml file.\",\n    )\n\n    parser_record = subparsers.add_parser(\"record\", parents=[base_parser])\n    parser_record.add_argument(\n        \"--fps\", type=none_or_int, default=None, help=\"Frames per second (set to None to disable)\"\n    )\n    parser_record.add_argument(\n        \"--root\",\n        type=Path,\n        default=\"data\",\n        help=\"Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 
'data/hf_username/dataset_name').\",\n    )\n    parser_record.add_argument(\n        \"--repo-id\",\n        type=str,\n        default=\"lerobot/test\",\n        help=\"Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).\",\n    )\n    parser_record.add_argument(\n        \"--warmup-time-s\",\n        type=int,\n        default=10,\n        help=\"Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize.\",\n    )\n    parser_record.add_argument(\n        \"--episode-time-s\",\n        type=int,\n        default=60,\n        help=\"Number of seconds for data recording for each episode.\",\n    )\n    parser_record.add_argument(\n        \"--reset-time-s\",\n        type=int,\n        default=60,\n        help=\"Number of seconds for resetting the environment after each episode.\",\n    )\n    parser_record.add_argument(\"--num-episodes\", type=int, default=50, help=\"Number of episodes to record.\")\n    parser_record.add_argument(\n        \"--run-compute-stats\",\n        type=int,\n        default=1,\n        help=\"By default, run the computation of the data statistics at the end of data collection. Compute intensive and not required to just replay an episode.\",\n    )\n    parser_record.add_argument(\n        \"--push-to-hub\",\n        type=int,\n        default=1,\n        help=\"Upload dataset to Hugging Face hub.\",\n    )\n    parser_record.add_argument(\n        \"--tags\",\n        type=str,\n        nargs=\"*\",\n        help=\"Add tags to your dataset on the hub.\",\n    )\n    parser_record.add_argument(\n        \"--num-image-writer-processes\",\n        type=int,\n        default=0,\n        help=(\n            \"Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; \"\n            \"set to ≥1 to use subprocesses, each using threads to write images. The best number of processes \"\n            \"and threads depends on your system. We recommend 4 threads per camera with 0 processes. \"\n            \"If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses.\"\n        ),\n    )\n    parser_record.add_argument(\n        \"--num-image-writer-threads-per-camera\",\n        type=int,\n        default=8,\n        help=(\n            \"Number of threads writing the frames as png images on disk, per camera. \"\n            \"Too many threads might cause unstable teleoperation fps due to main thread being blocked. \"\n            \"Not enough threads might cause low camera fps.\"\n        ),\n    )\n    parser_record.add_argument(\n        \"--force-override\",\n        type=int,\n        default=0,\n        help=\"By default, data recording is resumed. 
When set to 1, delete the local directory and start data recording from scratch.\",\n    )\n    parser_record.add_argument(\n        \"-p\",\n        \"--pretrained-policy-name-or-path\",\n        type=str,\n        help=(\n            \"Either the repo ID of a model hosted on the Hub or a path to a directory containing weights \"\n            \"saved using `Policy.save_pretrained`.\"\n        ),\n    )\n    parser_record.add_argument(\n        \"--policy-overrides\",\n        type=str,\n        nargs=\"*\",\n        help=\"Any key=value arguments to override config values (use dots for nested overrides, e.g. `device=cuda`)\",\n    )\n\n    parser_replay = subparsers.add_parser(\"replay\", parents=[base_parser])\n    parser_replay.add_argument(\n        \"--fps\", type=none_or_int, default=None, help=\"Frames per second (set to None to disable)\"\n    )\n    parser_replay.add_argument(\n        \"--root\",\n        type=Path,\n        default=\"data\",\n        help=\"Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 'data/hf_username/dataset_name').\",\n    )\n    parser_replay.add_argument(\n        \"--repo-id\",\n        type=str,\n        default=\"lerobot/test\",\n        help=\"Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).\",\n    )\n    parser_replay.add_argument(\"--episode\", type=int, default=0, help=\"Index of the episode to replay.\")\n\n    args = parser.parse_args()\n\n    init_logging()\n\n    control_mode = args.mode\n    robot_path = args.robot_path\n    robot_overrides = args.robot_overrides\n    kwargs = vars(args)\n    del kwargs[\"mode\"]\n    del kwargs[\"robot_path\"]\n    del kwargs[\"robot_overrides\"]\n\n    robot_cfg = init_hydra_config(robot_path, robot_overrides)\n    robot = make_robot(robot_cfg)\n\n    if control_mode == \"calibrate\":\n        calibrate(robot, **kwargs)\n\n    elif control_mode == \"teleoperate\":\n        teleoperate(robot, **kwargs)\n\n    elif control_mode == \"evaluate\":\n        evaluate(robot, **kwargs)\n\n    elif control_mode == \"llm_agent\":\n        llm_agent(robot, **kwargs)\n\n    elif control_mode == \"record\":\n        record(robot, **kwargs)\n\n    elif control_mode == \"replay\":\n        replay(robot, **kwargs)\n\n    if robot.is_connected:\n        # Disconnect manually to avoid a \"Core dump\" during process\n        # termination due to camera threads not properly exiting.\n        robot.disconnect()"
  },
  {
    "path": "core/test.py",
    "content": ""
  }
]