[
  {
    "path": ".dockerignore",
    "content": "# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------\n.git\n.cache\n.idea\nruns\noutput\ncoco\nstorage.googleapis.com\n\ndata/samples/*\n**/results*.csv\n*.jpg\n\n# Neural Network weights -----------------------------------------------------------------------------------------------\n**/*.pt\n**/*.pth\n**/*.onnx\n**/*.engine\n**/*.mlmodel\n**/*.torchscript\n**/*.torchscript.pt\n**/*.tflite\n**/*.h5\n**/*.pb\n*_saved_model/\n*_web_model/\n*_openvino_model/\n\n# Below Copied From .gitignore -----------------------------------------------------------------------------------------\n# Below Copied From .gitignore -----------------------------------------------------------------------------------------\n\n\n# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\nwandb/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# dotenv\n.env\n\n# virtualenv\n.venv*\nvenv*/\nENV*/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n\n# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------\n\n# General\n.DS_Store\n.AppleDouble\n.LSOverride\n\n# Icon must end with two \\r\nIcon\nIcon?\n\n# Thumbnails\n._*\n\n# Files that might appear in the root of a volume\n.DocumentRevisions-V100\n.fseventsd\n.Spotlight-V100\n.TemporaryItems\n.Trashes\n.VolumeIcon.icns\n.com.apple.timemachine.donotpresent\n\n# Directories potentially created on remote AFP share\n.AppleDB\n.AppleDesktop\nNetwork Trash Folder\nTemporary Items\n.apdisk\n\n\n# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore\n# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm\n# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839\n\n# User-specific stuff:\n.idea/*\n.idea/**/workspace.xml\n.idea/**/tasks.xml\n.idea/dictionaries\n.html  # Bokeh Plots\n.pg  # TensorFlow Frozen Graphs\n.avi # videos\n\n# Sensitive or high-churn files:\n.idea/**/dataSources/\n.idea/**/dataSources.ids\n.idea/**/dataSources.local.xml\n.idea/**/sqlDataSources.xml\n.idea/**/dynamic.xml\n.idea/**/uiDesigner.xml\n\n# Gradle:\n.idea/**/gradle.xml\n.idea/**/libraries\n\n# CMake\ncmake-build-debug/\ncmake-build-release/\n\n# Mongo Explorer plugin:\n.idea/**/mongoSettings.xml\n\n## 
File-based project format:\n*.iws\n\n## Plugin-specific files:\n\n# IntelliJ\nout/\n\n# mpeltonen/sbt-idea plugin\n.idea_modules/\n\n# JIRA plugin\natlassian-ide-plugin.xml\n\n# Cursive Clojure plugin\n.idea/replstate.xml\n\n# Crashlytics plugin (for Android Studio and IntelliJ)\ncom_crashlytics_export_strings.xml\ncrashlytics.properties\ncrashlytics-build.properties\nfabric.properties\n"
  },
  {
    "path": ".gitattributes",
    "content": "# this drop notebooks from GitHub language stats\n*.ipynb linguist-vendored\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: 🐛 Bug Report\ndescription: \"Problems with Ultralytics YOLOv5\"\nlabels: [bug, triage]\ntype: \"bug\"\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thank you for submitting an Ultralytics YOLOv5 🐛 Bug Report!\n\n  - type: checkboxes\n    attributes:\n      label: Search before asking\n      description: >\n        Please search the Ultralytics YOLOv5 [docs](https://docs.ultralytics.com/yolov5/) and [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists.\n      options:\n        - label: >\n            I have searched https://github.com/ultralytics/yolov5/issues and did not find a similar report.\n          required: true\n\n  - type: dropdown\n    attributes:\n      label: Project area\n      description: |\n        Help us route the report to the right maintainers.\n      multiple: true\n      options:\n        - \"Training\"\n        - \"Validation/testing\"\n        - \"Export/deployment\"\n        - \"Models/checkpoints\"\n        - \"Documentation\"\n        - \"Other\"\n    validations:\n      required: false\n\n  - type: textarea\n    attributes:\n      label: Bug\n      description: Please describe the issue in detail so we can reproduce it in Ultralytics YOLOv5. Include logs, screenshots, console output, and any context that helps explain the problem.\n      placeholder: |\n        💡 ProTip! Include as much information as possible (logs, tracebacks, screenshots, etc.) to receive the most helpful response.\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: Environment\n      description: Share the platform and version information relevant to your report.\n      placeholder: |\n        Please include:\n        - OS (e.g., Ubuntu 20.04, macOS 13.5, Windows 11)\n        - Language or framework version (Python, Swift, Flutter, etc.)\n        - Package or app version\n        - Hardware (e.g., CPU, GPU model, device model)\n        - Any other environment details\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: Minimal Reproducible Example\n      description: >\n        Provide the smallest possible snippet, command, or steps required to reproduce the issue. This helps us pinpoint problems faster.\n      placeholder: |\n        ```python\n        # Code or commands to reproduce your issue here\n        ```\n    validations:\n      required: false\n\n  - type: textarea\n    attributes:\n      label: Additional\n      description: Anything else you would like to share?\n\n  - type: checkboxes\n    attributes:\n      label: Are you willing to submit a PR?\n      description: >\n        (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) to help improve Ultralytics YOLOv5, especially if you know how to fix the issue.\n        See the Ultralytics [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started.\n      options:\n        - label: Yes I'd like to help by submitting a PR!\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nblank_issues_enabled: true\ncontact_links:\n  - name: 📘 YOLOv5 Docs\n    url: https://docs.ultralytics.com/yolov5/\n    about: Complete Ultralytics YOLOv5 documentation\n  - name: 💬 Forum\n    url: https://community.ultralytics.com/\n    about: Ask the Ultralytics community for workflow help\n  - name: 🎧 Discord\n    url: https://ultralytics.com/discord\n    about: Chat with the Ultralytics team and other builders\n  - name: ⌨️ Reddit\n    url: https://reddit.com/r/ultralytics\n    about: Discuss Ultralytics projects on Reddit\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-request.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: 🚀 Feature Request\ndescription: \"Suggest an Ultralytics YOLOv5 improvement\"\nlabels: [enhancement]\ntype: \"feature\"\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thank you for submitting an Ultralytics YOLOv5 🚀 Feature Request!\n\n  - type: checkboxes\n    attributes:\n      label: Search before asking\n      description: >\n        Please search the Ultralytics YOLOv5 [docs](https://docs.ultralytics.com/yolov5/) and [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists.\n      options:\n        - label: >\n            I have searched https://github.com/ultralytics/yolov5/issues and did not find a similar request.\n          required: true\n\n  - type: textarea\n    attributes:\n      label: Description\n      description: Briefly describe the feature you would like to see added to Ultralytics YOLOv5.\n      placeholder: |\n        What new capability or improvement are you proposing?\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: Use case\n      description: Explain how this feature would be used and who benefits from it. Screenshots or mockups are welcome.\n      placeholder: |\n        How would this feature improve your workflow?\n\n  - type: textarea\n    attributes:\n      label: Additional\n      description: Anything else you would like to share?\n\n  - type: checkboxes\n    attributes:\n      label: Are you willing to submit a PR?\n      description: >\n        (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) to help improve Ultralytics YOLOv5.\n        See the Ultralytics [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started.\n      options:\n        - label: Yes I'd like to help by submitting a PR!\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/question.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: ❓ Question\ndescription: \"Ask an Ultralytics YOLOv5 question\"\nlabels: [question]\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thank you for asking an Ultralytics YOLOv5 ❓ Question!\n\n  - type: checkboxes\n    attributes:\n      label: Search before asking\n      description: >\n        Please search the Ultralytics YOLOv5 [docs](https://docs.ultralytics.com/yolov5/), [issues](https://github.com/ultralytics/yolov5/issues), and [Ultralytics discussions](https://github.com/orgs/ultralytics/discussions) to see if a similar question already exists.\n      options:\n        - label: >\n            I checked the docs, issues, and discussions and could not find an answer.\n          required: true\n\n  - type: textarea\n    attributes:\n      label: Question\n      description: What is your question? Provide as much detail as possible so we can assist with Ultralytics YOLOv5. Include code snippets, screenshots, logs, or links to notebooks/demos.\n      placeholder: |\n        💡 ProTip! Include as much information as possible (logs, tracebacks, screenshots, etc.) to receive the most helpful response.\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: Additional\n      description: Anything else you would like to share?\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Dependabot for package version updates\n# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates\n\nversion: 2\nupdates:\n  - package-ecosystem: pip\n    directory: \"/\"\n    schedule:\n      interval: weekly\n      time: \"04:00\"\n    open-pull-requests-limit: 10\n    labels:\n      - dependencies\n\n  - package-ecosystem: github-actions\n    directory: \"/.github/workflows\"\n    schedule:\n      interval: weekly\n      time: \"04:00\"\n    open-pull-requests-limit: 5\n    labels:\n      - dependencies\n"
  },
  {
    "path": ".github/workflows/ci-testing.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# YOLOv5 Continuous Integration (CI) GitHub Actions tests\n\nname: YOLOv5 CI\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches: [master]\n  pull_request:\n    branches: [master]\n  schedule:\n    - cron: \"0 0 * * *\" # runs at 00:00 UTC every day\n  workflow_dispatch:\n\njobs:\n  Benchmarks:\n    runs-on: ${{ matrix.os }}\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [ubuntu-latest]\n        python-version: [\"3.11\"] # requires python<=3.11\n        model: [yolov5n]\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-python@v6\n        with:\n          python-version: ${{ matrix.python-version }}\n      - uses: astral-sh/setup-uv@v7\n      - name: Install requirements\n        run: |\n          uv pip install --system -r requirements.txt coremltools openvino-dev \"tensorflow<=2.19.0\" \"keras>=3.5.0,<=3.12.0\" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match\n          yolo checks\n          uv pip list\n      - name: Benchmark DetectionModel\n        run: |\n          python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29\n      - name: Benchmark SegmentationModel\n        run: |\n          python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22\n      - name: Test predictions\n        run: |\n          python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224\n          python detect.py --weights ${{ matrix.model }}.onnx --img 320\n          python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320\n          python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224\n\n  Tests:\n    timeout-minutes: 60\n    runs-on: ${{ matrix.os }}\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [ubuntu-latest, windows-latest, macos-14] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049\n        python-version: [\"3.11\"]\n        model: [yolov5n]\n        include:\n          - os: ubuntu-latest\n            python-version: \"3.8\" # torch 1.8.0 requires python >=3.6, <=3.8\n            model: yolov5n\n            torch: \"1.8.0\" # min torch version CI https://pypi.org/project/torchvision/\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-python@v6\n        with:\n          python-version: ${{ matrix.python-version }}\n      - uses: astral-sh/setup-uv@v7\n      - name: Install requirements\n        run: |\n          torch=\"\"\n          if [ \"${{ matrix.torch }}\" == \"1.8.0\" ]; then\n            torch=\"torch==1.8.0 torchvision==0.9.0\"\n          fi\n          uv pip install --system -r requirements.txt $torch --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match\n        shell: bash # for Windows compatibility\n      - name: Check environment\n        run: |\n          yolo checks\n          pip list\n      - name: Test detection\n        shell: bash # for Windows compatibility\n        run: |\n          # export PYTHONPATH=\"$PWD\"  # to run '$ python *.py' files in subdirectories\n          m=${{ matrix.model }}  # official weights\n          b=runs/train/exp/weights/best  # best.pt checkpoint\n          python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu  # train\n          for d in cpu; do  # devices\n            for w in 
$m $b; do  # weights\n              python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d  # val\n              python detect.py --imgsz 64 --weights $w.pt --device $d  # detect\n            done\n          done\n          python hubconf.py --model $m  # hub\n          # python models/tf.py --weights $m.pt  # build TF model\n          python models/yolo.py --cfg $m.yaml  # build PyTorch model\n          python export.py --weights $m.pt --img 64 --include torchscript  # export\n          python - <<EOF\n          import torch\n          im = torch.zeros([1, 3, 64, 64])\n          for path in '$m', '$b':\n              model = torch.hub.load('.', 'custom', path=path, source='local')\n              print(model('data/images/bus.jpg'))\n              model(im)  # warmup, build grids for trace\n              torch.jit.trace(model, [im])\n          EOF\n      - name: Test segmentation\n        shell: bash # for Windows compatibility\n        run: |\n          m=${{ matrix.model }}-seg  # official weights\n          b=runs/train-seg/exp/weights/best  # best.pt checkpoint\n          python segment/train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu  # train\n          python segment/train.py --imgsz 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device cpu  # train\n          for d in cpu; do  # devices\n            for w in $m $b; do  # weights\n              python segment/val.py --imgsz 64 --batch 32 --weights $w.pt --device $d  # val\n              python segment/predict.py --imgsz 64 --weights $w.pt --device $d  # predict\n              python export.py --weights $w.pt --img 64 --include torchscript --device $d  # export\n            done\n          done\n      - name: Test classification\n        shell: bash # for Windows compatibility\n        run: |\n          m=${{ matrix.model }}-cls.pt  # official weights\n          b=runs/train-cls/exp/weights/best.pt  # best.pt checkpoint\n          python classify/train.py --imgsz 32 --model $m --data mnist160 --epochs 1  # train\n          python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist160  # val\n          python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist160/test/7/60.png  # predict\n          python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg  # predict\n          python export.py --weights $b --img 64 --include torchscript  # export\n          python - <<EOF\n          import torch\n          for path in '$m', '$b':\n              model = torch.hub.load('.', 'custom', path=path, source='local')\n          EOF\n\n  Summary:\n    runs-on: ubuntu-latest\n    needs: [Benchmarks, Tests]\n    if: always()\n    steps:\n      - name: Check for failure and notify\n        if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') && github.run_attempt == '1'\n        uses: slackapi/slack-github-action@v3.0.1\n        with:\n          webhook-type: incoming-webhook\n          webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}\n          payload: |\n            text: \"<!channel> GitHub Actions error for ${{ github.workflow }} ❌\\n\\n\\n*Repository:* https://github.com/${{ github.repository }}\\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\\n*Author:* ${{ github.actor }}\\n*Event:* 
${{ github.event_name }}\\n\"\n"
  },
  {
    "path": ".github/workflows/cla.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA\n# This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged\n\nname: CLA Assistant\non:\n  issue_comment:\n    types:\n      - created\n  pull_request_target:\n    types:\n      - reopened\n      - opened\n      - synchronize\n\npermissions:\n  actions: write\n  contents: write\n  pull-requests: write\n  statuses: write\n\njobs:\n  CLA:\n    if: github.repository == 'ultralytics/yolov5'\n    runs-on: ubuntu-latest\n    steps:\n      - name: CLA Assistant\n        if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'\n        uses: contributor-assistant/github-action@v2.6.1\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          # Must be repository secret PAT\n          PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}\n        with:\n          path-to-signatures: \"signatures/version1/cla.json\"\n          path-to-document: \"https://docs.ultralytics.com/help/CLA\" # CLA document\n          # Branch must not be protected\n          branch: \"cla-signatures\"\n          allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot*\n\n          remote-organization-name: ultralytics\n          remote-repository-name: cla\n          custom-pr-sign-comment: \"I have read the CLA Document and I sign the CLA\"\n          custom-allsigned-prcomment: All Contributors have signed the CLA. ✅\n"
  },
  {
    "path": ".github/workflows/docker.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n\nname: Publish Docker Images\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches: [master]\n  workflow_dispatch:\n\njobs:\n  docker:\n    if: github.repository == 'ultralytics/yolov5'\n    name: Push Docker image to Docker Hub\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repo\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0 # copy full .git directory to access full git history in Docker images\n\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v3\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to Docker Hub\n        uses: docker/login-action@v3\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Build and push arm64 image\n        uses: docker/build-push-action@v6\n        continue-on-error: true\n        with:\n          context: .\n          platforms: linux/arm64\n          file: utils/docker/Dockerfile-arm64\n          push: true\n          tags: ultralytics/yolov5:latest-arm64\n\n      - name: Build and push CPU image\n        uses: docker/build-push-action@v6\n        continue-on-error: true\n        with:\n          context: .\n          file: utils/docker/Dockerfile-cpu\n          push: true\n          tags: ultralytics/yolov5:latest-cpu\n\n      - name: Build and push GPU image\n        uses: docker/build-push-action@v6\n        continue-on-error: true\n        with:\n          context: .\n          file: utils/docker/Dockerfile\n          push: true\n          tags: ultralytics/yolov5:latest\n"
  },
  {
    "path": ".github/workflows/format.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Ultralytics Actions https://github.com/ultralytics/actions\n# This workflow formats code and documentation in PRs to Ultralytics standards\n\nname: Ultralytics Actions\n\non:\n  issues:\n    types: [opened]\n  pull_request:\n    branches: [main, master]\n    types: [opened, closed, synchronize, review_requested]\n\npermissions:\n  contents: write # Modify code in PRs\n  pull-requests: write # Add comments and labels to PRs\n  issues: write # Add comments and labels to issues\n\njobs:\n  actions:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Run Ultralytics Actions\n        uses: ultralytics/actions@main\n        with:\n          token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }} # Auto-generated token\n          labels: true # Auto-label issues/PRs using AI\n          python: true # Format Python with Ruff and docformatter\n          prettier: true # Format YAML, JSON, Markdown, CSS\n          spelling: true # Check spelling with codespell\n          links: false # Check broken links with Lychee\n          summary: true # Generate AI-powered PR summaries\n          openai_api_key: ${{ secrets.OPENAI_API_KEY }} # Powers PR summaries, labels and comments\n          brave_api_key: ${{ secrets.BRAVE_API_KEY }} # Used for broken link resolution\n          first_issue_response: |\n            👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/).\n            If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it.\n            If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/).\n            ## Requirements\n            [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). 
To get started:\n            ```bash\n            git clone https://github.com/ultralytics/yolov5  # clone\n            cd yolov5\n            pip install -r requirements.txt  # install\n            ```\n            ## Environments\n            YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda-zone)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n            - **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n            - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n            - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n            - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n            ## Status\n            <a href=\"https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml\"><img src=\"https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg\" alt=\"YOLOv5 CI\"></a>\n            If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
  },
  {
    "path": ".github/workflows/links.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee\n# Ignores the following status codes to reduce false positives:\n#   - 403(OpenVINO, 'forbidden')\n#   - 429(Instagram, 'too many requests')\n#   - 500(Zenodo, 'cached')\n#   - 502(Zenodo, 'bad gateway')\n#   - 999(LinkedIn, 'unknown status code')\n\nname: Check Broken links\n\npermissions:\n  contents: read\n\non:\n  workflow_dispatch:\n  schedule:\n    - cron: \"0 0 * * *\" # runs at 00:00 UTC every day\n\njobs:\n  Links:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n\n      - name: Install lychee\n        run: curl -sSfL \"https://github.com/lycheeverse/lychee/releases/latest/download/lychee-x86_64-unknown-linux-gnu.tar.gz\" | sudo tar xz -C /usr/local/bin\n\n      - name: Test Markdown and HTML links with retry\n        uses: ultralytics/actions/retry@main\n        with:\n          timeout_minutes: 5\n          retry_delay_seconds: 60\n          retries: 2\n          run: |\n            lychee \\\n            --scheme 'https' \\\n            --timeout 60 \\\n            --insecure \\\n            --accept 100..=103,200..=299,401,403,429,500,502,999 \\\n            --exclude-all-private \\\n            --exclude 'https?://(www\\.)?(linkedin\\.com|twitter\\.com|x\\.com|instagram\\.com|kaggle\\.com|fonts\\.gstatic\\.com|url\\.com)' \\\n            --exclude-path './**/ci.yml' \\\n            --github-token ${{ secrets.GITHUB_TOKEN }} \\\n            --header \"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36\" \\\n            './**/*.md' \\\n            './**/*.html' | tee -a $GITHUB_STEP_SUMMARY\n\n            # Raise error if broken links found\n            if ! grep -q \"0 Errors\" $GITHUB_STEP_SUMMARY; then\n              exit 1\n            fi\n\n      - name: Test Markdown, HTML, YAML, Python and Notebook links with retry\n        if: github.event_name == 'workflow_dispatch'\n        uses: ultralytics/actions/retry@main\n        with:\n          timeout_minutes: 5\n          retry_delay_seconds: 60\n          retries: 2\n          run: |\n            lychee \\\n            --scheme 'https' \\\n            --timeout 60 \\\n            --insecure \\\n            --accept 100..=103,200..=299,429,999 \\\n            --exclude-all-private \\\n            --exclude 'https?://(www\\.)?(linkedin\\.com|twitter\\.com|x\\.com|instagram\\.com|kaggle\\.com|fonts\\.gstatic\\.com|url\\.com)' \\\n            --exclude-path './**/ci.yml' \\\n            --github-token ${{ secrets.GITHUB_TOKEN }} \\\n            --header \"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36\" \\\n            './**/*.md' \\\n            './**/*.html' \\\n            './**/*.yml' \\\n            './**/*.yaml' \\\n            './**/*.py' \\\n            './**/*.ipynb' | tee -a $GITHUB_STEP_SUMMARY\n\n            # Raise error if broken links found\n            if ! grep -q \"0 Errors\" $GITHUB_STEP_SUMMARY; then\n              exit 1\n            fi\n"
  },
  {
    "path": ".github/workflows/merge-main-into-prs.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Automatically merges repository 'main' branch into all open PRs to keep them up-to-date\n# Action runs on updates to main branch so when one PR merges to main all others update\n\nname: Merge main into PRs\n\npermissions:\n  contents: read\n  pull-requests: write\n\non:\n  workflow_dispatch:\n  # push:\n  #   branches:\n  #     - ${{ github.event.repository.default_branch }}\n\njobs:\n  Merge:\n    if: github.repository == 'ultralytics/yolov5'\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n      - uses: actions/setup-python@v6\n        with:\n          python-version: \"3.x\"\n          cache: \"pip\"\n      - name: Install requirements\n        run: |\n          pip install pygithub\n      - name: Merge default branch into PRs\n        shell: python\n        run: |\n          from github import Github\n          import os\n\n          g = Github(os.getenv('GITHUB_TOKEN'))\n          repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))\n\n          # Fetch the default branch name\n          default_branch_name = repo.default_branch\n          default_branch = repo.get_branch(default_branch_name)\n\n          for pr in repo.get_pulls(state='open', sort='created'):\n              try:\n                  # Get full names for repositories and branches\n                  base_repo_name = repo.full_name\n                  head_repo_name = pr.head.repo.full_name\n                  base_branch_name = pr.base.ref\n                  head_branch_name = pr.head.ref\n\n                  # Check if PR is behind the default branch\n                  comparison = repo.compare(default_branch.commit.sha, pr.head.sha)\n                  \n                  if comparison.behind_by > 0:\n                      print(f\"⚠️ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is behind {default_branch_name} by {comparison.behind_by} commit(s).\")\n                      \n                      # Attempt to update the branch\n                      try:\n                          success = pr.update_branch()\n                          assert success, \"Branch update failed\"\n                          print(f\"✅ Successfully merged '{default_branch_name}' into PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}).\")\n                      except Exception as update_error:\n                          print(f\"❌ Could not update PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}): {update_error}\")\n                          print(\"   This might be due to branch protection rules or insufficient permissions.\")\n                  else:\n                      print(f\"✅ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is up to date with {default_branch_name}.\")\n              except Exception as e:\n                  print(f\"❌ Could not process PR #{pr.number}: {e}\")\n\n        env:\n          GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }}\n          GITHUB_REPOSITORY: ${{ github.repository }}\n"
  },
  {
    "path": ".github/workflows/stale.yml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: Close stale issues\non:\n  schedule:\n    - cron: \"0 0 * * *\" # Runs at 00:00 UTC every day\n\njobs:\n  stale:\n    permissions:\n      issues: write\n      pull-requests: write\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/stale@v10\n        with:\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n\n          stale-issue-message: |\n            👋 Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help.\n\n            For additional resources and information, please see the links below:\n\n            - **Docs**: https://docs.ultralytics.com\n            - **Platform**: https://platform.ultralytics.com\n            - **Community**: https://community.ultralytics.com\n\n            Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed!\n\n            Thank you for your contributions to YOLO 🚀 and Vision AI ⭐\n\n          stale-pr-message: |\n            👋 Hello there! We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap.\n\n            We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved.\n\n            For additional resources and information, please see the links below:\n\n            - **Docs**: https://docs.ultralytics.com\n            - **Platform**: https://platform.ultralytics.com\n            - **Community**: https://community.ultralytics.com\n\n            Thank you for your contributions to YOLO 🚀 and Vision AI ⭐\n\n          days-before-issue-stale: 30\n          days-before-issue-close: 10\n          days-before-pr-stale: 90\n          days-before-pr-close: 30\n          exempt-issue-labels: \"documentation,tutorial,TODO\"\n          operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting.\n"
  },
  {
    "path": ".gitignore",
    "content": "# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------\n*.jpg\n*.jpeg\n*.png\n*.bmp\n*.tif\n*.tiff\n*.heic\n*.JPG\n*.JPEG\n*.PNG\n*.BMP\n*.TIF\n*.TIFF\n*.HEIC\n*.mp4\n*.mov\n*.MOV\n*.avi\n*.data\n*.json\n*.cfg\n!setup.cfg\n!cfg/yolov3*.cfg\n\nstorage.googleapis.com\nruns/*\ndata/*\ndata/images/*\n!data/*.yaml\n!data/hyps\n!data/scripts\n!data/images\n!data/images/zidane.jpg\n!data/images/bus.jpg\n!data/*.sh\n\nresults*.csv\n\n# Datasets -------------------------------------------------------------------------------------------------------------\ncoco/\ncoco128/\nVOC/\n\n# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------\n*.m~\n*.mat\n!targets*.mat\n\n# Neural Network weights -----------------------------------------------------------------------------------------------\n*.weights\n*.pt\n*.pb\n*.onnx\n*.engine\n*.mlmodel\n*.mlpackage\n*.torchscript\n*.tflite\n*.h5\n*_saved_model/\n*_web_model/\n*_openvino_model/\n*_paddle_model/\ndarknet53.conv.74\nyolov3-tiny.conv.15\n\n# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n/wandb/\n.installed.cfg\n*.egg\n\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# dotenv\n.env\n\n# virtualenv\n.venv*\nvenv*/\nENV*/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n\n# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------\n\n# General\n.DS_Store\n.AppleDouble\n.LSOverride\n\n# Icon must end with two \\r\nIcon\nIcon?\n\n# Thumbnails\n._*\n\n# Files that might appear in the root of a volume\n.DocumentRevisions-V100\n.fseventsd\n.Spotlight-V100\n.TemporaryItems\n.Trashes\n.VolumeIcon.icns\n.com.apple.timemachine.donotpresent\n\n# Directories potentially created on remote AFP share\n.AppleDB\n.AppleDesktop\nNetwork Trash Folder\nTemporary Items\n.apdisk\n\n\n# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore\n# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm\n# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839\n\n# User-specific stuff:\n.idea/*\n.idea/**/workspace.xml\n.idea/**/tasks.xml\n.idea/dictionaries\n.html  # Bokeh Plots\n.pg  # TensorFlow Frozen Graphs\n.avi # videos\n\n# Sensitive or 
high-churn files:\n.idea/**/dataSources/\n.idea/**/dataSources.ids\n.idea/**/dataSources.local.xml\n.idea/**/sqlDataSources.xml\n.idea/**/dynamic.xml\n.idea/**/uiDesigner.xml\n\n# Gradle:\n.idea/**/gradle.xml\n.idea/**/libraries\n\n# CMake\ncmake-build-debug/\ncmake-build-release/\n\n# Mongo Explorer plugin:\n.idea/**/mongoSettings.xml\n\n## File-based project format:\n*.iws\n\n## Plugin-specific files:\n\n# IntelliJ\nout/\n\n# mpeltonen/sbt-idea plugin\n.idea_modules/\n\n# JIRA plugin\natlassian-ide-plugin.xml\n\n# Cursive Clojure plugin\n.idea/replstate.xml\n\n# Crashlytics plugin (for Android Studio and IntelliJ)\ncom_crashlytics_export_strings.xml\ncrashlytics.properties\ncrashlytics-build.properties\nfabric.properties\n"
  },
  {
    "path": "CITATION.cff",
    "content": "cff-version: 1.2.0\npreferred-citation:\n  type: software\n  message: If you use YOLOv5, please cite it as below.\n  authors:\n  - family-names: Jocher\n    given-names: Glenn\n    orcid: \"https://orcid.org/0000-0001-5950-6979\"\n  title: \"YOLOv5 by Ultralytics\"\n  version: 7.0\n  doi: 10.5281/zenodo.3908559\n  date-released: 2020-5-29\n  license: AGPL-3.0\n  url: \"https://github.com/ultralytics/yolov5\"\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg\" width=\"320\" alt=\"Ultralytics logo\"></a>\n\n# Contributing to YOLO 🚀\n\nWe value your input and are committed to making contributing to YOLO as easy and transparent as possible. Whether you're:\n\n- Reporting a bug\n- Discussing the current state of the codebase\n- Submitting a fix\n- Proposing a new feature\n- Interested in becoming a maintainer\n\nUltralytics YOLO thrives thanks to the collective efforts of our community. Every improvement you contribute helps push the boundaries of what's possible in AI! 😃\n\n## 🛠️ Submitting a Pull Request (PR)\n\nSubmitting a PR is straightforward! Here’s an example showing how to update `requirements.txt` in four simple steps:\n\n### 1. Select the File to Update\n\nClick on `requirements.txt` in the GitHub repository.\n\n<p align=\"center\"><img width=\"800\" alt=\"PR_step1\" src=\"https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png\"></p>\n\n### 2. Click 'Edit this file'\n\nFind the 'Edit this file' button in the top-right corner.\n\n<p align=\"center\"><img width=\"800\" alt=\"PR_step2\" src=\"https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png\"></p>\n\n### 3. Make Your Changes\n\nFor example, update the `matplotlib` version from `3.2.2` to `3.3`.\n\n<p align=\"center\"><img width=\"800\" alt=\"PR_step3\" src=\"https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png\"></p>\n\n### 4. Preview Changes and Submit Your PR\n\nClick the **Preview changes** tab to review your updates. At the bottom, select 'Create a new branch for this commit', give your branch a descriptive name like `fix/matplotlib_version`, and click the green **Propose changes** button. Your PR is now submitted for review! 😃\n\n<p align=\"center\"><img width=\"800\" alt=\"PR_step4\" src=\"https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png\"></p>\n\n### PR Best Practices\n\nTo ensure your work is integrated smoothly, please:\n\n- ✅ Make sure your PR is **up-to-date** with the `ultralytics/yolov5` `master` branch. If your branch is behind, update it using the 'Update branch' button or by running `git pull` and `git merge master` locally.\n\n<p align=\"center\"><img width=\"751\" alt=\"Screenshot 2022-08-29 at 22 47 15\" src=\"https://user-images.githubusercontent.com/26833433/187295893-50ed9f44-b2c9-4138-a614-de69bd1753d7.png\"></p>\n\n- ✅ Ensure all YOLO Continuous Integration (CI) **checks are passing**.\n\n<p align=\"center\"><img width=\"751\" alt=\"Screenshot 2022-08-29 at 22 47 03\" src=\"https://user-images.githubusercontent.com/26833433/187296922-545c5498-f64a-4d8c-8300-5fa764360da6.png\"></p>\n\n- ✅ Limit your changes to the **minimum** required for your bug fix or feature.  \n  _\"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is.\"_ — Bruce Lee\n\n## 🐛 Submitting a Bug Report\n\nIf you encounter an issue with YOLO, please submit a bug report!\n\nTo help us investigate, we need to be able to reproduce the problem. Follow these guidelines to provide what we need to get started:\n\nWhen asking a question or reporting a bug, you'll get better help if you provide **code** that others can easily understand and use to **reproduce** the issue. 
This is known as a [minimum reproducible example](https://docs.ultralytics.com/help/minimum-reproducible-example/). Your code should be:\n\n- ✅ **Minimal** – Use as little code as possible that still produces the issue\n- ✅ **Complete** – Include all parts needed for someone else to reproduce the problem\n- ✅ **Reproducible** – Test your code to ensure it actually reproduces the issue\n\nAdditionally, for [Ultralytics](https://www.ultralytics.com/) to assist you, your code should be:\n\n- ✅ **Current** – Ensure your code is up-to-date with the latest [master branch](https://github.com/ultralytics/yolov5/tree/master). Use `git pull` or `git clone` to get the latest version and confirm your issue hasn't already been fixed.\n- ✅ **Unmodified** – The problem must be reproducible without any custom modifications to the repository. [Ultralytics](https://www.ultralytics.com/) does not provide support for custom code ⚠️.\n\nIf your issue meets these criteria, please close your current issue and open a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose), including your [minimum reproducible example](https://docs.ultralytics.com/help/minimum-reproducible-example/) to help us diagnose your problem.\n\n## 📄 License\n\nBy contributing, you agree that your contributions will be licensed under the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/).\n\n---\n\nFor more details on contributing, check out the [Ultralytics open-source contributing guide](https://docs.ultralytics.com/help/contributing/), and explore our [Ultralytics blog](https://www.ultralytics.com/blog) for community highlights and best practices.\n\nWe welcome your contributions—thank you for helping make Ultralytics YOLO better! 🚀\n\n[![Ultralytics open-source contributors](https://raw.githubusercontent.com/ultralytics/assets/main/im/image-contributors.png)](https://github.com/ultralytics/ultralytics/graphs/contributors)\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<https://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n  <p>\n    <a href=\"https://platform.ultralytics.com/?utm_source=github&utm_medium=referral&utm_campaign=platform_launch&utm_content=banner&utm_term=ultralytics_github\" target=\"_blank\">\n      <img width=\"100%\" src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\" alt=\"Ultralytics YOLO banner\"></a>\n  </p>\n\n[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\n\n<div>\n    <a href=\"https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml\"><img src=\"https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg\" alt=\"YOLOv5 CI Testing\"></a>\n    <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n    <a href=\"https://discord.com/invite/ultralytics\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a> <a href=\"https://community.ultralytics.com/\"><img alt=\"Ultralytics Forums\" src=\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\"></a> <a href=\"https://www.reddit.com/r/ultralytics/\"><img alt=\"Ultralytics Reddit\" src=\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\"></a>\n    <br>\n    <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n    <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n    <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n  </div>\n  <br>\n\nUltralytics YOLOv5 🚀 is a cutting-edge, state-of-the-art (SOTA) computer vision model developed by [Ultralytics](https://www.ultralytics.com/). Based on the [PyTorch](https://pytorch.org/) framework, YOLOv5 is renowned for its ease of use, speed, and accuracy. It incorporates insights and best practices from extensive research and development, making it a popular choice for a wide range of vision AI tasks, including [object detection](https://docs.ultralytics.com/tasks/detect/), [image segmentation](https://docs.ultralytics.com/tasks/segment/), and [image classification](https://docs.ultralytics.com/tasks/classify/).\n\nWe hope the resources here help you get the most out of YOLOv5. 
Please browse the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for detailed information, raise an issue on [GitHub](https://github.com/ultralytics/yolov5/issues/new/choose) for support, and join our [Discord community](https://discord.com/invite/ultralytics) for questions and discussions!\n\nTo request an Enterprise License, please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).\n\n<div align=\"center\">\n  <a href=\"https://github.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png\" width=\"2%\" alt=\"Ultralytics GitHub\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://www.linkedin.com/company/ultralytics/\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png\" width=\"2%\" alt=\"Ultralytics LinkedIn\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://twitter.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png\" width=\"2%\" alt=\"Ultralytics Twitter\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://youtube.com/ultralytics?sub_confirmation=1\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png\" width=\"2%\" alt=\"Ultralytics YouTube\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://www.tiktok.com/@ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png\" width=\"2%\" alt=\"Ultralytics TikTok\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://ultralytics.com/bilibili\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png\" width=\"2%\" alt=\"Ultralytics BiliBili\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://discord.com/invite/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png\" width=\"2%\" alt=\"Ultralytics Discord\"></a>\n</div>\n\n</div>\n<br>\n\n## 🚀 YOLO11: The Next Evolution\n\nWe are excited to announce the launch of **Ultralytics YOLO11** 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Available now at the [Ultralytics YOLO GitHub repository](https://github.com/ultralytics/ultralytics), YOLO11 builds on our legacy of speed, precision, and ease of use. Whether you're tackling [object detection](https://docs.ultralytics.com/tasks/detect/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [pose estimation](https://docs.ultralytics.com/tasks/pose/), [image classification](https://docs.ultralytics.com/tasks/classify/), or [oriented object detection (OBB)](https://docs.ultralytics.com/tasks/obb/), YOLO11 delivers the performance and versatility needed to excel in diverse applications.\n\nGet started today and unlock the full potential of YOLO11! 
Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources:\n\n[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://clickpy.clickhouse.com/dashboard/ultralytics)\n\n```bash\n# Install the ultralytics package\npip install ultralytics\n```\n\n<div align=\"center\">\n  <a href=\"https://platform.ultralytics.com/ultralytics/yolo26\" target=\"_blank\">\n  <img width=\"100%\" src=\"https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png\" alt=\"Ultralytics YOLO Performance Comparison\"></a>\n</div>\n\n## 📚 Documentation\n\nSee the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for full documentation on training, testing, and deployment. See below for quickstart examples.\n\n<details open>\n<summary>Install</summary>\n\nClone the repository and install dependencies in a [**Python>=3.8.0**](https://www.python.org/) environment. Ensure you have [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) installed.\n\n```bash\n# Clone the YOLOv5 repository\ngit clone https://github.com/ultralytics/yolov5\n\n# Navigate to the cloned directory\ncd yolov5\n\n# Install required packages\npip install -r requirements.txt\n```\n\n</details>\n\n<details open>\n<summary>Inference with PyTorch Hub</summary>\n\nUse YOLOv5 via [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) for inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) are automatically downloaded from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).\n\n```python\nimport torch\n\n# Load a YOLOv5 model (options: yolov5n, yolov5s, yolov5m, yolov5l, yolov5x)\nmodel = torch.hub.load(\"ultralytics/yolov5\", \"yolov5s\")  # Default: yolov5s\n\n# Define the input image source (URL, local file, PIL image, OpenCV frame, numpy array, or list)\nimg = \"https://ultralytics.com/images/zidane.jpg\"  # Example image\n\n# Perform inference (handles batching, resizing, normalization automatically)\nresults = model(img)\n\n# Process the results (options: .print(), .show(), .save(), .crop(), .pandas())\nresults.print()  # Print results to console\nresults.show()  # Display results in a window\nresults.save()  # Save results to runs/detect/exp\n```\n\n</details>\n\n<details>\n<summary>Inference with detect.py</summary>\n\nThe `detect.py` script runs inference on various sources. 
It automatically downloads [models](https://github.com/ultralytics/yolov5/tree/master/models) from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saves the results to the `runs/detect` directory.\n\n```bash\n# Run inference using a webcam\npython detect.py --weights yolov5s.pt --source 0\n\n# Run inference on a local image file\npython detect.py --weights yolov5s.pt --source img.jpg\n\n# Run inference on a local video file\npython detect.py --weights yolov5s.pt --source vid.mp4\n\n# Run inference on a screen capture\npython detect.py --weights yolov5s.pt --source screen\n\n# Run inference on a directory of images\npython detect.py --weights yolov5s.pt --source path/to/images/\n\n# Run inference on a text file listing image paths\npython detect.py --weights yolov5s.pt --source list.txt\n\n# Run inference on a text file listing stream URLs\npython detect.py --weights yolov5s.pt --source list.streams\n\n# Run inference using a glob pattern for images\npython detect.py --weights yolov5s.pt --source 'path/to/*.jpg'\n\n# Run inference on a YouTube video URL\npython detect.py --weights yolov5s.pt --source 'https://youtu.be/LNwODJXcvt4'\n\n# Run inference on an RTSP, RTMP, or HTTP stream\npython detect.py --weights yolov5s.pt --source 'rtsp://example.com/media.mp4'\n```\n\n</details>\n\n<details>\n<summary>Training</summary>\n\nThe commands below demonstrate how to reproduce YOLOv5 [COCO dataset](https://docs.ultralytics.com/datasets/detect/coco/) results. Both [models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) are downloaded automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are approximately 1/2/4/6/8 days on a single [NVIDIA V100 GPU](https://www.nvidia.com/en-us/data-center/v100/). Using [Multi-GPU training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) can significantly reduce training time. Use the largest `--batch-size` your hardware allows, or use `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). 
The batch sizes shown below are for V100-16GB GPUs.\n\n```bash\n# Train YOLOv5n on COCO for 300 epochs\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128\n\n# Train YOLOv5s on COCO for 300 epochs\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 64\n\n# Train YOLOv5m on COCO for 300 epochs\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5m.yaml --batch-size 40\n\n# Train YOLOv5l on COCO for 300 epochs\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5l.yaml --batch-size 24\n\n# Train YOLOv5x on COCO for 300 epochs\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5x.yaml --batch-size 16\n```\n\n<img width=\"800\" src=\"https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png\" alt=\"YOLOv5 Training Results\">\n\n</details>\n\n<details open>\n<summary>Tutorials</summary>\n\n- **[Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/)** 🚀 **RECOMMENDED**: Learn how to train YOLOv5 on your own datasets.\n- **[Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/)** ☘️: Improve your model's performance with expert tips.\n- **[Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)**: Speed up training using multiple GPUs.\n- **[PyTorch Hub Integration](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/)** 🌟 **NEW**: Easily load models using PyTorch Hub.\n- **[Model Export (TFLite, ONNX, CoreML, TensorRT)](https://docs.ultralytics.com/yolov5/tutorials/model_export/)** 🚀: Convert your models to various deployment formats like [ONNX](https://onnx.ai/) or [TensorRT](https://developer.nvidia.com/tensorrt).\n- **[NVIDIA Jetson Deployment](https://docs.ultralytics.com/guides/nvidia-jetson/)** 🌟 **NEW**: Deploy YOLOv5 on [NVIDIA Jetson](https://developer.nvidia.com/embedded-computing) devices.\n- **[Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)**: Enhance prediction accuracy with TTA.\n- **[Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/)**: Combine multiple models for better performance.\n- **[Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/)**: Optimize models for size and speed.\n- **[Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/)**: Automatically find the best training hyperparameters.\n- **[Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/)**: Adapt pretrained models to new tasks efficiently using [transfer learning](https://www.ultralytics.com/glossary/transfer-learning).\n- **[Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/)** 🌟 **NEW**: Understand the YOLOv5 model architecture.\n- **[Ultralytics Platform Training](https://platform.ultralytics.com)** 🚀 **RECOMMENDED**: Train and deploy YOLO models using Ultralytics Platform.\n- **[ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/)**: Integrate with [ClearML](https://clear.ml/) for experiment tracking.\n- **[Neural Magic DeepSparse Integration](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/)**: Accelerate inference with DeepSparse.\n- **[Comet 
Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/)** 🌟 **NEW**: Log experiments using [Comet ML](https://www.comet.com/site/).\n\n</details>\n\n## 🧩 Integrations\n\nOur key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with partners like [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/), [Comet ML](https://docs.ultralytics.com/integrations/comet/), [Roboflow](https://docs.ultralytics.com/integrations/roboflow/), and [Intel OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow. Explore more at [Ultralytics Integrations](https://docs.ultralytics.com/integrations/).\n\n<a href=\"https://docs.ultralytics.com/integrations/\" target=\"_blank\">\n    <img width=\"100%\" src=\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\" alt=\"Ultralytics active learning integrations\">\n</a>\n<br>\n<br>\n\n<div align=\"center\">\n  <a href=\"https://platform.ultralytics.com\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png\" width=\"10%\" alt=\"Ultralytics Platform logo\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"15%\" height=\"0\" alt=\"space\">\n  <a href=\"https://docs.ultralytics.com/integrations/weights-biases/\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png\" width=\"10%\" alt=\"Weights & Biases logo\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"15%\" height=\"0\" alt=\"space\">\n  <a href=\"https://docs.ultralytics.com/integrations/comet/\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png\" width=\"10%\" alt=\"Comet ML logo\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"15%\" height=\"0\" alt=\"space\">\n  <a href=\"https://docs.ultralytics.com/integrations/neural-magic/\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png\" width=\"10%\" alt=\"Neural Magic logo\"></a>\n</div>\n\n|                                                         Ultralytics Platform 🌟                                                          |                                                          Weights & Biases                                                           |                                                                              Comet                                                                              |                                                        Neural Magic                                                         |\n| :--------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: |\n| Streamline YOLO workflows: Label, train, and deploy effortlessly with 
[Ultralytics Platform](https://platform.ultralytics.com). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/). | Free forever, [Comet ML](https://docs.ultralytics.com/integrations/comet/) lets you save YOLO models, resume training, and interactively visualize predictions. | Run YOLO inference up to 6x faster with [Neural Magic DeepSparse](https://docs.ultralytics.com/integrations/neural-magic/). |\n\n## ⭐ Ultralytics Platform\n\nExperience seamless AI development with [Ultralytics Platform](https://platform.ultralytics.com) ⭐, the ultimate platform for building, training, and deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models. Visualize datasets, train [YOLOv5](https://docs.ultralytics.com/models/yolov5/) and [YOLOv8](https://docs.ultralytics.com/models/yolov8/) 🚀 models, and deploy them to real-world applications without writing any code. Transform images into actionable insights using our cutting-edge tools and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** today!\n\n<a align=\"center\" href=\"https://platform.ultralytics.com\" target=\"_blank\">\n<img width=\"100%\" src=\"https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png\" alt=\"Ultralytics Platform Platform Screenshot\"></a>\n\n## 🤔 Why YOLOv5?\n\nYOLOv5 is designed for simplicity and ease of use. We prioritize real-world performance and accessibility.\n\n<p align=\"left\"><img width=\"800\" src=\"https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png\" alt=\"YOLOv5 Performance Chart\"></p>\n<details>\n  <summary>YOLOv5-P5 640 Figure</summary>\n\n<p align=\"left\"><img width=\"800\" src=\"https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png\" alt=\"YOLOv5 P5 640 Performance Chart\"></p>\n</details>\n<details>\n  <summary>Figure Notes</summary>\n\n- **COCO AP val** denotes the [mean Average Precision (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map) at [Intersection over Union (IoU)](https://www.ultralytics.com/glossary/intersection-over-union-iou) thresholds from 0.5 to 0.95, measured on the 5,000-image [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/) across various inference sizes (256 to 1536 pixels).\n- **GPU Speed** measures the average inference time per image on the [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/) using an [AWS p3.2xlarge V100 instance](https://aws.amazon.com/ec2/instance-types/p4/) with a batch size of 32.\n- **EfficientDet** data is sourced from the [google/automl repository](https://github.com/google/automl) at batch size 8.\n- **Reproduce** these results using the command: `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`\n\n</details>\n\n### Pretrained Checkpoints\n\nThis table shows the performance metrics for various YOLOv5 models trained on the COCO dataset.\n\n| Model                                                                                                                                                                    | Size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@640 (B) |\n| 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- |\n| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt)                                                                                       | 640                   | 28.0                 | 45.7              | **45**                       | **6.3**                       | **0.6**                        | **1.9**            | **4.5**                |\n| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt)                                                                                       | 640                   | 37.4                 | 56.8              | 98                           | 6.4                           | 0.9                            | 7.2                | 16.5                   |\n| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt)                                                                                       | 640                   | 45.4                 | 64.1              | 224                          | 8.2                           | 1.7                            | 21.2               | 49.0                   |\n| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt)                                                                                       | 640                   | 49.0                 | 67.3              | 430                          | 10.1                          | 2.7                            | 46.5               | 109.1                  |\n| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt)                                                                                       | 640                   | 50.7                 | 68.9              | 766                          | 12.1                          | 4.8                            | 86.7               | 205.7                  |\n|                                                                                                                                                                          |                       |                      |                   |                              |                               |                                |                    |                        |\n| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt)                                                                                     | 1280                  | 36.0                 | 54.4              | 153                          | 8.1                           | 2.1                            | 3.2                | 4.6                    |\n| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt)                                                                                     | 1280                  | 44.8                 | 63.7              | 385                          | 8.2                           | 3.6                            | 12.6               | 16.8                   |\n| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt)                        
                                                             | 1280                  | 51.3                 | 69.3              | 887                          | 11.1                          | 6.8                            | 35.7               | 50.0                   |\n| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt)                                                                                     | 1280                  | 53.7                 | 71.3              | 1784                         | 15.8                          | 10.5                           | 76.8               | 111.4                  |\n| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [[TTA]](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) | 1280<br>1536          | 55.0<br>**55.8**     | 72.7<br>**72.7**  | 3136<br>-                    | 26.2<br>-                     | 19.4<br>-                      | 140.7<br>-         | 209.8<br>-             |\n\n<details>\n  <summary>Table Notes</summary>\n\n- All checkpoints were trained for 300 epochs using default settings. Nano (n) and Small (s) models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyperparameters, while Medium (m), Large (l), and Extra-Large (x) models use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).\n- **mAP<sup>val</sup>** values represent single-model, single-scale performance on the [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/).<br>Reproduce using: `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`\n- **Speed** metrics are averaged over COCO val images using an [AWS p3.2xlarge V100 instance](https://aws.amazon.com/ec2/instance-types/p4/). Non-Maximum Suppression (NMS) time (~1 ms/image) is not included.<br>Reproduce using: `python val.py --data coco.yaml --img 640 --task speed --batch 1`\n- **TTA** ([Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)) includes reflection and scale augmentations for improved accuracy.<br>Reproduce using: `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`\n\n</details>\n\n## 🖼️ Segmentation\n\nThe YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) introduced [instance segmentation](https://docs.ultralytics.com/tasks/segment/) models that achieve state-of-the-art performance. These models are designed for easy training, validation, and deployment. For full details, see the [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and explore the [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart examples.\n\n<details>\n  <summary>Segmentation Checkpoints</summary>\n\n<div align=\"center\">\n<a align=\"center\" href=\"https://www.ultralytics.com/yolo\" target=\"_blank\">\n<img width=\"800\" src=\"https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png\" alt=\"YOLOv5 Segmentation Performance Chart\"></a>\n</div>\n\nYOLOv5 segmentation models were trained on the [COCO dataset](https://docs.ultralytics.com/datasets/segment/coco/) for 300 epochs at an image size of 640 pixels using A100 GPUs. Models were exported to [ONNX](https://onnx.ai/) FP32 for CPU speed tests and [TensorRT](https://developer.nvidia.com/tensorrt) FP16 for GPU speed tests. 
All speed tests were conducted on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for reproducibility.\n\n| Model                                                                                      | Size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train Time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@640 (B) |\n| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- |\n| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640                   | 27.6                 | 23.4                  | 80:17                                         | **62.7**                       | **1.2**                        | **2.0**            | **7.1**                |\n| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640                   | 37.6                 | 31.7                  | 88:16                                         | 173.3                          | 1.4                            | 7.6                | 26.4                   |\n| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640                   | 45.0                 | 37.1                  | 108:36                                        | 427.0                          | 2.2                            | 22.0               | 70.8                   |\n| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640                   | 49.0                 | 39.9                  | 66:43 (2x)                                    | 857.4                          | 2.9                            | 47.9               | 147.7                  |\n| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640                   | **50.7**             | **41.4**              | 62:56 (3x)                                    | 1579.2                         | 4.5                            | 88.8               | 265.7                  |\n\n- All checkpoints were trained for 300 epochs using the SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at an image size of 640 pixels, using default settings.<br>Training runs are logged at [https://wandb.ai/glenn-jocher/YOLOv5_v70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official).\n- **Accuracy** values represent single-model, single-scale performance on the COCO dataset.<br>Reproduce using: `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`\n- **Speed** metrics are averaged over 100 inference images using a [Colab Pro A100 High-RAM instance](https://colab.research.google.com/signup). 
Values indicate inference speed only (NMS adds approximately 1ms per image).<br>Reproduce using: `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`\n- **Export** to ONNX (FP32) and TensorRT (FP16) was performed using `export.py`.<br>Reproduce using: `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`\n\n</details>\n\n<details>\n  <summary>Segmentation Usage Examples &nbsp;<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a></summary>\n\n### Train\n\nYOLOv5 segmentation training supports automatic download of the [COCO128-seg dataset](https://docs.ultralytics.com/datasets/segment/coco8-seg/) via the `--data coco128-seg.yaml` argument. For the full [COCO-segments dataset](https://docs.ultralytics.com/datasets/segment/coco/), download it manually using `bash data/scripts/get_coco.sh --train --val --segments` and then train with `python train.py --data coco.yaml`.\n\n```bash\n# Train on a single GPU\npython segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640\n\n# Train using Multi-GPU Distributed Data Parallel (DDP)\npython -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3\n```\n\n### Val\n\nValidate the mask [mean Average Precision (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map) of YOLOv5s-seg on the COCO dataset:\n\n```bash\n# Download COCO validation segments split (780MB, 5000 images)\nbash data/scripts/get_coco.sh --val --segments\n\n# Validate the model\npython segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640\n```\n\n### Predict\n\nUse the pretrained YOLOv5m-seg.pt model to perform segmentation on `bus.jpg`:\n\n```bash\n# Run prediction\npython segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg\n```\n\n```python\n# Load model from PyTorch Hub (Note: Inference support might vary)\nmodel = torch.hub.load(\"ultralytics/yolov5\", \"custom\", \"yolov5m-seg.pt\")\n```\n\n| ![Zidane Segmentation Example](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![Bus Segmentation Example](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |\n| :-----------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------: |\n\n### Export\n\nExport the YOLOv5s-seg model to ONNX and TensorRT formats:\n\n```bash\n# Export model\npython export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0\n```\n\n</details>\n\n## 🏷️ Classification\n\nYOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases/v6.2) introduced support for [image classification](https://docs.ultralytics.com/tasks/classify/) model training, validation, and deployment. 
Check the [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) for details and the [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart guides.\n\n<details>\n  <summary>Classification Checkpoints</summary>\n\n<br>\n\nYOLOv5-cls classification models were trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) for 90 epochs using a 4xA100 instance. [ResNet](https://arxiv.org/abs/1512.03385) and [EfficientNet](https://arxiv.org/abs/1905.11946) models were trained alongside under identical settings for comparison. Models were exported to [ONNX](https://onnx.ai/) FP32 (CPU speed tests) and [TensorRT](https://developer.nvidia.com/tensorrt) FP16 (GPU speed tests). All speed tests were run on Google [Colab Pro](https://colab.research.google.com/signup) for reproducibility.\n\n| Model                                                                                              | Size<br><sup>(pixels) | Acc<br><sup>top1 | Acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TensorRT V100<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@224 (B) |\n| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- |\n| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt)         | 224                   | 64.6             | 85.4             | 7:59                                         | **3.3**                        | **0.5**                             | **2.5**            | **0.5**                |\n| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt)         | 224                   | 71.5             | 90.2             | 8:09                                         | 6.6                            | 0.6                                 | 5.4                | 1.4                    |\n| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt)         | 224                   | 75.9             | 92.9             | 10:06                                        | 15.5                           | 0.9                                 | 12.9               | 3.9                    |\n| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt)         | 224                   | 78.0             | 94.0             | 11:56                                        | 26.9                           | 1.4                                 | 26.5               | 8.5                    |\n| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt)         | 224                   | **79.0**         | **94.4**         | 15:04                                        | 54.3                           | 1.8                                 | 48.1               | 15.9                   |\n|                                                                                                    |                       |                  |                  |                                              |                                |                                     |                    |                        |\n| 
[ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt)               | 224                   | 70.3             | 89.5             | **6:47**                                     | 11.2                           | 0.5                                 | 11.7               | 3.7                    |\n| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt)               | 224                   | 73.9             | 91.8             | 8:33                                         | 20.6                           | 0.9                                 | 21.8               | 7.4                    |\n| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt)               | 224                   | 76.8             | 93.4             | 11:10                                        | 23.4                           | 1.0                                 | 25.6               | 8.5                    |\n| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt)             | 224                   | 78.5             | 94.3             | 17:10                                        | 42.1                           | 1.9                                 | 44.5               | 15.9                   |\n|                                                                                                    |                       |                  |                  |                                              |                                |                                     |                    |                        |\n| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224                   | 75.1             | 92.4             | 13:03                                        | 12.5                           | 1.3                                 | 5.3                | 1.0                    |\n| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224                   | 76.4             | 93.2             | 17:04                                        | 14.9                           | 1.6                                 | 7.8                | 1.5                    |\n| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224                   | 76.6             | 93.4             | 17:10                                        | 15.9                           | 1.6                                 | 9.1                | 1.7                    |\n| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224                   | 77.7             | 94.0             | 19:19                                        | 18.9                           | 1.9                                 | 12.2               | 2.4                    |\n\n<details>\n  <summary>Table Notes (click to expand)</summary>\n\n- All checkpoints were trained for 90 epochs using the SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at an image size of 224 pixels, using default settings.<br>Training runs are logged at [https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2).\n- **Accuracy** values (top-1 and top-5) represent single-model, single-scale performance on the [ImageNet-1k dataset](https://docs.ultralytics.com/datasets/classify/imagenet/).<br>Reproduce using: `python 
classify/val.py --data ../datasets/imagenet --img 224`\n- **Speed** metrics are averaged over 100 inference images using a Google [Colab Pro V100 High-RAM instance](https://colab.research.google.com/signup).<br>Reproduce using: `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`\n- **Export** to ONNX (FP32) and TensorRT (FP16) was performed using `export.py`.<br>Reproduce using: `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`\n\n</details>\n</details>\n\n<details>\n  <summary>Classification Usage Examples &nbsp;<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a></summary>\n\n### Train\n\nYOLOv5 classification training supports automatic download for datasets like [MNIST](https://docs.ultralytics.com/datasets/classify/mnist/), [Fashion-MNIST](https://docs.ultralytics.com/datasets/classify/fashion-mnist/), [CIFAR10](https://docs.ultralytics.com/datasets/classify/cifar10/), [CIFAR100](https://docs.ultralytics.com/datasets/classify/cifar100/), [Imagenette](https://docs.ultralytics.com/datasets/classify/imagenette/), [Imagewoof](https://docs.ultralytics.com/datasets/classify/imagewoof/), and [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) using the `--data` argument. For example, start training on MNIST with `--data mnist`.\n\n```bash\n# Train on a single GPU using CIFAR-100 dataset\npython classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128\n\n# Train using Multi-GPU DDP on ImageNet dataset\npython -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3\n```\n\n### Val\n\nValidate the accuracy of the YOLOv5m-cls model on the ImageNet-1k validation dataset:\n\n```bash\n# Download ImageNet validation split (6.3GB, 50,000 images)\nbash data/scripts/get_imagenet.sh --val\n\n# Validate the model\npython classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224\n```\n\n### Predict\n\nUse the pretrained YOLOv5s-cls.pt model to classify the image `bus.jpg`:\n\n```bash\n# Run prediction\npython classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg\n```\n\n```python\n# Load model from PyTorch Hub\nmodel = torch.hub.load(\"ultralytics/yolov5\", \"custom\", \"yolov5s-cls.pt\")\n```\n\n### Export\n\nExport trained YOLOv5s-cls, ResNet50, and EfficientNet_b0 models to ONNX and TensorRT formats:\n\n```bash\n# Export models\npython export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224\n```\n\n</details>\n\n## ☁️ Environments\n\nGet started quickly with our pre-configured environments. 
Click the icons below for setup details.\n\n<div align=\"center\">\n  <a href=\"https://bit.ly/yolov5-paperspace-notebook\" title=\"Run on Paperspace Gradient\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gradient.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" title=\"Open in Google Colab\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-colab-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\" title=\"Open in Kaggle\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-kaggle-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://hub.docker.com/r/ultralytics/yolov5\" title=\"Pull Docker Image\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-docker-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/\" title=\"AWS Quickstart Guide\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-aws-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/\" title=\"GCP Quickstart Guide\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gcp-small.png\" width=\"10%\" /></a>\n</div>\n\n## 🤝 Contribute\n\nWe welcome your contributions! Making YOLOv5 accessible and effective is a community effort. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started. Share your feedback through the [YOLOv5 Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). Thank you to all our contributors for making YOLOv5 better!\n\n[![Ultralytics open-source contributors](https://raw.githubusercontent.com/ultralytics/assets/main/im/image-contributors.png)](https://github.com/ultralytics/yolov5/graphs/contributors)\n\n## 📜 License\n\nUltralytics provides two licensing options to meet different needs:\n\n- **AGPL-3.0 License**: An [OSI-approved](https://opensource.org/license/agpl-v3) open-source license ideal for academic research, personal projects, and testing. It promotes open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.\n- **Enterprise License**: Tailored for commercial applications, this license allows seamless integration of Ultralytics software and AI models into commercial products and services, bypassing the open-source requirements of AGPL-3.0. For commercial use cases, please contact us via [Ultralytics Licensing](https://www.ultralytics.com/license).\n\n## 📧 Contact\n\nFor bug reports and feature requests related to YOLOv5, please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). 
For general questions, discussions, and community support, join our [Discord server](https://discord.com/invite/ultralytics)!\n\n<br>\n<div align=\"center\">\n  <a href=\"https://github.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png\" width=\"3%\" alt=\"Ultralytics GitHub\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://www.linkedin.com/company/ultralytics/\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png\" width=\"3%\" alt=\"Ultralytics LinkedIn\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://twitter.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png\" width=\"3%\" alt=\"Ultralytics Twitter\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://youtube.com/ultralytics?sub_confirmation=1\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png\" width=\"3%\" alt=\"Ultralytics YouTube\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://www.tiktok.com/@ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png\" width=\"3%\" alt=\"Ultralytics TikTok\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://ultralytics.com/bilibili\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png\" width=\"3%\" alt=\"Ultralytics BiliBili\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://discord.com/invite/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png\" width=\"3%\" alt=\"Ultralytics Discord\"></a>\n</div>\n"
  },
  {
    "path": "README.zh-CN.md",
    "content": "<div align=\"center\">\n  <p>\n    <a href=\"https://platform.ultralytics.com/?utm_source=github&utm_medium=referral&utm_campaign=platform_launch&utm_content=banner&utm_term=ultralytics_github\" target=\"_blank\">\n      <img width=\"100%\" src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\" alt=\"Ultralytics YOLO 横幅\"></a>\n  </p>\n\n[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\n\n<div>\n    <a href=\"https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml\"><img src=\"https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg\" alt=\"YOLOv5 CI 测试\"></a>\n    <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker 拉取次数\"></a>\n    <a href=\"https://discord.com/invite/ultralytics\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a> <a href=\"https://community.ultralytics.com/\"><img alt=\"Ultralytics 论坛\" src=\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\"></a> <a href=\"https://www.reddit.com/r/ultralytics/\"><img alt=\"Ultralytics Reddit\" src=\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\"></a>\n    <br>\n    <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"在 Gradient 上运行\"></a>\n    <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"在 Colab 中打开\"></a>\n    <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"在 Kaggle 中打开\"></a>\n  </div>\n  <br>\n\nUltralytics YOLOv5 🚀 是由 [Ultralytics](https://www.ultralytics.com/) 开发的尖端、达到业界顶尖水平（SOTA）的计算机视觉模型。基于 [PyTorch](https://pytorch.org/) 框架，YOLOv5 以其易用性、速度和准确性而闻名。它融合了广泛研究和开发的见解与最佳实践，使其成为各种视觉 AI 任务的热门选择，包括[目标检测](https://docs.ultralytics.com/tasks/detect/)、[图像分割](https://docs.ultralytics.com/tasks/segment/)和[图像分类](https://docs.ultralytics.com/tasks/classify/)。\n\n我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 [YOLOv5 文档](https://docs.ultralytics.com/yolov5/)获取详细信息，在 [GitHub](https://github.com/ultralytics/yolov5/issues/new/choose) 上提出 issue 以获得支持，并加入我们的 [Discord 社区](https://discord.com/invite/ultralytics)进行提问和讨论！\n\n如需申请企业许可证，请填写 [Ultralytics 授权许可](https://www.ultralytics.com/license) 表格。\n\n<div align=\"center\">\n  <a href=\"https://github.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png\" width=\"2%\" alt=\"Ultralytics GitHub\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://www.linkedin.com/company/ultralytics/\"><img 
src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png\" width=\"2%\" alt=\"Ultralytics LinkedIn\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://twitter.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png\" width=\"2%\" alt=\"Ultralytics Twitter\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://youtube.com/ultralytics?sub_confirmation=1\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png\" width=\"2%\" alt=\"Ultralytics YouTube\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://www.tiktok.com/@ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png\" width=\"2%\" alt=\"Ultralytics TikTok\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://ultralytics.com/bilibili\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png\" width=\"2%\" alt=\"Ultralytics BiliBili\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"2%\" alt=\"space\">\n  <a href=\"https://discord.com/invite/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png\" width=\"2%\" alt=\"Ultralytics Discord\"></a>\n</div>\n\n</div>\n<br>\n\n## 🚀 YOLO11：下一代进化\n\n我们激动地宣布推出 **Ultralytics YOLO11** 🚀，这是我们业界顶尖（SOTA）视觉模型的最新进展！YOLO11 现已在 [Ultralytics YOLO GitHub 仓库](https://github.com/ultralytics/ultralytics)发布，它继承了我们速度快、精度高和易于使用的传统。无论您是处理[目标检测](https://docs.ultralytics.com/tasks/detect/)、[实例分割](https://docs.ultralytics.com/tasks/segment/)、[姿态估计](https://docs.ultralytics.com/tasks/pose/)、[图像分类](https://docs.ultralytics.com/tasks/classify/)还是[旋转目标检测 (OBB)](https://docs.ultralytics.com/tasks/obb/)，YOLO11 都能提供在多样化应用中脱颖而出所需的性能和多功能性。\n\n立即开始，释放 YOLO11 的全部潜力！访问 [Ultralytics 文档](https://docs.ultralytics.com/)获取全面的指南和资源：\n\n[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://clickpy.clickhouse.com/dashboard/ultralytics)\n\n```bash\n# 安装 ultralytics 包\npip install ultralytics\n```\n\n<div align=\"center\">\n  <a href=\"https://platform.ultralytics.com/ultralytics/yolo26\" target=\"_blank\">\n  <img width=\"100%\" src=\"https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png\" alt=\"Ultralytics YOLO 性能比较\"></a>\n</div>\n\n## 📚 文档\n\n请参阅 [YOLOv5 文档](https://docs.ultralytics.com/yolov5/)，了解有关训练、测试和部署的完整文档。请参阅下方的快速入门示例。\n\n<details open>\n<summary>安装</summary>\n\n克隆仓库并在 [**Python>=3.8.0**](https://www.python.org/) 环境中安装依赖项。确保您已安装 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/)。\n\n```bash\n# 克隆 YOLOv5 仓库\ngit clone https://github.com/ultralytics/yolov5\n\n# 导航到克隆的目录\ncd yolov5\n\n# 安装所需的包\npip install -r requirements.txt\n```\n\n</details>\n\n<details open>\n<summary>使用 PyTorch Hub 进行推理</summary>\n\n通过 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 使用 YOLOv5 进行推理。[模型](https://github.com/ultralytics/yolov5/tree/master/models) 会自动从最新的 YOLOv5 
[发布版本](https://github.com/ultralytics/yolov5/releases)下载。\n\n```python\nimport torch\n\n# 加载 YOLOv5 模型（选项：yolov5n, yolov5s, yolov5m, yolov5l, yolov5x）\nmodel = torch.hub.load(\"ultralytics/yolov5\", \"yolov5s\")  # 默认：yolov5s\n\n# 定义输入图像源（URL、本地文件、PIL 图像、OpenCV 帧、numpy 数组或列表）\nimg = \"https://ultralytics.com/images/zidane.jpg\"  # 示例图像\n\n# 执行推理（自动处理批处理、调整大小、归一化）\nresults = model(img)\n\n# 处理结果（选项：.print(), .show(), .save(), .crop(), .pandas()）\nresults.print()  # 将结果打印到控制台\nresults.show()  # 在窗口中显示结果\nresults.save()  # 将结果保存到 runs/detect/exp\n```\n\n</details>\n\n<details>\n<summary>使用 detect.py 进行推理</summary>\n\n`detect.py` 脚本在各种来源上运行推理。它会自动从最新的 YOLOv5 [发布版本](https://github.com/ultralytics/yolov5/releases)下载[模型](https://github.com/ultralytics/yolov5/tree/master/models)，并将结果保存到 `runs/detect` 目录。\n\n```bash\n# 使用网络摄像头运行推理\npython detect.py --weights yolov5s.pt --source 0\n\n# 对本地图像文件运行推理\npython detect.py --weights yolov5s.pt --source img.jpg\n\n# 对本地视频文件运行推理\npython detect.py --weights yolov5s.pt --source vid.mp4\n\n# 对屏幕截图运行推理\npython detect.py --weights yolov5s.pt --source screen\n\n# 对图像目录运行推理\npython detect.py --weights yolov5s.pt --source path/to/images/\n\n# 对列出图像路径的文本文件运行推理\npython detect.py --weights yolov5s.pt --source list.txt\n\n# 对列出流 URL 的文本文件运行推理\npython detect.py --weights yolov5s.pt --source list.streams\n\n# 使用 glob 模式对图像运行推理\npython detect.py --weights yolov5s.pt --source 'path/to/*.jpg'\n\n# 对 YouTube 视频 URL 运行推理\npython detect.py --weights yolov5s.pt --source 'https://youtu.be/LNwODJXcvt4'\n\n# 对 RTSP、RTMP 或 HTTP 流运行推理\npython detect.py --weights yolov5s.pt --source 'rtsp://example.com/media.mp4'\n```\n\n</details>\n\n<details>\n<summary>训练</summary>\n\n以下命令演示了如何复现 YOLOv5 在 [COCO 数据集](https://docs.ultralytics.com/datasets/detect/coco/)上的结果。[模型](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)都会自动从最新的 YOLOv5 [发布版本](https://github.com/ultralytics/yolov5/releases)下载。YOLOv5n/s/m/l/x 的训练时间在单个 [NVIDIA V100 GPU](https://www.nvidia.com/en-us/data-center/v100/) 上大约需要 1/2/4/6/8 天。使用[多 GPU 训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)可以显著减少训练时间。请使用硬件允许的最大 `--batch-size`，或使用 `--batch-size -1` 以启用 YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092)。下面显示的批处理大小适用于 V100-16GB GPU。\n\n```bash\n# 在 COCO 上训练 YOLOv5n 300 个周期\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128\n\n# 在 COCO 上训练 YOLOv5s 300 个周期\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 64\n\n# 在 COCO 上训练 YOLOv5m 300 个周期\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5m.yaml --batch-size 40\n\n# 在 COCO 上训练 YOLOv5l 300 个周期\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5l.yaml --batch-size 24\n\n# 在 COCO 上训练 YOLOv5x 300 个周期\npython train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5x.yaml --batch-size 16\n```\n\n<img width=\"800\" src=\"https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png\" alt=\"YOLOv5 训练结果\">\n\n</details>\n\n<details open>\n<summary>教程</summary>\n\n- **[训练自定义数据](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/)** 🚀 **推荐**：学习如何在您自己的数据集上训练 YOLOv5。\n- **[获得最佳训练结果的技巧](https://docs.ultralytics.com/guides/model-training-tips/)** ☘️：利用专家技巧提升模型性能。\n- **[多 GPU 训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)**：使用多个 GPU 加速训练。\n- **[PyTorch Hub 
集成](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/)** 🌟 **新增**：使用 PyTorch Hub 轻松加载模型。\n- **[模型导出 (TFLite, ONNX, CoreML, TensorRT)](https://docs.ultralytics.com/yolov5/tutorials/model_export/)** 🚀：将您的模型转换为各种部署格式，如 [ONNX](https://onnx.ai/) 或 [TensorRT](https://developer.nvidia.com/tensorrt)。\n- **[NVIDIA Jetson 部署](https://docs.ultralytics.com/guides/nvidia-jetson/)** 🌟 **新增**：在 [NVIDIA Jetson](https://developer.nvidia.com/embedded-computing) 设备上部署 YOLOv5。\n- **[测试时增强 (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)**：使用 TTA 提高预测准确性。\n- **[模型集成](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/)**：组合多个模型以获得更好的性能。\n- **[模型剪枝/稀疏化](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/)**：优化模型的大小和速度。\n- **[超参数进化](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/)**：自动找到最佳训练超参数。\n- **[使用冻结层的迁移学习](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/)**：使用[迁移学习](https://www.ultralytics.com/glossary/transfer-learning)高效地将预训练模型应用于新任务。\n- **[架构摘要](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/)** 🌟 **新增**：了解 YOLOv5 模型架构。\n- **[Ultralytics Platform 训练](https://platform.ultralytics.com)** 🚀 **推荐**：使用 Ultralytics Platform 训练和部署 YOLO 模型。\n- **[ClearML 日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/)**：与 [ClearML](https://clear.ml/) 集成以进行实验跟踪。\n- **[Neural Magic DeepSparse 集成](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/)**：使用 DeepSparse 加速推理。\n- **[Comet 日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/)** 🌟 **新增**：使用 [Comet ML](https://www.comet.com/site/) 记录实验。\n\n</details>\n\n## 🧩 集成\n\n我们与领先 AI 平台的关键集成扩展了 Ultralytics 产品的功能，增强了诸如数据集标注、训练、可视化和模型管理等任务。了解 Ultralytics 如何与 [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/)、[Comet ML](https://docs.ultralytics.com/integrations/comet/)、[Roboflow](https://docs.ultralytics.com/integrations/roboflow/) 和 [Intel OpenVINO](https://docs.ultralytics.com/integrations/openvino/) 等合作伙伴协作，优化您的 AI 工作流程。在 [Ultralytics 集成](https://docs.ultralytics.com/integrations/) 探索更多信息。\n\n<a href=\"https://docs.ultralytics.com/integrations/\" target=\"_blank\">\n    <img width=\"100%\" src=\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\" alt=\"Ultralytics 主动学习集成\">\n</a>\n<br>\n<br>\n\n<div align=\"center\">\n  <a href=\"https://platform.ultralytics.com\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png\" width=\"10%\" alt=\"Ultralytics Platform logo\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"15%\" height=\"0\" alt=\"space\">\n  <a href=\"https://docs.ultralytics.com/integrations/weights-biases/\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png\" width=\"10%\" alt=\"Weights & Biases logo\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"15%\" height=\"0\" alt=\"space\">\n  <a href=\"https://docs.ultralytics.com/integrations/comet/\">\n    <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png\" width=\"10%\" alt=\"Comet ML logo\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"15%\" height=\"0\" alt=\"space\">\n  <a href=\"https://docs.ultralytics.com/integrations/neural-magic/\">\n   
 <img src=\"https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png\" width=\"10%\" alt=\"Neural Magic logo\"></a>\n</div>\n\n|                                              Ultralytics Platform 🌟                                               |                                              Weights & Biases                                               |                                                           Comet                                                            |                                                      Neural Magic                                                       |\n| :----------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------: |\n| 简化 YOLO 工作流程：使用 [Ultralytics Platform](https://platform.ultralytics.com) 轻松标注、训练和部署。立即试用！ | 使用 [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/) 跟踪实验、超参数和结果。 | 永久免费的 [Comet ML](https://docs.ultralytics.com/integrations/comet/) 让您保存 YOLO 模型、恢复训练并交互式地可视化预测。 | 使用 [Neural Magic DeepSparse](https://docs.ultralytics.com/integrations/neural-magic/) 将 YOLO 推理速度提高多达 6 倍。 |\n\n## ⭐ Ultralytics Platform\n\n通过 [Ultralytics Platform](https://platform.ultralytics.com) ⭐ 体验无缝的 AI 开发，这是构建、训练和部署[计算机视觉](https://www.ultralytics.com/glossary/computer-vision-cv)模型的终极平台。可视化数据集，训练 [YOLOv5](https://docs.ultralytics.com/models/yolov5/) 和 [YOLOv8](https://docs.ultralytics.com/models/yolov8/) 🚀 模型，并将它们部署到实际应用中，无需编写任何代码。使用我们尖端的工具和用户友好的 [Ultralytics App](https://www.ultralytics.com/app-install) 将图像转化为可操作的见解。今天就**免费**开始您的旅程吧！\n\n<a align=\"center\" href=\"https://platform.ultralytics.com\" target=\"_blank\">\n<img width=\"100%\" src=\"https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png\" alt=\"Ultralytics Platform 平台截图\"></a>\n\n## 🤔 为何选择 YOLOv5？\n\nYOLOv5 的设计旨在简单易用。我们优先考虑实际性能和可访问性。\n\n<p align=\"left\"><img width=\"800\" src=\"https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png\" alt=\"YOLOv5 性能图表\"></p>\n<details>\n  <summary>YOLOv5-P5 640 图表</summary>\n\n<p align=\"left\"><img width=\"800\" src=\"https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png\" alt=\"YOLOv5 P5 640 性能图表\"></p>\n</details>\n<details>\n  <summary>图表说明</summary>\n\n- **COCO AP val** 表示在 [交并比 (IoU)](https://www.ultralytics.com/glossary/intersection-over-union-iou) 阈值从 0.5 到 0.95 范围内的[平均精度均值 (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map)，在包含 5000 张图像的 [COCO val2017 数据集](https://docs.ultralytics.com/datasets/detect/coco/)上，使用各种推理尺寸（256 到 1536 像素）测量得出。\n- **GPU Speed** 使用批处理大小为 32 的 [AWS p3.2xlarge V100 实例](https://aws.amazon.com/ec2/instance-types/p4/)，测量在 [COCO val2017 数据集](https://docs.ultralytics.com/datasets/detect/coco/)上每张图像的平均推理时间。\n- **EfficientDet** 数据来源于 [google/automl 仓库](https://github.com/google/automl)，批处理大小为 8。\n- **复现**这些结果请使用命令：`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`\n\n</details>\n\n### 预训练权重\n\n此表显示了在 COCO 数据集上训练的各种 YOLOv5 模型的性能指标。\n\n| 模型                                                               
                                                                                                      | 尺寸<br><sup>(像素) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | 速度<br><sup>CPU b1<br>(毫秒) | 速度<br><sup>V100 b1<br>(毫秒) | 速度<br><sup>V100 b32<br>(毫秒) | 参数<br><sup>(M) | FLOPs<br><sup>@640 (B) |\n| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | -------------------- | ----------------- | ----------------------------- | ------------------------------ | ------------------------------- | ---------------- | ---------------------- |\n| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt)                                                                                       | 640                 | 28.0                 | 45.7              | **45**                        | **6.3**                        | **0.6**                         | **1.9**          | **4.5**                |\n| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt)                                                                                       | 640                 | 37.4                 | 56.8              | 98                            | 6.4                            | 0.9                             | 7.2              | 16.5                   |\n| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt)                                                                                       | 640                 | 45.4                 | 64.1              | 224                           | 8.2                            | 1.7                             | 21.2             | 49.0                   |\n| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt)                                                                                       | 640                 | 49.0                 | 67.3              | 430                           | 10.1                           | 2.7                             | 46.5             | 109.1                  |\n| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt)                                                                                       | 640                 | 50.7                 | 68.9              | 766                           | 12.1                           | 4.8                             | 86.7             | 205.7                  |\n|                                                                                                                                                                          |                     |                      |                   |                               |                                |                                 |                  |                        |\n| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt)                                                                                     | 1280                | 36.0                 | 54.4              | 153                           | 8.1                            | 2.1                             | 3.2              | 4.6                    |\n| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt)                                                                                     | 1280                | 44.8     
            | 63.7              | 385                           | 8.2                            | 3.6                             | 12.6             | 16.8                   |\n| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt)                                                                                     | 1280                | 51.3                 | 69.3              | 887                           | 11.1                           | 6.8                             | 35.7             | 50.0                   |\n| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt)                                                                                     | 1280                | 53.7                 | 71.3              | 1784                          | 15.8                           | 10.5                            | 76.8             | 111.4                  |\n| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [[TTA]](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) | 1280<br>1536        | 55.0<br>**55.8**     | 72.7<br>**72.7**  | 3136<br>-                     | 26.2<br>-                      | 19.4<br>-                       | 140.7<br>-       | 209.8<br>-             |\n\n<details>\n  <summary>表格说明</summary>\n\n- 所有预训练权重均使用默认设置训练了 300 个周期。Nano (n) 和 Small (s) 模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) 超参数，而 Medium (m)、Large (l) 和 Extra-Large (x) 模型使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml)。\n- **mAP<sup>val</sup>** 值表示在 [COCO val2017 数据集](https://docs.ultralytics.com/datasets/detect/coco/)上的单模型、单尺度性能。<br>复现请使用：`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`\n- **速度**指标是在 [AWS p3.2xlarge V100 实例](https://aws.amazon.com/ec2/instance-types/p4/)上对 COCO val 图像进行平均测量的。不包括非极大值抑制 (NMS) 时间（约 1 毫秒/图像）。<br>复现请使用：`python val.py --data coco.yaml --img 640 --task speed --batch 1`\n- **TTA** ([测试时增强](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)) 包括反射和尺度增强以提高准确性。<br>复现请使用：`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`\n\n</details>\n\n## 🖼️ 分割\n\nYOLOv5 [v7.0 版本](https://github.com/ultralytics/yolov5/releases/v7.0) 引入了[实例分割](https://docs.ultralytics.com/tasks/segment/)模型，达到了业界顶尖的性能。这些模型设计用于轻松训练、验证和部署。有关完整详细信息，请参阅[发布说明](https://github.com/ultralytics/yolov5/releases/v7.0)，并探索 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)以获取快速入门示例。\n\n<details>\n  <summary>分割预训练权重</summary>\n\n<div align=\"center\">\n<a align=\"center\" href=\"https://www.ultralytics.com/yolo\" target=\"_blank\">\n<img width=\"800\" src=\"https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png\" alt=\"YOLOv5 分割性能图表\"></a>\n</div>\n\nYOLOv5 分割模型在 [COCO 数据集](https://docs.ultralytics.com/datasets/segment/coco/)上使用 A100 GPU 以 640 像素的图像大小训练了 300 个周期。模型导出为 [ONNX](https://onnx.ai/) FP32 用于 CPU 速度测试，导出为 [TensorRT](https://developer.nvidia.com/tensorrt) FP16 用于 GPU 速度测试。所有速度测试均在 Google [Colab Pro](https://colab.research.google.com/signup) 笔记本上进行，以确保可复现性。\n\n| 模型                                                                                       | 尺寸<br><sup>(像素) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | 训练时间<br><sup>300 周期<br>A100 (小时) | 速度<br><sup>ONNX CPU<br>(毫秒) | 速度<br><sup>TRT A100<br>(毫秒) | 参数<br><sup>(M) | 
FLOPs<br><sup>@640 (B) |\n| ------------------------------------------------------------------------------------------ | ------------------- | -------------------- | --------------------- | ---------------------------------------- | ------------------------------- | ------------------------------- | ---------------- | ---------------------- |\n| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640                 | 27.6                 | 23.4                  | 80:17                                    | **62.7**                        | **1.2**                         | **2.0**          | **7.1**                |\n| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640                 | 37.6                 | 31.7                  | 88:16                                    | 173.3                           | 1.4                             | 7.6              | 26.4                   |\n| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640                 | 45.0                 | 37.1                  | 108:36                                   | 427.0                           | 2.2                             | 22.0             | 70.8                   |\n| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640                 | 49.0                 | 39.9                  | 66:43 (2x)                               | 857.4                           | 2.9                             | 47.9             | 147.7                  |\n| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640                 | **50.7**             | **41.4**              | 62:56 (3x)                               | 1579.2                          | 4.5                             | 88.8             | 265.7                  |\n\n- 所有预训练权重均使用 SGD 优化器，`lr0=0.01` 和 `weight_decay=5e-5`，在 640 像素的图像大小下，使用默认设置训练了 300 个周期。<br>训练运行记录在 [https://wandb.ai/glenn-jocher/YOLOv5_v70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official)。\n- **准确度**值表示在 COCO 数据集上的单模型、单尺度性能。<br>复现请使用：`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`\n- **速度**指标是在 [Colab Pro A100 High-RAM 实例](https://colab.research.google.com/signup)上对 100 张推理图像进行平均测量的。值仅表示推理速度（NMS 约增加 1 毫秒/图像）。<br>复现请使用：`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`\n- **导出**到 ONNX (FP32) 和 TensorRT (FP16) 是使用 `export.py` 完成的。<br>复现请使用：`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`\n\n</details>\n\n<details>\n  <summary>分割使用示例 &nbsp;<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"在 Colab 中打开\"></a></summary>\n\n### 训练\n\nYOLOv5 分割训练支持通过 `--data coco128-seg.yaml` 参数自动下载 [COCO128-seg 数据集](https://docs.ultralytics.com/datasets/segment/coco8-seg/)。对于完整的 [COCO-segments 数据集](https://docs.ultralytics.com/datasets/segment/coco/)，请使用 `bash data/scripts/get_coco.sh --train --val --segments` 手动下载，然后使用 `python train.py --data coco.yaml` 进行训练。\n\n```bash\n# 在单个 GPU 上训练\npython segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640\n\n# 使用多 GPU 分布式数据并行 (DDP) 进行训练\npython -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3\n```\n\n### 验证\n\n在 COCO 
数据集上验证 YOLOv5s-seg 的掩码[平均精度均值 (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map)：\n\n```bash\n# 下载 COCO 验证分割集 (780MB, 5000 张图像)\nbash data/scripts/get_coco.sh --val --segments\n\n# 验证模型\npython segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640\n```\n\n### 预测\n\n使用预训练的 YOLOv5m-seg.pt 模型对 `bus.jpg` 执行分割：\n\n```bash\n# 运行预测\npython segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg\n```\n\n```python\n# 从 PyTorch Hub 加载模型（注意：推理支持可能有所不同）\nmodel = torch.hub.load(\"ultralytics/yolov5\", \"custom\", \"yolov5m-seg.pt\")\n```\n\n| ![Zidane 分割示例](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![Bus 分割示例](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |\n| :-----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: |\n\n### 导出\n\n将 YOLOv5s-seg 模型导出为 ONNX 和 TensorRT 格式：\n\n```bash\n# 导出模型\npython export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0\n```\n\n</details>\n\n## 🏷️ 分类\n\nYOLOv5 [v6.2 版本](https://github.com/ultralytics/yolov5/releases/v6.2) 引入了对[图像分类](https://docs.ultralytics.com/tasks/classify/)模型训练、验证和部署的支持。请查看[发布说明](https://github.com/ultralytics/yolov5/releases/v6.2)了解详细信息，并参阅 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)获取快速入门指南。\n\n<details>\n  <summary>分类预训练权重</summary>\n\n<br>\n\nYOLOv5-cls 分类模型在 [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) 上使用 4xA100 实例训练了 90 个周期。[ResNet](https://arxiv.org/abs/1512.03385) 和 [EfficientNet](https://arxiv.org/abs/1905.11946) 模型在相同设置下一起训练以进行比较。模型导出为 [ONNX](https://onnx.ai/) FP32（用于 CPU 速度测试）和 [TensorRT](https://developer.nvidia.com/tensorrt) FP16（用于 GPU 速度测试）。所有速度测试均在 Google [Colab Pro](https://colab.research.google.com/signup) 上运行，以确保可复现性。\n\n| 模型                                                                                               | 尺寸<br><sup>(像素) | 准确率<br><sup>top1 | 准确率<br><sup>top5 | 训练<br><sup>90 周期<br>4xA100 (小时) | 速度<br><sup>ONNX CPU<br>(毫秒) | 速度<br><sup>TensorRT V100<br>(毫秒) | 参数<br><sup>(M) | FLOPs<br><sup>@224 (B) |\n| -------------------------------------------------------------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------------------------- | ------------------------------- | ------------------------------------ | ---------------- | ---------------------- |\n| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt)         | 224                 | 64.6                | 85.4                | 7:59                                  | **3.3**                         | **0.5**                              | **2.5**          | **0.5**                |\n| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt)         | 224                 | 71.5                | 90.2                | 8:09                                  | 6.6                             | 0.6                                  | 5.4              | 1.4                    |\n| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt)         | 224                 | 75.9                | 92.9                | 10:06                                 | 
15.5                            | 0.9                                  | 12.9             | 3.9                    |\n| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt)         | 224                 | 78.0                | 94.0                | 11:56                                 | 26.9                            | 1.4                                  | 26.5             | 8.5                    |\n| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt)         | 224                 | **79.0**            | **94.4**            | 15:04                                 | 54.3                            | 1.8                                  | 48.1             | 15.9                   |\n|                                                                                                    |                     |                     |                     |                                       |                                 |                                      |                  |                        |\n| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt)               | 224                 | 70.3                | 89.5                | **6:47**                              | 11.2                            | 0.5                                  | 11.7             | 3.7                    |\n| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt)               | 224                 | 73.9                | 91.8                | 8:33                                  | 20.6                            | 0.9                                  | 21.8             | 7.4                    |\n| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt)               | 224                 | 76.8                | 93.4                | 11:10                                 | 23.4                            | 1.0                                  | 25.6             | 8.5                    |\n| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt)             | 224                 | 78.5                | 94.3                | 17:10                                 | 42.1                            | 1.9                                  | 44.5             | 15.9                   |\n|                                                                                                    |                     |                     |                     |                                       |                                 |                                      |                  |                        |\n| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224                 | 75.1                | 92.4                | 13:03                                 | 12.5                            | 1.3                                  | 5.3              | 1.0                    |\n| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224                 | 76.4                | 93.2                | 17:04                                 | 14.9                            | 1.6                                  | 7.8              | 1.5                    |\n| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224                 | 76.6                | 93.4                
| 17:10                                 | 15.9                            | 1.6                                  | 9.1              | 1.7                    |\n| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224                 | 77.7                | 94.0                | 19:19                                 | 18.9                            | 1.9                                  | 12.2             | 2.4                    |\n\n<details>\n  <summary>表格说明（点击展开）</summary>\n\n- 所有预训练权重均使用 SGD 优化器，`lr0=0.001` 和 `weight_decay=5e-5`，在 224 像素的图像大小下，使用默认设置训练了 90 个周期。<br>训练运行记录在 [https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2)。\n- **准确度**值（top-1 和 top-5）表示在 [ImageNet-1k 数据集](https://docs.ultralytics.com/datasets/classify/imagenet/)上的单模型、单尺度性能。<br>复现请使用：`python classify/val.py --data ../datasets/imagenet --img 224`\n- **速度**指标是在 Google [Colab Pro V100 High-RAM 实例](https://colab.research.google.com/signup)上对 100 张推理图像进行平均测量的。<br>复现请使用：`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`\n- **导出**到 ONNX (FP32) 和 TensorRT (FP16) 是使用 `export.py` 完成的。<br>复现请使用：`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`\n\n</details>\n</details>\n\n<details>\n  <summary>分类使用示例 &nbsp;<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"在 Colab 中打开\"></a></summary>\n\n### 训练\n\nYOLOv5 分类训练支持使用 `--data` 参数自动下载诸如 [MNIST](https://docs.ultralytics.com/datasets/classify/mnist/)、[Fashion-MNIST](https://docs.ultralytics.com/datasets/classify/fashion-mnist/)、[CIFAR10](https://docs.ultralytics.com/datasets/classify/cifar10/)、[CIFAR100](https://docs.ultralytics.com/datasets/classify/cifar100/)、[Imagenette](https://docs.ultralytics.com/datasets/classify/imagenette/)、[Imagewoof](https://docs.ultralytics.com/datasets/classify/imagewoof/) 和 [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) 等数据集。例如，使用 `--data mnist` 开始在 MNIST 上训练。\n\n```bash\n# 使用 CIFAR-100 数据集在单个 GPU 上训练\npython classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128\n\n# 在 ImageNet 数据集上使用多 GPU DDP 进行训练\npython -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3\n```\n\n### 验证\n\n在 ImageNet-1k 验证数据集上验证 YOLOv5m-cls 模型的准确性：\n\n```bash\n# 下载 ImageNet 验证集 (6.3GB, 50,000 张图像)\nbash data/scripts/get_imagenet.sh --val\n\n# 验证模型\npython classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224\n```\n\n### 预测\n\n使用预训练的 YOLOv5s-cls.pt 模型对图像 `bus.jpg` 进行分类：\n\n```bash\n# 运行预测\npython classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg\n```\n\n```python\n# 从 PyTorch Hub 加载模型\nmodel = torch.hub.load(\"ultralytics/yolov5\", \"custom\", \"yolov5s-cls.pt\")\n```\n\n### 导出\n\n将训练好的 YOLOv5s-cls、ResNet50 和 EfficientNet_b0 模型导出为 ONNX 和 TensorRT 格式：\n\n```bash\n# 导出模型\npython export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224\n```\n\n</details>\n\n## ☁️ 环境\n\n使用我们预配置的环境快速开始。点击下面的图标查看设置详情。\n\n<div align=\"center\">\n  <a href=\"https://bit.ly/yolov5-paperspace-notebook\" title=\"在 Paperspace Gradient 上运行\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gradient.png\" width=\"10%\" /></a>\n  <img 
src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" title=\"在 Google Colab 中打开\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-colab-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://www.kaggle.com/models/ultralytics/yolov5\" title=\"在 Kaggle 中打开\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-kaggle-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://hub.docker.com/r/ultralytics/yolov5\" title=\"拉取 Docker 镜像\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-docker-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/\" title=\"AWS 快速入门指南\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-aws-small.png\" width=\"10%\" /></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"5%\" alt=\"\" />\n  <a href=\"https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/\" title=\"GCP 快速入门指南\">\n    <img src=\"https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gcp-small.png\" width=\"10%\" /></a>\n</div>\n\n## 🤝 贡献\n\n我们欢迎您的贡献！让 YOLOv5 变得易于访问和有效是社区的共同努力。请参阅我们的[贡献指南](https://docs.ultralytics.com/help/contributing/)开始。通过 [YOLOv5 调查](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)分享您的反馈。感谢所有为使 YOLOv5 变得更好而做出贡献的人！\n\n[![Ultralytics 开源贡献者](https://raw.githubusercontent.com/ultralytics/assets/main/im/image-contributors.png)](https://github.com/ultralytics/yolov5/graphs/contributors)\n\n## 📜 许可证\n\nUltralytics 提供两种许可选项以满足不同需求：\n\n- **AGPL-3.0 许可证**：一种 [OSI 批准的](https://opensource.org/license/agpl-v3)开源许可证，非常适合学术研究、个人项目和测试。它促进开放协作和知识共享。详情请参阅 [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件。\n- **企业许可证**：专为商业应用量身定制，此许可证允许将 Ultralytics 软件和 AI 模型无缝集成到商业产品和服务中，绕过 AGPL-3.0 的开源要求。对于商业用例，请通过 [Ultralytics 授权许可](https://www.ultralytics.com/license)联系我们。\n\n## 📧 联系\n\n对于与 YOLOv5 相关的错误报告和功能请求，请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues)。对于一般问题、讨论和社区支持，请加入我们的 [Discord 服务器](https://discord.com/invite/ultralytics)！\n\n<br>\n<div align=\"center\">\n  <a href=\"https://github.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png\" width=\"3%\" alt=\"Ultralytics GitHub\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://www.linkedin.com/company/ultralytics/\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png\" width=\"3%\" alt=\"Ultralytics LinkedIn\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://twitter.com/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png\" width=\"3%\" alt=\"Ultralytics 
Twitter\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://youtube.com/ultralytics?sub_confirmation=1\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png\" width=\"3%\" alt=\"Ultralytics YouTube\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://www.tiktok.com/@ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png\" width=\"3%\" alt=\"Ultralytics TikTok\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://ultralytics.com/bilibili\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png\" width=\"3%\" alt=\"Ultralytics BiliBili\"></a>\n  <img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png\" width=\"3%\" alt=\"space\">\n  <a href=\"https://discord.com/invite/ultralytics\"><img src=\"https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png\" width=\"3%\" alt=\"Ultralytics Discord\"></a>\n</div>\n"
  },
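The Predict example in the README above loads `yolov5s-cls.pt` from PyTorch Hub but stops before actually running inference. Below is a minimal sketch of classifying one image with that Hub-loaded model; the resize/normalize pipeline only approximates the repository's `classify_transforms` preprocessing (defined in `utils/augmentations.py`), and `bus.jpg` is a placeholder path.

```python
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

# Load the pretrained classification checkpoint from PyTorch Hub
# (downloads yolov5s-cls.pt on first use, as in the README's Predict section)
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")

# Approximate 224-pixel preprocessing: resize, center-crop and ImageNet normalization
preprocess = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)

im = preprocess(Image.open("bus.jpg")).unsqueeze(0)  # BCHW tensor of shape (1, 3, 224, 224)
with torch.no_grad():
    prob = F.softmax(model(im), dim=1)[0]  # class probabilities over the 1000 ImageNet classes

top5 = prob.argsort(descending=True)[:5]  # indices of the five most likely classes
print([(model.names[int(j)], round(float(prob[j]), 3)) for j in top5])
```

The softmax over the raw logits and the top-5 printout mirror what `classify/predict.py` does internally.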
  {
    "path": "benchmarks.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 benchmarks on all supported export formats.\n\nFormat                      | `export.py --include`         | Model\n---                         | ---                           | ---\nPyTorch                     | -                             | yolov5s.pt\nTorchScript                 | `torchscript`                 | yolov5s.torchscript\nONNX                        | `onnx`                        | yolov5s.onnx\nOpenVINO                    | `openvino`                    | yolov5s_openvino_model/\nTensorRT                    | `engine`                      | yolov5s.engine\nCoreML                      | `coreml`                      | yolov5s.mlpackage\nTensorFlow SavedModel       | `saved_model`                 | yolov5s_saved_model/\nTensorFlow GraphDef         | `pb`                          | yolov5s.pb\nTensorFlow Lite             | `tflite`                      | yolov5s.tflite\nTensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite\nTensorFlow.js               | `tfjs`                        | yolov5s_web_model/\n\nRequirements:\n    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU\n    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU\n    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT\n\nUsage:\n    $ python benchmarks.py --weights yolov5s.pt --img 640\n\"\"\"\n\nimport argparse\nimport platform\nimport sys\nimport time\nfrom pathlib import Path\n\nimport pandas as pd\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n# ROOT = ROOT.relative_to(Path.cwd())  # relative\n\nimport export\nfrom models.experimental import attempt_load\nfrom models.yolo import SegmentationModel\nfrom segment.val import run as val_seg\nfrom utils import notebook_init\nfrom utils.general import LOGGER, check_yaml, file_size, print_args\nfrom utils.torch_utils import select_device\nfrom val import run as val_det\n\n\ndef run(\n    weights=ROOT / \"yolov5s.pt\",  # weights path\n    imgsz=640,  # inference size (pixels)\n    batch_size=1,  # batch size\n    data=ROOT / \"data/coco128.yaml\",  # dataset.yaml path\n    device=\"\",  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n    half=False,  # use FP16 half-precision inference\n    test=False,  # test exports only\n    pt_only=False,  # test PyTorch only\n    hard_fail=False,  # throw error on benchmark failure\n):\n    \"\"\"Run YOLOv5 benchmarks on multiple export formats and log results for model performance evaluation.\n\n    Args:\n        weights (Path | str): Path to the model weights file (default: ROOT / \"yolov5s.pt\").\n        imgsz (int): Inference size in pixels (default: 640).\n        batch_size (int): Batch size for inference (default: 1).\n        data (Path | str): Path to the dataset.yaml file (default: ROOT / \"data/coco128.yaml\").\n        device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: \"\").\n        half (bool): Use FP16 half-precision inference (default: False).\n        test (bool): Test export formats only (default: False).\n        pt_only (bool): Test PyTorch format only (default: False).\n        hard_fail (bool): Throw an error on benchmark failure if True (default: False).\n\n    Returns:\n        None. Logs information about the benchmark results, including the format, size, mAP50-95, and inference time.\n\n    Examples:\n        ```python\n        $ python benchmarks.py --weights yolov5s.pt --img 640\n        ```\n\n        Install required packages:\n          $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU support\n          $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow   # GPU support\n          $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT\n\n        Run benchmarks:\n          $ python benchmarks.py --weights yolov5s.pt --img 640\n\n    Notes:\n        Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML,\n            TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. 
Edge TPU and TF.js\n            are unsupported.\n    \"\"\"\n    y, t = [], time.time()\n    device = select_device(device)\n    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.\n    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)\n        try:\n            assert i not in (9, 10), \"inference not supported\"  # Edge TPU and TF.js are unsupported\n            assert i != 5 or platform.system() == \"Darwin\", \"inference only supported on macOS>=10.13\"  # CoreML\n            if \"cpu\" in device.type:\n                assert cpu, \"inference not supported on CPU\"\n            if \"cuda\" in device.type:\n                assert gpu, \"inference not supported on GPU\"\n\n            # Export\n            if f == \"-\":\n                w = weights  # PyTorch format\n            else:\n                w = export.run(\n                    weights=weights, imgsz=[imgsz], include=[f], batch_size=batch_size, device=device, half=half\n                )[-1]  # all others\n            assert suffix in str(w), \"export failed\"\n\n            # Validate\n            if model_type == SegmentationModel:\n                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task=\"speed\", half=half)\n                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))\n            else:  # DetectionModel:\n                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task=\"speed\", half=half)\n                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))\n            speed = result[2][1]  # times (preprocess, inference, postprocess)\n            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference\n        except Exception as e:\n            if hard_fail:\n                assert type(e) is AssertionError, f\"Benchmark --hard-fail for {name}: {e}\"\n            LOGGER.warning(f\"WARNING ⚠️ Benchmark failure for {name}: {e}\")\n            y.append([name, None, None, None])  # mAP, t_inference\n        if pt_only and i == 0:\n            break  # break after PyTorch\n\n    # Print results\n    LOGGER.info(\"\\n\")\n    parse_opt()\n    notebook_init()  # print system info\n    c = [\"Format\", \"Size (MB)\", \"mAP50-95\", \"Inference time (ms)\"] if map else [\"Format\", \"Export\", \"\", \"\"]\n    py = pd.DataFrame(y, columns=c)\n    LOGGER.info(f\"\\nBenchmarks complete ({time.time() - t:.2f}s)\")\n    LOGGER.info(str(py if map else py.iloc[:, :2]))\n    if hard_fail and isinstance(hard_fail, str):\n        metrics = py[\"mAP50-95\"].array  # values to compare to floor\n        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n\n        assert all(x > floor for x in metrics if pd.notna(x)), f\"HARD FAIL: mAP50-95 < floor {floor}\"\n    return py\n\n\ndef test(\n    weights=ROOT / \"yolov5s.pt\",  # weights path\n    imgsz=640,  # inference size (pixels)\n    batch_size=1,  # batch size\n    data=ROOT / \"data/coco128.yaml\",  # dataset.yaml path\n    device=\"\",  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n    half=False,  # use FP16 half-precision inference\n    test=False,  # test exports only\n    pt_only=False,  # test PyTorch only\n    hard_fail=False,  # throw error on benchmark failure\n):\n    \"\"\"Run YOLOv5 export tests for all supported formats and log the results, including export statuses.\n\n    Args:\n        weights (Path | str): Path to the model weights file (.pt format). Default is 'ROOT / \"yolov5s.pt\"'.\n        imgsz (int): Inference image size (in pixels). Default is 640.\n        batch_size (int): Batch size for testing. Default is 1.\n        data (Path | str): Path to the dataset configuration file (.yaml format). Default is 'ROOT /\n            \"data/coco128.yaml\"'.\n        device (str): Device for running the tests, can be 'cpu' or a specific CUDA device ('0', '0,1,2,3', etc.).\n            Default is an empty string.\n        half (bool): Use FP16 half-precision for inference if True. Default is False.\n        test (bool): Test export formats only without running inference. Default is False.\n        pt_only (bool): Test only the PyTorch model if True. Default is False.\n        hard_fail (bool): Raise error on export or test failure if True. Default is False.\n\n    Returns:\n        pd.DataFrame: DataFrame containing the results of the export tests, including format names and export statuses.\n\n    Examples:\n        ```python\n        $ python benchmarks.py --weights yolov5s.pt --img 640\n        ```\n\n        Install required packages:\n            $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU support\n            $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow   # GPU support\n            $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT\n        Run export tests:\n            $ python benchmarks.py --weights yolov5s.pt --img 640\n\n    Notes:\n        Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow\n        SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported.\n    \"\"\"\n    y, t = [], time.time()\n    device = select_device(device)\n    for i, (name, f, suffix, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)\n        try:\n            w = (\n                weights\n                if f == \"-\"\n                else export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]\n            )  # weights\n            assert suffix in str(w), \"export failed\"\n            y.append([name, True])\n        except Exception:\n            y.append([name, False])  # mAP, t_inference\n\n    # Print results\n    LOGGER.info(\"\\n\")\n    parse_opt()\n    notebook_init()  # print system info\n    py = pd.DataFrame(y, columns=[\"Format\", \"Export\"])\n    LOGGER.info(f\"\\nExports complete ({time.time() - t:.2f}s)\")\n    LOGGER.info(str(py))\n    return py\n\n\ndef parse_opt():\n    \"\"\"Parses command-line arguments for YOLOv5 model inference configuration.\n\n    Args:\n        weights (str): The path to the weights file. Defaults to 'ROOT / \"yolov5s.pt\"'.\n        imgsz (int): Inference size in pixels. Defaults to 640.\n        batch_size (int): Batch size. Defaults to 1.\n        data (str): Path to the dataset YAML file. 
Defaults to 'ROOT / \"data/coco128.yaml\"'.\n        device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'. Defaults to an empty string (auto-select).\n        half (bool): Use FP16 half-precision inference. This is a flag and defaults to False.\n        test (bool): Test exports only. This is a flag and defaults to False.\n        pt_only (bool): Test PyTorch only. This is a flag and defaults to False.\n        hard_fail (bool | str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum\n            metric floor, e.g., '0.29'. Defaults to False.\n\n    Returns:\n        argparse.Namespace: Parsed command-line arguments encapsulated in an argparse Namespace object.\n\n    Notes:\n        The function modifies the 'opt.data' by checking and validating the YAML path using 'check_yaml()'.\n        The parsed arguments are printed for reference using 'print_args()'.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", type=str, default=ROOT / \"yolov5s.pt\", help=\"weights path\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=640, help=\"inference size (pixels)\")\n    parser.add_argument(\"--batch-size\", type=int, default=1, help=\"batch size\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--test\", action=\"store_true\", help=\"test exports only\")\n    parser.add_argument(\"--pt-only\", action=\"store_true\", help=\"test PyTorch only\")\n    parser.add_argument(\"--hard-fail\", nargs=\"?\", const=True, default=False, help=\"Exception on error or < min metric\")\n    opt = parser.parse_args()\n    opt.data = check_yaml(opt.data)  # check YAML\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 benchmark tests or main training/inference routines based on the provided command-line arguments.\n\n    Args:\n        opt (argparse.Namespace): Parsed command-line arguments including options for weights, image size, batch size,\n            data configuration, device, and other flags for inference settings.\n\n    Returns:\n        None: This function does not return any value. It leverages side-effects such as logging and running benchmarks.\n\n    Examples:\n        ```python\n        if __name__ == \"__main__\":\n            opt = parse_opt()\n            main(opt)\n        ```\n\n    Notes:\n        - For a complete list of supported export formats and their respective requirements, refer to the\n          [Ultralytics YOLOv5 Export Formats](https://github.com/ultralytics/yolov5#export-formats).\n        - Ensure that you have installed all necessary dependencies by following the installation instructions detailed in\n          the [main repository](https://github.com/ultralytics/yolov5#installation).\n\n        ```shell\n        # Running benchmarks on default weights and image size\n        $ python benchmarks.py --weights yolov5s.pt --img 640\n        ```\n    \"\"\"\n    test(**vars(opt)) if opt.test else run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
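The `run()` and `test()` functions in `benchmarks.py` above are normally driven through the CLI, but both return a pandas DataFrame and can be called directly. The sketch below shows programmatic use; it assumes the snippet is saved in (and run from) the YOLOv5 repository root so `benchmarks` and its relative imports resolve, and the `0.25` mAP floor is purely illustrative.

```python
# Minimal sketch, assuming this file lives in the YOLOv5 repository root with
# requirements.txt (plus any export backends being benchmarked) installed.
import benchmarks

# Benchmark only the native PyTorch weights; pt_only=True stops after the first format.
# Note that run() re-invokes parse_opt() for logging, so avoid passing stray CLI flags.
df = benchmarks.run(weights="yolov5s.pt", imgsz=640, batch_size=1, device="cpu", pt_only=True)
print(df)  # columns: Format, Size (MB), mAP50-95, Inference time (ms)

# Full sweep across export formats, raising a hard failure if mAP50-95 drops below an
# illustrative floor of 0.25 (a hard_fail string is evaluated as the minimum metric floor)
benchmarks.run(weights="yolov5s.pt", imgsz=640, hard_fail="0.25")
```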
  {
    "path": "classify/predict.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.\n\nUsage - sources:\n    $ python classify/predict.py --weights yolov5s-cls.pt --source 0                               # webcam\n                                                                   img.jpg                         # image\n                                                                   vid.mp4                         # video\n                                                                   screen                          # screenshot\n                                                                   path/                           # directory\n                                                                   list.txt                        # list of images\n                                                                   list.streams                    # list of streams\n                                                                   'path/*.jpg'                    # glob\n                                                                   'https://youtu.be/LNwODJXcvt4'  # YouTube\n                                                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n\nUsage - formats:\n    $ python classify/predict.py --weights yolov5s-cls.pt                 # PyTorch\n                                           yolov5s-cls.torchscript        # TorchScript\n                                           yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                                           yolov5s-cls_openvino_model     # OpenVINO\n                                           yolov5s-cls.engine             # TensorRT\n                                           yolov5s-cls.mlmodel            # CoreML (macOS-only)\n                                           yolov5s-cls_saved_model        # TensorFlow SavedModel\n                                           yolov5s-cls.pb                 # TensorFlow GraphDef\n                                           yolov5s-cls.tflite             # TensorFlow Lite\n                                           yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU\n                                           yolov5s-cls_paddle_model       # PaddlePaddle\n\"\"\"\n\nimport argparse\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom ultralytics.utils.plotting import Annotator\n\nfrom models.common import DetectMultiBackend\nfrom utils.augmentations import classify_transforms\nfrom utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams\nfrom utils.general import (\n    LOGGER,\n    Profile,\n    check_file,\n    check_img_size,\n    check_imshow,\n    check_requirements,\n    colorstr,\n    cv2,\n    increment_path,\n    print_args,\n    strip_optimizer,\n)\nfrom utils.torch_utils import select_device, smart_inference_mode\n\n\n@smart_inference_mode()\ndef run(\n    weights=ROOT / \"yolov5s-cls.pt\",  # model.pt path(s)\n    source=ROOT / \"data/images\",  # file/dir/URL/glob/screen/0(webcam)\n    data=ROOT / \"data/coco128.yaml\",  # dataset.yaml path\n    
imgsz=(224, 224),  # inference size (height, width)\n    device=\"\",  # cuda device, i.e. 0 or 0,1,2,3 or cpu\n    view_img=False,  # show results\n    save_txt=False,  # save results to *.txt\n    nosave=False,  # do not save images/videos\n    augment=False,  # augmented inference\n    visualize=False,  # visualize features\n    update=False,  # update all models\n    project=ROOT / \"runs/predict-cls\",  # save results to project/name\n    name=\"exp\",  # save results to project/name\n    exist_ok=False,  # existing project/name ok, do not increment\n    half=False,  # use FP16 half-precision inference\n    dnn=False,  # use OpenCV DNN for ONNX inference\n    vid_stride=1,  # video frame-rate stride\n):\n    \"\"\"Conducts YOLOv5 classification inference on diverse input sources and saves results.\"\"\"\n    source = str(source)\n    save_img = not nosave and not source.endswith(\".txt\")  # save inference images\n    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)\n    is_url = source.lower().startswith((\"rtsp://\", \"rtmp://\", \"http://\", \"https://\"))\n    webcam = source.isnumeric() or source.endswith(\".streams\") or (is_url and not is_file)\n    screenshot = source.lower().startswith(\"screen\")\n    if is_url and is_file:\n        source = check_file(source)  # download\n\n    # Directories\n    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run\n    (save_dir / \"labels\" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir\n\n    # Load model\n    device = select_device(device)\n    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n    stride, names, pt = model.stride, model.names, model.pt\n    imgsz = check_img_size(imgsz, s=stride)  # check image size\n\n    # Dataloader\n    bs = 1  # batch_size\n    if webcam:\n        view_img = check_imshow(warn=True)\n        dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)\n        bs = len(dataset)\n    elif screenshot:\n        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)\n    else:\n        dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)\n    vid_path, vid_writer = [None] * bs, [None] * bs\n\n    # Run inference\n    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup\n    seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))\n    for path, im, im0s, vid_cap, s in dataset:\n        with dt[0]:\n            im = torch.Tensor(im).to(model.device)\n            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32\n            if len(im.shape) == 3:\n                im = im[None]  # expand for batch dim\n\n        # Inference\n        with dt[1]:\n            results = model(im)\n\n        # Post-process\n        with dt[2]:\n            pred = F.softmax(results, dim=1)  # probabilities\n\n        # Process predictions\n        for i, prob in enumerate(pred):  # per image\n            seen += 1\n            if webcam:  # batch_size >= 1\n                p, im0, frame = path[i], im0s[i].copy(), dataset.count\n                s += f\"{i}: \"\n            else:\n                p, im0, frame = path, im0s.copy(), getattr(dataset, \"frame\", 0)\n\n            p = Path(p)  # to Path\n            save_path = str(save_dir / p.name)  # im.jpg\n            txt_path = str(save_dir / \"labels\" / p.stem) + (\"\" if dataset.mode 
== \"image\" else f\"_{frame}\")  # im.txt\n\n            s += \"{:g}x{:g} \".format(*im.shape[2:])  # print string\n            annotator = Annotator(im0, example=str(names), pil=True)\n\n            # Print results\n            top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices\n            s += f\"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, \"\n\n            # Write results\n            text = \"\\n\".join(f\"{prob[j]:.2f} {names[j]}\" for j in top5i)\n            if save_img or view_img:  # Add bbox to image\n                annotator.text([32, 32], text, txt_color=(255, 255, 255))\n            if save_txt:  # Write to file\n                with open(f\"{txt_path}.txt\", \"a\") as f:\n                    f.write(text + \"\\n\")\n\n            # Stream results\n            im0 = annotator.result()\n            if view_img:\n                if platform.system() == \"Linux\" and p not in windows:\n                    windows.append(p)\n                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)\n                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])\n                cv2.imshow(str(p), im0)\n                cv2.waitKey(1)  # 1 millisecond\n\n            # Save results (image with detections)\n            if save_img:\n                if dataset.mode == \"image\":\n                    cv2.imwrite(save_path, im0)\n                else:  # 'video' or 'stream'\n                    if vid_path[i] != save_path:  # new video\n                        vid_path[i] = save_path\n                        if isinstance(vid_writer[i], cv2.VideoWriter):\n                            vid_writer[i].release()  # release previous video writer\n                        if vid_cap:  # video\n                            fps = vid_cap.get(cv2.CAP_PROP_FPS)\n                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n                        else:  # stream\n                            fps, w, h = 30, im0.shape[1], im0.shape[0]\n                        save_path = str(Path(save_path).with_suffix(\".mp4\"))  # force *.mp4 suffix on results videos\n                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n                    vid_writer[i].write(im0)\n\n        # Print time (inference-only)\n        LOGGER.info(f\"{s}{dt[1].dt * 1e3:.1f}ms\")\n\n    # Print results\n    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image\n    LOGGER.info(f\"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}\" % t)\n    if save_txt or save_img:\n        s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else \"\"\n        LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n    if update:\n        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)\n\n\ndef parse_opt():\n    \"\"\"Parses command line arguments for YOLOv5 inference settings including model, source, device, and image size.\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s-cls.pt\", help=\"model path(s)\")\n    parser.add_argument(\"--source\", type=str, default=ROOT / \"data/images\", help=\"file/dir/URL/glob/screen/0(webcam)\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / 
\"data/coco128.yaml\", help=\"(optional) dataset.yaml path\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", nargs=\"+\", type=int, default=[224], help=\"inference size h,w\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--view-img\", action=\"store_true\", help=\"show results\")\n    parser.add_argument(\"--save-txt\", action=\"store_true\", help=\"save results to *.txt\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"do not save images/videos\")\n    parser.add_argument(\"--augment\", action=\"store_true\", help=\"augmented inference\")\n    parser.add_argument(\"--visualize\", action=\"store_true\", help=\"visualize features\")\n    parser.add_argument(\"--update\", action=\"store_true\", help=\"update all models\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/predict-cls\", help=\"save results to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save results to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--dnn\", action=\"store_true\", help=\"use OpenCV DNN for ONNX inference\")\n    parser.add_argument(\"--vid-stride\", type=int, default=1, help=\"video frame-rate stride\")\n    opt = parser.parse_args()\n    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 model inference with options for ONNX DNN and video frame-rate stride adjustments.\"\"\"\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"tensorboard\", \"thop\"))\n    run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
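As a complement to the CLI usage shown in the module docstring, `classify/predict.py` can also be invoked from Python by calling its `run()` function with the same parameters. A minimal sketch, assuming the snippet is saved in (and run from) the YOLOv5 repository root so the `classify` package and the relative `ROOT` paths resolve:

```python
from classify import predict

# Classify the bundled sample image with the pretrained checkpoint; the annotated image
# and (with save_txt=True) a top-5 probability text file are written to runs/predict-cls/exp*
predict.run(
    weights="yolov5s-cls.pt",
    source="data/images/bus.jpg",
    imgsz=(224, 224),
    device="cpu",  # or e.g. "0" for the first CUDA device
    save_txt=True,
)
```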
  {
    "path": "classify/train.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTrain a YOLOv5 classifier model on a classification dataset.\n\nUsage - Single-GPU training:\n    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224\n\nUsage - Multi-GPU DDP training:\n    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3\n\nDatasets:           --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'\nYOLOv5-cls models:  --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt\nTorchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html\n\"\"\"\n\nimport argparse\nimport os\nimport subprocess\nimport sys\nimport time\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport torch\nimport torch.distributed as dist\nimport torch.hub as hub\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torchvision\nfrom torch.cuda import amp\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom classify import val as validate\nfrom models.experimental import attempt_load\nfrom models.yolo import ClassificationModel, DetectionModel\nfrom utils.dataloaders import create_classification_dataloader\nfrom utils.general import (\n    DATASETS_DIR,\n    LOGGER,\n    TQDM_BAR_FORMAT,\n    WorkingDirectory,\n    check_git_info,\n    check_git_status,\n    check_requirements,\n    colorstr,\n    download,\n    increment_path,\n    init_seeds,\n    print_args,\n    yaml_save,\n)\nfrom utils.loggers import GenericLogger\nfrom utils.plots import imshow_cls\nfrom utils.torch_utils import (\n    ModelEMA,\n    de_parallel,\n    model_info,\n    reshape_classifier_output,\n    select_device,\n    smart_DDP,\n    smart_optimizer,\n    smartCrossEntropyLoss,\n    torch_distributed_zero_first,\n)\n\nLOCAL_RANK = int(os.getenv(\"LOCAL_RANK\", -1))  # https://pytorch.org/docs/stable/elastic/run.html\nRANK = int(os.getenv(\"RANK\", -1))\nWORLD_SIZE = int(os.getenv(\"WORLD_SIZE\", 1))\nGIT_INFO = check_git_info()\n\n\ndef train(opt, device):\n    \"\"\"Trains a YOLOv5 model, managing datasets, model optimization, logging, and saving checkpoints.\"\"\"\n    init_seeds(opt.seed + 1 + RANK, deterministic=True)\n    save_dir, data, bs, epochs, nw, imgsz, pretrained = (\n        opt.save_dir,\n        Path(opt.data),\n        opt.batch_size,\n        opt.epochs,\n        min(os.cpu_count() - 1, opt.workers),\n        opt.imgsz,\n        str(opt.pretrained).lower() == \"true\",\n    )\n    cuda = device.type != \"cpu\"\n\n    # Directories\n    wdir = save_dir / \"weights\"\n    wdir.mkdir(parents=True, exist_ok=True)  # make dir\n    last, best = wdir / \"last.pt\", wdir / \"best.pt\"\n\n    # Save run settings\n    yaml_save(save_dir / \"opt.yaml\", vars(opt))\n\n    # Logger\n    logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None\n\n    # Download Dataset\n    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):\n        data_dir = data if data.is_dir() else (DATASETS_DIR / data)\n        if not data_dir.is_dir():\n            LOGGER.info(f\"\\nDataset not 
found ⚠️, missing path {data_dir}, attempting download...\")\n            t = time.time()\n            if str(data) == \"imagenet\":\n                subprocess.run([\"bash\", str(ROOT / \"data/scripts/get_imagenet.sh\")], shell=True, check=True)\n            else:\n                url = f\"https://github.com/ultralytics/assets/releases/download/v0.0.0/{data}.zip\"\n                download(url, dir=data_dir.parent)\n            s = f\"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\\n\"\n            LOGGER.info(s)\n\n    # Dataloaders\n    nc = len([x for x in (data_dir / \"train\").glob(\"*\") if x.is_dir()])  # number of classes\n    trainloader = create_classification_dataloader(\n        path=data_dir / \"train\",\n        imgsz=imgsz,\n        batch_size=bs // WORLD_SIZE,\n        augment=True,\n        cache=opt.cache,\n        rank=LOCAL_RANK,\n        workers=nw,\n    )\n\n    test_dir = data_dir / \"test\" if (data_dir / \"test\").exists() else data_dir / \"val\"  # data/test or data/val\n    if RANK in {-1, 0}:\n        testloader = create_classification_dataloader(\n            path=test_dir,\n            imgsz=imgsz,\n            batch_size=bs // WORLD_SIZE * 2,\n            augment=False,\n            cache=opt.cache,\n            rank=-1,\n            workers=nw,\n        )\n\n    # Model\n    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):\n        if Path(opt.model).is_file() or opt.model.endswith(\".pt\"):\n            model = attempt_load(opt.model, device=\"cpu\", fuse=False)\n        elif opt.model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0\n            model = torchvision.models.__dict__[opt.model](weights=\"IMAGENET1K_V1\" if pretrained else None)\n        else:\n            m = hub.list(\"ultralytics/yolov5\")  # + hub.list('pytorch/vision')  # models\n            raise ModuleNotFoundError(f\"--model {opt.model} not found. Available models are: \\n\" + \"\\n\".join(m))\n        if isinstance(model, DetectionModel):\n            LOGGER.warning(\"WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. 
'--model yolov5s-cls.pt'\")\n            model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10)  # convert to classification model\n        reshape_classifier_output(model, nc)  # update class count\n    for m in model.modules():\n        if not pretrained and hasattr(m, \"reset_parameters\"):\n            m.reset_parameters()\n        if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:\n            m.p = opt.dropout  # set dropout\n    for p in model.parameters():\n        p.requires_grad = True  # for training\n    model = model.to(device)\n\n    # Info\n    if RANK in {-1, 0}:\n        model.names = trainloader.dataset.classes  # attach class names\n        model.transforms = testloader.dataset.torch_transforms  # attach inference transforms\n        model_info(model)\n        if opt.verbose:\n            LOGGER.info(model)\n        images, labels = next(iter(trainloader))\n        file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / \"train_images.jpg\")\n        logger.log_images(file, name=\"Train Examples\")\n        logger.log_graph(model, imgsz)  # log model\n\n    # Optimizer\n    optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)\n\n    # Scheduler\n    lrf = 0.01  # final lr (fraction of lr0)\n\n    # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine\n    def lf(x):\n        \"\"\"Linear learning rate scheduler function, scaling learning rate from initial value to `lrf` over `epochs`.\"\"\"\n        return (1 - x / epochs) * (1 - lrf) + lrf  # linear\n\n    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n    # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,\n    #                                    final_div_factor=1 / 25 / lrf)\n\n    # EMA\n    ema = ModelEMA(model) if RANK in {-1, 0} else None\n\n    # DDP mode\n    if cuda and RANK != -1:\n        model = smart_DDP(model)\n\n    # Train\n    t0 = time.time()\n    criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing)  # loss function\n    best_fitness = 0.0\n    scaler = amp.GradScaler(enabled=cuda)\n    val = test_dir.stem  # 'val' or 'test'\n    LOGGER.info(\n        f\"Image sizes {imgsz} train, {imgsz} test\\n\"\n        f\"Using {nw * WORLD_SIZE} dataloader workers\\n\"\n        f\"Logging results to {colorstr('bold', save_dir)}\\n\"\n        f\"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\\n\\n\"\n        f\"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}\"\n    )\n    for epoch in range(epochs):  # loop over the dataset multiple times\n        tloss, vloss, fitness = 0.0, 0.0, 0.0  # train loss, val loss, fitness\n        model.train()\n        if RANK != -1:\n            trainloader.sampler.set_epoch(epoch)\n        pbar = enumerate(trainloader)\n        if RANK in {-1, 0}:\n            pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)\n        for i, (images, labels) in pbar:  # progress bar\n            images, labels = images.to(device, non_blocking=True), labels.to(device)\n\n            # Forward\n            with amp.autocast(enabled=cuda):  # stability issues when enabled\n                loss = criterion(model(images), labels)\n\n            # Backward\n            scaler.scale(loss).backward()\n\n            # Optimize\n            scaler.unscale_(optimizer)  # unscale 
gradients\n            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients\n            scaler.step(optimizer)\n            scaler.update()\n            optimizer.zero_grad()\n            if ema:\n                ema.update(model)\n\n            if RANK in {-1, 0}:\n                # Print\n                tloss = (tloss * i + loss.item()) / (i + 1)  # update mean losses\n                mem = \"%.3gG\" % (torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0)  # (GB)\n                pbar.desc = f\"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}\" + \" \" * 36\n\n                # Test\n                if i == len(pbar) - 1:  # last batch\n                    top1, top5, vloss = validate.run(\n                        model=ema.ema, dataloader=testloader, criterion=criterion, pbar=pbar\n                    )  # test accuracy, loss\n                    fitness = top1  # define fitness as top1 accuracy\n\n        # Scheduler\n        scheduler.step()\n\n        # Log metrics\n        if RANK in {-1, 0}:\n            # Best fitness\n            if fitness > best_fitness:\n                best_fitness = fitness\n\n            # Log\n            metrics = {\n                \"train/loss\": tloss,\n                f\"{val}/loss\": vloss,\n                \"metrics/accuracy_top1\": top1,\n                \"metrics/accuracy_top5\": top5,\n                \"lr/0\": optimizer.param_groups[0][\"lr\"],\n            }  # learning rate\n            logger.log_metrics(metrics, epoch)\n\n            # Save model\n            final_epoch = epoch + 1 == epochs\n            if (not opt.nosave) or final_epoch:\n                ckpt = {\n                    \"epoch\": epoch,\n                    \"best_fitness\": best_fitness,\n                    \"model\": deepcopy(ema.ema).half(),  # deepcopy(de_parallel(model)).half(),\n                    \"ema\": None,  # deepcopy(ema.ema).half(),\n                    \"updates\": ema.updates,\n                    \"optimizer\": None,  # optimizer.state_dict(),\n                    \"opt\": vars(opt),\n                    \"git\": GIT_INFO,  # {remote, branch, commit} if a git repo\n                    \"date\": datetime.now().isoformat(),\n                }\n\n                # Save last, best and delete\n                torch.save(ckpt, last)\n                if best_fitness == fitness:\n                    torch.save(ckpt, best)\n                del ckpt\n\n    # Train complete\n    if RANK in {-1, 0} and final_epoch:\n        LOGGER.info(\n            f\"\\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)\"\n            f\"\\nResults saved to {colorstr('bold', save_dir)}\"\n            f\"\\nPredict:         python classify/predict.py --weights {best} --source im.jpg\"\n            f\"\\nValidate:        python classify/val.py --weights {best} --data {data_dir}\"\n            f\"\\nExport:          python export.py --weights {best} --include onnx\"\n            f\"\\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')\"\n            f\"\\nVisualize:       https://netron.app\\n\"\n        )\n\n        # Plot examples\n        images, labels = (x[:25] for x in next(iter(testloader)))  # first 25 images and labels\n        pred = torch.max(ema.ema(images.to(device)), 1)[1]\n        file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / \"test_images.jpg\")\n\n        # Log results\n        meta = {\"epochs\": epochs, \"top1_acc\": 
best_fitness, \"date\": datetime.now().isoformat()}\n        logger.log_images(file, name=\"Test Examples (true-predicted)\", epoch=epoch)\n        logger.log_model(best, epochs, metadata=meta)\n\n\ndef parse_opt(known=False):\n    \"\"\"Parses command line arguments for YOLOv5 training including model path, dataset, epochs, and more, returning\n    parsed arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--model\", type=str, default=\"yolov5s-cls.pt\", help=\"initial weights path\")\n    parser.add_argument(\"--data\", type=str, default=\"imagenette160\", help=\"cifar10, cifar100, mnist, imagenet, ...\")\n    parser.add_argument(\"--epochs\", type=int, default=10, help=\"total training epochs\")\n    parser.add_argument(\"--batch-size\", type=int, default=64, help=\"total batch size for all GPUs\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=224, help=\"train, val image size (pixels)\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"only save final checkpoint\")\n    parser.add_argument(\"--cache\", type=str, nargs=\"?\", const=\"ram\", help='--cache images in \"ram\" (default) or \"disk\"')\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/train-cls\", help=\"save to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--pretrained\", nargs=\"?\", const=True, default=True, help=\"start from i.e. 
--pretrained False\")\n    parser.add_argument(\"--optimizer\", choices=[\"SGD\", \"Adam\", \"AdamW\", \"RMSProp\"], default=\"Adam\", help=\"optimizer\")\n    parser.add_argument(\"--lr0\", type=float, default=0.001, help=\"initial learning rate\")\n    parser.add_argument(\"--decay\", type=float, default=5e-5, help=\"weight decay\")\n    parser.add_argument(\"--label-smoothing\", type=float, default=0.1, help=\"Label smoothing epsilon\")\n    parser.add_argument(\"--cutoff\", type=int, default=None, help=\"Model layer cutoff index for Classify() head\")\n    parser.add_argument(\"--dropout\", type=float, default=None, help=\"Dropout (fraction)\")\n    parser.add_argument(\"--verbose\", action=\"store_true\", help=\"Verbose mode\")\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"Global training seed\")\n    parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"Automatic DDP Multi-GPU argument, do not modify\")\n    return parser.parse_known_args()[0] if known else parser.parse_args()\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 training with given options, handling device setup and DDP mode; includes pre-training checks.\"\"\"\n    if RANK in {-1, 0}:\n        print_args(vars(opt))\n        check_git_status()\n        check_requirements(ROOT / \"requirements.txt\")\n\n    # DDP mode\n    device = select_device(opt.device, batch_size=opt.batch_size)\n    if LOCAL_RANK != -1:\n        assert opt.batch_size != -1, \"AutoBatch is coming soon for classification, please pass a valid --batch-size\"\n        assert opt.batch_size % WORLD_SIZE == 0, f\"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE\"\n        assert torch.cuda.device_count() > LOCAL_RANK, \"insufficient CUDA devices for DDP command\"\n        torch.cuda.set_device(LOCAL_RANK)\n        device = torch.device(\"cuda\", LOCAL_RANK)\n        dist.init_process_group(backend=\"nccl\" if dist.is_nccl_available() else \"gloo\")\n\n    # Parameters\n    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)  # increment run\n\n    # Train\n    train(opt, device)\n\n\ndef run(**kwargs):\n    \"\"\"Executes YOLOv5 model training or inference with specified parameters, returning updated options.\n\n    Example: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')\n    \"\"\"\n    opt = parse_opt(True)\n    for k, v in kwargs.items():\n        setattr(opt, k, v)\n    main(opt)\n    return opt\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
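The `run(**kwargs)` helper at the bottom of `classify/train.py` is the module's programmatic entry point, as its docstring example (`classify.train.run(data=mnist, imgsz=320, model='yolov5m')`) suggests. The sketch below spells that pattern out; keyword names follow the `parse_opt()` arguments (`--batch-size` becomes `batch_size`), and the dataset and hyperparameter values are illustrative only.

```python
# Minimal sketch, assuming this file lives in the YOLOv5 repository root with
# requirements.txt installed, so that the classify package is importable.
from classify import train

# Fine-tune the small classification checkpoint on CIFAR-100 for a few epochs;
# the dataset is downloaded automatically on first use, as with the --data CLI flag.
opt = train.run(
    model="yolov5s-cls.pt",
    data="cifar100",
    epochs=5,
    imgsz=224,
    batch_size=128,
)
print(opt.save_dir)  # runs/train-cls/exp* containing weights/last.pt and weights/best.pt
```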
  {
    "path": "classify/tutorial.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"t6MPjfT5NrKQ\"\n   },\n   \"source\": [\n    \"<div align=\\\"center\\\">\\n\",\n    \"  <a href=\\\"https://ultralytics.com/yolo\\\" target=\\\"_blank\\\">\\n\",\n    \"    <img width=\\\"1024\\\" src=\\\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\\\">\\n\",\n    \"  </a>\\n\",\n    \"\\n\",\n    \"  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\\n\",\n    \"\\n\",\n    \"  <a href=\\\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\\\"><img src=\\\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\\\" alt=\\\"Ultralytics CI\\\"></a>\\n\",\n    \"  <a href=\\\"https://console.paperspace.com/github/ultralytics/ultralytics\\\"><img src=\\\"https://assets.paperspace.io/img/gradient-badge.svg\\\" alt=\\\"Run on Gradient\\\"/></a>\\n\",\n    \"  <a href=\\\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb\\\"><img src=\\\"https://colab.research.google.com/assets/colab-badge.svg\\\" alt=\\\"Open In Colab\\\"></a>\\n\",\n    \"  <a href=\\\"https://www.kaggle.com/models/ultralytics/yolo11\\\"><img src=\\\"https://kaggle.com/static/images/open-in-kaggle.svg\\\" alt=\\\"Open In Kaggle\\\"></a>\\n\",\n    \"\\n\",\n    \"  <a href=\\\"https://ultralytics.com/discord\\\"><img alt=\\\"Discord\\\" src=\\\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\\\"></a>\\n\",\n    \"  <a href=\\\"https://community.ultralytics.com\\\"><img alt=\\\"Ultralytics Forums\\\" src=\\\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\\\"></a>\\n\",\n    \"  <a href=\\\"https://reddit.com/r/ultralytics\\\"><img alt=\\\"Ultralytics Reddit\\\" src=\\\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\\\"></a>\\n\",\n    \"</div>\\n\",\n    \"\\n\",\n    \"This **Ultralytics YOLOv5 Classification Colab Notebook** is the easiest way to get started with [YOLO models](https://www.ultralytics.com/yolo)—no installation needed. Built by [Ultralytics](https://www.ultralytics.com/), the creators of YOLO, this notebook walks you through running **state-of-the-art** models directly in your browser.\\n\",\n    \"\\n\",\n    \"Ultralytics models are constantly updated for performance and flexibility. They're **fast**, **accurate**, and **easy to use**, and they excel at [object detection](https://docs.ultralytics.com/tasks/detect/), [tracking](https://docs.ultralytics.com/modes/track/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [image classification](https://docs.ultralytics.com/tasks/classify/), and [pose estimation](https://docs.ultralytics.com/tasks/pose/).\\n\",\n    \"\\n\",\n    \"Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). 
Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!\\n\",\n    \"\\n\",\n    \"Request an Enterprise License for commercial use at [Ultralytics Licensing](https://www.ultralytics.com/license).\\n\",\n    \"\\n\",\n    \"<br>\\n\",\n    \"<div>\\n\",\n    \"  <a href=\\\"https://www.youtube.com/watch?v=ZN3nRZT7b24\\\" target=\\\"_blank\\\">\\n\",\n    \"    <img src=\\\"https://img.youtube.com/vi/ZN3nRZT7b24/maxresdefault.jpg\\\" alt=\\\"Ultralytics Video\\\" width=\\\"640\\\" style=\\\"border-radius: 10px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);\\\">\\n\",\n    \"  </a>\\n\",\n    \"\\n\",\n    \"  <p style=\\\"font-size: 16px; font-family: Arial, sans-serif; color: #555;\\\">\\n\",\n    \"    <strong>Watch: </strong> How to Train\\n\",\n    \"    <a href=\\\"https://github.com/ultralytics/ultralytics\\\">Ultralytics</a>\\n\",\n    \"    <a href=\\\"https://docs.ultralytics.com/models/yolo11/\\\">YOLO11</a> Model on Custom Dataset using Google Colab Notebook 🚀\\n\",\n    \"  </p>\\n\",\n    \"</div>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"7mGmQbAO5pQb\"\n   },\n   \"source\": [\n    \"# Setup\\n\",\n    \"\\n\",\n    \"Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"wbvMlHd_QwMG\",\n    \"outputId\": \"0806e375-610d-4ec0-c867-763dbb518279\"\n   },\n   \"source\": [\n    \"!git clone https://github.com/ultralytics/yolov5  # clone\\n\",\n    \"%cd yolov5\\n\",\n    \"%pip install -qr requirements.txt  # install\\n\",\n    \"\\n\",\n    \"import torch\\n\",\n    \"\\n\",\n    \"import utils\\n\",\n    \"\\n\",\n    \"display = utils.notebook_init()  # checks\"\n   ],\n   \"execution_count\": 1,\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"YOLOv5 🚀 v7.0-414-g78daef4b Python-3.12.6 torch-2.6.0 CPU\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Setup complete ✅ (12 CPUs, 24.0 GB RAM, 139.0/460.4 GB disk)\\n\"\n     ]\n    }\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"4JnkELT0cIJg\"\n   },\n   \"source\": [\n    \"# 1. Predict\\n\",\n    \"\\n\",\n    \"`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. 
Example inference sources are:\\n\",\n    \"\\n\",\n    \"```shell\\n\",\n    \"python classify/predict.py --source 0  # webcam\\n\",\n    \"                              img.jpg  # image \\n\",\n    \"                              vid.mp4  # video\\n\",\n    \"                              screen  # screenshot\\n\",\n    \"                              path/  # directory\\n\",\n    \"                              'path/*.jpg'  # glob\\n\",\n    \"                              'https://youtu.be/LNwODJXcvt4'  # YouTube\\n\",\n    \"                              'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"zR9ZbuQCH7FX\",\n    \"outputId\": \"50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1mclassify/predict: \\u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\\n\",\n      \"YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\",\n      \"\\n\",\n      \"Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\\n\",\n      \"100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\\n\",\n      \"\\n\",\n      \"Fusing layers... \\n\",\n      \"Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\\n\",\n      \"image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\\n\",\n      \"image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\\n\",\n      \"Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\\n\",\n      \"Results saved to \\u001b[1mruns/predict-cls/exp\\u001b[0m\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\\n\",\n    \"# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"hkAzDWJ7cWTr\"\n   },\n   \"source\": [\n    \"&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\\n\",\n    \"<img align=\\\"left\\\" src=\\\"https://user-images.githubusercontent.com/26833433/202808393-50deb439-ae1b-4246-a685-7560c9b37211.jpg\\\" width=\\\"600\\\">\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"0eq1SMWl6Sfn\"\n   },\n   \"source\": [\n    \"# 2. Validate\\n\",\n    \"Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). 
To show results by class, use the `--verbose` flag.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"WQPtK1QYVaD_\",\n    \"outputId\": \"20fc0630-141e-4a90-ea06-342cbd7ce496\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"--2022-11-22 19:53:40--  https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\\n\",\n      \"Resolving image-net.org (image-net.org)... 171.64.68.16\\n\",\n      \"Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\\n\",\n      \"HTTP request sent, awaiting response... 200 OK\\n\",\n      \"Length: 6744924160 (6.3G) [application/x-tar]\\n\",\n      \"Saving to: ‘ILSVRC2012_img_val.tar’\\n\",\n      \"\\n\",\n      \"ILSVRC2012_img_val. 100%[===================>]   6.28G  16.1MB/s    in 10m 52s \\n\",\n      \"\\n\",\n      \"2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\\n\",\n      \"\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Download Imagenet val (6.3G, 50000 images)\\n\",\n    \"!bash data/scripts/get_imagenet.sh --val\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"X58w8JLpMnjH\",\n    \"outputId\": \"41843132-98e2-4c25-d474-4cd7b246fb8e\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1mclassify/val: \\u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\\n\",\n      \"YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\",\n      \"\\n\",\n      \"Fusing layers... 
\\n\",\n      \"Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\\n\",\n      \"validating: 100% 391/391 [04:57<00:00,  1.31it/s]\\n\",\n      \"                   Class      Images    top1_acc    top5_acc\\n\",\n      \"                     all       50000       0.715       0.902\\n\",\n      \"                   tench          50        0.94        0.98\\n\",\n      \"                goldfish          50        0.88        0.92\\n\",\n      \"       great white shark          50        0.78        0.96\\n\",\n      \"             tiger shark          50        0.68        0.96\\n\",\n      \"        hammerhead shark          50        0.82        0.92\\n\",\n      \"            electric ray          50        0.76         0.9\\n\",\n      \"                stingray          50         0.7         0.9\\n\",\n      \"                    cock          50        0.78        0.92\\n\",\n      \"                     hen          50        0.84        0.96\\n\",\n      \"                 ostrich          50        0.98           1\\n\",\n      \"               brambling          50         0.9        0.96\\n\",\n      \"               goldfinch          50        0.92        0.98\\n\",\n      \"             house finch          50        0.88        0.96\\n\",\n      \"                   junco          50        0.94        0.98\\n\",\n      \"          indigo bunting          50        0.86        0.88\\n\",\n      \"          American robin          50         0.9        0.96\\n\",\n      \"                  bulbul          50        0.84        0.96\\n\",\n      \"                     jay          50         0.9        0.96\\n\",\n      \"                  magpie          50        0.84        0.96\\n\",\n      \"               chickadee          50         0.9           1\\n\",\n      \"         American dipper          50        0.82        0.92\\n\",\n      \"                    kite          50        0.76        0.94\\n\",\n      \"              bald eagle          50        0.92           1\\n\",\n      \"                 vulture          50        0.96           1\\n\",\n      \"          great grey owl          50        0.94        0.98\\n\",\n      \"         fire salamander          50        0.96        0.98\\n\",\n      \"             smooth newt          50        0.58        0.94\\n\",\n      \"                    newt          50        0.74         0.9\\n\",\n      \"      spotted salamander          50        0.86        0.94\\n\",\n      \"                 axolotl          50        0.86        0.96\\n\",\n      \"       American bullfrog          50        0.78        0.92\\n\",\n      \"               tree frog          50        0.84        0.96\\n\",\n      \"             tailed frog          50        0.48         0.8\\n\",\n      \"   loggerhead sea turtle          50        0.68        0.94\\n\",\n      \"  leatherback sea turtle          50         0.5         0.8\\n\",\n      \"              mud turtle          50        0.64        0.84\\n\",\n      \"                terrapin          50        0.52        0.98\\n\",\n      \"              box turtle          50        0.84        0.98\\n\",\n      \"            banded gecko          50         0.7        0.88\\n\",\n      \"            green iguana          50        0.76        0.94\\n\",\n      \"          Carolina anole          50        0.58        0.96\\n\",\n      \"desert grassland whiptail lizard          50        0.82        0.94\\n\",\n      \"                   agama          
50        0.74        0.92\\n\",\n      \"   frilled-necked lizard          50        0.84        0.86\\n\",\n      \"        alligator lizard          50        0.58        0.78\\n\",\n      \"            Gila monster          50        0.72         0.8\\n\",\n      \"   European green lizard          50        0.42         0.9\\n\",\n      \"               chameleon          50        0.76        0.84\\n\",\n      \"           Komodo dragon          50        0.86        0.96\\n\",\n      \"          Nile crocodile          50         0.7        0.84\\n\",\n      \"      American alligator          50        0.76        0.96\\n\",\n      \"             triceratops          50         0.9        0.94\\n\",\n      \"              worm snake          50        0.76        0.88\\n\",\n      \"       ring-necked snake          50         0.8        0.92\\n\",\n      \" eastern hog-nosed snake          50        0.58        0.88\\n\",\n      \"      smooth green snake          50         0.6        0.94\\n\",\n      \"               kingsnake          50        0.82         0.9\\n\",\n      \"            garter snake          50        0.88        0.94\\n\",\n      \"             water snake          50         0.7        0.94\\n\",\n      \"              vine snake          50        0.66        0.76\\n\",\n      \"             night snake          50        0.34        0.82\\n\",\n      \"         boa constrictor          50         0.8        0.96\\n\",\n      \"     African rock python          50        0.48        0.76\\n\",\n      \"            Indian cobra          50        0.82        0.94\\n\",\n      \"             green mamba          50        0.54        0.86\\n\",\n      \"               sea snake          50        0.62         0.9\\n\",\n      \"    Saharan horned viper          50        0.56        0.86\\n\",\n      \"eastern diamondback rattlesnake          50         0.6        0.86\\n\",\n      \"              sidewinder          50        0.28        0.86\\n\",\n      \"               trilobite          50        0.98        0.98\\n\",\n      \"              harvestman          50        0.86        0.94\\n\",\n      \"                scorpion          50        0.86        0.94\\n\",\n      \"    yellow garden spider          50        0.92        0.96\\n\",\n      \"             barn spider          50        0.38        0.98\\n\",\n      \"  European garden spider          50        0.62        0.98\\n\",\n      \"    southern black widow          50        0.88        0.94\\n\",\n      \"               tarantula          50        0.94           1\\n\",\n      \"             wolf spider          50        0.82        0.92\\n\",\n      \"                    tick          50        0.74        0.84\\n\",\n      \"               centipede          50        0.68        0.82\\n\",\n      \"            black grouse          50        0.88        0.98\\n\",\n      \"               ptarmigan          50        0.78        0.94\\n\",\n      \"           ruffed grouse          50        0.88           1\\n\",\n      \"          prairie grouse          50        0.92           1\\n\",\n      \"                 peacock          50        0.88         0.9\\n\",\n      \"                   quail          50         0.9        0.94\\n\",\n      \"               partridge          50        0.74        0.96\\n\",\n      \"             grey parrot          50         0.9        0.96\\n\",\n      \"                   macaw          50        0.88        0.98\\n\",\n      
\"sulphur-crested cockatoo          50        0.86        0.92\\n\",\n      \"                lorikeet          50        0.96           1\\n\",\n      \"                  coucal          50        0.82        0.88\\n\",\n      \"               bee eater          50        0.96        0.98\\n\",\n      \"                hornbill          50         0.9        0.96\\n\",\n      \"             hummingbird          50        0.88        0.96\\n\",\n      \"                 jacamar          50        0.92        0.94\\n\",\n      \"                  toucan          50        0.84        0.94\\n\",\n      \"                    duck          50        0.76        0.94\\n\",\n      \"  red-breasted merganser          50        0.86        0.96\\n\",\n      \"                   goose          50        0.74        0.96\\n\",\n      \"              black swan          50        0.94        0.98\\n\",\n      \"                  tusker          50        0.54        0.92\\n\",\n      \"                 echidna          50        0.98           1\\n\",\n      \"                platypus          50        0.72        0.84\\n\",\n      \"                 wallaby          50        0.78        0.88\\n\",\n      \"                   koala          50        0.84        0.92\\n\",\n      \"                  wombat          50        0.78        0.84\\n\",\n      \"               jellyfish          50        0.88        0.96\\n\",\n      \"             sea anemone          50        0.72         0.9\\n\",\n      \"             brain coral          50        0.88        0.96\\n\",\n      \"                flatworm          50         0.8        0.98\\n\",\n      \"                nematode          50        0.86         0.9\\n\",\n      \"                   conch          50        0.74        0.88\\n\",\n      \"                   snail          50        0.78        0.88\\n\",\n      \"                    slug          50        0.74        0.82\\n\",\n      \"                sea slug          50        0.88        0.98\\n\",\n      \"                  chiton          50        0.88        0.98\\n\",\n      \"      chambered nautilus          50        0.88        0.92\\n\",\n      \"          Dungeness crab          50        0.78        0.94\\n\",\n      \"               rock crab          50        0.68        0.86\\n\",\n      \"            fiddler crab          50        0.64        0.86\\n\",\n      \"           red king crab          50        0.76        0.96\\n\",\n      \"        American lobster          50        0.78        0.96\\n\",\n      \"           spiny lobster          50        0.74        0.88\\n\",\n      \"                crayfish          50        0.56        0.86\\n\",\n      \"             hermit crab          50        0.78        0.96\\n\",\n      \"                  isopod          50        0.66        0.78\\n\",\n      \"             white stork          50        0.88        0.96\\n\",\n      \"             black stork          50        0.84        0.98\\n\",\n      \"               spoonbill          50        0.96           1\\n\",\n      \"                flamingo          50        0.94           1\\n\",\n      \"       little blue heron          50        0.92        0.98\\n\",\n      \"             great egret          50         0.9        0.96\\n\",\n      \"                 bittern          50        0.86        0.94\\n\",\n      \"            crane (bird)          50        0.62         0.9\\n\",\n      \"                 limpkin          50        0.98         
  1\\n\",\n      \"        common gallinule          50        0.92        0.96\\n\",\n      \"           American coot          50         0.9        0.98\\n\",\n      \"                 bustard          50        0.92        0.96\\n\",\n      \"         ruddy turnstone          50        0.94           1\\n\",\n      \"                  dunlin          50        0.86        0.94\\n\",\n      \"         common redshank          50         0.9        0.96\\n\",\n      \"               dowitcher          50        0.84        0.96\\n\",\n      \"           oystercatcher          50        0.86        0.94\\n\",\n      \"                 pelican          50        0.92        0.96\\n\",\n      \"            king penguin          50        0.88        0.96\\n\",\n      \"               albatross          50         0.9           1\\n\",\n      \"              grey whale          50        0.84        0.92\\n\",\n      \"            killer whale          50        0.92           1\\n\",\n      \"                  dugong          50        0.84        0.96\\n\",\n      \"                sea lion          50        0.82        0.92\\n\",\n      \"               Chihuahua          50        0.66        0.84\\n\",\n      \"           Japanese Chin          50        0.72        0.98\\n\",\n      \"                 Maltese          50        0.76        0.94\\n\",\n      \"               Pekingese          50        0.84        0.94\\n\",\n      \"                Shih Tzu          50        0.74        0.96\\n\",\n      \"    King Charles Spaniel          50        0.88        0.98\\n\",\n      \"                Papillon          50        0.86        0.94\\n\",\n      \"             toy terrier          50        0.48        0.94\\n\",\n      \"     Rhodesian Ridgeback          50        0.76        0.98\\n\",\n      \"            Afghan Hound          50        0.84           1\\n\",\n      \"            Basset Hound          50         0.8        0.92\\n\",\n      \"                  Beagle          50        0.82        0.96\\n\",\n      \"              Bloodhound          50        0.48        0.72\\n\",\n      \"      Bluetick Coonhound          50        0.86        0.94\\n\",\n      \" Black and Tan Coonhound          50        0.54         0.8\\n\",\n      \"Treeing Walker Coonhound          50        0.66        0.98\\n\",\n      \"        English foxhound          50        0.32        0.84\\n\",\n      \"       Redbone Coonhound          50        0.62        0.94\\n\",\n      \"                  borzoi          50        0.92           1\\n\",\n      \"         Irish Wolfhound          50        0.48        0.88\\n\",\n      \"       Italian Greyhound          50        0.76        0.98\\n\",\n      \"                 Whippet          50        0.74        0.92\\n\",\n      \"            Ibizan Hound          50         0.6        0.86\\n\",\n      \"      Norwegian Elkhound          50        0.88        0.98\\n\",\n      \"              Otterhound          50        0.62         0.9\\n\",\n      \"                  Saluki          50        0.72        0.92\\n\",\n      \"      Scottish Deerhound          50        0.86        0.98\\n\",\n      \"              Weimaraner          50        0.88        0.94\\n\",\n      \"Staffordshire Bull Terrier          50        0.66        0.98\\n\",\n      \"American Staffordshire Terrier          50        0.64        0.92\\n\",\n      \"      Bedlington Terrier          50         0.9        0.92\\n\",\n      \"          Border Terrier        
  50        0.86        0.92\\n\",\n      \"      Kerry Blue Terrier          50        0.78        0.98\\n\",\n      \"           Irish Terrier          50         0.7        0.96\\n\",\n      \"         Norfolk Terrier          50        0.68         0.9\\n\",\n      \"         Norwich Terrier          50        0.72           1\\n\",\n      \"       Yorkshire Terrier          50        0.66         0.9\\n\",\n      \"        Wire Fox Terrier          50        0.64        0.98\\n\",\n      \"        Lakeland Terrier          50        0.74        0.92\\n\",\n      \"        Sealyham Terrier          50        0.76         0.9\\n\",\n      \"        Airedale Terrier          50        0.82        0.92\\n\",\n      \"           Cairn Terrier          50        0.76         0.9\\n\",\n      \"      Australian Terrier          50        0.48        0.84\\n\",\n      \"  Dandie Dinmont Terrier          50        0.82        0.92\\n\",\n      \"          Boston Terrier          50        0.92           1\\n\",\n      \"     Miniature Schnauzer          50        0.68         0.9\\n\",\n      \"         Giant Schnauzer          50        0.72        0.98\\n\",\n      \"      Standard Schnauzer          50        0.74           1\\n\",\n      \"        Scottish Terrier          50        0.76        0.96\\n\",\n      \"         Tibetan Terrier          50        0.48           1\\n\",\n      \"Australian Silky Terrier          50        0.66        0.96\\n\",\n      \"Soft-coated Wheaten Terrier          50        0.74        0.96\\n\",\n      \"West Highland White Terrier          50        0.88        0.96\\n\",\n      \"              Lhasa Apso          50        0.68        0.96\\n\",\n      \"   Flat-Coated Retriever          50        0.72        0.94\\n\",\n      \"  Curly-coated Retriever          50        0.82        0.94\\n\",\n      \"        Golden Retriever          50        0.86        0.94\\n\",\n      \"      Labrador Retriever          50        0.82        0.94\\n\",\n      \"Chesapeake Bay Retriever          50        0.76        0.96\\n\",\n      \"German Shorthaired Pointer          50         0.8        0.96\\n\",\n      \"                  Vizsla          50        0.68        0.96\\n\",\n      \"          English Setter          50         0.7           1\\n\",\n      \"            Irish Setter          50         0.8         0.9\\n\",\n      \"           Gordon Setter          50        0.84        0.92\\n\",\n      \"                Brittany          50        0.84        0.96\\n\",\n      \"         Clumber Spaniel          50        0.92        0.96\\n\",\n      \"English Springer Spaniel          50        0.88           1\\n\",\n      \"  Welsh Springer Spaniel          50        0.92           1\\n\",\n      \"         Cocker Spaniels          50         0.7        0.94\\n\",\n      \"          Sussex Spaniel          50        0.72        0.92\\n\",\n      \"     Irish Water Spaniel          50        0.88        0.98\\n\",\n      \"                  Kuvasz          50        0.66         0.9\\n\",\n      \"              Schipperke          50         0.9        0.98\\n\",\n      \"             Groenendael          50         0.8        0.94\\n\",\n      \"                Malinois          50        0.86        0.98\\n\",\n      \"                  Briard          50        0.52         0.8\\n\",\n      \"       Australian Kelpie          50         0.6        0.88\\n\",\n      \"                Komondor          50        0.88        0.94\\n\",\n      \"    
Old English Sheepdog          50        0.94        0.98\\n\",\n      \"       Shetland Sheepdog          50        0.74         0.9\\n\",\n      \"                  collie          50         0.6        0.96\\n\",\n      \"           Border Collie          50        0.74        0.96\\n\",\n      \"    Bouvier des Flandres          50        0.78        0.94\\n\",\n      \"              Rottweiler          50        0.88        0.96\\n\",\n      \"     German Shepherd Dog          50         0.8        0.98\\n\",\n      \"               Dobermann          50        0.68        0.96\\n\",\n      \"      Miniature Pinscher          50        0.76        0.88\\n\",\n      \"Greater Swiss Mountain Dog          50        0.68        0.94\\n\",\n      \"    Bernese Mountain Dog          50        0.96           1\\n\",\n      \"  Appenzeller Sennenhund          50        0.22           1\\n\",\n      \"  Entlebucher Sennenhund          50        0.64        0.98\\n\",\n      \"                   Boxer          50         0.7        0.92\\n\",\n      \"             Bullmastiff          50        0.78        0.98\\n\",\n      \"         Tibetan Mastiff          50        0.88        0.96\\n\",\n      \"          French Bulldog          50        0.84        0.94\\n\",\n      \"              Great Dane          50        0.54         0.9\\n\",\n      \"             St. Bernard          50        0.92           1\\n\",\n      \"                   husky          50        0.46        0.98\\n\",\n      \"        Alaskan Malamute          50        0.76        0.96\\n\",\n      \"          Siberian Husky          50        0.46        0.98\\n\",\n      \"               Dalmatian          50        0.94        0.98\\n\",\n      \"           Affenpinscher          50        0.78         0.9\\n\",\n      \"                 Basenji          50        0.92        0.94\\n\",\n      \"                     pug          50        0.94        0.98\\n\",\n      \"              Leonberger          50           1           1\\n\",\n      \"            Newfoundland          50        0.78        0.96\\n\",\n      \"   Pyrenean Mountain Dog          50        0.78        0.96\\n\",\n      \"                 Samoyed          50        0.96           1\\n\",\n      \"              Pomeranian          50        0.98           1\\n\",\n      \"               Chow Chow          50         0.9        0.96\\n\",\n      \"                Keeshond          50        0.88        0.94\\n\",\n      \"      Griffon Bruxellois          50        0.84        0.98\\n\",\n      \"    Pembroke Welsh Corgi          50        0.82        0.94\\n\",\n      \"    Cardigan Welsh Corgi          50        0.66        0.98\\n\",\n      \"              Toy Poodle          50        0.52        0.88\\n\",\n      \"        Miniature Poodle          50        0.52        0.92\\n\",\n      \"         Standard Poodle          50         0.8           1\\n\",\n      \"    Mexican hairless dog          50        0.88        0.98\\n\",\n      \"               grey wolf          50        0.82        0.92\\n\",\n      \"     Alaskan tundra wolf          50        0.78        0.98\\n\",\n      \"                red wolf          50        0.48         0.9\\n\",\n      \"                  coyote          50        0.64        0.86\\n\",\n      \"                   dingo          50        0.76        0.88\\n\",\n      \"                   dhole          50         0.9        0.98\\n\",\n      \"        African wild dog          50        0.98           
1\\n\",\n      \"                   hyena          50        0.88        0.96\\n\",\n      \"                 red fox          50        0.54        0.92\\n\",\n      \"                 kit fox          50        0.72        0.98\\n\",\n      \"              Arctic fox          50        0.94           1\\n\",\n      \"                grey fox          50         0.7        0.94\\n\",\n      \"               tabby cat          50        0.54        0.92\\n\",\n      \"               tiger cat          50        0.22        0.94\\n\",\n      \"             Persian cat          50         0.9        0.98\\n\",\n      \"             Siamese cat          50        0.96           1\\n\",\n      \"            Egyptian Mau          50        0.54         0.8\\n\",\n      \"                  cougar          50         0.9           1\\n\",\n      \"                    lynx          50        0.72        0.88\\n\",\n      \"                 leopard          50        0.78        0.98\\n\",\n      \"            snow leopard          50         0.9        0.98\\n\",\n      \"                  jaguar          50         0.7        0.94\\n\",\n      \"                    lion          50         0.9        0.98\\n\",\n      \"                   tiger          50        0.92        0.98\\n\",\n      \"                 cheetah          50        0.94        0.98\\n\",\n      \"              brown bear          50        0.94        0.98\\n\",\n      \"     American black bear          50         0.8           1\\n\",\n      \"              polar bear          50        0.84        0.96\\n\",\n      \"              sloth bear          50        0.72        0.92\\n\",\n      \"                mongoose          50         0.7        0.92\\n\",\n      \"                 meerkat          50        0.82        0.92\\n\",\n      \"            tiger beetle          50        0.92        0.94\\n\",\n      \"                 ladybug          50        0.86        0.94\\n\",\n      \"           ground beetle          50        0.64        0.94\\n\",\n      \"         longhorn beetle          50        0.62        0.88\\n\",\n      \"             leaf beetle          50        0.64        0.98\\n\",\n      \"             dung beetle          50        0.86        0.98\\n\",\n      \"       rhinoceros beetle          50        0.86        0.94\\n\",\n      \"                  weevil          50         0.9           1\\n\",\n      \"                     fly          50        0.78        0.94\\n\",\n      \"                     bee          50        0.68        0.94\\n\",\n      \"                     ant          50        0.68        0.78\\n\",\n      \"             grasshopper          50         0.5        0.92\\n\",\n      \"                 cricket          50        0.64        0.92\\n\",\n      \"            stick insect          50        0.64        0.92\\n\",\n      \"               cockroach          50        0.72         0.8\\n\",\n      \"                  mantis          50        0.64        0.86\\n\",\n      \"                  cicada          50         0.9        0.96\\n\",\n      \"              leafhopper          50        0.88        0.94\\n\",\n      \"                lacewing          50        0.78        0.92\\n\",\n      \"               dragonfly          50        0.82        0.98\\n\",\n      \"               damselfly          50        0.82           1\\n\",\n      \"             red admiral          50        0.94        0.96\\n\",\n      \"                 ringlet          50      
  0.86        0.98\\n\",\n      \"       monarch butterfly          50         0.9        0.92\\n\",\n      \"             small white          50         0.9           1\\n\",\n      \"        sulfur butterfly          50        0.92           1\\n\",\n      \"gossamer-winged butterfly          50        0.88           1\\n\",\n      \"                starfish          50        0.88        0.92\\n\",\n      \"              sea urchin          50        0.84        0.94\\n\",\n      \"            sea cucumber          50        0.66        0.84\\n\",\n      \"       cottontail rabbit          50        0.72        0.94\\n\",\n      \"                    hare          50        0.84        0.96\\n\",\n      \"           Angora rabbit          50        0.94        0.98\\n\",\n      \"                 hamster          50        0.96           1\\n\",\n      \"               porcupine          50        0.88        0.98\\n\",\n      \"            fox squirrel          50        0.76        0.94\\n\",\n      \"                  marmot          50        0.92        0.96\\n\",\n      \"                  beaver          50        0.78        0.94\\n\",\n      \"              guinea pig          50        0.78        0.94\\n\",\n      \"           common sorrel          50        0.96        0.98\\n\",\n      \"                   zebra          50        0.94        0.96\\n\",\n      \"                     pig          50         0.5        0.76\\n\",\n      \"               wild boar          50        0.84        0.96\\n\",\n      \"                 warthog          50        0.84        0.96\\n\",\n      \"            hippopotamus          50        0.88        0.96\\n\",\n      \"                      ox          50        0.48        0.94\\n\",\n      \"           water buffalo          50        0.78        0.94\\n\",\n      \"                   bison          50        0.88        0.96\\n\",\n      \"                     ram          50        0.58        0.92\\n\",\n      \"           bighorn sheep          50        0.66           1\\n\",\n      \"             Alpine ibex          50        0.92        0.98\\n\",\n      \"              hartebeest          50        0.94           1\\n\",\n      \"                  impala          50        0.82        0.96\\n\",\n      \"                 gazelle          50         0.7        0.96\\n\",\n      \"               dromedary          50         0.9           1\\n\",\n      \"                   llama          50        0.82        0.94\\n\",\n      \"                  weasel          50        0.44        0.92\\n\",\n      \"                    mink          50        0.78        0.96\\n\",\n      \"        European polecat          50        0.46         0.9\\n\",\n      \"     black-footed ferret          50        0.68        0.96\\n\",\n      \"                   otter          50        0.66        0.88\\n\",\n      \"                   skunk          50        0.96        0.96\\n\",\n      \"                  badger          50        0.86        0.92\\n\",\n      \"               armadillo          50        0.88         0.9\\n\",\n      \"        three-toed sloth          50        0.96           1\\n\",\n      \"               orangutan          50        0.78        0.92\\n\",\n      \"                 gorilla          50        0.82        0.94\\n\",\n      \"              chimpanzee          50        0.84        0.94\\n\",\n      \"                  gibbon          50        0.76        0.86\\n\",\n      \"                 
siamang          50        0.68        0.94\\n\",\n      \"                  guenon          50         0.8        0.94\\n\",\n      \"            patas monkey          50        0.62        0.82\\n\",\n      \"                  baboon          50         0.9        0.98\\n\",\n      \"                 macaque          50         0.8        0.86\\n\",\n      \"                  langur          50         0.6        0.82\\n\",\n      \" black-and-white colobus          50        0.86         0.9\\n\",\n      \"        proboscis monkey          50           1           1\\n\",\n      \"                marmoset          50        0.74        0.98\\n\",\n      \"   white-headed capuchin          50        0.72         0.9\\n\",\n      \"           howler monkey          50        0.86        0.94\\n\",\n      \"                    titi          50         0.5         0.9\\n\",\n      \"Geoffroy's spider monkey          50        0.42         0.8\\n\",\n      \"  common squirrel monkey          50        0.76        0.92\\n\",\n      \"       ring-tailed lemur          50        0.72        0.94\\n\",\n      \"                   indri          50         0.9        0.96\\n\",\n      \"          Asian elephant          50        0.58        0.92\\n\",\n      \"   African bush elephant          50         0.7        0.98\\n\",\n      \"               red panda          50        0.94        0.94\\n\",\n      \"             giant panda          50        0.94        0.98\\n\",\n      \"                   snoek          50        0.74         0.9\\n\",\n      \"                     eel          50         0.6        0.84\\n\",\n      \"             coho salmon          50        0.84        0.96\\n\",\n      \"             rock beauty          50        0.88        0.98\\n\",\n      \"               clownfish          50        0.78        0.98\\n\",\n      \"                sturgeon          50        0.68        0.94\\n\",\n      \"                 garfish          50        0.62         0.8\\n\",\n      \"                lionfish          50        0.96        0.96\\n\",\n      \"              pufferfish          50        0.88        0.96\\n\",\n      \"                  abacus          50        0.74        0.88\\n\",\n      \"                   abaya          50        0.84        0.92\\n\",\n      \"           academic gown          50        0.42        0.86\\n\",\n      \"               accordion          50         0.8         0.9\\n\",\n      \"         acoustic guitar          50         0.5        0.76\\n\",\n      \"        aircraft carrier          50         0.8        0.96\\n\",\n      \"                airliner          50        0.92           1\\n\",\n      \"                 airship          50        0.76        0.82\\n\",\n      \"                   altar          50        0.64        0.98\\n\",\n      \"               ambulance          50        0.88        0.98\\n\",\n      \"      amphibious vehicle          50        0.64        0.94\\n\",\n      \"            analog clock          50        0.52        0.92\\n\",\n      \"                  apiary          50        0.82        0.96\\n\",\n      \"                   apron          50         0.7        0.84\\n\",\n      \"         waste container          50         0.4         0.8\\n\",\n      \"           assault rifle          50        0.42        0.84\\n\",\n      \"                backpack          50        0.34        0.64\\n\",\n      \"                  bakery          50         0.4        0.68\\n\",\n      
\"            balance beam          50         0.8        0.98\\n\",\n      \"                 balloon          50        0.86        0.96\\n\",\n      \"           ballpoint pen          50        0.52        0.96\\n\",\n      \"                Band-Aid          50         0.7         0.9\\n\",\n      \"                   banjo          50        0.84           1\\n\",\n      \"                baluster          50        0.68        0.94\\n\",\n      \"                 barbell          50        0.56         0.9\\n\",\n      \"            barber chair          50         0.7        0.92\\n\",\n      \"              barbershop          50        0.54        0.86\\n\",\n      \"                    barn          50        0.96        0.96\\n\",\n      \"               barometer          50        0.84        0.98\\n\",\n      \"                  barrel          50        0.56        0.88\\n\",\n      \"             wheelbarrow          50        0.66        0.88\\n\",\n      \"                baseball          50        0.74        0.98\\n\",\n      \"              basketball          50        0.88        0.98\\n\",\n      \"                bassinet          50        0.66        0.92\\n\",\n      \"                 bassoon          50        0.74        0.98\\n\",\n      \"            swimming cap          50        0.62        0.88\\n\",\n      \"              bath towel          50        0.54        0.78\\n\",\n      \"                 bathtub          50         0.4        0.88\\n\",\n      \"           station wagon          50        0.66        0.84\\n\",\n      \"              lighthouse          50        0.78        0.94\\n\",\n      \"                  beaker          50        0.52        0.68\\n\",\n      \"            military cap          50        0.84        0.96\\n\",\n      \"             beer bottle          50        0.66        0.88\\n\",\n      \"              beer glass          50         0.6        0.84\\n\",\n      \"                bell-cot          50        0.56        0.96\\n\",\n      \"                     bib          50        0.58        0.82\\n\",\n      \"          tandem bicycle          50        0.86        0.96\\n\",\n      \"                  bikini          50        0.56        0.88\\n\",\n      \"             ring binder          50        0.64        0.84\\n\",\n      \"              binoculars          50        0.54        0.78\\n\",\n      \"               birdhouse          50        0.86        0.94\\n\",\n      \"               boathouse          50        0.74        0.92\\n\",\n      \"               bobsleigh          50        0.92        0.96\\n\",\n      \"                bolo tie          50         0.8        0.94\\n\",\n      \"             poke bonnet          50        0.64        0.86\\n\",\n      \"                bookcase          50        0.66        0.92\\n\",\n      \"               bookstore          50        0.62        0.88\\n\",\n      \"              bottle cap          50        0.58         0.7\\n\",\n      \"                     bow          50        0.72        0.86\\n\",\n      \"                 bow tie          50         0.7         0.9\\n\",\n      \"                   brass          50        0.92        0.96\\n\",\n      \"                     bra          50         0.5         0.7\\n\",\n      \"              breakwater          50        0.62        0.86\\n\",\n      \"             breastplate          50         0.4         0.9\\n\",\n      \"                   broom          50         0.6        
0.86\\n\",\n      \"                  bucket          50        0.66         0.8\\n\",\n      \"                  buckle          50         0.5        0.68\\n\",\n      \"        bulletproof vest          50         0.5        0.78\\n\",\n      \"        high-speed train          50        0.94        0.96\\n\",\n      \"            butcher shop          50        0.74        0.94\\n\",\n      \"                 taxicab          50        0.64        0.86\\n\",\n      \"                cauldron          50        0.44        0.66\\n\",\n      \"                  candle          50        0.48        0.74\\n\",\n      \"                  cannon          50        0.88        0.94\\n\",\n      \"                   canoe          50        0.94           1\\n\",\n      \"              can opener          50        0.66        0.86\\n\",\n      \"                cardigan          50        0.68         0.8\\n\",\n      \"              car mirror          50        0.94        0.96\\n\",\n      \"                carousel          50        0.94        0.98\\n\",\n      \"                tool kit          50        0.56        0.78\\n\",\n      \"                  carton          50        0.42         0.7\\n\",\n      \"               car wheel          50        0.38        0.74\\n\",\n      \"automated teller machine          50        0.76        0.94\\n\",\n      \"                cassette          50        0.52         0.8\\n\",\n      \"         cassette player          50        0.28         0.9\\n\",\n      \"                  castle          50        0.78        0.88\\n\",\n      \"               catamaran          50        0.78           1\\n\",\n      \"               CD player          50        0.52        0.82\\n\",\n      \"                   cello          50        0.82           1\\n\",\n      \"            mobile phone          50        0.68        0.86\\n\",\n      \"                   chain          50        0.38        0.66\\n\",\n      \"        chain-link fence          50         0.7        0.84\\n\",\n      \"              chain mail          50        0.64         0.9\\n\",\n      \"                chainsaw          50        0.84        0.92\\n\",\n      \"                   chest          50        0.68        0.92\\n\",\n      \"              chiffonier          50        0.26        0.64\\n\",\n      \"                   chime          50        0.62        0.84\\n\",\n      \"           china cabinet          50        0.82        0.96\\n\",\n      \"      Christmas stocking          50        0.92        0.94\\n\",\n      \"                  church          50        0.62         0.9\\n\",\n      \"           movie theater          50        0.58        0.88\\n\",\n      \"                 cleaver          50        0.32        0.62\\n\",\n      \"          cliff dwelling          50        0.88           1\\n\",\n      \"                   cloak          50        0.32        0.64\\n\",\n      \"                   clogs          50        0.58        0.88\\n\",\n      \"         cocktail shaker          50        0.62         0.7\\n\",\n      \"              coffee mug          50        0.44        0.72\\n\",\n      \"             coffeemaker          50        0.64        0.92\\n\",\n      \"                    coil          50        0.66        0.84\\n\",\n      \"        combination lock          50        0.64        0.84\\n\",\n      \"       computer keyboard          50         0.7        0.82\\n\",\n      \"     confectionery store          50   
     0.54        0.86\\n\",\n      \"          container ship          50        0.82        0.98\\n\",\n      \"             convertible          50        0.78        0.98\\n\",\n      \"               corkscrew          50        0.82        0.92\\n\",\n      \"                  cornet          50        0.46        0.88\\n\",\n      \"             cowboy boot          50        0.64         0.8\\n\",\n      \"              cowboy hat          50        0.64        0.82\\n\",\n      \"                  cradle          50        0.38         0.8\\n\",\n      \"         crane (machine)          50        0.78        0.94\\n\",\n      \"            crash helmet          50        0.92        0.96\\n\",\n      \"                   crate          50        0.52        0.82\\n\",\n      \"              infant bed          50        0.74           1\\n\",\n      \"               Crock Pot          50        0.78         0.9\\n\",\n      \"            croquet ball          50         0.9        0.96\\n\",\n      \"                  crutch          50        0.46         0.7\\n\",\n      \"                 cuirass          50        0.54        0.86\\n\",\n      \"                     dam          50        0.74        0.92\\n\",\n      \"                    desk          50         0.6        0.86\\n\",\n      \"        desktop computer          50        0.54        0.94\\n\",\n      \"   rotary dial telephone          50        0.88        0.94\\n\",\n      \"                  diaper          50        0.68        0.84\\n\",\n      \"           digital clock          50        0.54        0.76\\n\",\n      \"           digital watch          50        0.58        0.86\\n\",\n      \"            dining table          50        0.76         0.9\\n\",\n      \"               dishcloth          50        0.94           1\\n\",\n      \"              dishwasher          50        0.44        0.78\\n\",\n      \"              disc brake          50        0.98           1\\n\",\n      \"                    dock          50        0.54        0.94\\n\",\n      \"                dog sled          50        0.84           1\\n\",\n      \"                    dome          50        0.72        0.92\\n\",\n      \"                 doormat          50        0.56        0.82\\n\",\n      \"            drilling rig          50        0.84        0.96\\n\",\n      \"                    drum          50        0.38        0.68\\n\",\n      \"               drumstick          50        0.56        0.72\\n\",\n      \"                dumbbell          50        0.62         0.9\\n\",\n      \"              Dutch oven          50         0.7        0.84\\n\",\n      \"            electric fan          50        0.82        0.86\\n\",\n      \"         electric guitar          50        0.62        0.84\\n\",\n      \"     electric locomotive          50        0.92        0.98\\n\",\n      \"    entertainment center          50         0.9        0.98\\n\",\n      \"                envelope          50        0.44        0.86\\n\",\n      \"        espresso machine          50        0.72        0.94\\n\",\n      \"             face powder          50         0.7        0.92\\n\",\n      \"             feather boa          50         0.7        0.84\\n\",\n      \"          filing cabinet          50        0.88        0.98\\n\",\n      \"                fireboat          50        0.94        0.98\\n\",\n      \"             fire engine          50        0.84         0.9\\n\",\n      \"       fire screen 
sheet          50        0.62        0.76\\n\",\n      \"                flagpole          50        0.74        0.88\\n\",\n      \"                   flute          50        0.36        0.72\\n\",\n      \"           folding chair          50        0.62        0.84\\n\",\n      \"         football helmet          50        0.86        0.94\\n\",\n      \"                forklift          50         0.8        0.92\\n\",\n      \"                fountain          50        0.84        0.94\\n\",\n      \"            fountain pen          50        0.76        0.92\\n\",\n      \"         four-poster bed          50        0.78        0.94\\n\",\n      \"             freight car          50        0.96           1\\n\",\n      \"             French horn          50        0.76        0.92\\n\",\n      \"              frying pan          50        0.36        0.78\\n\",\n      \"                fur coat          50        0.84        0.96\\n\",\n      \"           garbage truck          50         0.9        0.98\\n\",\n      \"                gas mask          50        0.84        0.92\\n\",\n      \"                gas pump          50         0.9        0.98\\n\",\n      \"                  goblet          50        0.68        0.82\\n\",\n      \"                 go-kart          50         0.9           1\\n\",\n      \"               golf ball          50        0.84         0.9\\n\",\n      \"               golf cart          50        0.78        0.86\\n\",\n      \"                 gondola          50        0.98        0.98\\n\",\n      \"                    gong          50        0.74        0.92\\n\",\n      \"                    gown          50        0.62        0.96\\n\",\n      \"             grand piano          50         0.7        0.96\\n\",\n      \"              greenhouse          50         0.8        0.98\\n\",\n      \"                  grille          50        0.72         0.9\\n\",\n      \"           grocery store          50        0.66        0.94\\n\",\n      \"              guillotine          50        0.86        0.92\\n\",\n      \"                barrette          50        0.52        0.66\\n\",\n      \"              hair spray          50         0.5        0.74\\n\",\n      \"              half-track          50        0.78         0.9\\n\",\n      \"                  hammer          50        0.56        0.76\\n\",\n      \"                  hamper          50        0.64        0.84\\n\",\n      \"              hair dryer          50        0.56        0.74\\n\",\n      \"      hand-held computer          50        0.42        0.86\\n\",\n      \"            handkerchief          50        0.78        0.94\\n\",\n      \"         hard disk drive          50        0.76        0.84\\n\",\n      \"               harmonica          50         0.7        0.88\\n\",\n      \"                    harp          50        0.88        0.96\\n\",\n      \"               harvester          50        0.78           1\\n\",\n      \"                 hatchet          50        0.54        0.74\\n\",\n      \"                 holster          50        0.66        0.84\\n\",\n      \"            home theater          50        0.64        0.94\\n\",\n      \"               honeycomb          50        0.56        0.88\\n\",\n      \"                    hook          50         0.3         0.6\\n\",\n      \"              hoop skirt          50        0.64        0.86\\n\",\n      \"          horizontal bar          50        0.68        0.98\\n\",\n      \"  
   horse-drawn vehicle          50        0.88        0.94\\n\",\n      \"               hourglass          50        0.88        0.96\\n\",\n      \"                    iPod          50        0.76        0.94\\n\",\n      \"            clothes iron          50        0.82        0.88\\n\",\n      \"         jack-o'-lantern          50        0.98        0.98\\n\",\n      \"                   jeans          50        0.68        0.84\\n\",\n      \"                    jeep          50        0.72         0.9\\n\",\n      \"                 T-shirt          50        0.72        0.96\\n\",\n      \"           jigsaw puzzle          50        0.84        0.94\\n\",\n      \"         pulled rickshaw          50        0.86        0.94\\n\",\n      \"                joystick          50         0.8         0.9\\n\",\n      \"                  kimono          50        0.84        0.96\\n\",\n      \"                knee pad          50        0.62        0.88\\n\",\n      \"                    knot          50        0.66         0.8\\n\",\n      \"                lab coat          50         0.8        0.96\\n\",\n      \"                   ladle          50        0.36        0.64\\n\",\n      \"               lampshade          50        0.48        0.84\\n\",\n      \"         laptop computer          50        0.26        0.88\\n\",\n      \"              lawn mower          50        0.78        0.96\\n\",\n      \"                lens cap          50        0.46        0.72\\n\",\n      \"             paper knife          50        0.26         0.5\\n\",\n      \"                 library          50        0.54         0.9\\n\",\n      \"                lifeboat          50        0.92        0.98\\n\",\n      \"                 lighter          50        0.56        0.78\\n\",\n      \"               limousine          50        0.76        0.92\\n\",\n      \"             ocean liner          50        0.88        0.94\\n\",\n      \"                lipstick          50        0.74         0.9\\n\",\n      \"            slip-on shoe          50        0.74        0.92\\n\",\n      \"                  lotion          50         0.5        0.86\\n\",\n      \"                 speaker          50        0.52        0.68\\n\",\n      \"                   loupe          50        0.32        0.52\\n\",\n      \"                 sawmill          50        0.72         0.9\\n\",\n      \"        magnetic compass          50        0.52        0.82\\n\",\n      \"                mail bag          50        0.68        0.92\\n\",\n      \"                 mailbox          50        0.82        0.92\\n\",\n      \"                  tights          50        0.22        0.94\\n\",\n      \"               tank suit          50        0.24         0.9\\n\",\n      \"           manhole cover          50        0.96        0.98\\n\",\n      \"                  maraca          50        0.74         0.9\\n\",\n      \"                 marimba          50        0.84        0.94\\n\",\n      \"                    mask          50        0.44        0.82\\n\",\n      \"                   match          50        0.66         0.9\\n\",\n      \"                 maypole          50        0.96           1\\n\",\n      \"                    maze          50         0.8        0.96\\n\",\n      \"           measuring cup          50        0.54        0.76\\n\",\n      \"          medicine chest          50         0.6        0.84\\n\",\n      \"                megalith          50         0.8        
0.92\\n\",\n      \"              microphone          50        0.52         0.7\\n\",\n      \"          microwave oven          50        0.48        0.72\\n\",\n      \"        military uniform          50        0.62        0.84\\n\",\n      \"                milk can          50        0.68        0.82\\n\",\n      \"                 minibus          50         0.7           1\\n\",\n      \"               miniskirt          50        0.46        0.76\\n\",\n      \"                 minivan          50        0.38         0.8\\n\",\n      \"                 missile          50         0.4        0.84\\n\",\n      \"                  mitten          50        0.76        0.88\\n\",\n      \"             mixing bowl          50         0.8        0.92\\n\",\n      \"             mobile home          50        0.54        0.78\\n\",\n      \"                 Model T          50        0.92        0.96\\n\",\n      \"                   modem          50        0.58        0.86\\n\",\n      \"               monastery          50        0.44         0.9\\n\",\n      \"                 monitor          50         0.4        0.86\\n\",\n      \"                   moped          50        0.56        0.94\\n\",\n      \"                  mortar          50        0.68        0.94\\n\",\n      \"     square academic cap          50         0.5        0.84\\n\",\n      \"                  mosque          50         0.9           1\\n\",\n      \"            mosquito net          50         0.9        0.98\\n\",\n      \"                 scooter          50         0.9        0.98\\n\",\n      \"           mountain bike          50        0.78        0.96\\n\",\n      \"                    tent          50        0.88        0.96\\n\",\n      \"          computer mouse          50        0.42        0.82\\n\",\n      \"               mousetrap          50        0.76        0.88\\n\",\n      \"              moving van          50         0.4        0.72\\n\",\n      \"                  muzzle          50         0.5        0.72\\n\",\n      \"                    nail          50        0.68        0.74\\n\",\n      \"              neck brace          50        0.56        0.68\\n\",\n      \"                necklace          50        0.86           1\\n\",\n      \"                  nipple          50         0.7        0.88\\n\",\n      \"       notebook computer          50        0.34        0.84\\n\",\n      \"                 obelisk          50         0.8        0.92\\n\",\n      \"                    oboe          50         0.6        0.84\\n\",\n      \"                 ocarina          50         0.8        0.86\\n\",\n      \"                odometer          50        0.96           1\\n\",\n      \"              oil filter          50        0.58        0.82\\n\",\n      \"                   organ          50        0.82         0.9\\n\",\n      \"            oscilloscope          50         0.9        0.96\\n\",\n      \"               overskirt          50         0.2         0.7\\n\",\n      \"            bullock cart          50         0.7        0.94\\n\",\n      \"             oxygen mask          50        0.46        0.84\\n\",\n      \"                  packet          50         0.5        0.78\\n\",\n      \"                  paddle          50        0.56        0.94\\n\",\n      \"            paddle wheel          50        0.86        0.96\\n\",\n      \"                 padlock          50        0.74        0.78\\n\",\n      \"              paintbrush          50   
     0.62         0.8\\n\",\n      \"                 pajamas          50        0.56        0.92\\n\",\n      \"                  palace          50        0.64        0.96\\n\",\n      \"               pan flute          50        0.84        0.86\\n\",\n      \"             paper towel          50        0.66        0.84\\n\",\n      \"               parachute          50        0.92        0.94\\n\",\n      \"           parallel bars          50        0.62        0.96\\n\",\n      \"              park bench          50        0.74         0.9\\n\",\n      \"           parking meter          50        0.84        0.92\\n\",\n      \"           passenger car          50         0.5        0.82\\n\",\n      \"                   patio          50        0.58        0.84\\n\",\n      \"                payphone          50        0.74        0.92\\n\",\n      \"                pedestal          50        0.52         0.9\\n\",\n      \"             pencil case          50        0.64        0.92\\n\",\n      \"        pencil sharpener          50        0.52        0.78\\n\",\n      \"                 perfume          50         0.7         0.9\\n\",\n      \"              Petri dish          50         0.6         0.8\\n\",\n      \"             photocopier          50        0.88        0.98\\n\",\n      \"                plectrum          50         0.7        0.84\\n\",\n      \"             Pickelhaube          50        0.72        0.86\\n\",\n      \"            picket fence          50        0.84        0.94\\n\",\n      \"            pickup truck          50        0.64        0.92\\n\",\n      \"                    pier          50        0.52        0.82\\n\",\n      \"              piggy bank          50        0.82        0.94\\n\",\n      \"             pill bottle          50        0.76        0.86\\n\",\n      \"                  pillow          50        0.76         0.9\\n\",\n      \"          ping-pong ball          50        0.84        0.88\\n\",\n      \"                pinwheel          50        0.76        0.88\\n\",\n      \"             pirate ship          50        0.76        0.94\\n\",\n      \"                 pitcher          50        0.46        0.84\\n\",\n      \"              hand plane          50        0.84        0.94\\n\",\n      \"             planetarium          50        0.88        0.98\\n\",\n      \"             plastic bag          50        0.36        0.62\\n\",\n      \"              plate rack          50        0.52        0.78\\n\",\n      \"                    plow          50        0.78        0.88\\n\",\n      \"                 plunger          50        0.42         0.7\\n\",\n      \"         Polaroid camera          50        0.84        0.92\\n\",\n      \"                    pole          50        0.38        0.74\\n\",\n      \"              police van          50        0.76        0.94\\n\",\n      \"                  poncho          50        0.58        0.86\\n\",\n      \"          billiard table          50         0.8        0.88\\n\",\n      \"             soda bottle          50        0.56        0.94\\n\",\n      \"                     pot          50        0.78        0.92\\n\",\n      \"          potter's wheel          50         0.9        0.94\\n\",\n      \"             power drill          50        0.42        0.72\\n\",\n      \"              prayer rug          50         0.7        0.86\\n\",\n      \"                 printer          50        0.54        0.86\\n\",\n      \"                  
prison          50         0.7         0.9\\n\",\n      \"              projectile          50        0.28         0.9\\n\",\n      \"               projector          50        0.62        0.84\\n\",\n      \"             hockey puck          50        0.92        0.96\\n\",\n      \"            punching bag          50         0.6        0.68\\n\",\n      \"                   purse          50        0.42        0.78\\n\",\n      \"                   quill          50        0.68        0.84\\n\",\n      \"                   quilt          50        0.64         0.9\\n\",\n      \"                race car          50        0.72        0.92\\n\",\n      \"                  racket          50        0.72         0.9\\n\",\n      \"                radiator          50        0.66        0.76\\n\",\n      \"                   radio          50        0.64        0.92\\n\",\n      \"         radio telescope          50         0.9        0.96\\n\",\n      \"             rain barrel          50         0.8        0.98\\n\",\n      \"    recreational vehicle          50        0.84        0.94\\n\",\n      \"                    reel          50        0.72        0.82\\n\",\n      \"           reflex camera          50        0.72        0.92\\n\",\n      \"            refrigerator          50         0.7         0.9\\n\",\n      \"          remote control          50         0.7        0.88\\n\",\n      \"              restaurant          50         0.5        0.66\\n\",\n      \"                revolver          50        0.82           1\\n\",\n      \"                   rifle          50        0.38         0.7\\n\",\n      \"           rocking chair          50        0.62        0.84\\n\",\n      \"              rotisserie          50        0.88        0.92\\n\",\n      \"                  eraser          50        0.54        0.76\\n\",\n      \"              rugby ball          50        0.86        0.94\\n\",\n      \"                   ruler          50        0.68        0.86\\n\",\n      \"            running shoe          50        0.78        0.94\\n\",\n      \"                    safe          50        0.82        0.92\\n\",\n      \"              safety pin          50         0.4        0.62\\n\",\n      \"             salt shaker          50        0.66         0.9\\n\",\n      \"                  sandal          50        0.66        0.86\\n\",\n      \"                  sarong          50        0.64        0.86\\n\",\n      \"               saxophone          50        0.66        0.88\\n\",\n      \"                scabbard          50        0.76        0.92\\n\",\n      \"          weighing scale          50        0.58        0.78\\n\",\n      \"              school bus          50        0.92           1\\n\",\n      \"                schooner          50        0.84           1\\n\",\n      \"              scoreboard          50         0.9        0.96\\n\",\n      \"              CRT screen          50        0.14         0.7\\n\",\n      \"                   screw          50         0.9        0.98\\n\",\n      \"             screwdriver          50         0.3        0.58\\n\",\n      \"               seat belt          50        0.88        0.94\\n\",\n      \"          sewing machine          50        0.76         0.9\\n\",\n      \"                  shield          50        0.56        0.82\\n\",\n      \"              shoe store          50        0.78        0.96\\n\",\n      \"                   shoji          50         0.8        0.92\\n\",\n      \" 
        shopping basket          50        0.52        0.88\\n\",\n      \"           shopping cart          50        0.76        0.92\\n\",\n      \"                  shovel          50        0.62        0.84\\n\",\n      \"              shower cap          50         0.7        0.84\\n\",\n      \"          shower curtain          50        0.64        0.82\\n\",\n      \"                     ski          50        0.74        0.92\\n\",\n      \"                ski mask          50        0.72        0.88\\n\",\n      \"            sleeping bag          50        0.68         0.8\\n\",\n      \"              slide rule          50        0.72        0.88\\n\",\n      \"            sliding door          50        0.44        0.78\\n\",\n      \"            slot machine          50        0.94        0.98\\n\",\n      \"                 snorkel          50        0.86        0.98\\n\",\n      \"              snowmobile          50        0.88           1\\n\",\n      \"                snowplow          50        0.84        0.98\\n\",\n      \"          soap dispenser          50        0.56        0.86\\n\",\n      \"             soccer ball          50        0.86        0.96\\n\",\n      \"                    sock          50        0.62        0.76\\n\",\n      \" solar thermal collector          50        0.72        0.96\\n\",\n      \"                sombrero          50         0.6        0.84\\n\",\n      \"               soup bowl          50        0.56        0.94\\n\",\n      \"               space bar          50        0.34        0.88\\n\",\n      \"            space heater          50        0.52        0.74\\n\",\n      \"           space shuttle          50        0.82        0.96\\n\",\n      \"                 spatula          50         0.3         0.6\\n\",\n      \"               motorboat          50        0.86           1\\n\",\n      \"              spider web          50         0.7         0.9\\n\",\n      \"                 spindle          50        0.86        0.98\\n\",\n      \"              sports car          50         0.6        0.94\\n\",\n      \"               spotlight          50        0.26         0.6\\n\",\n      \"                   stage          50        0.68        0.86\\n\",\n      \"        steam locomotive          50        0.94           1\\n\",\n      \"     through arch bridge          50        0.84        0.96\\n\",\n      \"              steel drum          50        0.82         0.9\\n\",\n      \"             stethoscope          50         0.6        0.82\\n\",\n      \"                   scarf          50         0.5        0.92\\n\",\n      \"              stone wall          50        0.76         0.9\\n\",\n      \"               stopwatch          50        0.58         0.9\\n\",\n      \"                   stove          50        0.46        0.74\\n\",\n      \"                strainer          50        0.64        0.84\\n\",\n      \"                    tram          50        0.88        0.96\\n\",\n      \"               stretcher          50         0.6         0.8\\n\",\n      \"                   couch          50         0.8        0.96\\n\",\n      \"                   stupa          50        0.88        0.88\\n\",\n      \"               submarine          50        0.72        0.92\\n\",\n      \"                    suit          50         0.4        0.78\\n\",\n      \"                 sundial          50        0.58        0.74\\n\",\n      \"                sunglass          50        0.14        
0.58\\n\",\n      \"              sunglasses          50        0.28        0.58\\n\",\n      \"               sunscreen          50        0.32         0.7\\n\",\n      \"       suspension bridge          50         0.6        0.94\\n\",\n      \"                     mop          50        0.74        0.92\\n\",\n      \"              sweatshirt          50        0.28        0.66\\n\",\n      \"                swimsuit          50        0.52        0.82\\n\",\n      \"                   swing          50        0.76        0.84\\n\",\n      \"                  switch          50        0.56        0.76\\n\",\n      \"                 syringe          50        0.62        0.82\\n\",\n      \"              table lamp          50         0.6        0.88\\n\",\n      \"                    tank          50         0.8        0.96\\n\",\n      \"             tape player          50        0.46        0.76\\n\",\n      \"                  teapot          50        0.84           1\\n\",\n      \"              teddy bear          50        0.82        0.94\\n\",\n      \"              television          50         0.6         0.9\\n\",\n      \"             tennis ball          50         0.7        0.94\\n\",\n      \"           thatched roof          50        0.88         0.9\\n\",\n      \"           front curtain          50         0.8        0.92\\n\",\n      \"                 thimble          50         0.6         0.8\\n\",\n      \"       threshing machine          50        0.56        0.88\\n\",\n      \"                  throne          50        0.72        0.82\\n\",\n      \"               tile roof          50        0.72        0.94\\n\",\n      \"                 toaster          50        0.66        0.84\\n\",\n      \"            tobacco shop          50        0.42         0.7\\n\",\n      \"             toilet seat          50        0.62        0.88\\n\",\n      \"                   torch          50        0.64        0.84\\n\",\n      \"              totem pole          50        0.92        0.98\\n\",\n      \"               tow truck          50        0.62        0.88\\n\",\n      \"               toy store          50         0.6        0.94\\n\",\n      \"                 tractor          50        0.76        0.98\\n\",\n      \"      semi-trailer truck          50        0.78        0.92\\n\",\n      \"                    tray          50        0.46        0.64\\n\",\n      \"             trench coat          50        0.54        0.72\\n\",\n      \"                tricycle          50        0.72        0.94\\n\",\n      \"                trimaran          50         0.7        0.98\\n\",\n      \"                  tripod          50        0.58        0.86\\n\",\n      \"          triumphal arch          50        0.92        0.98\\n\",\n      \"              trolleybus          50         0.9           1\\n\",\n      \"                trombone          50        0.54        0.88\\n\",\n      \"                     tub          50        0.24        0.82\\n\",\n      \"               turnstile          50        0.84        0.94\\n\",\n      \"     typewriter keyboard          50        0.68        0.98\\n\",\n      \"                umbrella          50        0.52         0.7\\n\",\n      \"                unicycle          50        0.74        0.96\\n\",\n      \"           upright piano          50        0.76         0.9\\n\",\n      \"          vacuum cleaner          50        0.62         0.9\\n\",\n      \"                    vase          50   
      0.5        0.78\\n\",\n      \"                   vault          50        0.76        0.92\\n\",\n      \"                  velvet          50         0.2        0.42\\n\",\n      \"         vending machine          50         0.9           1\\n\",\n      \"                vestment          50        0.54        0.82\\n\",\n      \"                 viaduct          50        0.78        0.86\\n\",\n      \"                  violin          50        0.68        0.78\\n\",\n      \"              volleyball          50        0.86           1\\n\",\n      \"             waffle iron          50        0.72        0.88\\n\",\n      \"              wall clock          50        0.54        0.88\\n\",\n      \"                  wallet          50        0.52         0.9\\n\",\n      \"                wardrobe          50        0.68        0.88\\n\",\n      \"       military aircraft          50         0.9        0.98\\n\",\n      \"                    sink          50        0.72        0.96\\n\",\n      \"         washing machine          50        0.78        0.94\\n\",\n      \"            water bottle          50        0.54        0.74\\n\",\n      \"               water jug          50        0.22        0.74\\n\",\n      \"             water tower          50         0.9        0.96\\n\",\n      \"             whiskey jug          50        0.64        0.74\\n\",\n      \"                 whistle          50        0.72        0.84\\n\",\n      \"                     wig          50        0.84         0.9\\n\",\n      \"           window screen          50        0.68         0.8\\n\",\n      \"            window shade          50        0.52        0.76\\n\",\n      \"             Windsor tie          50        0.22        0.66\\n\",\n      \"             wine bottle          50        0.42        0.82\\n\",\n      \"                    wing          50        0.54        0.96\\n\",\n      \"                     wok          50        0.46        0.82\\n\",\n      \"            wooden spoon          50        0.58         0.8\\n\",\n      \"                    wool          50        0.32        0.82\\n\",\n      \"        split-rail fence          50        0.74         0.9\\n\",\n      \"               shipwreck          50        0.84        0.96\\n\",\n      \"                    yawl          50        0.78        0.96\\n\",\n      \"                    yurt          50        0.84           1\\n\",\n      \"                 website          50        0.98           1\\n\",\n      \"              comic book          50        0.62         0.9\\n\",\n      \"               crossword          50        0.84        0.88\\n\",\n      \"            traffic sign          50        0.78         0.9\\n\",\n      \"           traffic light          50         0.8        0.94\\n\",\n      \"             dust jacket          50        0.72        0.94\\n\",\n      \"                    menu          50        0.82        0.96\\n\",\n      \"                   plate          50        0.44        0.88\\n\",\n      \"               guacamole          50         0.8        0.92\\n\",\n      \"                consomme          50        0.54        0.88\\n\",\n      \"                 hot pot          50        0.86        0.98\\n\",\n      \"                  trifle          50        0.92        0.98\\n\",\n      \"               ice cream          50        0.68        0.94\\n\",\n      \"                 ice pop          50        0.62        0.84\\n\",\n      \"                
baguette          50        0.62        0.88\\n\",\n      \"                   bagel          50        0.64        0.92\\n\",\n      \"                 pretzel          50        0.72        0.88\\n\",\n      \"            cheeseburger          50         0.9           1\\n\",\n      \"                 hot dog          50        0.74        0.94\\n\",\n      \"           mashed potato          50        0.74         0.9\\n\",\n      \"                 cabbage          50        0.84        0.96\\n\",\n      \"                broccoli          50         0.9        0.96\\n\",\n      \"             cauliflower          50        0.82           1\\n\",\n      \"                zucchini          50        0.74         0.9\\n\",\n      \"        spaghetti squash          50         0.8        0.96\\n\",\n      \"            acorn squash          50        0.82        0.96\\n\",\n      \"        butternut squash          50         0.7        0.94\\n\",\n      \"                cucumber          50         0.6        0.96\\n\",\n      \"               artichoke          50        0.84        0.94\\n\",\n      \"             bell pepper          50        0.84        0.98\\n\",\n      \"                 cardoon          50        0.88        0.94\\n\",\n      \"                mushroom          50        0.38        0.92\\n\",\n      \"            Granny Smith          50         0.9        0.96\\n\",\n      \"              strawberry          50         0.6        0.88\\n\",\n      \"                  orange          50         0.7        0.92\\n\",\n      \"                   lemon          50        0.78        0.98\\n\",\n      \"                     fig          50        0.82        0.96\\n\",\n      \"               pineapple          50        0.86        0.96\\n\",\n      \"                  banana          50        0.84        0.96\\n\",\n      \"               jackfruit          50         0.9        0.98\\n\",\n      \"           custard apple          50        0.86        0.96\\n\",\n      \"             pomegranate          50        0.82        0.98\\n\",\n      \"                     hay          50         0.8        0.92\\n\",\n      \"               carbonara          50        0.88        0.94\\n\",\n      \"         chocolate syrup          50        0.46        0.84\\n\",\n      \"                   dough          50         0.4         0.6\\n\",\n      \"                meatloaf          50        0.58        0.84\\n\",\n      \"                   pizza          50        0.84        0.96\\n\",\n      \"                 pot pie          50        0.68         0.9\\n\",\n      \"                 burrito          50         0.8        0.98\\n\",\n      \"                red wine          50        0.54        0.82\\n\",\n      \"                espresso          50        0.64        0.88\\n\",\n      \"                     cup          50        0.38         0.7\\n\",\n      \"                  eggnog          50        0.38         0.7\\n\",\n      \"                     alp          50        0.54        0.88\\n\",\n      \"                  bubble          50         0.8        0.96\\n\",\n      \"                   cliff          50        0.64           1\\n\",\n      \"              coral reef          50        0.72        0.96\\n\",\n      \"                  geyser          50        0.94           1\\n\",\n      \"               lakeshore          50        0.54        0.88\\n\",\n      \"              promontory          50        0.58        0.94\\n\",\n      
\"                   shoal          50         0.6        0.96\\n\",\n      \"                seashore          50        0.44        0.78\\n\",\n      \"                  valley          50        0.72        0.94\\n\",\n      \"                 volcano          50        0.78        0.96\\n\",\n      \"         baseball player          50        0.72        0.94\\n\",\n      \"              bridegroom          50        0.72        0.88\\n\",\n      \"             scuba diver          50         0.8           1\\n\",\n      \"                rapeseed          50        0.94        0.98\\n\",\n      \"                   daisy          50        0.96        0.98\\n\",\n      \"   yellow lady's slipper          50           1           1\\n\",\n      \"                    corn          50         0.4        0.88\\n\",\n      \"                   acorn          50        0.92        0.98\\n\",\n      \"                rose hip          50        0.92        0.98\\n\",\n      \"     horse chestnut seed          50        0.94        0.98\\n\",\n      \"            coral fungus          50        0.96        0.96\\n\",\n      \"                  agaric          50        0.82        0.94\\n\",\n      \"               gyromitra          50        0.98           1\\n\",\n      \"      stinkhorn mushroom          50         0.8        0.94\\n\",\n      \"              earth star          50        0.98           1\\n\",\n      \"        hen-of-the-woods          50         0.8        0.96\\n\",\n      \"                  bolete          50        0.74        0.94\\n\",\n      \"                     ear          50        0.48        0.94\\n\",\n      \"            toilet paper          50        0.36        0.68\\n\",\n      \"Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\\n\",\n      \"Results saved to \\u001b[1mruns/val-cls/exp\\u001b[0m\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Validate YOLOv5s on Imagenet val\\n\",\n    \"!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"ZY2VXXXu74w5\"\n   },\n   \"source\": \"# 3. Train\\n\\n<p align=\\\"\\\"><a href=\\\"https://platform.ultralytics.com\\\"><img width=\\\"1000\\\" src=\\\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\\\"/></a></p>\\n\\nTrain a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\\n\\n- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\\nautomatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\\n- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. 
`runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\\n<br><br>\\n\\nA **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\"\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"id\": \"i3oKtE4g-aNn\"\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# @title Select YOLOv5 🚀 logger {run: 'auto'}\\n\",\n    \"logger = \\\"Comet\\\"  # @param ['Comet', 'ClearML', 'TensorBoard']\\n\",\n    \"\\n\",\n    \"if logger == \\\"Comet\\\":\\n\",\n    \"    %pip install -q comet_ml\\n\",\n    \"    import comet_ml\\n\",\n    \"\\n\",\n    \"    comet_ml.init()\\n\",\n    \"elif logger == \\\"ClearML\\\":\\n\",\n    \"    %pip install -q clearml\\n\",\n    \"    import clearml\\n\",\n    \"\\n\",\n    \"    clearml.browser_login()\\n\",\n    \"elif logger == \\\"TensorBoard\\\":\\n\",\n    \"    %load_ext tensorboard\\n\",\n    \"    %tensorboard --logdir runs/train\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"1NcFxRcFdJ_O\",\n    \"outputId\": \"77c8d487-16db-4073-b3ea-06cabf2e7766\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1mclassify/train: \\u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\\n\",\n      \"\\u001b[34m\\u001b[1mgithub: \\u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\\n\",\n      \"YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\",\n      \"\\n\",\n      \"\\u001b[34m\\u001b[1mTensorBoard: \\u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\\n\",\n      \"\\n\",\n      \"Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\\n\",\n      \"Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenette160.zip to /content/datasets/imagenette160.zip...\\n\",\n      \"100% 103M/103M [00:00<00:00, 347MB/s] \\n\",\n      \"Unzipping /content/datasets/imagenette160.zip...\\n\",\n      \"Dataset download success ✅ (3.3s), saved to \\u001b[1m/content/datasets/imagenette160\\u001b[0m\\n\",\n      \"\\n\",\n      \"\\u001b[34m\\u001b[1malbumentations: \\u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\\n\",\n      \"Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\\n\",\n      \"\\u001b[34m\\u001b[1moptimizer:\\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\\n\",\n      \"Image sizes 224 train, 224 test\\n\",\n      \"Using 1 dataloader workers\\n\",\n      \"Logging results to \\u001b[1mruns/train-cls/exp\\u001b[0m\\n\",\n      \"Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\\n\",\n      
\"\\n\",\n      \"     Epoch   GPU_mem  train_loss    val_loss    top1_acc    top5_acc\\n\",\n      \"       1/5     1.47G        1.05       0.974       0.828       0.975: 100% 148/148 [00:38<00:00,  3.82it/s]\\n\",\n      \"       2/5     1.73G       0.895       0.766       0.911       0.994: 100% 148/148 [00:36<00:00,  4.03it/s]\\n\",\n      \"       3/5     1.73G        0.82       0.704       0.934       0.996: 100% 148/148 [00:35<00:00,  4.20it/s]\\n\",\n      \"       4/5     1.73G       0.766       0.664       0.951       0.998: 100% 148/148 [00:36<00:00,  4.05it/s]\\n\",\n      \"       5/5     1.73G       0.724       0.634       0.959       0.997: 100% 148/148 [00:37<00:00,  3.94it/s]\\n\",\n      \"\\n\",\n      \"Training complete (0.052 hours)\\n\",\n      \"Results saved to \\u001b[1mruns/train-cls/exp\\u001b[0m\\n\",\n      \"Predict:         python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\\n\",\n      \"Validate:        python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\\n\",\n      \"Export:          python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\\n\",\n      \"PyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\\n\",\n      \"Visualize:       https://netron.app\\n\",\n      \"\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Train YOLOv5s Classification on Imagenette160 for 3 epochs\\n\",\n    \"!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"15glLzbQx5u0\"\n   },\n   \"source\": [\n    \"# 4. Visualize\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"nWOsI5wJR1o3\"\n   },\n   \"source\": [\n    \"## Comet Logging and Visualization 🌟 NEW\\n\",\n    \"\\n\",\n    \"[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\\n\",\n    \"\\n\",\n    \"Getting started is easy:\\n\",\n    \"```shell\\n\",\n    \"pip install comet_ml  # 1. install\\n\",\n    \"export COMET_API_KEY=<Your API Key>  # 2. paste API key\\n\",\n    \"python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\\n\",\n    \"```\\n\",\n    \"To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\\n\",\n    \"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\\n\",\n    \"\\n\",\n    \"<a href=\\\"https://bit.ly/yolov5-readme-comet2\\\">\\n\",\n    \"<img alt=\\\"Comet Dashboard\\\" src=\\\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\\\" width=\\\"1280\\\"/></a>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"Lay2WsTjNJzP\"\n   },\n   \"source\": [\n    \"## ClearML Logging and Automation 🌟 NEW\\n\",\n    \"\\n\",\n    \"[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\\n\",\n    \"\\n\",\n    \"- `pip install clearml`\\n\",\n    \"- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\\n\",\n    \"\\n\",\n    \"You'll get all the great features you expect from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\\n\",\n    \"\\n\",\n    \"You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\\n\",\n    \"\\n\",\n    \"<a href=\\\"https://cutt.ly/yolov5-notebook-clearml\\\">\\n\",\n    \"<img alt=\\\"ClearML Experiment Management UI\\\" src=\\\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\\\" width=\\\"1280\\\"/></a>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"-WPvRbS5Swl6\"\n   },\n   \"source\": [\n    \"## Local Logging\\n\",\n    \"\\n\",\n    \"Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\\n\",\n    \"\\n\",\n    \"This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\\n\",\n    \"\\n\",\n    \"<img alt=\\\"Local logging results\\\" src=\\\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\\\" width=\\\"1280\\\"/>\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"Zelyeqbyt3GD\"\n   },\n   \"source\": [\n    \"# Environments\\n\",\n    \"\\n\",\n    \"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\\n\",\n    \"\\n\",\n    \"- **Notebooks** with free GPU: <a href=\\\"https://bit.ly/yolov5-paperspace-notebook\\\"><img src=\\\"https://assets.paperspace.io/img/gradient-badge.svg\\\" alt=\\\"Run on Gradient\\\"></a> <a href=\\\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\\\"><img src=\\\"https://colab.research.google.com/assets/colab-badge.svg\\\" alt=\\\"Open In Colab\\\"></a> <a href=\\\"https://www.kaggle.com/models/ultralytics/yolov5\\\"><img src=\\\"https://kaggle.com/static/images/open-in-kaggle.svg\\\" alt=\\\"Open In Kaggle\\\"></a>\\n\",\n    \"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\\n\",\n    \"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\\n\",\n    \"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\\\"https://hub.docker.com/r/ultralytics/yolov5\\\"><img src=\\\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\\\" alt=\\\"Docker Pulls\\\"></a>\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"6Qu7Iesl0p54\"\n   },\n   \"source\": [\n    \"# Status\\n\",\n    \"\\n\",\n    \"![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\\n\",\n    \"\\n\",\n    \"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"IEijrePND_2I\"\n   },\n   \"source\": [\n    \"# Appendix\\n\",\n    \"\\n\",\n    \"Additional content below.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"id\": \"GMusP4OAxFu6\"\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# YOLOv5 PyTorch HUB Inference (DetectionModels only)\\n\",\n    \"\\n\",\n    \"model = torch.hub.load(\\n\",\n    \"    \\\"ultralytics/yolov5\\\", \\\"yolov5s\\\", force_reload=True, trust_repo=True\\n\",\n    \")  # or yolov5n - yolov5x6 or custom\\n\",\n    \"im = \\\"https://ultralytics.com/images/zidane.jpg\\\"  # file, Path, PIL.Image, OpenCV, nparray, list\\n\",\n    \"results = model(im)  # inference\\n\",\n    \"results.print()  # or .show(), .save(), .crop(), .pandas(), etc.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"accelerator\": \"GPU\",\n  \"colab\": {\n   \"name\": \"YOLOv5 Classification Tutorial\",\n   \"provenance\": []\n  },\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.7.12\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}"
  },
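  {
    "path": "examples/top1_top5_accuracy_sketch.py",
    "content": "# Illustrative sketch (hypothetical path, not part of the upstream YOLOv5 repo).\n# Shows how the top1_acc / top5_acc columns in the classification validation table in the\n# notebook above are computed from raw class scores, mirroring the accuracy logic in\n# classify/val.py: argsort the scores, keep the 5 best classes per image, compare to labels.\nimport torch\n\n\ndef top1_top5(scores, labels):\n    # scores: (N, num_classes) class scores, labels: (N,) integer class ids\n    pred = scores.argsort(1, descending=True)[:, :5]  # 5 highest-scoring classes per image\n    correct = (labels[:, None] == pred).float()  # (N, 5) hit matrix\n    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # per-image (top1, top5)\n    return acc.mean(0).tolist()\n\n\nif __name__ == '__main__':\n    # Toy check: 3 images, 4 classes (top5 is trivially 1.0 with fewer than 5 classes)\n    scores = torch.tensor([[0.1, 0.7, 0.1, 0.1], [0.6, 0.2, 0.1, 0.1], [0.2, 0.2, 0.5, 0.1]])\n    labels = torch.tensor([1, 2, 2])\n    print(top1_top5(scores, labels))  # approximately [0.667, 1.0]\n"
  },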
  {
    "path": "classify/val.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nValidate a trained YOLOv5 classification model on a classification dataset.\n\nUsage:\n    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)\n    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet\n\nUsage - formats:\n    $ python classify/val.py --weights yolov5s-cls.pt                 # PyTorch\n                                       yolov5s-cls.torchscript        # TorchScript\n                                       yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                                       yolov5s-cls_openvino_model     # OpenVINO\n                                       yolov5s-cls.engine             # TensorRT\n                                       yolov5s-cls.mlmodel            # CoreML (macOS-only)\n                                       yolov5s-cls_saved_model        # TensorFlow SavedModel\n                                       yolov5s-cls.pb                 # TensorFlow GraphDef\n                                       yolov5s-cls.tflite             # TensorFlow Lite\n                                       yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU\n                                       yolov5s-cls_paddle_model       # PaddlePaddle\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nfrom pathlib import Path\n\nimport torch\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom models.common import DetectMultiBackend\nfrom utils.dataloaders import create_classification_dataloader\nfrom utils.general import (\n    LOGGER,\n    TQDM_BAR_FORMAT,\n    Profile,\n    check_img_size,\n    check_requirements,\n    colorstr,\n    increment_path,\n    print_args,\n)\nfrom utils.torch_utils import select_device, smart_inference_mode\n\n\n@smart_inference_mode()\ndef run(\n    data=ROOT / \"../datasets/mnist\",  # dataset dir\n    weights=ROOT / \"yolov5s-cls.pt\",  # model.pt path(s)\n    batch_size=128,  # batch size\n    imgsz=224,  # inference size (pixels)\n    device=\"\",  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n    workers=8,  # max dataloader workers (per RANK in DDP mode)\n    verbose=False,  # verbose output\n    project=ROOT / \"runs/val-cls\",  # save to project/name\n    name=\"exp\",  # save to project/name\n    exist_ok=False,  # existing project/name ok, do not increment\n    half=False,  # use FP16 half-precision inference\n    dnn=False,  # use OpenCV DNN for ONNX inference\n    model=None,\n    dataloader=None,\n    criterion=None,\n    pbar=None,\n):\n    \"\"\"Validates a YOLOv5 classification model on a dataset, computing metrics like top1 and top5 accuracy.\"\"\"\n    # Initialize/load model and set device\n    training = model is not None\n    if training:  # called by train.py\n        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model\n        half &= device.type != \"cpu\"  # half precision only supported on CUDA\n        model.half() if half else model.float()\n    else:  # called directly\n        device = select_device(device, batch_size=batch_size)\n\n        # Directories\n        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run\n        save_dir.mkdir(parents=True, exist_ok=True)  # make dir\n\n        # Load model\n        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)\n        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine\n        imgsz = check_img_size(imgsz, s=stride)  # check image size\n        half = model.fp16  # FP16 supported on limited backends with CUDA\n        if engine:\n            batch_size = model.batch_size\n        else:\n            device = model.device\n            if not (pt or jit):\n                batch_size = 1  # export.py models default to batch-size 1\n                LOGGER.info(f\"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models\")\n\n        # Dataloader\n        data = Path(data)\n        test_dir = data / \"test\" if (data / \"test\").exists() else data / \"val\"  # data/test or data/val\n        dataloader = create_classification_dataloader(\n            path=test_dir, imgsz=imgsz, batch_size=batch_size, augment=False, rank=-1, workers=workers\n        )\n\n    model.eval()\n    pred, targets, loss, dt = [], [], 0, (Profile(device=device), Profile(device=device), Profile(device=device))\n    n = len(dataloader)  # number of batches\n    action = \"validating\" if dataloader.dataset.root.stem == \"val\" else \"testing\"\n    desc = f\"{pbar.desc[:-36]}{action:>36}\" if pbar else f\"{action}\"\n    bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)\n    with torch.cuda.amp.autocast(enabled=device.type != \"cpu\"):\n        for images, labels in bar:\n            with dt[0]:\n                images, labels = images.to(device, non_blocking=True), labels.to(device)\n\n            with dt[1]:\n                y = model(images)\n\n            with dt[2]:\n                pred.append(y.argsort(1, descending=True)[:, :5])\n                targets.append(labels)\n                if criterion:\n                    loss += criterion(y, labels)\n\n    loss /= n\n    pred, targets = torch.cat(pred), torch.cat(targets)\n    correct = (targets[:, None] == pred).float()\n    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy\n    top1, top5 = acc.mean(0).tolist()\n\n    if pbar:\n        pbar.desc = f\"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}\"\n    if 
verbose:  # all classes\n        LOGGER.info(f\"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}\")\n        LOGGER.info(f\"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}\")\n        for i, c in model.names.items():\n            acc_i = acc[targets == i]\n            top1i, top5i = acc_i.mean(0).tolist()\n            LOGGER.info(f\"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}\")\n\n        # Print results\n        t = tuple(x.t / len(dataloader.dataset.samples) * 1e3 for x in dt)  # speeds per image\n        shape = (1, 3, imgsz, imgsz)\n        LOGGER.info(f\"Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}\" % t)\n        LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}\")\n\n    return top1, top5, loss\n\n\ndef parse_opt():\n    \"\"\"Parses and returns command line arguments for YOLOv5 model evaluation and inference settings.\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"../datasets/mnist\", help=\"dataset path\")\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s-cls.pt\", help=\"model.pt path(s)\")\n    parser.add_argument(\"--batch-size\", type=int, default=128, help=\"batch size\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=224, help=\"inference size (pixels)\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--verbose\", nargs=\"?\", const=True, default=True, help=\"verbose output\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/val-cls\", help=\"save to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--dnn\", action=\"store_true\", help=\"use OpenCV DNN for ONNX inference\")\n    opt = parser.parse_args()\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes the YOLOv5 model prediction workflow, handling argument parsing and requirement checks.\"\"\"\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"tensorboard\", \"thop\"))\n    run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
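  {
    "path": "examples/classify_val_api_sketch.py",
    "content": "# Illustrative sketch (hypothetical path, not part of the upstream YOLOv5 repo).\n# Shows how classify/val.py can be driven from Python instead of the CLI: its run()\n# function accepts the same options as the command line and returns (top1, top5, loss).\n# Assumes it is executed from the YOLOv5 repository root with the dataset and weights\n# below already available; both paths are placeholders.\nfrom classify.val import run\n\ntop1, top5, loss = run(\n    data='../datasets/imagenet',  # dataset dir containing a val/ (or test/) split\n    weights='yolov5s-cls.pt',  # classification checkpoint\n    imgsz=224,  # inference size, matching the tutorial\n    half=True,  # FP16 inference (disabled automatically on CPU backends)\n    verbose=True,  # also print per-class top1/top5, as in the notebook output\n)\nprint(f'top1={top1:.3f} top5={top5:.3f}')\n"
  },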
  {
    "path": "data/Argoverse.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI\n# Example usage: python train.py --data Argoverse.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── Argoverse  ← downloads here (31.3 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/Argoverse # dataset root dir\ntrain: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images\nval: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images\ntest: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview\n\n# Classes\nnames:\n  0: person\n  1: bicycle\n  2: car\n  3: motorcycle\n  4: bus\n  5: truck\n  6: traffic_light\n  7: stop_sign\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  import json\n\n  from tqdm import tqdm\n  from utils.general import download, Path\n\n\n  def argoverse2yolo(set):\n      labels = {}\n      a = json.load(open(set, \"rb\"))\n      for annot in tqdm(a['annotations'], desc=f\"Converting {set} to YOLOv5 format...\"):\n          img_id = annot['image_id']\n          img_name = a['images'][img_id]['name']\n          img_label_name = f'{img_name[:-3]}txt'\n\n          cls = annot['category_id']  # instance class id\n          x_center, y_center, width, height = annot['bbox']\n          x_center = (x_center + width / 2) / 1920.0  # offset and scale\n          y_center = (y_center + height / 2) / 1200.0  # offset and scale\n          width /= 1920.0  # scale\n          height /= 1200.0  # scale\n\n          img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]\n          if not img_dir.exists():\n              img_dir.mkdir(parents=True, exist_ok=True)\n\n          k = str(img_dir / img_label_name)\n          if k not in labels:\n              labels[k] = []\n          labels[k].append(f\"{cls} {x_center} {y_center} {width} {height}\\n\")\n\n      for k in labels:\n          with open(k, \"w\") as f:\n              f.writelines(labels[k])\n\n\n  # Download\n  dir = Path(yaml['path'])  # dataset root dir\n  urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']\n  download(urls, dir=dir, delete=False)\n\n  # Convert\n  annotations_dir = 'Argoverse-HD/annotations/'\n  (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images')  # rename 'tracking' to 'images'\n  for d in \"train.json\", \"val.json\":\n      argoverse2yolo(dir / annotations_dir / d)  # convert VisDrone annotations to YOLO labels\n"
  },
  {
    "path": "data/GlobalWheat2020.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan\n# Example usage: python train.py --data GlobalWheat2020.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── GlobalWheat2020  ← downloads here (7.0 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/GlobalWheat2020 # dataset root dir\ntrain: # train images (relative to 'path') 3422 images\n  - images/arvalis_1\n  - images/arvalis_2\n  - images/arvalis_3\n  - images/ethz_1\n  - images/rres_1\n  - images/inrae_1\n  - images/usask_1\nval: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)\n  - images/ethz_1\ntest: # test images (optional) 1276 images\n  - images/utokyo_1\n  - images/utokyo_2\n  - images/nau_1\n  - images/uq_1\n\n# Classes\nnames:\n  0: wheat_head\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  from utils.general import download, Path\n\n\n  # Download\n  dir = Path(yaml['path'])  # dataset root dir\n  urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',\n          'https://github.com/ultralytics/assets/releases/download/v0.0.0/GlobalWheat2020_labels.zip']\n  download(urls, dir=dir)\n\n  # Make Directories\n  for p in 'annotations', 'images', 'labels':\n      (dir / p).mkdir(parents=True, exist_ok=True)\n\n  # Move\n  for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \\\n           'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':\n      (dir / p).rename(dir / 'images' / p)  # move to /images\n      f = (dir / p).with_suffix('.json')  # json file\n      if f.exists():\n          f.rename((dir / 'annotations' / p).with_suffix('.json'))  # move to /annotations\n"
  },
  {
    "path": "data/ImageNet.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University\n# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels\n# Example usage: python classify/train.py --data imagenet\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet  ← downloads here (144 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/imagenet # dataset root dir\ntrain: train # train images (relative to 'path') 1281167 images\nval: val # val images (relative to 'path') 50000 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: tench\n  1: goldfish\n  2: great white shark\n  3: tiger shark\n  4: hammerhead shark\n  5: electric ray\n  6: stingray\n  7: cock\n  8: hen\n  9: ostrich\n  10: brambling\n  11: goldfinch\n  12: house finch\n  13: junco\n  14: indigo bunting\n  15: American robin\n  16: bulbul\n  17: jay\n  18: magpie\n  19: chickadee\n  20: American dipper\n  21: kite\n  22: bald eagle\n  23: vulture\n  24: great grey owl\n  25: fire salamander\n  26: smooth newt\n  27: newt\n  28: spotted salamander\n  29: axolotl\n  30: American bullfrog\n  31: tree frog\n  32: tailed frog\n  33: loggerhead sea turtle\n  34: leatherback sea turtle\n  35: mud turtle\n  36: terrapin\n  37: box turtle\n  38: banded gecko\n  39: green iguana\n  40: Carolina anole\n  41: desert grassland whiptail lizard\n  42: agama\n  43: frilled-necked lizard\n  44: alligator lizard\n  45: Gila monster\n  46: European green lizard\n  47: chameleon\n  48: Komodo dragon\n  49: Nile crocodile\n  50: American alligator\n  51: triceratops\n  52: worm snake\n  53: ring-necked snake\n  54: eastern hog-nosed snake\n  55: smooth green snake\n  56: kingsnake\n  57: garter snake\n  58: water snake\n  59: vine snake\n  60: night snake\n  61: boa constrictor\n  62: African rock python\n  63: Indian cobra\n  64: green mamba\n  65: sea snake\n  66: Saharan horned viper\n  67: eastern diamondback rattlesnake\n  68: sidewinder\n  69: trilobite\n  70: harvestman\n  71: scorpion\n  72: yellow garden spider\n  73: barn spider\n  74: European garden spider\n  75: southern black widow\n  76: tarantula\n  77: wolf spider\n  78: tick\n  79: centipede\n  80: black grouse\n  81: ptarmigan\n  82: ruffed grouse\n  83: prairie grouse\n  84: peacock\n  85: quail\n  86: partridge\n  87: grey parrot\n  88: macaw\n  89: sulphur-crested cockatoo\n  90: lorikeet\n  91: coucal\n  92: bee eater\n  93: hornbill\n  94: hummingbird\n  95: jacamar\n  96: toucan\n  97: duck\n  98: red-breasted merganser\n  99: goose\n  100: black swan\n  101: tusker\n  102: echidna\n  103: platypus\n  104: wallaby\n  105: koala\n  106: wombat\n  107: jellyfish\n  108: sea anemone\n  109: brain coral\n  110: flatworm\n  111: nematode\n  112: conch\n  113: snail\n  114: slug\n  115: sea slug\n  116: chiton\n  117: chambered nautilus\n  118: Dungeness crab\n  119: rock crab\n  120: fiddler crab\n  121: red king crab\n  122: American lobster\n  123: spiny lobster\n  124: crayfish\n  125: hermit crab\n  126: isopod\n  127: white stork\n  128: black stork\n  129: spoonbill\n  130: flamingo\n  131: little blue heron\n  132: great egret\n  133: bittern\n  134: crane (bird)\n  135: limpkin\n  136: common gallinule\n  137: American coot\n  138: bustard\n  139: ruddy turnstone\n  140: dunlin\n  141: common redshank\n  142: dowitcher\n  143: oystercatcher\n  144: 
pelican\n  145: king penguin\n  146: albatross\n  147: grey whale\n  148: killer whale\n  149: dugong\n  150: sea lion\n  151: Chihuahua\n  152: Japanese Chin\n  153: Maltese\n  154: Pekingese\n  155: Shih Tzu\n  156: King Charles Spaniel\n  157: Papillon\n  158: toy terrier\n  159: Rhodesian Ridgeback\n  160: Afghan Hound\n  161: Basset Hound\n  162: Beagle\n  163: Bloodhound\n  164: Bluetick Coonhound\n  165: Black and Tan Coonhound\n  166: Treeing Walker Coonhound\n  167: English foxhound\n  168: Redbone Coonhound\n  169: borzoi\n  170: Irish Wolfhound\n  171: Italian Greyhound\n  172: Whippet\n  173: Ibizan Hound\n  174: Norwegian Elkhound\n  175: Otterhound\n  176: Saluki\n  177: Scottish Deerhound\n  178: Weimaraner\n  179: Staffordshire Bull Terrier\n  180: American Staffordshire Terrier\n  181: Bedlington Terrier\n  182: Border Terrier\n  183: Kerry Blue Terrier\n  184: Irish Terrier\n  185: Norfolk Terrier\n  186: Norwich Terrier\n  187: Yorkshire Terrier\n  188: Wire Fox Terrier\n  189: Lakeland Terrier\n  190: Sealyham Terrier\n  191: Airedale Terrier\n  192: Cairn Terrier\n  193: Australian Terrier\n  194: Dandie Dinmont Terrier\n  195: Boston Terrier\n  196: Miniature Schnauzer\n  197: Giant Schnauzer\n  198: Standard Schnauzer\n  199: Scottish Terrier\n  200: Tibetan Terrier\n  201: Australian Silky Terrier\n  202: Soft-coated Wheaten Terrier\n  203: West Highland White Terrier\n  204: Lhasa Apso\n  205: Flat-Coated Retriever\n  206: Curly-coated Retriever\n  207: Golden Retriever\n  208: Labrador Retriever\n  209: Chesapeake Bay Retriever\n  210: German Shorthaired Pointer\n  211: Vizsla\n  212: English Setter\n  213: Irish Setter\n  214: Gordon Setter\n  215: Brittany\n  216: Clumber Spaniel\n  217: English Springer Spaniel\n  218: Welsh Springer Spaniel\n  219: Cocker Spaniels\n  220: Sussex Spaniel\n  221: Irish Water Spaniel\n  222: Kuvasz\n  223: Schipperke\n  224: Groenendael\n  225: Malinois\n  226: Briard\n  227: Australian Kelpie\n  228: Komondor\n  229: Old English Sheepdog\n  230: Shetland Sheepdog\n  231: collie\n  232: Border Collie\n  233: Bouvier des Flandres\n  234: Rottweiler\n  235: German Shepherd Dog\n  236: Dobermann\n  237: Miniature Pinscher\n  238: Greater Swiss Mountain Dog\n  239: Bernese Mountain Dog\n  240: Appenzeller Sennenhund\n  241: Entlebucher Sennenhund\n  242: Boxer\n  243: Bullmastiff\n  244: Tibetan Mastiff\n  245: French Bulldog\n  246: Great Dane\n  247: St. 
Bernard\n  248: husky\n  249: Alaskan Malamute\n  250: Siberian Husky\n  251: Dalmatian\n  252: Affenpinscher\n  253: Basenji\n  254: pug\n  255: Leonberger\n  256: Newfoundland\n  257: Pyrenean Mountain Dog\n  258: Samoyed\n  259: Pomeranian\n  260: Chow Chow\n  261: Keeshond\n  262: Griffon Bruxellois\n  263: Pembroke Welsh Corgi\n  264: Cardigan Welsh Corgi\n  265: Toy Poodle\n  266: Miniature Poodle\n  267: Standard Poodle\n  268: Mexican hairless dog\n  269: grey wolf\n  270: Alaskan tundra wolf\n  271: red wolf\n  272: coyote\n  273: dingo\n  274: dhole\n  275: African wild dog\n  276: hyena\n  277: red fox\n  278: kit fox\n  279: Arctic fox\n  280: grey fox\n  281: tabby cat\n  282: tiger cat\n  283: Persian cat\n  284: Siamese cat\n  285: Egyptian Mau\n  286: cougar\n  287: lynx\n  288: leopard\n  289: snow leopard\n  290: jaguar\n  291: lion\n  292: tiger\n  293: cheetah\n  294: brown bear\n  295: American black bear\n  296: polar bear\n  297: sloth bear\n  298: mongoose\n  299: meerkat\n  300: tiger beetle\n  301: ladybug\n  302: ground beetle\n  303: longhorn beetle\n  304: leaf beetle\n  305: dung beetle\n  306: rhinoceros beetle\n  307: weevil\n  308: fly\n  309: bee\n  310: ant\n  311: grasshopper\n  312: cricket\n  313: stick insect\n  314: cockroach\n  315: mantis\n  316: cicada\n  317: leafhopper\n  318: lacewing\n  319: dragonfly\n  320: damselfly\n  321: red admiral\n  322: ringlet\n  323: monarch butterfly\n  324: small white\n  325: sulfur butterfly\n  326: gossamer-winged butterfly\n  327: starfish\n  328: sea urchin\n  329: sea cucumber\n  330: cottontail rabbit\n  331: hare\n  332: Angora rabbit\n  333: hamster\n  334: porcupine\n  335: fox squirrel\n  336: marmot\n  337: beaver\n  338: guinea pig\n  339: common sorrel\n  340: zebra\n  341: pig\n  342: wild boar\n  343: warthog\n  344: hippopotamus\n  345: ox\n  346: water buffalo\n  347: bison\n  348: ram\n  349: bighorn sheep\n  350: Alpine ibex\n  351: hartebeest\n  352: impala\n  353: gazelle\n  354: dromedary\n  355: llama\n  356: weasel\n  357: mink\n  358: European polecat\n  359: black-footed ferret\n  360: otter\n  361: skunk\n  362: badger\n  363: armadillo\n  364: three-toed sloth\n  365: orangutan\n  366: gorilla\n  367: chimpanzee\n  368: gibbon\n  369: siamang\n  370: guenon\n  371: patas monkey\n  372: baboon\n  373: macaque\n  374: langur\n  375: black-and-white colobus\n  376: proboscis monkey\n  377: marmoset\n  378: white-headed capuchin\n  379: howler monkey\n  380: titi\n  381: Geoffroy's spider monkey\n  382: common squirrel monkey\n  383: ring-tailed lemur\n  384: indri\n  385: Asian elephant\n  386: African bush elephant\n  387: red panda\n  388: giant panda\n  389: snoek\n  390: eel\n  391: coho salmon\n  392: rock beauty\n  393: clownfish\n  394: sturgeon\n  395: garfish\n  396: lionfish\n  397: pufferfish\n  398: abacus\n  399: abaya\n  400: academic gown\n  401: accordion\n  402: acoustic guitar\n  403: aircraft carrier\n  404: airliner\n  405: airship\n  406: altar\n  407: ambulance\n  408: amphibious vehicle\n  409: analog clock\n  410: apiary\n  411: apron\n  412: waste container\n  413: assault rifle\n  414: backpack\n  415: bakery\n  416: balance beam\n  417: balloon\n  418: ballpoint pen\n  419: Band-Aid\n  420: banjo\n  421: baluster\n  422: barbell\n  423: barber chair\n  424: barbershop\n  425: barn\n  426: barometer\n  427: barrel\n  428: wheelbarrow\n  429: baseball\n  430: basketball\n  431: bassinet\n  432: bassoon\n  433: swimming cap\n  434: bath towel\n  435: bathtub\n  
436: station wagon\n  437: lighthouse\n  438: beaker\n  439: military cap\n  440: beer bottle\n  441: beer glass\n  442: bell-cot\n  443: bib\n  444: tandem bicycle\n  445: bikini\n  446: ring binder\n  447: binoculars\n  448: birdhouse\n  449: boathouse\n  450: bobsleigh\n  451: bolo tie\n  452: poke bonnet\n  453: bookcase\n  454: bookstore\n  455: bottle cap\n  456: bow\n  457: bow tie\n  458: brass\n  459: bra\n  460: breakwater\n  461: breastplate\n  462: broom\n  463: bucket\n  464: buckle\n  465: bulletproof vest\n  466: high-speed train\n  467: butcher shop\n  468: taxicab\n  469: cauldron\n  470: candle\n  471: cannon\n  472: canoe\n  473: can opener\n  474: cardigan\n  475: car mirror\n  476: carousel\n  477: tool kit\n  478: carton\n  479: car wheel\n  480: automated teller machine\n  481: cassette\n  482: cassette player\n  483: castle\n  484: catamaran\n  485: CD player\n  486: cello\n  487: mobile phone\n  488: chain\n  489: chain-link fence\n  490: chain mail\n  491: chainsaw\n  492: chest\n  493: chiffonier\n  494: chime\n  495: china cabinet\n  496: Christmas stocking\n  497: church\n  498: movie theater\n  499: cleaver\n  500: cliff dwelling\n  501: cloak\n  502: clogs\n  503: cocktail shaker\n  504: coffee mug\n  505: coffeemaker\n  506: coil\n  507: combination lock\n  508: computer keyboard\n  509: confectionery store\n  510: container ship\n  511: convertible\n  512: corkscrew\n  513: cornet\n  514: cowboy boot\n  515: cowboy hat\n  516: cradle\n  517: crane (machine)\n  518: crash helmet\n  519: crate\n  520: infant bed\n  521: Crock Pot\n  522: croquet ball\n  523: crutch\n  524: cuirass\n  525: dam\n  526: desk\n  527: desktop computer\n  528: rotary dial telephone\n  529: diaper\n  530: digital clock\n  531: digital watch\n  532: dining table\n  533: dishcloth\n  534: dishwasher\n  535: disc brake\n  536: dock\n  537: dog sled\n  538: dome\n  539: doormat\n  540: drilling rig\n  541: drum\n  542: drumstick\n  543: dumbbell\n  544: Dutch oven\n  545: electric fan\n  546: electric guitar\n  547: electric locomotive\n  548: entertainment center\n  549: envelope\n  550: espresso machine\n  551: face powder\n  552: feather boa\n  553: filing cabinet\n  554: fireboat\n  555: fire engine\n  556: fire screen sheet\n  557: flagpole\n  558: flute\n  559: folding chair\n  560: football helmet\n  561: forklift\n  562: fountain\n  563: fountain pen\n  564: four-poster bed\n  565: freight car\n  566: French horn\n  567: frying pan\n  568: fur coat\n  569: garbage truck\n  570: gas mask\n  571: gas pump\n  572: goblet\n  573: go-kart\n  574: golf ball\n  575: golf cart\n  576: gondola\n  577: gong\n  578: gown\n  579: grand piano\n  580: greenhouse\n  581: grille\n  582: grocery store\n  583: guillotine\n  584: barrette\n  585: hair spray\n  586: half-track\n  587: hammer\n  588: hamper\n  589: hair dryer\n  590: hand-held computer\n  591: handkerchief\n  592: hard disk drive\n  593: harmonica\n  594: harp\n  595: harvester\n  596: hatchet\n  597: holster\n  598: home theater\n  599: honeycomb\n  600: hook\n  601: hoop skirt\n  602: horizontal bar\n  603: horse-drawn vehicle\n  604: hourglass\n  605: iPod\n  606: clothes iron\n  607: jack-o'-lantern\n  608: jeans\n  609: jeep\n  610: T-shirt\n  611: jigsaw puzzle\n  612: pulled rickshaw\n  613: joystick\n  614: kimono\n  615: knee pad\n  616: knot\n  617: lab coat\n  618: ladle\n  619: lampshade\n  620: laptop computer\n  621: lawn mower\n  622: lens cap\n  623: paper knife\n  624: library\n  625: lifeboat\n  626: lighter\n  
627: limousine\n  628: ocean liner\n  629: lipstick\n  630: slip-on shoe\n  631: lotion\n  632: speaker\n  633: loupe\n  634: sawmill\n  635: magnetic compass\n  636: mail bag\n  637: mailbox\n  638: tights\n  639: tank suit\n  640: manhole cover\n  641: maraca\n  642: marimba\n  643: mask\n  644: match\n  645: maypole\n  646: maze\n  647: measuring cup\n  648: medicine chest\n  649: megalith\n  650: microphone\n  651: microwave oven\n  652: military uniform\n  653: milk can\n  654: minibus\n  655: miniskirt\n  656: minivan\n  657: missile\n  658: mitten\n  659: mixing bowl\n  660: mobile home\n  661: Model T\n  662: modem\n  663: monastery\n  664: monitor\n  665: moped\n  666: mortar\n  667: square academic cap\n  668: mosque\n  669: mosquito net\n  670: scooter\n  671: mountain bike\n  672: tent\n  673: computer mouse\n  674: mousetrap\n  675: moving van\n  676: muzzle\n  677: nail\n  678: neck brace\n  679: necklace\n  680: nipple\n  681: notebook computer\n  682: obelisk\n  683: oboe\n  684: ocarina\n  685: odometer\n  686: oil filter\n  687: organ\n  688: oscilloscope\n  689: overskirt\n  690: bullock cart\n  691: oxygen mask\n  692: packet\n  693: paddle\n  694: paddle wheel\n  695: padlock\n  696: paintbrush\n  697: pajamas\n  698: palace\n  699: pan flute\n  700: paper towel\n  701: parachute\n  702: parallel bars\n  703: park bench\n  704: parking meter\n  705: passenger car\n  706: patio\n  707: payphone\n  708: pedestal\n  709: pencil case\n  710: pencil sharpener\n  711: perfume\n  712: Petri dish\n  713: photocopier\n  714: plectrum\n  715: Pickelhaube\n  716: picket fence\n  717: pickup truck\n  718: pier\n  719: piggy bank\n  720: pill bottle\n  721: pillow\n  722: ping-pong ball\n  723: pinwheel\n  724: pirate ship\n  725: pitcher\n  726: hand plane\n  727: planetarium\n  728: plastic bag\n  729: plate rack\n  730: plow\n  731: plunger\n  732: Polaroid camera\n  733: pole\n  734: police van\n  735: poncho\n  736: billiard table\n  737: soda bottle\n  738: pot\n  739: potter's wheel\n  740: power drill\n  741: prayer rug\n  742: printer\n  743: prison\n  744: projectile\n  745: projector\n  746: hockey puck\n  747: punching bag\n  748: purse\n  749: quill\n  750: quilt\n  751: race car\n  752: racket\n  753: radiator\n  754: radio\n  755: radio telescope\n  756: rain barrel\n  757: recreational vehicle\n  758: reel\n  759: reflex camera\n  760: refrigerator\n  761: remote control\n  762: restaurant\n  763: revolver\n  764: rifle\n  765: rocking chair\n  766: rotisserie\n  767: eraser\n  768: rugby ball\n  769: ruler\n  770: running shoe\n  771: safe\n  772: safety pin\n  773: salt shaker\n  774: sandal\n  775: sarong\n  776: saxophone\n  777: scabbard\n  778: weighing scale\n  779: school bus\n  780: schooner\n  781: scoreboard\n  782: CRT screen\n  783: screw\n  784: screwdriver\n  785: seat belt\n  786: sewing machine\n  787: shield\n  788: shoe store\n  789: shoji\n  790: shopping basket\n  791: shopping cart\n  792: shovel\n  793: shower cap\n  794: shower curtain\n  795: ski\n  796: ski mask\n  797: sleeping bag\n  798: slide rule\n  799: sliding door\n  800: slot machine\n  801: snorkel\n  802: snowmobile\n  803: snowplow\n  804: soap dispenser\n  805: soccer ball\n  806: sock\n  807: solar thermal collector\n  808: sombrero\n  809: soup bowl\n  810: space bar\n  811: space heater\n  812: space shuttle\n  813: spatula\n  814: motorboat\n  815: spider web\n  816: spindle\n  817: sports car\n  818: spotlight\n  819: stage\n  820: steam locomotive\n  821: through arch 
bridge\n  822: steel drum\n  823: stethoscope\n  824: scarf\n  825: stone wall\n  826: stopwatch\n  827: stove\n  828: strainer\n  829: tram\n  830: stretcher\n  831: couch\n  832: stupa\n  833: submarine\n  834: suit\n  835: sundial\n  836: sunglass\n  837: sunglasses\n  838: sunscreen\n  839: suspension bridge\n  840: mop\n  841: sweatshirt\n  842: swimsuit\n  843: swing\n  844: switch\n  845: syringe\n  846: table lamp\n  847: tank\n  848: tape player\n  849: teapot\n  850: teddy bear\n  851: television\n  852: tennis ball\n  853: thatched roof\n  854: front curtain\n  855: thimble\n  856: threshing machine\n  857: throne\n  858: tile roof\n  859: toaster\n  860: tobacco shop\n  861: toilet seat\n  862: torch\n  863: totem pole\n  864: tow truck\n  865: toy store\n  866: tractor\n  867: semi-trailer truck\n  868: tray\n  869: trench coat\n  870: tricycle\n  871: trimaran\n  872: tripod\n  873: triumphal arch\n  874: trolleybus\n  875: trombone\n  876: tub\n  877: turnstile\n  878: typewriter keyboard\n  879: umbrella\n  880: unicycle\n  881: upright piano\n  882: vacuum cleaner\n  883: vase\n  884: vault\n  885: velvet\n  886: vending machine\n  887: vestment\n  888: viaduct\n  889: violin\n  890: volleyball\n  891: waffle iron\n  892: wall clock\n  893: wallet\n  894: wardrobe\n  895: military aircraft\n  896: sink\n  897: washing machine\n  898: water bottle\n  899: water jug\n  900: water tower\n  901: whiskey jug\n  902: whistle\n  903: wig\n  904: window screen\n  905: window shade\n  906: Windsor tie\n  907: wine bottle\n  908: wing\n  909: wok\n  910: wooden spoon\n  911: wool\n  912: split-rail fence\n  913: shipwreck\n  914: yawl\n  915: yurt\n  916: website\n  917: comic book\n  918: crossword\n  919: traffic sign\n  920: traffic light\n  921: dust jacket\n  922: menu\n  923: plate\n  924: guacamole\n  925: consomme\n  926: hot pot\n  927: trifle\n  928: ice cream\n  929: ice pop\n  930: baguette\n  931: bagel\n  932: pretzel\n  933: cheeseburger\n  934: hot dog\n  935: mashed potato\n  936: cabbage\n  937: broccoli\n  938: cauliflower\n  939: zucchini\n  940: spaghetti squash\n  941: acorn squash\n  942: butternut squash\n  943: cucumber\n  944: artichoke\n  945: bell pepper\n  946: cardoon\n  947: mushroom\n  948: Granny Smith\n  949: strawberry\n  950: orange\n  951: lemon\n  952: fig\n  953: pineapple\n  954: banana\n  955: jackfruit\n  956: custard apple\n  957: pomegranate\n  958: hay\n  959: carbonara\n  960: chocolate syrup\n  961: dough\n  962: meatloaf\n  963: pizza\n  964: pot pie\n  965: burrito\n  966: red wine\n  967: espresso\n  968: cup\n  969: eggnog\n  970: alp\n  971: bubble\n  972: cliff\n  973: coral reef\n  974: geyser\n  975: lakeshore\n  976: promontory\n  977: shoal\n  978: seashore\n  979: valley\n  980: volcano\n  981: baseball player\n  982: bridegroom\n  983: scuba diver\n  984: rapeseed\n  985: daisy\n  986: yellow lady's slipper\n  987: corn\n  988: acorn\n  989: rose hip\n  990: horse chestnut seed\n  991: coral fungus\n  992: agaric\n  993: gyromitra\n  994: stinkhorn mushroom\n  995: earth star\n  996: hen-of-the-woods\n  997: bolete\n  998: ear\n  999: toilet paper\n\n# Download script/URL (optional)\ndownload: data/scripts/get_imagenet.sh\n"
  },
  {
    "path": "data/ImageNet10.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University\n# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels\n# Example usage: python classify/train.py --data imagenet\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet10  ← downloads here\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/imagenet10 # dataset root dir\ntrain: train # train images (relative to 'path') 1281167 images\nval: val # val images (relative to 'path') 50000 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: tench\n  1: goldfish\n  2: great white shark\n  3: tiger shark\n  4: hammerhead shark\n  5: electric ray\n  6: stingray\n  7: cock\n  8: hen\n  9: ostrich\n\n# Download script/URL (optional)\ndownload: data/scripts/get_imagenet10.sh\n"
  },
  {
    "path": "data/ImageNet100.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University\n# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels\n# Example usage: python classify/train.py --data imagenet\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet100  ← downloads here\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/imagenet100 # dataset root dir\ntrain: train # train images (relative to 'path') 1281167 images\nval: val # val images (relative to 'path') 50000 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: tench\n  1: goldfish\n  2: great white shark\n  3: tiger shark\n  4: hammerhead shark\n  5: electric ray\n  6: stingray\n  7: cock\n  8: hen\n  9: ostrich\n  10: brambling\n  11: goldfinch\n  12: house finch\n  13: junco\n  14: indigo bunting\n  15: American robin\n  16: bulbul\n  17: jay\n  18: magpie\n  19: chickadee\n  20: American dipper\n  21: kite\n  22: bald eagle\n  23: vulture\n  24: great grey owl\n  25: fire salamander\n  26: smooth newt\n  27: newt\n  28: spotted salamander\n  29: axolotl\n  30: American bullfrog\n  31: tree frog\n  32: tailed frog\n  33: loggerhead sea turtle\n  34: leatherback sea turtle\n  35: mud turtle\n  36: terrapin\n  37: box turtle\n  38: banded gecko\n  39: green iguana\n  40: Carolina anole\n  41: desert grassland whiptail lizard\n  42: agama\n  43: frilled-necked lizard\n  44: alligator lizard\n  45: Gila monster\n  46: European green lizard\n  47: chameleon\n  48: Komodo dragon\n  49: Nile crocodile\n  50: American alligator\n  51: triceratops\n  52: worm snake\n  53: ring-necked snake\n  54: eastern hog-nosed snake\n  55: smooth green snake\n  56: kingsnake\n  57: garter snake\n  58: water snake\n  59: vine snake\n  60: night snake\n  61: boa constrictor\n  62: African rock python\n  63: Indian cobra\n  64: green mamba\n  65: sea snake\n  66: Saharan horned viper\n  67: eastern diamondback rattlesnake\n  68: sidewinder\n  69: trilobite\n  70: harvestman\n  71: scorpion\n  72: yellow garden spider\n  73: barn spider\n  74: European garden spider\n  75: southern black widow\n  76: tarantula\n  77: wolf spider\n  78: tick\n  79: centipede\n  80: black grouse\n  81: ptarmigan\n  82: ruffed grouse\n  83: prairie grouse\n  84: peacock\n  85: quail\n  86: partridge\n  87: grey parrot\n  88: macaw\n  89: sulphur-crested cockatoo\n  90: lorikeet\n  91: coucal\n  92: bee eater\n  93: hornbill\n  94: hummingbird\n  95: jacamar\n  96: toucan\n  97: duck\n  98: red-breasted merganser\n  99: goose\n# Download script/URL (optional)\ndownload: data/scripts/get_imagenet100.sh\n"
  },
  {
    "path": "data/ImageNet1000.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University\n# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels\n# Example usage: python classify/train.py --data imagenet\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet100  ← downloads here\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/imagenet1000 # dataset root dir\ntrain: train # train images (relative to 'path') 1281167 images\nval: val # val images (relative to 'path') 50000 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: tench\n  1: goldfish\n  2: great white shark\n  3: tiger shark\n  4: hammerhead shark\n  5: electric ray\n  6: stingray\n  7: cock\n  8: hen\n  9: ostrich\n  10: brambling\n  11: goldfinch\n  12: house finch\n  13: junco\n  14: indigo bunting\n  15: American robin\n  16: bulbul\n  17: jay\n  18: magpie\n  19: chickadee\n  20: American dipper\n  21: kite\n  22: bald eagle\n  23: vulture\n  24: great grey owl\n  25: fire salamander\n  26: smooth newt\n  27: newt\n  28: spotted salamander\n  29: axolotl\n  30: American bullfrog\n  31: tree frog\n  32: tailed frog\n  33: loggerhead sea turtle\n  34: leatherback sea turtle\n  35: mud turtle\n  36: terrapin\n  37: box turtle\n  38: banded gecko\n  39: green iguana\n  40: Carolina anole\n  41: desert grassland whiptail lizard\n  42: agama\n  43: frilled-necked lizard\n  44: alligator lizard\n  45: Gila monster\n  46: European green lizard\n  47: chameleon\n  48: Komodo dragon\n  49: Nile crocodile\n  50: American alligator\n  51: triceratops\n  52: worm snake\n  53: ring-necked snake\n  54: eastern hog-nosed snake\n  55: smooth green snake\n  56: kingsnake\n  57: garter snake\n  58: water snake\n  59: vine snake\n  60: night snake\n  61: boa constrictor\n  62: African rock python\n  63: Indian cobra\n  64: green mamba\n  65: sea snake\n  66: Saharan horned viper\n  67: eastern diamondback rattlesnake\n  68: sidewinder\n  69: trilobite\n  70: harvestman\n  71: scorpion\n  72: yellow garden spider\n  73: barn spider\n  74: European garden spider\n  75: southern black widow\n  76: tarantula\n  77: wolf spider\n  78: tick\n  79: centipede\n  80: black grouse\n  81: ptarmigan\n  82: ruffed grouse\n  83: prairie grouse\n  84: peacock\n  85: quail\n  86: partridge\n  87: grey parrot\n  88: macaw\n  89: sulphur-crested cockatoo\n  90: lorikeet\n  91: coucal\n  92: bee eater\n  93: hornbill\n  94: hummingbird\n  95: jacamar\n  96: toucan\n  97: duck\n  98: red-breasted merganser\n  99: goose\n  100: black swan\n  101: tusker\n  102: echidna\n  103: platypus\n  104: wallaby\n  105: koala\n  106: wombat\n  107: jellyfish\n  108: sea anemone\n  109: brain coral\n  110: flatworm\n  111: nematode\n  112: conch\n  113: snail\n  114: slug\n  115: sea slug\n  116: chiton\n  117: chambered nautilus\n  118: Dungeness crab\n  119: rock crab\n  120: fiddler crab\n  121: red king crab\n  122: American lobster\n  123: spiny lobster\n  124: crayfish\n  125: hermit crab\n  126: isopod\n  127: white stork\n  128: black stork\n  129: spoonbill\n  130: flamingo\n  131: little blue heron\n  132: great egret\n  133: bittern\n  134: crane (bird)\n  135: limpkin\n  136: common gallinule\n  137: American coot\n  138: bustard\n  139: ruddy turnstone\n  140: dunlin\n  141: common redshank\n  142: dowitcher\n  143: oystercatcher\n  144: pelican\n  
145: king penguin\n  146: albatross\n  147: grey whale\n  148: killer whale\n  149: dugong\n  150: sea lion\n  151: Chihuahua\n  152: Japanese Chin\n  153: Maltese\n  154: Pekingese\n  155: Shih Tzu\n  156: King Charles Spaniel\n  157: Papillon\n  158: toy terrier\n  159: Rhodesian Ridgeback\n  160: Afghan Hound\n  161: Basset Hound\n  162: Beagle\n  163: Bloodhound\n  164: Bluetick Coonhound\n  165: Black and Tan Coonhound\n  166: Treeing Walker Coonhound\n  167: English foxhound\n  168: Redbone Coonhound\n  169: borzoi\n  170: Irish Wolfhound\n  171: Italian Greyhound\n  172: Whippet\n  173: Ibizan Hound\n  174: Norwegian Elkhound\n  175: Otterhound\n  176: Saluki\n  177: Scottish Deerhound\n  178: Weimaraner\n  179: Staffordshire Bull Terrier\n  180: American Staffordshire Terrier\n  181: Bedlington Terrier\n  182: Border Terrier\n  183: Kerry Blue Terrier\n  184: Irish Terrier\n  185: Norfolk Terrier\n  186: Norwich Terrier\n  187: Yorkshire Terrier\n  188: Wire Fox Terrier\n  189: Lakeland Terrier\n  190: Sealyham Terrier\n  191: Airedale Terrier\n  192: Cairn Terrier\n  193: Australian Terrier\n  194: Dandie Dinmont Terrier\n  195: Boston Terrier\n  196: Miniature Schnauzer\n  197: Giant Schnauzer\n  198: Standard Schnauzer\n  199: Scottish Terrier\n  200: Tibetan Terrier\n  201: Australian Silky Terrier\n  202: Soft-coated Wheaten Terrier\n  203: West Highland White Terrier\n  204: Lhasa Apso\n  205: Flat-Coated Retriever\n  206: Curly-coated Retriever\n  207: Golden Retriever\n  208: Labrador Retriever\n  209: Chesapeake Bay Retriever\n  210: German Shorthaired Pointer\n  211: Vizsla\n  212: English Setter\n  213: Irish Setter\n  214: Gordon Setter\n  215: Brittany\n  216: Clumber Spaniel\n  217: English Springer Spaniel\n  218: Welsh Springer Spaniel\n  219: Cocker Spaniels\n  220: Sussex Spaniel\n  221: Irish Water Spaniel\n  222: Kuvasz\n  223: Schipperke\n  224: Groenendael\n  225: Malinois\n  226: Briard\n  227: Australian Kelpie\n  228: Komondor\n  229: Old English Sheepdog\n  230: Shetland Sheepdog\n  231: collie\n  232: Border Collie\n  233: Bouvier des Flandres\n  234: Rottweiler\n  235: German Shepherd Dog\n  236: Dobermann\n  237: Miniature Pinscher\n  238: Greater Swiss Mountain Dog\n  239: Bernese Mountain Dog\n  240: Appenzeller Sennenhund\n  241: Entlebucher Sennenhund\n  242: Boxer\n  243: Bullmastiff\n  244: Tibetan Mastiff\n  245: French Bulldog\n  246: Great Dane\n  247: St. 
Bernard\n  248: husky\n  249: Alaskan Malamute\n  250: Siberian Husky\n  251: Dalmatian\n  252: Affenpinscher\n  253: Basenji\n  254: pug\n  255: Leonberger\n  256: Newfoundland\n  257: Pyrenean Mountain Dog\n  258: Samoyed\n  259: Pomeranian\n  260: Chow Chow\n  261: Keeshond\n  262: Griffon Bruxellois\n  263: Pembroke Welsh Corgi\n  264: Cardigan Welsh Corgi\n  265: Toy Poodle\n  266: Miniature Poodle\n  267: Standard Poodle\n  268: Mexican hairless dog\n  269: grey wolf\n  270: Alaskan tundra wolf\n  271: red wolf\n  272: coyote\n  273: dingo\n  274: dhole\n  275: African wild dog\n  276: hyena\n  277: red fox\n  278: kit fox\n  279: Arctic fox\n  280: grey fox\n  281: tabby cat\n  282: tiger cat\n  283: Persian cat\n  284: Siamese cat\n  285: Egyptian Mau\n  286: cougar\n  287: lynx\n  288: leopard\n  289: snow leopard\n  290: jaguar\n  291: lion\n  292: tiger\n  293: cheetah\n  294: brown bear\n  295: American black bear\n  296: polar bear\n  297: sloth bear\n  298: mongoose\n  299: meerkat\n  300: tiger beetle\n  301: ladybug\n  302: ground beetle\n  303: longhorn beetle\n  304: leaf beetle\n  305: dung beetle\n  306: rhinoceros beetle\n  307: weevil\n  308: fly\n  309: bee\n  310: ant\n  311: grasshopper\n  312: cricket\n  313: stick insect\n  314: cockroach\n  315: mantis\n  316: cicada\n  317: leafhopper\n  318: lacewing\n  319: dragonfly\n  320: damselfly\n  321: red admiral\n  322: ringlet\n  323: monarch butterfly\n  324: small white\n  325: sulfur butterfly\n  326: gossamer-winged butterfly\n  327: starfish\n  328: sea urchin\n  329: sea cucumber\n  330: cottontail rabbit\n  331: hare\n  332: Angora rabbit\n  333: hamster\n  334: porcupine\n  335: fox squirrel\n  336: marmot\n  337: beaver\n  338: guinea pig\n  339: common sorrel\n  340: zebra\n  341: pig\n  342: wild boar\n  343: warthog\n  344: hippopotamus\n  345: ox\n  346: water buffalo\n  347: bison\n  348: ram\n  349: bighorn sheep\n  350: Alpine ibex\n  351: hartebeest\n  352: impala\n  353: gazelle\n  354: dromedary\n  355: llama\n  356: weasel\n  357: mink\n  358: European polecat\n  359: black-footed ferret\n  360: otter\n  361: skunk\n  362: badger\n  363: armadillo\n  364: three-toed sloth\n  365: orangutan\n  366: gorilla\n  367: chimpanzee\n  368: gibbon\n  369: siamang\n  370: guenon\n  371: patas monkey\n  372: baboon\n  373: macaque\n  374: langur\n  375: black-and-white colobus\n  376: proboscis monkey\n  377: marmoset\n  378: white-headed capuchin\n  379: howler monkey\n  380: titi\n  381: Geoffroy's spider monkey\n  382: common squirrel monkey\n  383: ring-tailed lemur\n  384: indri\n  385: Asian elephant\n  386: African bush elephant\n  387: red panda\n  388: giant panda\n  389: snoek\n  390: eel\n  391: coho salmon\n  392: rock beauty\n  393: clownfish\n  394: sturgeon\n  395: garfish\n  396: lionfish\n  397: pufferfish\n  398: abacus\n  399: abaya\n  400: academic gown\n  401: accordion\n  402: acoustic guitar\n  403: aircraft carrier\n  404: airliner\n  405: airship\n  406: altar\n  407: ambulance\n  408: amphibious vehicle\n  409: analog clock\n  410: apiary\n  411: apron\n  412: waste container\n  413: assault rifle\n  414: backpack\n  415: bakery\n  416: balance beam\n  417: balloon\n  418: ballpoint pen\n  419: Band-Aid\n  420: banjo\n  421: baluster\n  422: barbell\n  423: barber chair\n  424: barbershop\n  425: barn\n  426: barometer\n  427: barrel\n  428: wheelbarrow\n  429: baseball\n  430: basketball\n  431: bassinet\n  432: bassoon\n  433: swimming cap\n  434: bath towel\n  435: bathtub\n  
436: station wagon\n  437: lighthouse\n  438: beaker\n  439: military cap\n  440: beer bottle\n  441: beer glass\n  442: bell-cot\n  443: bib\n  444: tandem bicycle\n  445: bikini\n  446: ring binder\n  447: binoculars\n  448: birdhouse\n  449: boathouse\n  450: bobsleigh\n  451: bolo tie\n  452: poke bonnet\n  453: bookcase\n  454: bookstore\n  455: bottle cap\n  456: bow\n  457: bow tie\n  458: brass\n  459: bra\n  460: breakwater\n  461: breastplate\n  462: broom\n  463: bucket\n  464: buckle\n  465: bulletproof vest\n  466: high-speed train\n  467: butcher shop\n  468: taxicab\n  469: cauldron\n  470: candle\n  471: cannon\n  472: canoe\n  473: can opener\n  474: cardigan\n  475: car mirror\n  476: carousel\n  477: tool kit\n  478: carton\n  479: car wheel\n  480: automated teller machine\n  481: cassette\n  482: cassette player\n  483: castle\n  484: catamaran\n  485: CD player\n  486: cello\n  487: mobile phone\n  488: chain\n  489: chain-link fence\n  490: chain mail\n  491: chainsaw\n  492: chest\n  493: chiffonier\n  494: chime\n  495: china cabinet\n  496: Christmas stocking\n  497: church\n  498: movie theater\n  499: cleaver\n  500: cliff dwelling\n  501: cloak\n  502: clogs\n  503: cocktail shaker\n  504: coffee mug\n  505: coffeemaker\n  506: coil\n  507: combination lock\n  508: computer keyboard\n  509: confectionery store\n  510: container ship\n  511: convertible\n  512: corkscrew\n  513: cornet\n  514: cowboy boot\n  515: cowboy hat\n  516: cradle\n  517: crane (machine)\n  518: crash helmet\n  519: crate\n  520: infant bed\n  521: Crock Pot\n  522: croquet ball\n  523: crutch\n  524: cuirass\n  525: dam\n  526: desk\n  527: desktop computer\n  528: rotary dial telephone\n  529: diaper\n  530: digital clock\n  531: digital watch\n  532: dining table\n  533: dishcloth\n  534: dishwasher\n  535: disc brake\n  536: dock\n  537: dog sled\n  538: dome\n  539: doormat\n  540: drilling rig\n  541: drum\n  542: drumstick\n  543: dumbbell\n  544: Dutch oven\n  545: electric fan\n  546: electric guitar\n  547: electric locomotive\n  548: entertainment center\n  549: envelope\n  550: espresso machine\n  551: face powder\n  552: feather boa\n  553: filing cabinet\n  554: fireboat\n  555: fire engine\n  556: fire screen sheet\n  557: flagpole\n  558: flute\n  559: folding chair\n  560: football helmet\n  561: forklift\n  562: fountain\n  563: fountain pen\n  564: four-poster bed\n  565: freight car\n  566: French horn\n  567: frying pan\n  568: fur coat\n  569: garbage truck\n  570: gas mask\n  571: gas pump\n  572: goblet\n  573: go-kart\n  574: golf ball\n  575: golf cart\n  576: gondola\n  577: gong\n  578: gown\n  579: grand piano\n  580: greenhouse\n  581: grille\n  582: grocery store\n  583: guillotine\n  584: barrette\n  585: hair spray\n  586: half-track\n  587: hammer\n  588: hamper\n  589: hair dryer\n  590: hand-held computer\n  591: handkerchief\n  592: hard disk drive\n  593: harmonica\n  594: harp\n  595: harvester\n  596: hatchet\n  597: holster\n  598: home theater\n  599: honeycomb\n  600: hook\n  601: hoop skirt\n  602: horizontal bar\n  603: horse-drawn vehicle\n  604: hourglass\n  605: iPod\n  606: clothes iron\n  607: jack-o'-lantern\n  608: jeans\n  609: jeep\n  610: T-shirt\n  611: jigsaw puzzle\n  612: pulled rickshaw\n  613: joystick\n  614: kimono\n  615: knee pad\n  616: knot\n  617: lab coat\n  618: ladle\n  619: lampshade\n  620: laptop computer\n  621: lawn mower\n  622: lens cap\n  623: paper knife\n  624: library\n  625: lifeboat\n  626: lighter\n  
627: limousine\n  628: ocean liner\n  629: lipstick\n  630: slip-on shoe\n  631: lotion\n  632: speaker\n  633: loupe\n  634: sawmill\n  635: magnetic compass\n  636: mail bag\n  637: mailbox\n  638: tights\n  639: tank suit\n  640: manhole cover\n  641: maraca\n  642: marimba\n  643: mask\n  644: match\n  645: maypole\n  646: maze\n  647: measuring cup\n  648: medicine chest\n  649: megalith\n  650: microphone\n  651: microwave oven\n  652: military uniform\n  653: milk can\n  654: minibus\n  655: miniskirt\n  656: minivan\n  657: missile\n  658: mitten\n  659: mixing bowl\n  660: mobile home\n  661: Model T\n  662: modem\n  663: monastery\n  664: monitor\n  665: moped\n  666: mortar\n  667: square academic cap\n  668: mosque\n  669: mosquito net\n  670: scooter\n  671: mountain bike\n  672: tent\n  673: computer mouse\n  674: mousetrap\n  675: moving van\n  676: muzzle\n  677: nail\n  678: neck brace\n  679: necklace\n  680: nipple\n  681: notebook computer\n  682: obelisk\n  683: oboe\n  684: ocarina\n  685: odometer\n  686: oil filter\n  687: organ\n  688: oscilloscope\n  689: overskirt\n  690: bullock cart\n  691: oxygen mask\n  692: packet\n  693: paddle\n  694: paddle wheel\n  695: padlock\n  696: paintbrush\n  697: pajamas\n  698: palace\n  699: pan flute\n  700: paper towel\n  701: parachute\n  702: parallel bars\n  703: park bench\n  704: parking meter\n  705: passenger car\n  706: patio\n  707: payphone\n  708: pedestal\n  709: pencil case\n  710: pencil sharpener\n  711: perfume\n  712: Petri dish\n  713: photocopier\n  714: plectrum\n  715: Pickelhaube\n  716: picket fence\n  717: pickup truck\n  718: pier\n  719: piggy bank\n  720: pill bottle\n  721: pillow\n  722: ping-pong ball\n  723: pinwheel\n  724: pirate ship\n  725: pitcher\n  726: hand plane\n  727: planetarium\n  728: plastic bag\n  729: plate rack\n  730: plow\n  731: plunger\n  732: Polaroid camera\n  733: pole\n  734: police van\n  735: poncho\n  736: billiard table\n  737: soda bottle\n  738: pot\n  739: potter's wheel\n  740: power drill\n  741: prayer rug\n  742: printer\n  743: prison\n  744: projectile\n  745: projector\n  746: hockey puck\n  747: punching bag\n  748: purse\n  749: quill\n  750: quilt\n  751: race car\n  752: racket\n  753: radiator\n  754: radio\n  755: radio telescope\n  756: rain barrel\n  757: recreational vehicle\n  758: reel\n  759: reflex camera\n  760: refrigerator\n  761: remote control\n  762: restaurant\n  763: revolver\n  764: rifle\n  765: rocking chair\n  766: rotisserie\n  767: eraser\n  768: rugby ball\n  769: ruler\n  770: running shoe\n  771: safe\n  772: safety pin\n  773: salt shaker\n  774: sandal\n  775: sarong\n  776: saxophone\n  777: scabbard\n  778: weighing scale\n  779: school bus\n  780: schooner\n  781: scoreboard\n  782: CRT screen\n  783: screw\n  784: screwdriver\n  785: seat belt\n  786: sewing machine\n  787: shield\n  788: shoe store\n  789: shoji\n  790: shopping basket\n  791: shopping cart\n  792: shovel\n  793: shower cap\n  794: shower curtain\n  795: ski\n  796: ski mask\n  797: sleeping bag\n  798: slide rule\n  799: sliding door\n  800: slot machine\n  801: snorkel\n  802: snowmobile\n  803: snowplow\n  804: soap dispenser\n  805: soccer ball\n  806: sock\n  807: solar thermal collector\n  808: sombrero\n  809: soup bowl\n  810: space bar\n  811: space heater\n  812: space shuttle\n  813: spatula\n  814: motorboat\n  815: spider web\n  816: spindle\n  817: sports car\n  818: spotlight\n  819: stage\n  820: steam locomotive\n  821: through arch 
bridge\n  822: steel drum\n  823: stethoscope\n  824: scarf\n  825: stone wall\n  826: stopwatch\n  827: stove\n  828: strainer\n  829: tram\n  830: stretcher\n  831: couch\n  832: stupa\n  833: submarine\n  834: suit\n  835: sundial\n  836: sunglass\n  837: sunglasses\n  838: sunscreen\n  839: suspension bridge\n  840: mop\n  841: sweatshirt\n  842: swimsuit\n  843: swing\n  844: switch\n  845: syringe\n  846: table lamp\n  847: tank\n  848: tape player\n  849: teapot\n  850: teddy bear\n  851: television\n  852: tennis ball\n  853: thatched roof\n  854: front curtain\n  855: thimble\n  856: threshing machine\n  857: throne\n  858: tile roof\n  859: toaster\n  860: tobacco shop\n  861: toilet seat\n  862: torch\n  863: totem pole\n  864: tow truck\n  865: toy store\n  866: tractor\n  867: semi-trailer truck\n  868: tray\n  869: trench coat\n  870: tricycle\n  871: trimaran\n  872: tripod\n  873: triumphal arch\n  874: trolleybus\n  875: trombone\n  876: tub\n  877: turnstile\n  878: typewriter keyboard\n  879: umbrella\n  880: unicycle\n  881: upright piano\n  882: vacuum cleaner\n  883: vase\n  884: vault\n  885: velvet\n  886: vending machine\n  887: vestment\n  888: viaduct\n  889: violin\n  890: volleyball\n  891: waffle iron\n  892: wall clock\n  893: wallet\n  894: wardrobe\n  895: military aircraft\n  896: sink\n  897: washing machine\n  898: water bottle\n  899: water jug\n  900: water tower\n  901: whiskey jug\n  902: whistle\n  903: wig\n  904: window screen\n  905: window shade\n  906: Windsor tie\n  907: wine bottle\n  908: wing\n  909: wok\n  910: wooden spoon\n  911: wool\n  912: split-rail fence\n  913: shipwreck\n  914: yawl\n  915: yurt\n  916: website\n  917: comic book\n  918: crossword\n  919: traffic sign\n  920: traffic light\n  921: dust jacket\n  922: menu\n  923: plate\n  924: guacamole\n  925: consomme\n  926: hot pot\n  927: trifle\n  928: ice cream\n  929: ice pop\n  930: baguette\n  931: bagel\n  932: pretzel\n  933: cheeseburger\n  934: hot dog\n  935: mashed potato\n  936: cabbage\n  937: broccoli\n  938: cauliflower\n  939: zucchini\n  940: spaghetti squash\n  941: acorn squash\n  942: butternut squash\n  943: cucumber\n  944: artichoke\n  945: bell pepper\n  946: cardoon\n  947: mushroom\n  948: Granny Smith\n  949: strawberry\n  950: orange\n  951: lemon\n  952: fig\n  953: pineapple\n  954: banana\n  955: jackfruit\n  956: custard apple\n  957: pomegranate\n  958: hay\n  959: carbonara\n  960: chocolate syrup\n  961: dough\n  962: meatloaf\n  963: pizza\n  964: pot pie\n  965: burrito\n  966: red wine\n  967: espresso\n  968: cup\n  969: eggnog\n  970: alp\n  971: bubble\n  972: cliff\n  973: coral reef\n  974: geyser\n  975: lakeshore\n  976: promontory\n  977: shoal\n  978: seashore\n  979: valley\n  980: volcano\n  981: baseball player\n  982: bridegroom\n  983: scuba diver\n  984: rapeseed\n  985: daisy\n  986: yellow lady's slipper\n  987: corn\n  988: acorn\n  989: rose hip\n  990: horse chestnut seed\n  991: coral fungus\n  992: agaric\n  993: gyromitra\n  994: stinkhorn mushroom\n  995: earth star\n  996: hen-of-the-woods\n  997: bolete\n  998: ear\n  999: toilet paper\n\n# Download script/URL (optional)\ndownload: data/scripts/get_imagenet1000.sh\n"
  },
  {
    "path": "data/Objects365.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Objects365 dataset https://www.objects365.org/ by Megvii\n# Example usage: python train.py --data Objects365.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── Objects365  ← downloads here (712 GB = 367G data + 345G zips)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/Objects365 # dataset root dir\ntrain: images/train # train images (relative to 'path') 1742289 images\nval: images/val # val images (relative to 'path') 80000 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: Person\n  1: Sneakers\n  2: Chair\n  3: Other Shoes\n  4: Hat\n  5: Car\n  6: Lamp\n  7: Glasses\n  8: Bottle\n  9: Desk\n  10: Cup\n  11: Street Lights\n  12: Cabinet/shelf\n  13: Handbag/Satchel\n  14: Bracelet\n  15: Plate\n  16: Picture/Frame\n  17: Helmet\n  18: Book\n  19: Gloves\n  20: Storage box\n  21: Boat\n  22: Leather Shoes\n  23: Flower\n  24: Bench\n  25: Potted Plant\n  26: Bowl/Basin\n  27: Flag\n  28: Pillow\n  29: Boots\n  30: Vase\n  31: Microphone\n  32: Necklace\n  33: Ring\n  34: SUV\n  35: Wine Glass\n  36: Belt\n  37: Monitor/TV\n  38: Backpack\n  39: Umbrella\n  40: Traffic Light\n  41: Speaker\n  42: Watch\n  43: Tie\n  44: Trash bin Can\n  45: Slippers\n  46: Bicycle\n  47: Stool\n  48: Barrel/bucket\n  49: Van\n  50: Couch\n  51: Sandals\n  52: Basket\n  53: Drum\n  54: Pen/Pencil\n  55: Bus\n  56: Wild Bird\n  57: High Heels\n  58: Motorcycle\n  59: Guitar\n  60: Carpet\n  61: Cell Phone\n  62: Bread\n  63: Camera\n  64: Canned\n  65: Truck\n  66: Traffic cone\n  67: Cymbal\n  68: Lifesaver\n  69: Towel\n  70: Stuffed Toy\n  71: Candle\n  72: Sailboat\n  73: Laptop\n  74: Awning\n  75: Bed\n  76: Faucet\n  77: Tent\n  78: Horse\n  79: Mirror\n  80: Power outlet\n  81: Sink\n  82: Apple\n  83: Air Conditioner\n  84: Knife\n  85: Hockey Stick\n  86: Paddle\n  87: Pickup Truck\n  88: Fork\n  89: Traffic Sign\n  90: Balloon\n  91: Tripod\n  92: Dog\n  93: Spoon\n  94: Clock\n  95: Pot\n  96: Cow\n  97: Cake\n  98: Dinning Table\n  99: Sheep\n  100: Hanger\n  101: Blackboard/Whiteboard\n  102: Napkin\n  103: Other Fish\n  104: Orange/Tangerine\n  105: Toiletry\n  106: Keyboard\n  107: Tomato\n  108: Lantern\n  109: Machinery Vehicle\n  110: Fan\n  111: Green Vegetables\n  112: Banana\n  113: Baseball Glove\n  114: Airplane\n  115: Mouse\n  116: Train\n  117: Pumpkin\n  118: Soccer\n  119: Skiboard\n  120: Luggage\n  121: Nightstand\n  122: Tea pot\n  123: Telephone\n  124: Trolley\n  125: Head Phone\n  126: Sports Car\n  127: Stop Sign\n  128: Dessert\n  129: Scooter\n  130: Stroller\n  131: Crane\n  132: Remote\n  133: Refrigerator\n  134: Oven\n  135: Lemon\n  136: Duck\n  137: Baseball Bat\n  138: Surveillance Camera\n  139: Cat\n  140: Jug\n  141: Broccoli\n  142: Piano\n  143: Pizza\n  144: Elephant\n  145: Skateboard\n  146: Surfboard\n  147: Gun\n  148: Skating and Skiing shoes\n  149: Gas stove\n  150: Donut\n  151: Bow Tie\n  152: Carrot\n  153: Toilet\n  154: Kite\n  155: Strawberry\n  156: Other Balls\n  157: Shovel\n  158: Pepper\n  159: Computer Box\n  160: Toilet Paper\n  161: Cleaning Products\n  162: Chopsticks\n  163: Microwave\n  164: Pigeon\n  165: Baseball\n  166: Cutting/chopping Board\n  167: Coffee Table\n  168: Side Table\n  169: Scissors\n  170: Marker\n  171: Pie\n  172: Ladder\n  173: Snowboard\n  174: Cookies\n  175: Radiator\n  176: Fire Hydrant\n  177: Basketball\n  
178: Zebra\n  179: Grape\n  180: Giraffe\n  181: Potato\n  182: Sausage\n  183: Tricycle\n  184: Violin\n  185: Egg\n  186: Fire Extinguisher\n  187: Candy\n  188: Fire Truck\n  189: Billiards\n  190: Converter\n  191: Bathtub\n  192: Wheelchair\n  193: Golf Club\n  194: Briefcase\n  195: Cucumber\n  196: Cigar/Cigarette\n  197: Paint Brush\n  198: Pear\n  199: Heavy Truck\n  200: Hamburger\n  201: Extractor\n  202: Extension Cord\n  203: Tong\n  204: Tennis Racket\n  205: Folder\n  206: American Football\n  207: earphone\n  208: Mask\n  209: Kettle\n  210: Tennis\n  211: Ship\n  212: Swing\n  213: Coffee Machine\n  214: Slide\n  215: Carriage\n  216: Onion\n  217: Green beans\n  218: Projector\n  219: Frisbee\n  220: Washing Machine/Drying Machine\n  221: Chicken\n  222: Printer\n  223: Watermelon\n  224: Saxophone\n  225: Tissue\n  226: Toothbrush\n  227: Ice cream\n  228: Hot-air balloon\n  229: Cello\n  230: French Fries\n  231: Scale\n  232: Trophy\n  233: Cabbage\n  234: Hot dog\n  235: Blender\n  236: Peach\n  237: Rice\n  238: Wallet/Purse\n  239: Volleyball\n  240: Deer\n  241: Goose\n  242: Tape\n  243: Tablet\n  244: Cosmetics\n  245: Trumpet\n  246: Pineapple\n  247: Golf Ball\n  248: Ambulance\n  249: Parking meter\n  250: Mango\n  251: Key\n  252: Hurdle\n  253: Fishing Rod\n  254: Medal\n  255: Flute\n  256: Brush\n  257: Penguin\n  258: Megaphone\n  259: Corn\n  260: Lettuce\n  261: Garlic\n  262: Swan\n  263: Helicopter\n  264: Green Onion\n  265: Sandwich\n  266: Nuts\n  267: Speed Limit Sign\n  268: Induction Cooker\n  269: Broom\n  270: Trombone\n  271: Plum\n  272: Rickshaw\n  273: Goldfish\n  274: Kiwi fruit\n  275: Router/modem\n  276: Poker Card\n  277: Toaster\n  278: Shrimp\n  279: Sushi\n  280: Cheese\n  281: Notepaper\n  282: Cherry\n  283: Pliers\n  284: CD\n  285: Pasta\n  286: Hammer\n  287: Cue\n  288: Avocado\n  289: Hamimelon\n  290: Flask\n  291: Mushroom\n  292: Screwdriver\n  293: Soap\n  294: Recorder\n  295: Bear\n  296: Eggplant\n  297: Board Eraser\n  298: Coconut\n  299: Tape Measure/Ruler\n  300: Pig\n  301: Showerhead\n  302: Globe\n  303: Chips\n  304: Steak\n  305: Crosswalk Sign\n  306: Stapler\n  307: Camel\n  308: Formula 1\n  309: Pomegranate\n  310: Dishwasher\n  311: Crab\n  312: Hoverboard\n  313: Meat ball\n  314: Rice Cooker\n  315: Tuba\n  316: Calculator\n  317: Papaya\n  318: Antelope\n  319: Parrot\n  320: Seal\n  321: Butterfly\n  322: Dumbbell\n  323: Donkey\n  324: Lion\n  325: Urinal\n  326: Dolphin\n  327: Electric Drill\n  328: Hair Dryer\n  329: Egg tart\n  330: Jellyfish\n  331: Treadmill\n  332: Lighter\n  333: Grapefruit\n  334: Game board\n  335: Mop\n  336: Radish\n  337: Baozi\n  338: Target\n  339: French\n  340: Spring Rolls\n  341: Monkey\n  342: Rabbit\n  343: Pencil Case\n  344: Yak\n  345: Red Cabbage\n  346: Binoculars\n  347: Asparagus\n  348: Barbell\n  349: Scallop\n  350: Noddles\n  351: Comb\n  352: Dumpling\n  353: Oyster\n  354: Table Tennis paddle\n  355: Cosmetics Brush/Eyeliner Pencil\n  356: Chainsaw\n  357: Eraser\n  358: Lobster\n  359: Durian\n  360: Okra\n  361: Lipstick\n  362: Cosmetics Mirror\n  363: Curling\n  364: Table Tennis\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  from tqdm import tqdm\n\n  from utils.general import Path, check_requirements, download, np, xyxy2xywhn\n\n  check_requirements('pycocotools>=2.0')\n  from pycocotools.coco import COCO\n\n  # Make Directories\n  dir = 
Path(yaml['path'])  # dataset root dir\n  for p in 'images', 'labels':\n      (dir / p).mkdir(parents=True, exist_ok=True)\n      for q in 'train', 'val':\n          (dir / p / q).mkdir(parents=True, exist_ok=True)\n\n  # Train, Val Splits\n  for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:\n      print(f\"Processing {split} in {patches} patches ...\")\n      images, labels = dir / 'images' / split, dir / 'labels' / split\n\n      # Download\n      url = f\"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/\"\n      if split == 'train':\n          download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False)  # annotations json\n          download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)\n      elif split == 'val':\n          download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False)  # annotations json\n          download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)\n          download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)\n\n      # Move\n      for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):\n          f.rename(images / f.name)  # move to /images/{split}\n\n      # Labels\n      coco = COCO(dir / f'zhiyuan_objv2_{split}.json')\n      names = [x[\"name\"] for x in coco.loadCats(coco.getCatIds())]\n      for cid, cat in enumerate(names):\n          catIds = coco.getCatIds(catNms=[cat])\n          imgIds = coco.getImgIds(catIds=catIds)\n          for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):\n              width, height = im[\"width\"], im[\"height\"]\n              path = Path(im[\"file_name\"])  # image filename\n              try:\n                  with open(labels / path.with_suffix('.txt').name, 'a') as file:\n                      annIds = coco.getAnnIds(imgIds=im[\"id\"], catIds=catIds, iscrowd=False)\n                      for a in coco.loadAnns(annIds):\n                          x, y, w, h = a['bbox']  # bounding box in xywh (xy top-left corner)\n                          xyxy = np.array([x, y, x + w, y + h])[None]  # pixels(1,4)\n                          x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0]  # normalized and clipped\n                          file.write(f\"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\\n\")\n              except Exception as e:\n                  print(e)\n"
  },
  {
    "path": "data/SKU-110K.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail\n# Example usage: python train.py --data SKU-110K.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── SKU-110K  ← downloads here (13.6 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/SKU-110K # dataset root dir\ntrain: train.txt # train images (relative to 'path')  8219 images\nval: val.txt # val images (relative to 'path')  588 images\ntest: test.txt # test images (optional)  2936 images\n\n# Classes\nnames:\n  0: object\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  import shutil\n  from tqdm import tqdm\n  from utils.general import np, pd, Path, download, xyxy2xywh\n\n\n  # Download\n  dir = Path(yaml['path'])  # dataset root dir\n  parent = Path(dir.parent)  # download dir\n  urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']\n  download(urls, dir=parent, delete=False)\n\n  # Rename directories\n  if dir.exists():\n      shutil.rmtree(dir)\n  (parent / 'SKU110K_fixed').rename(dir)  # rename dir\n  (dir / 'labels').mkdir(parents=True, exist_ok=True)  # create labels dir\n\n  # Convert labels\n  names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height'  # column names\n  for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':\n      x = pd.read_csv(dir / 'annotations' / d, names=names).values  # annotations\n      images, unique_images = x[:, 0], np.unique(x[:, 0])\n      with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:\n          f.writelines(f'./images/{s}\\n' for s in unique_images)\n      for im in tqdm(unique_images, desc=f'Converting {dir / d}'):\n          cls = 0  # single-class dataset\n          with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:\n              for r in x[images == im]:\n                  w, h = r[6], r[7]  # image width, height\n                  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0]  # instance\n                  f.write(f\"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\\n\")  # write label\n"
  },
  {
    "path": "data/VOC.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford\n# Example usage: python train.py --data VOC.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── VOC  ← downloads here (2.8 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/VOC\ntrain: # train images (relative to 'path')  16551 images\n  - images/train2012\n  - images/train2007\n  - images/val2012\n  - images/val2007\nval: # val images (relative to 'path')  4952 images\n  - images/test2007\ntest: # test images (optional)\n  - images/test2007\n\n# Classes\nnames:\n  0: aeroplane\n  1: bicycle\n  2: bird\n  3: boat\n  4: bottle\n  5: bus\n  6: car\n  7: cat\n  8: chair\n  9: cow\n  10: diningtable\n  11: dog\n  12: horse\n  13: motorbike\n  14: person\n  15: pottedplant\n  16: sheep\n  17: sofa\n  18: train\n  19: tvmonitor\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  import xml.etree.ElementTree as ET\n\n  from tqdm import tqdm\n  from utils.general import download, Path\n\n\n  def convert_label(path, lb_path, year, image_id):\n      def convert_box(size, box):\n          dw, dh = 1. / size[0], 1. / size[1]\n          x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]\n          return x * dw, y * dh, w * dw, h * dh\n\n      in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')\n      out_file = open(lb_path, 'w')\n      tree = ET.parse(in_file)\n      root = tree.getroot()\n      size = root.find('size')\n      w = int(size.find('width').text)\n      h = int(size.find('height').text)\n\n      names = list(yaml['names'].values())  # names list\n      for obj in root.iter('object'):\n          cls = obj.find('name').text\n          if cls in names and int(obj.find('difficult').text) != 1:\n              xmlbox = obj.find('bndbox')\n              bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])\n              cls_id = names.index(cls)  # class id\n              out_file.write(\" \".join([str(a) for a in (cls_id, *bb)]) + '\\n')\n\n\n  # Download\n  dir = Path(yaml['path'])  # dataset root dir\n  url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'\n  urls = [f'{url}VOCtrainval_06-Nov-2007.zip',  # 446MB, 5012 images\n          f'{url}VOCtest_06-Nov-2007.zip',  # 438MB, 4953 images\n          f'{url}VOCtrainval_11-May-2012.zip']  # 1.95GB, 17126 images\n  download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)\n\n  # Convert\n  path = dir / 'images/VOCdevkit'\n  for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):\n      imgs_path = dir / 'images' / f'{image_set}{year}'\n      lbs_path = dir / 'labels' / f'{image_set}{year}'\n      imgs_path.mkdir(exist_ok=True, parents=True)\n      lbs_path.mkdir(exist_ok=True, parents=True)\n\n      with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:\n          image_ids = f.read().strip().split()\n      for id in tqdm(image_ids, desc=f'{image_set}{year}'):\n          f = path / f'VOC{year}/JPEGImages/{id}.jpg'  # old img path\n          lb_path = (lbs_path / f.name).with_suffix('.txt')  # new label path\n          f.rename(imgs_path / f.name)  # move image\n          
convert_label(path, lb_path, year, id)  # convert labels to YOLO format\n"
  },
  {
    "path": "data/VisDrone.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University\n# Example usage: python train.py --data VisDrone.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── VisDrone  ← downloads here (2.3 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/VisDrone # dataset root dir\ntrain: VisDrone2019-DET-train/images # train images (relative to 'path')  6471 images\nval: VisDrone2019-DET-val/images # val images (relative to 'path')  548 images\ntest: VisDrone2019-DET-test-dev/images # test images (optional)  1610 images\n\n# Classes\nnames:\n  0: pedestrian\n  1: people\n  2: bicycle\n  3: car\n  4: van\n  5: truck\n  6: tricycle\n  7: awning-tricycle\n  8: bus\n  9: motor\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  from utils.general import download, os, Path\n\n  def visdrone2yolo(dir):\n      from PIL import Image\n      from tqdm import tqdm\n\n      def convert_box(size, box):\n          # Convert VisDrone box to YOLO xywh box\n          dw = 1. / size[0]\n          dh = 1. / size[1]\n          return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh\n\n      (dir / 'labels').mkdir(parents=True, exist_ok=True)  # make labels directory\n      pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')\n      for f in pbar:\n          img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size\n          lines = []\n          with open(f, 'r') as file:  # read annotation.txt\n              for row in [x.split(',') for x in file.read().strip().splitlines()]:\n                  if row[4] == '0':  # VisDrone 'ignored regions' class 0\n                      continue\n                  cls = int(row[5]) - 1\n                  box = convert_box(img_size, tuple(map(int, row[:4])))\n                  lines.append(f\"{cls} {' '.join(f'{x:.6f}' for x in box)}\\n\")\n                  with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:\n                      fl.writelines(lines)  # write label.txt\n\n\n  # Download\n  dir = Path(yaml['path'])  # dataset root dir\n  urls = ['https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip',\n          'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip',\n          'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip',\n          'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip']\n  download(urls, dir=dir, curl=True, threads=4)\n\n  # Convert\n  for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':\n      visdrone2yolo(dir / d)  # convert VisDrone annotations to YOLO labels\n"
  },
  {
    "path": "data/coco.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# COCO 2017 dataset http://cocodataset.org by Microsoft\n# Example usage: python train.py --data coco.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── coco  ← downloads here (20.1 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/coco # dataset root dir\ntrain: train2017.txt # train images (relative to 'path') 118287 images\nval: val2017.txt # val images (relative to 'path') 5000 images\ntest: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794\n\n# Classes\nnames:\n  0: person\n  1: bicycle\n  2: car\n  3: motorcycle\n  4: airplane\n  5: bus\n  6: train\n  7: truck\n  8: boat\n  9: traffic light\n  10: fire hydrant\n  11: stop sign\n  12: parking meter\n  13: bench\n  14: bird\n  15: cat\n  16: dog\n  17: horse\n  18: sheep\n  19: cow\n  20: elephant\n  21: bear\n  22: zebra\n  23: giraffe\n  24: backpack\n  25: umbrella\n  26: handbag\n  27: tie\n  28: suitcase\n  29: frisbee\n  30: skis\n  31: snowboard\n  32: sports ball\n  33: kite\n  34: baseball bat\n  35: baseball glove\n  36: skateboard\n  37: surfboard\n  38: tennis racket\n  39: bottle\n  40: wine glass\n  41: cup\n  42: fork\n  43: knife\n  44: spoon\n  45: bowl\n  46: banana\n  47: apple\n  48: sandwich\n  49: orange\n  50: broccoli\n  51: carrot\n  52: hot dog\n  53: pizza\n  54: donut\n  55: cake\n  56: chair\n  57: couch\n  58: potted plant\n  59: bed\n  60: dining table\n  61: toilet\n  62: tv\n  63: laptop\n  64: mouse\n  65: remote\n  66: keyboard\n  67: cell phone\n  68: microwave\n  69: oven\n  70: toaster\n  71: sink\n  72: refrigerator\n  73: book\n  74: clock\n  75: vase\n  76: scissors\n  77: teddy bear\n  78: hair drier\n  79: toothbrush\n\n# Download script/URL (optional)\ndownload: |\n  from utils.general import download, Path\n\n\n  # Download labels\n  segments = False  # segment or box labels\n  dir = Path(yaml['path'])  # dataset root dir\n  url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'\n  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels\n  download(urls, dir=dir.parent)\n\n  # Download data\n  urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images\n          'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images\n          'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)\n  download(urls, dir=dir / 'images', threads=3)\n"
  },
  {
    "path": "data/coco128-seg.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics\n# Example usage: python train.py --data coco128.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── coco128-seg  ← downloads here (7 MB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/coco128-seg # dataset root dir\ntrain: images/train2017 # train images (relative to 'path') 128 images\nval: images/train2017 # val images (relative to 'path') 128 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: person\n  1: bicycle\n  2: car\n  3: motorcycle\n  4: airplane\n  5: bus\n  6: train\n  7: truck\n  8: boat\n  9: traffic light\n  10: fire hydrant\n  11: stop sign\n  12: parking meter\n  13: bench\n  14: bird\n  15: cat\n  16: dog\n  17: horse\n  18: sheep\n  19: cow\n  20: elephant\n  21: bear\n  22: zebra\n  23: giraffe\n  24: backpack\n  25: umbrella\n  26: handbag\n  27: tie\n  28: suitcase\n  29: frisbee\n  30: skis\n  31: snowboard\n  32: sports ball\n  33: kite\n  34: baseball bat\n  35: baseball glove\n  36: skateboard\n  37: surfboard\n  38: tennis racket\n  39: bottle\n  40: wine glass\n  41: cup\n  42: fork\n  43: knife\n  44: spoon\n  45: bowl\n  46: banana\n  47: apple\n  48: sandwich\n  49: orange\n  50: broccoli\n  51: carrot\n  52: hot dog\n  53: pizza\n  54: donut\n  55: cake\n  56: chair\n  57: couch\n  58: potted plant\n  59: bed\n  60: dining table\n  61: toilet\n  62: tv\n  63: laptop\n  64: mouse\n  65: remote\n  66: keyboard\n  67: cell phone\n  68: microwave\n  69: oven\n  70: toaster\n  71: sink\n  72: refrigerator\n  73: book\n  74: clock\n  75: vase\n  76: scissors\n  77: teddy bear\n  78: hair drier\n  79: toothbrush\n\n# Download script/URL (optional)\ndownload: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip\n"
  },
  {
    "path": "data/coco128.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# COCO128 dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics\n# Example usage: python train.py --data coco128.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── coco128  ← downloads here (7 MB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/coco128 # dataset root dir\ntrain: images/train2017 # train images (relative to 'path') 128 images\nval: images/train2017 # val images (relative to 'path') 128 images\ntest: # test images (optional)\n\n# Classes\nnames:\n  0: person\n  1: bicycle\n  2: car\n  3: motorcycle\n  4: airplane\n  5: bus\n  6: train\n  7: truck\n  8: boat\n  9: traffic light\n  10: fire hydrant\n  11: stop sign\n  12: parking meter\n  13: bench\n  14: bird\n  15: cat\n  16: dog\n  17: horse\n  18: sheep\n  19: cow\n  20: elephant\n  21: bear\n  22: zebra\n  23: giraffe\n  24: backpack\n  25: umbrella\n  26: handbag\n  27: tie\n  28: suitcase\n  29: frisbee\n  30: skis\n  31: snowboard\n  32: sports ball\n  33: kite\n  34: baseball bat\n  35: baseball glove\n  36: skateboard\n  37: surfboard\n  38: tennis racket\n  39: bottle\n  40: wine glass\n  41: cup\n  42: fork\n  43: knife\n  44: spoon\n  45: bowl\n  46: banana\n  47: apple\n  48: sandwich\n  49: orange\n  50: broccoli\n  51: carrot\n  52: hot dog\n  53: pizza\n  54: donut\n  55: cake\n  56: chair\n  57: couch\n  58: potted plant\n  59: bed\n  60: dining table\n  61: toilet\n  62: tv\n  63: laptop\n  64: mouse\n  65: remote\n  66: keyboard\n  67: cell phone\n  68: microwave\n  69: oven\n  70: toaster\n  71: sink\n  72: refrigerator\n  73: book\n  74: clock\n  75: vase\n  76: scissors\n  77: teddy bear\n  78: hair drier\n  79: toothbrush\n\n# Download script/URL (optional)\ndownload: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip\n"
  },
  {
    "path": "data/hyps/hyp.Objects365.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for Objects365 training\n# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve\n# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials\n\nlr0: 0.00258\nlrf: 0.17\nmomentum: 0.779\nweight_decay: 0.00058\nwarmup_epochs: 1.33\nwarmup_momentum: 0.86\nwarmup_bias_lr: 0.0711\nbox: 0.0539\ncls: 0.299\ncls_pw: 0.825\nobj: 0.632\nobj_pw: 1.0\niou_t: 0.2\nanchor_t: 3.44\nanchors: 3.2\nfl_gamma: 0.0\nhsv_h: 0.0188\nhsv_s: 0.704\nhsv_v: 0.36\ndegrees: 0.0\ntranslate: 0.0902\nscale: 0.491\nshear: 0.0\nperspective: 0.0\nflipud: 0.0\nfliplr: 0.5\nmosaic: 1.0\nmixup: 0.0\ncopy_paste: 0.0\n"
  },
  {
    "path": "data/hyps/hyp.VOC.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for VOC training\n# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve\n# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials\n\n# YOLOv5 Hyperparameter Evolution Results\n# Best generation: 467\n# Last generation: 996\n#    metrics/precision,       metrics/recall,      metrics/mAP_0.5, metrics/mAP_0.5:0.95,         val/box_loss,         val/obj_loss,         val/cls_loss\n#              0.87729,              0.85125,              0.91286,              0.72664,            0.0076739,            0.0042529,            0.0013865\n\nlr0: 0.00334\nlrf: 0.15135\nmomentum: 0.74832\nweight_decay: 0.00025\nwarmup_epochs: 3.3835\nwarmup_momentum: 0.59462\nwarmup_bias_lr: 0.18657\nbox: 0.02\ncls: 0.21638\ncls_pw: 0.5\nobj: 0.51728\nobj_pw: 0.67198\niou_t: 0.2\nanchor_t: 3.3744\nfl_gamma: 0.0\nhsv_h: 0.01041\nhsv_s: 0.54703\nhsv_v: 0.27739\ndegrees: 0.0\ntranslate: 0.04591\nscale: 0.75544\nshear: 0.0\nperspective: 0.0\nflipud: 0.0\nfliplr: 0.5\nmosaic: 0.85834\nmixup: 0.04266\ncopy_paste: 0.0\nanchors: 3.412\n"
  },
  {
    "path": "data/hyps/hyp.no-augmentation.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters when using Albumentations frameworks\n# python train.py --hyp hyp.no-augmentation.yaml\n# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples\n\nlr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)\nlrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)\nmomentum: 0.937 # SGD momentum/Adam beta1\nweight_decay: 0.0005 # optimizer weight decay 5e-4\nwarmup_epochs: 3.0 # warmup epochs (fractions ok)\nwarmup_momentum: 0.8 # warmup initial momentum\nwarmup_bias_lr: 0.1 # warmup initial bias lr\nbox: 0.05 # box loss gain\ncls: 0.3 # cls loss gain\ncls_pw: 1.0 # cls BCELoss positive_weight\nobj: 0.7 # obj loss gain (scale with pixels)\nobj_pw: 1.0 # obj BCELoss positive_weight\niou_t: 0.20 # IoU training threshold\nanchor_t: 4.0 # anchor-multiple threshold\n# anchors: 3  # anchors per output layer (0 to ignore)\n# this parameters are all zero since we want to use albumentation framework\nfl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\nhsv_h: 0 # image HSV-Hue augmentation (fraction)\nhsv_s: 0 # image HSV-Saturation augmentation (fraction)\nhsv_v: 0 # image HSV-Value augmentation (fraction)\ndegrees: 0.0 # image rotation (+/- deg)\ntranslate: 0 # image translation (+/- fraction)\nscale: 0 # image scale (+/- gain)\nshear: 0 # image shear (+/- deg)\nperspective: 0.0 # image perspective (+/- fraction), range 0-0.001\nflipud: 0.0 # image flip up-down (probability)\nfliplr: 0.0 # image flip left-right (probability)\nmosaic: 0.0 # image mosaic (probability)\nmixup: 0.0 # image mixup (probability)\ncopy_paste: 0.0 # segment copy-paste (probability)\n"
  },
  {
    "path": "data/hyps/hyp.scratch-high.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for high-augmentation COCO training from scratch\n# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300\n# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials\n\nlr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)\nlrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)\nmomentum: 0.937 # SGD momentum/Adam beta1\nweight_decay: 0.0005 # optimizer weight decay 5e-4\nwarmup_epochs: 3.0 # warmup epochs (fractions ok)\nwarmup_momentum: 0.8 # warmup initial momentum\nwarmup_bias_lr: 0.1 # warmup initial bias lr\nbox: 0.05 # box loss gain\ncls: 0.3 # cls loss gain\ncls_pw: 1.0 # cls BCELoss positive_weight\nobj: 0.7 # obj loss gain (scale with pixels)\nobj_pw: 1.0 # obj BCELoss positive_weight\niou_t: 0.20 # IoU training threshold\nanchor_t: 4.0 # anchor-multiple threshold\n# anchors: 3  # anchors per output layer (0 to ignore)\nfl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\nhsv_h: 0.015 # image HSV-Hue augmentation (fraction)\nhsv_s: 0.7 # image HSV-Saturation augmentation (fraction)\nhsv_v: 0.4 # image HSV-Value augmentation (fraction)\ndegrees: 0.0 # image rotation (+/- deg)\ntranslate: 0.1 # image translation (+/- fraction)\nscale: 0.9 # image scale (+/- gain)\nshear: 0.0 # image shear (+/- deg)\nperspective: 0.0 # image perspective (+/- fraction), range 0-0.001\nflipud: 0.0 # image flip up-down (probability)\nfliplr: 0.5 # image flip left-right (probability)\nmosaic: 1.0 # image mosaic (probability)\nmixup: 0.1 # image mixup (probability)\ncopy_paste: 0.1 # segment copy-paste (probability)\n"
  },
  {
    "path": "data/hyps/hyp.scratch-low.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for low-augmentation COCO training from scratch\n# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear\n# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials\n\nlr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)\nlrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)\nmomentum: 0.937 # SGD momentum/Adam beta1\nweight_decay: 0.0005 # optimizer weight decay 5e-4\nwarmup_epochs: 3.0 # warmup epochs (fractions ok)\nwarmup_momentum: 0.8 # warmup initial momentum\nwarmup_bias_lr: 0.1 # warmup initial bias lr\nbox: 0.05 # box loss gain\ncls: 0.5 # cls loss gain\ncls_pw: 1.0 # cls BCELoss positive_weight\nobj: 1.0 # obj loss gain (scale with pixels)\nobj_pw: 1.0 # obj BCELoss positive_weight\niou_t: 0.20 # IoU training threshold\nanchor_t: 4.0 # anchor-multiple threshold\n# anchors: 3  # anchors per output layer (0 to ignore)\nfl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\nhsv_h: 0.015 # image HSV-Hue augmentation (fraction)\nhsv_s: 0.7 # image HSV-Saturation augmentation (fraction)\nhsv_v: 0.4 # image HSV-Value augmentation (fraction)\ndegrees: 0.0 # image rotation (+/- deg)\ntranslate: 0.1 # image translation (+/- fraction)\nscale: 0.5 # image scale (+/- gain)\nshear: 0.0 # image shear (+/- deg)\nperspective: 0.0 # image perspective (+/- fraction), range 0-0.001\nflipud: 0.0 # image flip up-down (probability)\nfliplr: 0.5 # image flip left-right (probability)\nmosaic: 1.0 # image mosaic (probability)\nmixup: 0.0 # image mixup (probability)\ncopy_paste: 0.0 # segment copy-paste (probability)\n"
  },
  {
    "path": "data/hyps/hyp.scratch-med.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for medium-augmentation COCO training from scratch\n# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300\n# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials\n\nlr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)\nlrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)\nmomentum: 0.937 # SGD momentum/Adam beta1\nweight_decay: 0.0005 # optimizer weight decay 5e-4\nwarmup_epochs: 3.0 # warmup epochs (fractions ok)\nwarmup_momentum: 0.8 # warmup initial momentum\nwarmup_bias_lr: 0.1 # warmup initial bias lr\nbox: 0.05 # box loss gain\ncls: 0.3 # cls loss gain\ncls_pw: 1.0 # cls BCELoss positive_weight\nobj: 0.7 # obj loss gain (scale with pixels)\nobj_pw: 1.0 # obj BCELoss positive_weight\niou_t: 0.20 # IoU training threshold\nanchor_t: 4.0 # anchor-multiple threshold\n# anchors: 3  # anchors per output layer (0 to ignore)\nfl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\nhsv_h: 0.015 # image HSV-Hue augmentation (fraction)\nhsv_s: 0.7 # image HSV-Saturation augmentation (fraction)\nhsv_v: 0.4 # image HSV-Value augmentation (fraction)\ndegrees: 0.0 # image rotation (+/- deg)\ntranslate: 0.1 # image translation (+/- fraction)\nscale: 0.9 # image scale (+/- gain)\nshear: 0.0 # image shear (+/- deg)\nperspective: 0.0 # image perspective (+/- fraction), range 0-0.001\nflipud: 0.0 # image flip up-down (probability)\nfliplr: 0.5 # image flip left-right (probability)\nmosaic: 1.0 # image mosaic (probability)\nmixup: 0.1 # image mixup (probability)\ncopy_paste: 0.0 # segment copy-paste (probability)\n"
  },
  {
    "path": "data/scripts/download_weights.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download latest models from https://github.com/ultralytics/yolov5/releases\n# Example usage: bash data/scripts/download_weights.sh\n# parent\n# └── yolov5\n#     ├── yolov5s.pt  ← downloads here\n#     ├── yolov5m.pt\n#     └── ...\n\npython - << EOF\nfrom utils.downloads import attempt_download\n\np5 = list('nsmlx')  # P5 models\np6 = [f'{x}6' for x in p5]  # P6 models\ncls = [f'{x}-cls' for x in p5]  # classification models\nseg = [f'{x}-seg' for x in p5]  # classification models\n\nfor x in p5 + p6 + cls + seg:\n    attempt_download(f'weights/yolov5{x}.pt')\n\nEOF\n"
  },
  {
    "path": "data/scripts/get_coco.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download COCO 2017 dataset http://cocodataset.org\n# Example usage: bash data/scripts/get_coco.sh\n# parent\n# ├── yolov5\n# └── datasets\n#     └── coco  ← downloads here\n\n# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments\nif [ \"$#\" -gt 0 ]; then\n  for opt in \"$@\"; do\n    case \"${opt}\" in\n      --train) train=true ;;\n      --val) val=true ;;\n      --test) test=true ;;\n      --segments) segments=true ;;\n    esac\n  done\nelse\n  train=true\n  val=true\n  test=false\n  segments=false\nfi\n\n# Download/unzip labels\nd='../datasets' # unzip directory\nurl=https://github.com/ultralytics/yolov5/releases/download/v1.0/\nif [ \"$segments\" == \"true\" ]; then\n  f='coco2017labels-segments.zip' # 168 MB\nelse\n  f='coco2017labels.zip' # 46 MB\nfi\necho 'Downloading' $url$f ' ...'\ncurl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &\n\n# Download/unzip images\nd='../datasets/coco/images' # unzip directory\nurl=http://images.cocodataset.org/zips/\nif [ \"$train\" == \"true\" ]; then\n  f='train2017.zip' # 19G, 118k images\n  echo 'Downloading' $url$f '...'\n  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &\nfi\nif [ \"$val\" == \"true\" ]; then\n  f='val2017.zip' # 1G, 5k images\n  echo 'Downloading' $url$f '...'\n  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &\nfi\nif [ \"$test\" == \"true\" ]; then\n  f='test2017.zip' # 7G, 41k images (optional)\n  echo 'Downloading' $url$f '...'\n  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &\nfi\nwait # finish background tasks\n"
  },
  {
    "path": "data/scripts/get_coco128.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)\n# Example usage: bash data/scripts/get_coco128.sh\n# parent\n# ├── yolov5\n# └── datasets\n#     └── coco128  ← downloads here\n\n# Download/unzip images and labels\nd='../datasets' # unzip directory\nurl=https://github.com/ultralytics/yolov5/releases/download/v1.0/\nf='coco128.zip' # or 'coco128-segments.zip', 68 MB\necho 'Downloading' $url$f ' ...'\ncurl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &\n\nwait # finish background tasks\n"
  },
  {
    "path": "data/scripts/get_imagenet.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# Example usage: bash data/scripts/get_imagenet.sh\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet  ← downloads here\n\n# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val\nif [ \"$#\" -gt 0 ]; then\n  for opt in \"$@\"; do\n    case \"${opt}\" in\n      --train) train=true ;;\n      --val) val=true ;;\n    esac\n  done\nelse\n  train=true\n  val=true\nfi\n\n# Make dir\nd='../datasets/imagenet' # unzip directory\nmkdir -p $d && cd $d\n\n# Download/unzip train\nif [ \"$train\" == \"true\" ]; then\n  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images\n  mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train\n  tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar\n  find . -name \"*.tar\" | while read NAME; do\n    mkdir -p \"${NAME%.tar}\"\n    tar -xf \"${NAME}\" -C \"${NAME%.tar}\"\n    rm -f \"${NAME}\"\n  done\n  cd ..\nfi\n\n# Download/unzip val\nif [ \"$val\" == \"true\" ]; then\n  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images\n  mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar\n  wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs\nfi\n\n# Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail)\n# rm train/n04266014/n04266014_10835.JPEG\n\n# TFRecords (optional)\n# wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt\n"
  },
  {
    "path": "data/scripts/get_imagenet10.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# Example usage: bash data/scripts/get_imagenet.sh\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet  ← downloads here\n\n# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val\nif [ \"$#\" -gt 0 ]; then\n  for opt in \"$@\"; do\n    case \"${opt}\" in\n      --train) train=true ;;\n      --val) val=true ;;\n    esac\n  done\nelse\n  train=true\n  val=true\nfi\n\n# Make dir\nd='../datasets/imagenet10' # unzip directory\nmkdir -p $d && cd $d\n\n# Download/unzip train\nwget https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenet10.zip\nunzip imagenet10.zip && rm imagenet10.zip\n"
  },
  {
    "path": "data/scripts/get_imagenet100.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# Example usage: bash data/scripts/get_imagenet.sh\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet  ← downloads here\n\n# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val\nif [ \"$#\" -gt 0 ]; then\n  for opt in \"$@\"; do\n    case \"${opt}\" in\n      --train) train=true ;;\n      --val) val=true ;;\n    esac\n  done\nelse\n  train=true\n  val=true\nfi\n\n# Make dir\nd='../datasets/imagenet100' # unzip directory\nmkdir -p $d && cd $d\n\n# Download/unzip train\nwget https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenet100.zip\nunzip imagenet100.zip && rm imagenet100.zip\n"
  },
  {
    "path": "data/scripts/get_imagenet1000.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# Example usage: bash data/scripts/get_imagenet.sh\n# parent\n# ├── yolov5\n# └── datasets\n#     └── imagenet  ← downloads here\n\n# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val\nif [ \"$#\" -gt 0 ]; then\n  for opt in \"$@\"; do\n    case \"${opt}\" in\n      --train) train=true ;;\n      --val) val=true ;;\n    esac\n  done\nelse\n  train=true\n  val=true\nfi\n\n# Make dir\nd='../datasets/imagenet1000' # unzip directory\nmkdir -p $d && cd $d\n\n# Download/unzip train\nwget https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenet1000.zip\nunzip imagenet1000.zip && rm imagenet1000.zip\n"
  },
  {
    "path": "data/xView.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)\n# --------  DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command!  --------\n# Example usage: python train.py --data xView.yaml\n# parent\n# ├── yolov5\n# └── datasets\n#     └── xView  ← downloads here (20.7 GB)\n\n# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]\npath: ../datasets/xView # dataset root dir\ntrain: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images\nval: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images\n\n# Classes\nnames:\n  0: Fixed-wing Aircraft\n  1: Small Aircraft\n  2: Cargo Plane\n  3: Helicopter\n  4: Passenger Vehicle\n  5: Small Car\n  6: Bus\n  7: Pickup Truck\n  8: Utility Truck\n  9: Truck\n  10: Cargo Truck\n  11: Truck w/Box\n  12: Truck Tractor\n  13: Trailer\n  14: Truck w/Flatbed\n  15: Truck w/Liquid\n  16: Crane Truck\n  17: Railway Vehicle\n  18: Passenger Car\n  19: Cargo Car\n  20: Flat Car\n  21: Tank car\n  22: Locomotive\n  23: Maritime Vessel\n  24: Motorboat\n  25: Sailboat\n  26: Tugboat\n  27: Barge\n  28: Fishing Vessel\n  29: Ferry\n  30: Yacht\n  31: Container Ship\n  32: Oil Tanker\n  33: Engineering Vehicle\n  34: Tower crane\n  35: Container Crane\n  36: Reach Stacker\n  37: Straddle Carrier\n  38: Mobile Crane\n  39: Dump Truck\n  40: Haul Truck\n  41: Scraper/Tractor\n  42: Front loader/Bulldozer\n  43: Excavator\n  44: Cement Mixer\n  45: Ground Grader\n  46: Hut/Tent\n  47: Shed\n  48: Building\n  49: Aircraft Hangar\n  50: Damaged Building\n  51: Facility\n  52: Construction Site\n  53: Vehicle Lot\n  54: Helipad\n  55: Storage Tank\n  56: Shipping container lot\n  57: Shipping Container\n  58: Pylon\n  59: Tower\n\n# Download script/URL (optional) ---------------------------------------------------------------------------------------\ndownload: |\n  import json\n  import os\n  from pathlib import Path\n\n  import numpy as np\n  from PIL import Image\n  from tqdm import tqdm\n\n  from utils.dataloaders import autosplit\n  from utils.general import download, xyxy2xywhn\n\n\n  def convert_labels(fname=Path('xView/xView_train.geojson')):\n      # Convert xView geoJSON labels to YOLO format\n      path = fname.parent\n      with open(fname) as f:\n          print(f'Loading {fname}...')\n          data = json.load(f)\n\n      # Make dirs\n      labels = Path(path / 'labels' / 'train')\n      os.system(f'rm -rf {labels}')\n      labels.mkdir(parents=True, exist_ok=True)\n\n      # xView classes 11-94 to 0-59\n      xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,\n                           12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,\n                           29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,\n                           47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]\n\n      shapes = {}\n      for feature in tqdm(data['features'], desc=f'Converting {fname}'):\n          p = feature['properties']\n          if p['bounds_imcoords']:\n              id = p['image_id']\n              file = path / 'train_images' / id\n              if 
file.exists():  # 1395.tif missing\n                  try:\n                      box = np.array([int(num) for num in p['bounds_imcoords'].split(\",\")])\n                      assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'\n                      cls = p['type_id']\n                      cls = xview_class2index[int(cls)]  # xView class to 0-59\n                      assert 59 >= cls >= 0, f'incorrect class index {cls}'\n\n                      # Write YOLO label\n                      if id not in shapes:\n                          shapes[id] = Image.open(file).size\n                      box = xyxy2xywhn(box[None].astype(np.float64), w=shapes[id][0], h=shapes[id][1], clip=True)\n                      with open((labels / id).with_suffix('.txt'), 'a') as f:\n                          f.write(f\"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\\n\")  # write label.txt\n                  except Exception as e:\n                      print(f'WARNING: skipping one label for {file}: {e}')\n\n\n  # Download manually from https://challenge.xviewdataset.org\n  dir = Path(yaml['path'])  # dataset root dir\n  # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip',  # train labels\n  #         'https://d307kc0mrhucc3.cloudfront.net/train_images.zip',  # 15G, 847 train images\n  #         'https://d307kc0mrhucc3.cloudfront.net/val_images.zip']  # 5G, 282 val images (no labels)\n  # download(urls, dir=dir, delete=False)\n\n  # Convert labels\n  convert_labels(dir / 'xView_train.geojson')\n\n  # Move images\n  images = Path(dir / 'images')\n  images.mkdir(parents=True, exist_ok=True)\n  Path(dir / 'train_images').rename(dir / 'images' / 'train')\n  Path(dir / 'val_images').rename(dir / 'images' / 'val')\n\n  # Split\n  autosplit(dir / 'images' / 'train')\n"
  },
  {
    "path": "detect.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.\n\nUsage - sources:\n    $ python detect.py --weights yolov5s.pt --source 0                               # webcam\n                                                     img.jpg                         # image\n                                                     vid.mp4                         # video\n                                                     screen                          # screenshot\n                                                     path/                           # directory\n                                                     list.txt                        # list of images\n                                                     list.streams                    # list of streams\n                                                     'path/*.jpg'                    # glob\n                                                     'https://youtu.be/LNwODJXcvt4'  # YouTube\n                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n\nUsage - formats:\n    $ python detect.py --weights yolov5s.pt                 # PyTorch\n                                 yolov5s.torchscript        # TorchScript\n                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                                 yolov5s_openvino_model     # OpenVINO\n                                 yolov5s.engine             # TensorRT\n                                 yolov5s.mlpackage          # CoreML (macOS-only)\n                                 yolov5s_saved_model        # TensorFlow SavedModel\n                                 yolov5s.pb                 # TensorFlow GraphDef\n                                 yolov5s.tflite             # TensorFlow Lite\n                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU\n                                 yolov5s_paddle_model       # PaddlePaddle\n\"\"\"\n\nimport argparse\nimport csv\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport torch\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom ultralytics.utils.plotting import Annotator, colors, save_one_box\n\nfrom models.common import DetectMultiBackend\nfrom utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams\nfrom utils.general import (\n    LOGGER,\n    Profile,\n    check_file,\n    check_img_size,\n    check_imshow,\n    check_requirements,\n    colorstr,\n    cv2,\n    increment_path,\n    non_max_suppression,\n    print_args,\n    scale_boxes,\n    strip_optimizer,\n    xyxy2xywh,\n)\nfrom utils.torch_utils import select_device, smart_inference_mode\n\n\n@smart_inference_mode()\ndef run(\n    weights=ROOT / \"yolov5s.pt\",  # model path or triton URL\n    source=ROOT / \"data/images\",  # file/dir/URL/glob/screen/0(webcam)\n    data=ROOT / \"data/coco128.yaml\",  # dataset.yaml path\n    imgsz=(640, 640),  # inference size (height, width)\n    conf_thres=0.25,  # confidence threshold\n    iou_thres=0.45,  # NMS IOU threshold\n    max_det=1000,  # maximum detections per image\n    device=\"\",  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n    view_img=False,  # show results\n    save_txt=False,  # save results to *.txt\n    save_format=0,  # save boxes coordinates in YOLO format or Pascal-VOC format (0 for YOLO and 1 for Pascal-VOC)\n    save_csv=False,  # save results in CSV format\n    save_conf=False,  # save confidences in --save-txt labels\n    save_crop=False,  # save cropped prediction boxes\n    nosave=False,  # do not save images/videos\n    classes=None,  # filter by class: --class 0, or --class 0 2 3\n    agnostic_nms=False,  # class-agnostic NMS\n    augment=False,  # augmented inference\n    visualize=False,  # visualize features\n    update=False,  # update all models\n    project=ROOT / \"runs/detect\",  # save results to project/name\n    name=\"exp\",  # save results to project/name\n    exist_ok=False,  # existing project/name ok, do not increment\n    line_thickness=3,  # bounding box thickness (pixels)\n    hide_labels=False,  # hide labels\n    hide_conf=False,  # hide confidences\n    half=False,  # use FP16 half-precision inference\n    dnn=False,  # use OpenCV DNN for ONNX inference\n    vid_stride=1,  # video frame-rate stride\n):\n    \"\"\"Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc.\n\n    Args:\n        weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'.\n        source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam\n            index. Default is 'data/images'.\n        data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'.\n        imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640).\n        conf_thres (float): Confidence threshold for detections. Default is 0.25.\n        iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45.\n        max_det (int): Maximum number of detections per image. Default is 1000.\n        device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which uses\n            the best available device.\n        view_img (bool): If True, display inference results using OpenCV. Default is False.\n        save_txt (bool): If True, save results in a text file. Default is False.\n        save_csv (bool): If True, save results in a CSV file. Default is False.\n        save_conf (bool): If True, include confidence scores in the saved results. Default is False.\n        save_crop (bool): If True, save cropped prediction boxes. Default is False.\n        nosave (bool): If True, do not save inference images or videos. Default is False.\n        classes (list[int]): List of class indices to filter detections by. Default is None.\n        agnostic_nms (bool): If True, perform class-agnostic non-max suppression. Default is False.\n        augment (bool): If True, use augmented inference. Default is False.\n        visualize (bool): If True, visualize feature maps. Default is False.\n        update (bool): If True, update all models' weights. Default is False.\n        project (str | Path): Directory to save results. Default is 'runs/detect'.\n        name (str): Name of the current experiment; used to create a subdirectory within 'project'. 
Default is 'exp'.\n        exist_ok (bool): If True, existing directories with the same name are reused instead of being incremented.\n            Default is False.\n        line_thickness (int): Thickness of bounding box lines in pixels. Default is 3.\n        hide_labels (bool): If True, do not display labels on bounding boxes. Default is False.\n        hide_conf (bool): If True, do not display confidence scores on bounding boxes. Default is False.\n        half (bool): If True, use FP16 half-precision inference. Default is False.\n        dnn (bool): If True, use OpenCV DNN backend for ONNX inference. Default is False.\n        vid_stride (int): Stride for processing video frames, to skip frames between processing. Default is 1.\n\n    Returns:\n        None\n\n    Examples:\n        ```python\n        from ultralytics import run\n\n        # Run inference on an image\n        run(source='data/images/example.jpg', weights='yolov5s.pt', device='0')\n\n        # Run inference on a video with specific confidence threshold\n        run(source='data/videos/example.mp4', weights='yolov5s.pt', conf_thres=0.4, device='0')\n        ```\n    \"\"\"\n    source = str(source)\n    save_img = not nosave and not source.endswith(\".txt\")  # save inference images\n    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)\n    is_url = source.lower().startswith((\"rtsp://\", \"rtmp://\", \"http://\", \"https://\"))\n    webcam = source.isnumeric() or source.endswith(\".streams\") or (is_url and not is_file)\n    screenshot = source.lower().startswith(\"screen\")\n    if is_url and is_file:\n        source = check_file(source)  # download\n\n    # Directories\n    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run\n    (save_dir / \"labels\" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir\n\n    # Load model\n    device = select_device(device)\n    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n    stride, names, pt = model.stride, model.names, model.pt\n    imgsz = check_img_size(imgsz, s=stride)  # check image size\n\n    # Dataloader\n    bs = 1  # batch_size\n    if webcam:\n        view_img = check_imshow(warn=True)\n        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n        bs = len(dataset)\n    elif screenshot:\n        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)\n    else:\n        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n    vid_path, vid_writer = [None] * bs, [None] * bs\n\n    # Run inference\n    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup\n    seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))\n    for path, im, im0s, vid_cap, s in dataset:\n        with dt[0]:\n            im = torch.from_numpy(im).to(model.device)\n            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32\n            im /= 255  # 0 - 255 to 0.0 - 1.0\n            if len(im.shape) == 3:\n                im = im[None]  # expand for batch dim\n            if model.xml and im.shape[0] > 1:\n                ims = torch.chunk(im, im.shape[0], 0)\n\n        # Inference\n        with dt[1]:\n            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False\n            if model.xml and im.shape[0] > 1:\n                pred = None\n                for image 
in ims:\n                    if pred is None:\n                        pred = model(image, augment=augment, visualize=visualize).unsqueeze(0)\n                    else:\n                        pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0)\n                pred = [pred, None]\n            else:\n                pred = model(im, augment=augment, visualize=visualize)\n        # NMS\n        with dt[2]:\n            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)\n\n        # Second-stage classifier (optional)\n        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)\n\n        # Define the path for the CSV file\n        csv_path = save_dir / \"predictions.csv\"\n\n        # Create or append to the CSV file\n        def write_to_csv(image_name, prediction, confidence):\n            \"\"\"Writes prediction data for an image to a CSV file, appending if the file exists.\"\"\"\n            data = {\"Image Name\": image_name, \"Prediction\": prediction, \"Confidence\": confidence}\n            file_exists = os.path.isfile(csv_path)\n            with open(csv_path, mode=\"a\", newline=\"\") as f:\n                writer = csv.DictWriter(f, fieldnames=data.keys())\n                if not file_exists:\n                    writer.writeheader()\n                writer.writerow(data)\n\n        # Process predictions\n        for i, det in enumerate(pred):  # per image\n            seen += 1\n            if webcam:  # batch_size >= 1\n                p, im0, frame = path[i], im0s[i].copy(), dataset.count\n                s += f\"{i}: \"\n            else:\n                p, im0, frame = path, im0s.copy(), getattr(dataset, \"frame\", 0)\n\n            p = Path(p)  # to Path\n            save_path = str(save_dir / p.name)  # im.jpg\n            txt_path = str(save_dir / \"labels\" / p.stem) + (\"\" if dataset.mode == \"image\" else f\"_{frame}\")  # im.txt\n            s += \"{:g}x{:g} \".format(*im.shape[2:])  # print string\n            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh\n            imc = im0.copy() if save_crop else im0  # for save_crop\n            annotator = Annotator(im0, line_width=line_thickness, example=str(names))\n            if len(det):\n                # Rescale boxes from img_size to im0 size\n                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()\n\n                # Print results\n                for c in det[:, 5].unique():\n                    n = (det[:, 5] == c).sum()  # detections per class\n                    s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \"  # add to string\n\n                # Write results\n                for *xyxy, conf, cls in reversed(det):\n                    c = int(cls)  # integer class\n                    label = names[c] if hide_conf else f\"{names[c]}\"\n                    confidence = float(conf)\n                    confidence_str = f\"{confidence:.2f}\"\n\n                    if save_csv:\n                        write_to_csv(p.name, label, confidence_str)\n\n                    if save_txt:  # Write to file\n                        if save_format == 0:\n                            coords = (\n                                (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()\n                            )  # normalized xywh\n                        else:\n                            coords = (torch.tensor(xyxy).view(1, 4) / gn).view(-1).tolist()  
# xyxy\n                        line = (cls, *coords, conf) if save_conf else (cls, *coords)  # label format\n                        with open(f\"{txt_path}.txt\", \"a\") as f:\n                            f.write((\"%g \" * len(line)).rstrip() % line + \"\\n\")\n\n                    if save_img or save_crop or view_img:  # Add bbox to image\n                        c = int(cls)  # integer class\n                        label = None if hide_labels else (names[c] if hide_conf else f\"{names[c]} {conf:.2f}\")\n                        annotator.box_label(xyxy, label, color=colors(c, True))\n                    if save_crop:\n                        save_one_box(xyxy, imc, file=save_dir / \"crops\" / names[c] / f\"{p.stem}.jpg\", BGR=True)\n\n            # Stream results\n            im0 = annotator.result()\n            if view_img:\n                if platform.system() == \"Linux\" and p not in windows:\n                    windows.append(p)\n                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)\n                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])\n                cv2.imshow(str(p), im0)\n                cv2.waitKey(1)  # 1 millisecond\n\n            # Save results (image with detections)\n            if save_img:\n                if dataset.mode == \"image\":\n                    cv2.imwrite(save_path, im0)\n                else:  # 'video' or 'stream'\n                    if vid_path[i] != save_path:  # new video\n                        vid_path[i] = save_path\n                        if isinstance(vid_writer[i], cv2.VideoWriter):\n                            vid_writer[i].release()  # release previous video writer\n                        if vid_cap:  # video\n                            fps = vid_cap.get(cv2.CAP_PROP_FPS)\n                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n                        else:  # stream\n                            fps, w, h = 30, im0.shape[1], im0.shape[0]\n                        save_path = str(Path(save_path).with_suffix(\".mp4\"))  # force *.mp4 suffix on results videos\n                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n                    vid_writer[i].write(im0)\n\n        # Print time (inference-only)\n        LOGGER.info(f\"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms\")\n\n    # Print results\n    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image\n    LOGGER.info(f\"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}\" % t)\n    if save_txt or save_img:\n        s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else \"\"\n        LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n    if update:\n        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)\n\n\ndef parse_opt():\n    \"\"\"Parse command-line arguments for YOLOv5 detection, allowing custom inference options and model configurations.\n\n    Args:\n        --weights (str | list[str], optional): Model path or Triton URL. Defaults to ROOT / 'yolov5s.pt'.\n        --source (str, optional): File/dir/URL/glob/screen/0(webcam). Defaults to ROOT / 'data/images'.\n        --data (str, optional): Dataset YAML path. 
Provides dataset configuration information.\n        --imgsz (list[int], optional): Inference size (height, width). Defaults to [640].\n        --conf-thres (float, optional): Confidence threshold. Defaults to 0.25.\n        --iou-thres (float, optional): NMS IoU threshold. Defaults to 0.45.\n        --max-det (int, optional): Maximum number of detections per image. Defaults to 1000.\n        --device (str, optional): CUDA device, i.e., '0' or '0,1,2,3' or 'cpu'. Defaults to \"\".\n        --view-img (bool, optional): Flag to display results. Defaults to False.\n        --save-txt (bool, optional): Flag to save results to *.txt files. Defaults to False.\n        --save-csv (bool, optional): Flag to save results in CSV format. Defaults to False.\n        --save-conf (bool, optional): Flag to save confidences in labels saved via --save-txt. Defaults to False.\n        --save-crop (bool, optional): Flag to save cropped prediction boxes. Defaults to False.\n        --nosave (bool, optional): Flag to prevent saving images/videos. Defaults to False.\n        --classes (list[int], optional): List of classes to filter results by, e.g., '--classes 0 2 3'. Defaults to\n            None.\n        --agnostic-nms (bool, optional): Flag for class-agnostic NMS. Defaults to False.\n        --augment (bool, optional): Flag for augmented inference. Defaults to False.\n        --visualize (bool, optional): Flag for visualizing features. Defaults to False.\n        --update (bool, optional): Flag to update all models in the model directory. Defaults to False.\n        --project (str, optional): Directory to save results. Defaults to ROOT / 'runs/detect'.\n        --name (str, optional): Sub-directory name for saving results within --project. Defaults to 'exp'.\n        --exist-ok (bool, optional): Flag to allow overwriting if the project/name already exists. Defaults to False.\n        --line-thickness (int, optional): Thickness (in pixels) of bounding boxes. Defaults to 3.\n        --hide-labels (bool, optional): Flag to hide labels in the output. Defaults to False.\n        --hide-conf (bool, optional): Flag to hide confidences in the output. Defaults to False.\n        --half (bool, optional): Flag to use FP16 half-precision inference. Defaults to False.\n        --dnn (bool, optional): Flag to use OpenCV DNN for ONNX inference. Defaults to False.\n        --vid-stride (int, optional): Video frame-rate stride, determining the number of frames to skip in between\n            consecutive frames. 
Defaults to 1.\n\n    Returns:\n        argparse.Namespace: Parsed command-line arguments as an argparse.Namespace object.\n\n    Examples:\n        ```python\n        from ultralytics import YOLOv5\n        args = YOLOv5.parse_opt()\n        ```\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s.pt\", help=\"model path or triton URL\")\n    parser.add_argument(\"--source\", type=str, default=ROOT / \"data/images\", help=\"file/dir/URL/glob/screen/0(webcam)\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"(optional) dataset.yaml path\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", nargs=\"+\", type=int, default=[640], help=\"inference size h,w\")\n    parser.add_argument(\"--conf-thres\", type=float, default=0.25, help=\"confidence threshold\")\n    parser.add_argument(\"--iou-thres\", type=float, default=0.45, help=\"NMS IoU threshold\")\n    parser.add_argument(\"--max-det\", type=int, default=1000, help=\"maximum detections per image\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--view-img\", action=\"store_true\", help=\"show results\")\n    parser.add_argument(\"--save-txt\", action=\"store_true\", help=\"save results to *.txt\")\n    parser.add_argument(\n        \"--save-format\",\n        type=int,\n        default=0,\n        help=\"whether to save boxes coordinates in YOLO format or Pascal-VOC format when save-txt is True, 0 for YOLO and 1 for Pascal-VOC\",\n    )\n    parser.add_argument(\"--save-csv\", action=\"store_true\", help=\"save results in CSV format\")\n    parser.add_argument(\"--save-conf\", action=\"store_true\", help=\"save confidences in --save-txt labels\")\n    parser.add_argument(\"--save-crop\", action=\"store_true\", help=\"save cropped prediction boxes\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"do not save images/videos\")\n    parser.add_argument(\"--classes\", nargs=\"+\", type=int, help=\"filter by class: --classes 0, or --classes 0 2 3\")\n    parser.add_argument(\"--agnostic-nms\", action=\"store_true\", help=\"class-agnostic NMS\")\n    parser.add_argument(\"--augment\", action=\"store_true\", help=\"augmented inference\")\n    parser.add_argument(\"--visualize\", action=\"store_true\", help=\"visualize features\")\n    parser.add_argument(\"--update\", action=\"store_true\", help=\"update all models\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/detect\", help=\"save results to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save results to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--line-thickness\", default=3, type=int, help=\"bounding box thickness (pixels)\")\n    parser.add_argument(\"--hide-labels\", default=False, action=\"store_true\", help=\"hide labels\")\n    parser.add_argument(\"--hide-conf\", default=False, action=\"store_true\", help=\"hide confidences\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--dnn\", action=\"store_true\", help=\"use OpenCV DNN for ONNX inference\")\n    parser.add_argument(\"--vid-stride\", type=int, default=1, help=\"video frame-rate stride\")\n    opt = parser.parse_args()\n    opt.imgsz *= 2 if 
len(opt.imgsz) == 1 else 1  # expand\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 model inference based on provided command-line arguments, validating dependencies before running.\n\n    Args:\n        opt (argparse.Namespace): Command-line arguments for YOLOv5 detection. See function `parse_opt` for details.\n\n    Returns:\n        None\n\n    Notes:\n        This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified\n        options. Refer to the usage guide and examples for more information about different sources and formats at:\n        https://github.com/ultralytics/ultralytics\n\n    Example usage:\n\n    ```python\n    if __name__ == \"__main__\":\n        opt = parse_opt()\n        main(opt)\n    ```\n    \"\"\"\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"tensorboard\", \"thop\"))\n    run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "export.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nExport a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.\n\nFormat                      | `export.py --include`         | Model\n---                         | ---                           | ---\nPyTorch                     | -                             | yolov5s.pt\nTorchScript                 | `torchscript`                 | yolov5s.torchscript\nONNX                        | `onnx`                        | yolov5s.onnx\nOpenVINO                    | `openvino`                    | yolov5s_openvino_model/\nTensorRT                    | `engine`                      | yolov5s.engine\nCoreML                      | `coreml`                      | yolov5s.mlmodel\nTensorFlow SavedModel       | `saved_model`                 | yolov5s_saved_model/\nTensorFlow GraphDef         | `pb`                          | yolov5s.pb\nTensorFlow Lite             | `tflite`                      | yolov5s.tflite\nTensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite\nTensorFlow.js               | `tfjs`                        | yolov5s_web_model/\nPaddlePaddle                | `paddle`                      | yolov5s_paddle_model/\n\nRequirements:\n    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU\n    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU\n\nUsage:\n    $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...\n\nInference:\n    $ python detect.py --weights yolov5s.pt                 # PyTorch\n                                 yolov5s.torchscript        # TorchScript\n                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                                 yolov5s_openvino_model     # OpenVINO\n                                 yolov5s.engine             # TensorRT\n                                 yolov5s.mlmodel            # CoreML (macOS-only)\n                                 yolov5s_saved_model        # TensorFlow SavedModel\n                                 yolov5s.pb                 # TensorFlow GraphDef\n                                 yolov5s.tflite             # TensorFlow Lite\n                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU\n                                 yolov5s_paddle_model       # PaddlePaddle\n\nTensorFlow.js:\n    $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example\n    $ npm install\n    $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model\n    $ npm start\n\"\"\"\n\nimport argparse\nimport contextlib\nimport json\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport time\nimport warnings\nfrom pathlib import Path\n\nimport pandas as pd\nimport torch\nfrom torch.utils.mobile_optimizer import optimize_for_mobile\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nif platform.system() != \"Windows\":\n    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom models.experimental import attempt_load\nfrom models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel\nfrom utils.dataloaders import LoadImages\nfrom utils.general import (\n    LOGGER,\n    Profile,\n    check_dataset,\n    check_img_size,\n    check_requirements,\n    check_version,\n    check_yaml,\n    colorstr,\n    file_size,\n    get_default_args,\n    print_args,\n    url2file,\n    yaml_save,\n)\nfrom utils.torch_utils import select_device, smart_inference_mode\n\nMACOS = platform.system() == \"Darwin\"  # macOS environment\n\n\nclass iOSModel(torch.nn.Module):\n    \"\"\"An iOS-compatible wrapper for YOLOv5 models that normalizes input images based on their dimensions.\"\"\"\n\n    def __init__(self, model, im):\n        \"\"\"Initializes an iOS compatible model with normalization based on image dimensions.\n\n        Args:\n            model (torch.nn.Module): The PyTorch model to be adapted for iOS compatibility.\n            im (torch.Tensor): An input tensor representing a batch of images with shape (B, C, H, W).\n\n        Returns:\n            None: This method does not return any value.\n\n        Notes:\n            This initializer configures normalization based on the input image dimensions, which is critical for\n            ensuring the model's compatibility and proper functionality on iOS devices. The normalization step\n            involves dividing by the image width if the image is square; otherwise, additional conditions might apply.\n        \"\"\"\n        super().__init__()\n        _b, _c, h, w = im.shape  # batch, channel, height, width\n        self.model = model\n        self.nc = model.nc  # number of classes\n        if w == h:\n            self.normalize = 1.0 / w\n        else:\n            self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h])  # broadcast (slower, smaller)\n            # np = model(im)[0].shape[1]  # number of points\n            # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. 
/ h]).expand(np, 4)  # explicit (faster, larger)\n\n    def forward(self, x):\n        \"\"\"Run a forward pass on the input tensor, returning class confidences and normalized coordinates.\n\n        Args:\n            x (torch.Tensor): Input tensor containing the image data with shape (batch, channels, height, width).\n\n        Returns:\n            torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), and class\n                probabilities (cls), having shape (N, 4 + 1 + C), where N is the number of predictions, and C is the\n                number of classes.\n\n        Examples:\n            ```python\n            model = iOSModel(pretrained_model, input_image)\n            output = model.forward(torch_input_tensor)\n            ```\n        \"\"\"\n        xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)\n        return cls * conf, xywh * self.normalize  # confidence (3780, 80), coordinates (3780, 4)\n\n\ndef export_formats():\n    r\"\"\"Returns a DataFrame of supported YOLOv5 model export formats and their properties.\n\n    Returns:\n        pandas.DataFrame: A DataFrame containing supported export formats and their properties. The DataFrame includes\n            columns for format name, CLI argument suffix, file extension or directory name, and boolean flags indicating\n            if the export format supports training and detection.\n\n    Examples:\n        ```python\n        formats = export_formats()\n        print(f\"Supported export formats:\\n{formats}\")\n        ```\n\n    Notes:\n        The DataFrame contains the following columns:\n        - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.).\n        - Include Argument: The argument to use with the export script to include this format.\n        - File Suffix: File extension or directory name associated with the format.\n        - Supports Training: Whether the format supports training.\n        - Supports Detection: Whether the format supports detection.\n    \"\"\"\n    x = [\n        [\"PyTorch\", \"-\", \".pt\", True, True],\n        [\"TorchScript\", \"torchscript\", \".torchscript\", True, True],\n        [\"ONNX\", \"onnx\", \".onnx\", True, True],\n        [\"OpenVINO\", \"openvino\", \"_openvino_model\", True, False],\n        [\"TensorRT\", \"engine\", \".engine\", False, True],\n        [\"CoreML\", \"coreml\", \".mlpackage\", True, False],\n        [\"TensorFlow SavedModel\", \"saved_model\", \"_saved_model\", True, True],\n        [\"TensorFlow GraphDef\", \"pb\", \".pb\", True, True],\n        [\"TensorFlow Lite\", \"tflite\", \".tflite\", True, False],\n        [\"TensorFlow Edge TPU\", \"edgetpu\", \"_edgetpu.tflite\", False, False],\n        [\"TensorFlow.js\", \"tfjs\", \"_web_model\", False, False],\n        [\"PaddlePaddle\", \"paddle\", \"_paddle_model\", True, True],\n    ]\n    return pd.DataFrame(x, columns=[\"Format\", \"Argument\", \"Suffix\", \"CPU\", \"GPU\"])\n\n\ndef try_export(inner_func):\n    \"\"\"Log success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export.\n\n    Args:\n        inner_func (Callable): The model export function to be wrapped by the decorator.\n\n    Returns:\n        Callable: The wrapped function that logs execution details. 
When executed, this wrapper function returns either:\n            - Tuple (str | torch.nn.Module): On success — the file path of the exported model and the model instance.\n            - Tuple (None, None): On failure — None values indicating export failure.\n\n    Examples:\n        ```python\n        @try_export\n        def export_onnx(model, filepath):\n            # implementation here\n            pass\n\n        exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx')\n        ```\n\n    Notes:\n        For additional requirements and model export formats, refer to the\n        [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics).\n    \"\"\"\n    inner_args = get_default_args(inner_func)\n\n    def outer_func(*args, **kwargs):\n        \"\"\"Logs success/failure and execution details of model export functions wrapped with @try_export decorator.\"\"\"\n        prefix = inner_args[\"prefix\"]\n        try:\n            with Profile() as dt:\n                f, model = inner_func(*args, **kwargs)\n            LOGGER.info(f\"{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)\")\n            return f, model\n        except Exception as e:\n            LOGGER.info(f\"{prefix} export failure ❌ {dt.t:.1f}s: {e}\")\n            return None, None\n\n    return outer_func\n\n\n@try_export\ndef export_torchscript(model, im, file, optimize, prefix=colorstr(\"TorchScript:\")):\n    \"\"\"Export a YOLOv5 model to the TorchScript format.\n\n    Args:\n        model (torch.nn.Module): The YOLOv5 model to be exported.\n        im (torch.Tensor): Example input tensor to be used for tracing the TorchScript model.\n        file (Path): File path where the exported TorchScript model will be saved.\n        optimize (bool): If True, applies optimizations for mobile deployment.\n        prefix (str): Optional prefix for log messages. Default is 'TorchScript:'.\n\n    Returns:\n        (str | None, torch.jit.ScriptModule | None): A tuple containing the file path of the exported model (as a\n            string) and the TorchScript model (as a torch.jit.ScriptModule). 
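In the current implementation the second element is always None, since the serialized module is written directly to disk. 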
If the export fails, both elements of the\n            tuple will be None.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        import torch\n        from models.experimental import attempt_load\n        from utils.torch_utils import select_device\n\n        # Load model\n        weights = 'yolov5s.pt'\n        device = select_device('')\n        model = attempt_load(weights, device=device)\n\n        # Example input tensor\n        im = torch.zeros(1, 3, 640, 640).to(device)\n\n        # Export model\n        file = Path('yolov5s.torchscript')\n        export_torchscript(model, im, file, optimize=False)\n        ```\n\n    Notes:\n        - This function uses tracing to create the TorchScript model.\n        - Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`)\n          within the TorchScript model package.\n        - For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html\n    \"\"\"\n    LOGGER.info(f\"\\n{prefix} starting export with torch {torch.__version__}...\")\n    f = file.with_suffix(\".torchscript\")\n\n    ts = torch.jit.trace(model, im, strict=False)\n    d = {\"shape\": im.shape, \"stride\": int(max(model.stride)), \"names\": model.names}\n    extra_files = {\"config.txt\": json.dumps(d)}  # torch._C.ExtraFilesMap()\n    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html\n        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)\n    else:\n        ts.save(str(f), _extra_files=extra_files)\n    return f, None\n\n\n@try_export\ndef export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr(\"ONNX:\")):\n    \"\"\"Export a YOLOv5 model to ONNX format with dynamic axes support and optional model simplification.\n\n    Args:\n        model (torch.nn.Module): The YOLOv5 model to be exported.\n        im (torch.Tensor): A sample input tensor for model tracing, usually the shape is (1, 3, height, width).\n        file (pathlib.Path | str): The output file path where the ONNX model will be saved.\n        opset (int): The ONNX opset version to use for export.\n        dynamic (bool): If True, enables dynamic axes for batch, height, and width dimensions.\n        simplify (bool): If True, applies ONNX model simplification for optimization.\n        prefix (str): A prefix string for logging messages, defaults to 'ONNX:'.\n\n    Returns:\n        tuple[pathlib.Path | str, None]: The path to the saved ONNX model file and None (consistent with decorator).\n\n    Raises:\n        ImportError: If required libraries for export (e.g., 'onnx', 'onnx-simplifier') are not installed.\n        AssertionError: If the simplification check fails.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        import torch\n        from models.experimental import attempt_load\n        from utils.torch_utils import select_device\n\n        # Load model\n        weights = 'yolov5s.pt'\n        device = select_device('')\n        model = attempt_load(weights, map_location=device)\n\n        # Example input tensor\n        im = torch.zeros(1, 3, 640, 640).to(device)\n\n        # Export model\n        file_path = Path('yolov5s.onnx')\n        export_onnx(model, im, file_path, opset=12, dynamic=True, simplify=True)\n        ```\n\n    Notes:\n        The required packages for this function can be installed via:\n        ```\n        pip install onnx onnx-simplifier 
onnxruntime onnxruntime-gpu\n        ```\n    \"\"\"\n    check_requirements((\"onnx>=1.12.0\", \"onnxscript\"))\n    import onnx\n\n    LOGGER.info(f\"\\n{prefix} starting export with onnx {onnx.__version__}...\")\n    f = str(file.with_suffix(\".onnx\"))\n\n    output_names = [\"output0\", \"output1\"] if isinstance(model, SegmentationModel) else [\"output0\"]\n    if dynamic:\n        dynamic = {\"images\": {0: \"batch\", 2: \"height\", 3: \"width\"}}  # shape(1,3,640,640)\n        if isinstance(model, SegmentationModel):\n            dynamic[\"output0\"] = {0: \"batch\", 1: \"anchors\"}  # shape(1,25200,85)\n            dynamic[\"output1\"] = {0: \"batch\", 2: \"mask_height\", 3: \"mask_width\"}  # shape(1,32,160,160)\n        elif isinstance(model, DetectionModel):\n            dynamic[\"output0\"] = {0: \"batch\", 1: \"anchors\"}  # shape(1,25200,85)\n\n    torch.onnx.export(\n        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu\n        im.cpu() if dynamic else im,\n        f,\n        verbose=False,\n        opset_version=opset,\n        do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False\n        input_names=[\"images\"],\n        output_names=output_names,\n        dynamic_axes=dynamic or None,\n    )\n\n    # Checks\n    model_onnx = onnx.load(f)  # load onnx model\n    onnx.checker.check_model(model_onnx)  # check onnx model\n\n    # Metadata\n    d = {\"stride\": int(max(model.stride)), \"names\": model.names}\n    for k, v in d.items():\n        meta = model_onnx.metadata_props.add()\n        meta.key, meta.value = k, str(v)\n    onnx.save(model_onnx, f)\n\n    # Simplify\n    if simplify:\n        try:\n            cuda = torch.cuda.is_available()\n            check_requirements((\"onnxruntime-gpu\" if cuda else \"onnxruntime\", \"onnxslim\"))\n            import onnxslim\n\n            LOGGER.info(f\"{prefix} slimming with onnxslim {onnxslim.__version__}...\")\n            model_onnx = onnxslim.slim(model_onnx)\n            onnx.save(model_onnx, f)\n        except Exception as e:\n            LOGGER.info(f\"{prefix} simplifier failure: {e}\")\n    return f, model_onnx\n\n\n@try_export\ndef export_openvino(file, metadata, half, int8, data, prefix=colorstr(\"OpenVINO:\")):\n    \"\"\"Export a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization.\n\n    Args:\n        file (Path): Path to the output file where the OpenVINO model will be saved.\n        metadata (dict): Dictionary including model metadata such as names and strides.\n        half (bool): If True, export the model with FP16 precision.\n        int8 (bool): If True, export the model with INT8 quantization.\n        data (str): Path to the dataset YAML file required for INT8 quantization.\n        prefix (str): Prefix string for logging purposes (default is \"OpenVINO:\").\n\n    Returns:\n        (str, openvino.runtime.Model | None): The OpenVINO model file path and openvino.runtime.Model object if export\n            is successful; otherwise, None.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        from ultralytics import YOLOv5\n\n        model = YOLOv5('yolov5s.pt')\n        export_openvino(Path('yolov5s.onnx'), metadata={'names': model.names, 'stride': model.stride}, half=True,\n                        int8=False, data='data.yaml')\n        ```\n\n        This will export the YOLOv5 model to OpenVINO with FP16 precision but without INT8 quantization, saving it to\n        the 
specified file path.\n\n    Notes:\n        - Requires `openvino-dev` package version 2023.0 or higher. Install with:\n          `$ pip install openvino-dev>=2023.0`\n        - For INT8 quantization, also requires `nncf` library version 2.5.0 or higher. Install with:\n          `$ pip install nncf>=2.5.0`\n    \"\"\"\n    check_requirements(\"openvino-dev>=2023.0\")  # requires openvino-dev: https://pypi.org/project/openvino-dev/\n    import openvino.runtime as ov\n    from openvino.tools import mo\n\n    LOGGER.info(f\"\\n{prefix} starting export with openvino {ov.__version__}...\")\n    f = str(file).replace(file.suffix, f\"_{'int8_' if int8 else ''}openvino_model{os.sep}\")\n    f_onnx = file.with_suffix(\".onnx\")\n    f_ov = str(Path(f) / file.with_suffix(\".xml\").name)\n\n    ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework=\"onnx\", compress_to_fp16=half)  # export\n\n    if int8:\n        check_requirements(\"nncf>=2.5.0\")  # requires at least version 2.5.0 to use the post-training quantization\n        import nncf\n        import numpy as np\n\n        from utils.dataloaders import create_dataloader\n\n        def gen_dataloader(yaml_path, task=\"train\", imgsz=640, workers=4):\n            \"\"\"Generates a DataLoader for model training or validation based on the given YAML dataset configuration.\"\"\"\n            data_yaml = check_yaml(yaml_path)\n            data = check_dataset(data_yaml)\n            dataloader = create_dataloader(\n                data[task], imgsz=imgsz, batch_size=1, stride=32, pad=0.5, single_cls=False, rect=False, workers=workers\n            )[0]\n            return dataloader\n\n        def transform_fn(data_item):\n            \"\"\"Quantization transform function.\n\n            Extracts and preprocess input data from dataloader item for quantization.\n\n            Args:\n                data_item: Tuple with data item produced by DataLoader during iteration\n\n            Returns:\n                input_tensor: Input data for quantization\n            \"\"\"\n            assert data_item[0].dtype == torch.uint8, \"input image must be uint8 for the quantization preprocessing\"\n\n            img = data_item[0].numpy().astype(np.float32)  # uint8 to fp16/32\n            img /= 255.0  # 0 - 255 to 0.0 - 1.0\n            return np.expand_dims(img, 0) if img.ndim == 3 else img\n\n        ds = gen_dataloader(data)\n        quantization_dataset = nncf.Dataset(ds, transform_fn)\n        ov_model = nncf.quantize(ov_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)\n\n    ov.serialize(ov_model, f_ov)  # save\n    yaml_save(Path(f) / file.with_suffix(\".yaml\").name, metadata)  # add metadata.yaml\n    return f, None\n\n\n@try_export\ndef export_paddle(model, im, file, metadata, prefix=colorstr(\"PaddlePaddle:\")):\n    \"\"\"Export a YOLOv5 PyTorch model to PaddlePaddle format using X2Paddle, saving the converted model and metadata.\n\n    Args:\n        model (torch.nn.Module): The YOLOv5 model to be exported.\n        im (torch.Tensor): Input tensor used for model tracing during export.\n        file (pathlib.Path): Path to the source file to be converted.\n        metadata (dict): Additional metadata to be saved alongside the model.\n        prefix (str): Prefix for logging information.\n\n    Returns:\n        tuple (str, None): A tuple where the first element is the path to the saved PaddlePaddle model, and the second\n            element is None.\n\n    Examples:\n        ```python\n        from pathlib import 
Path\n        import torch\n\n        # Assume 'model' is a pre-trained YOLOv5 model and 'im' is an example input tensor\n        model = ...  # Load your model here\n        im = torch.randn((1, 3, 640, 640))  # Dummy input tensor for tracing\n        file = Path(\"yolov5s.pt\")\n        metadata = {\"stride\": 32, \"names\": [\"person\", \"bicycle\", \"car\", \"motorbike\"]}\n\n        export_paddle(model=model, im=im, file=file, metadata=metadata)\n        ```\n\n    Notes:\n        Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can\n        install them via pip:\n        ```\n        $ pip install paddlepaddle x2paddle\n        ```\n    \"\"\"\n    check_requirements((\"paddlepaddle>=3.0.0\", \"x2paddle\"))\n    import x2paddle\n    from x2paddle.convert import pytorch2paddle\n\n    LOGGER.info(f\"\\n{prefix} starting export with X2Paddle {x2paddle.__version__}...\")\n    f = str(file).replace(\".pt\", f\"_paddle_model{os.sep}\")\n\n    pytorch2paddle(module=model, save_dir=f, jit_type=\"trace\", input_examples=[im])  # export\n    yaml_save(Path(f) / file.with_suffix(\".yaml\").name, metadata)  # add metadata.yaml\n    return f, None\n\n\n@try_export\ndef export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr(\"CoreML:\")):\n    \"\"\"Export a YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support.\n\n    Args:\n        model (torch.nn.Module): The YOLOv5 model to be exported.\n        im (torch.Tensor): Example input tensor to trace the model.\n        file (pathlib.Path): Path object where the CoreML model will be saved.\n        int8 (bool): Flag indicating whether to use INT8 quantization (default is False).\n        half (bool): Flag indicating whether to use FP16 quantization (default is False).\n        nms (bool): Flag indicating whether to include Non-Maximum Suppression (default is False).\n        mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).\n        prefix (str): Prefix string for logging purposes (default is 'CoreML:').\n\n    Returns:\n        tuple[pathlib.Path | None, None]: The path to the saved CoreML model file, or (None, None) if there is an error.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        import torch\n        from models.yolo import Model\n        model = Model(cfg, ch=3, nc=80)\n        im = torch.randn(1, 3, 640, 640)\n        file = Path(\"yolov5s_coreml\")\n        export_coreml(model, im, file, int8=False, half=False, nms=True, mlmodel=False)\n        ```\n\n    Notes:\n        The exported CoreML model will be saved with a .mlmodel extension.\n        Quantization is supported only on macOS.\n    \"\"\"\n    check_requirements(\"coremltools\")\n    import coremltools as ct\n\n    LOGGER.info(f\"\\n{prefix} starting export with coremltools {ct.__version__}...\")\n    if mlmodel:\n        f = file.with_suffix(\".mlmodel\")\n        convert_to = \"neuralnetwork\"\n        precision = None\n    else:\n        f = file.with_suffix(\".mlpackage\")\n        convert_to = \"mlprogram\"\n        precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32\n    if nms:\n        model = iOSModel(model, im)\n    ts = torch.jit.trace(model, im, strict=False)  # TorchScript model\n    ct_model = ct.convert(\n        ts,\n        inputs=[ct.ImageType(\"image\", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])],\n        convert_to=convert_to,\n        compute_precision=precision,\n    )\n 
   bits, mode = (8, \"kmeans\") if int8 else (16, \"linear\") if half else (32, None)\n    if bits < 32:\n        if mlmodel:\n            with warnings.catch_warnings():\n                warnings.filterwarnings(\n                    \"ignore\", category=DeprecationWarning\n                )  # suppress numpy==1.20 float warning, fixed in coremltools==7.0\n                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)\n        elif bits == 8:\n            op_config = ct.optimize.coreml.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512)\n            config = ct.optimize.coreml.OptimizationConfig(global_config=op_config)\n            ct_model = ct.optimize.coreml.palettize_weights(ct_model, config)\n    ct_model.save(f)\n    return f, ct_model\n\n\n@try_export\ndef export_engine(\n    model, im, file, half, dynamic, simplify, workspace=4, verbose=False, cache=\"\", prefix=colorstr(\"TensorRT:\")\n):\n    \"\"\"Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0.\n\n    Args:\n        model (torch.nn.Module): YOLOv5 model to be exported.\n        im (torch.Tensor): Input tensor of shape (B, C, H, W).\n        file (pathlib.Path): Path to save the exported model.\n        half (bool): Set to True to export with FP16 precision.\n        dynamic (bool): Set to True to enable dynamic input shapes.\n        simplify (bool): Set to True to simplify the model during export.\n        workspace (int): Workspace size in GB (default is 4).\n        verbose (bool): Set to True for verbose logging output.\n        cache (str): Path to save the TensorRT timing cache.\n        prefix (str): Log message prefix.\n\n    Returns:\n        (pathlib.Path, None): Tuple containing the path to the exported model and None.\n\n    Raises:\n        AssertionError: If executed on CPU instead of GPU.\n        RuntimeError: If there is a failure in parsing the ONNX file.\n\n    Examples:\n        ```python\n        from ultralytics import YOLOv5\n        import torch\n        from pathlib import Path\n\n        model = YOLOv5('yolov5s.pt')  # Load a pre-trained YOLOv5 model\n        input_tensor = torch.randn(1, 3, 640, 640).cuda()  # example input tensor on GPU\n        export_path = Path('yolov5s.engine')  # export destination\n\n        export_engine(model.model, input_tensor, export_path, half=True, dynamic=True, simplify=True, workspace=8, verbose=True)\n        ```\n    \"\"\"\n    assert im.device.type != \"cpu\", \"export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`\"\n    try:\n        import tensorrt as trt\n    except Exception:\n        if platform.system() == \"Linux\":\n            check_requirements(\"nvidia-tensorrt\", cmds=\"-U --index-url https://pypi.ngc.nvidia.com\")\n        import tensorrt as trt\n\n    if trt.__version__[0] == \"7\":  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012\n        grid = model.model[-1].anchor_grid\n        model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]\n        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12\n        model.model[-1].anchor_grid = grid\n    else:  # TensorRT >= 8\n        check_version(trt.__version__, \"8.0.0\", hard=True)  # require tensorrt>=8.0.0\n        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12\n    onnx = file.with_suffix(\".onnx\")\n\n    LOGGER.info(f\"\\n{prefix} starting export with TensorRT {trt.__version__}...\")\n    is_trt10 = int(trt.__version__.split(\".\")[0]) >= 10  # is TensorRT >= 10\n    assert onnx.exists(), f\"failed to export ONNX file: {onnx}\"\n    f = file.with_suffix(\".engine\")  # TensorRT engine file\n    logger = trt.Logger(trt.Logger.INFO)\n    if verbose:\n        logger.min_severity = trt.Logger.Severity.VERBOSE\n\n    builder = trt.Builder(logger)\n    config = builder.create_builder_config()\n    if is_trt10:\n        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)\n    else:  # TensorRT versions 7, 8\n        config.max_workspace_size = workspace * 1 << 30\n    if cache:  # enable timing cache\n        Path(cache).parent.mkdir(parents=True, exist_ok=True)\n        buf = Path(cache).read_bytes() if Path(cache).exists() else b\"\"\n        timing_cache = config.create_timing_cache(buf)\n        config.set_timing_cache(timing_cache, ignore_mismatch=True)\n    flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)\n    network = builder.create_network(flag)\n    parser = trt.OnnxParser(network, logger)\n    if not parser.parse_from_file(str(onnx)):\n        raise RuntimeError(f\"failed to load ONNX file: {onnx}\")\n\n    inputs = [network.get_input(i) for i in range(network.num_inputs)]\n    outputs = [network.get_output(i) for i in range(network.num_outputs)]\n    for inp in inputs:\n        LOGGER.info(f'{prefix} input \"{inp.name}\" with shape{inp.shape} {inp.dtype}')\n    for out in outputs:\n        LOGGER.info(f'{prefix} output \"{out.name}\" with shape{out.shape} {out.dtype}')\n\n    if dynamic:\n        if im.shape[0] <= 1:\n            LOGGER.warning(f\"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument\")\n        profile = builder.create_optimization_profile()\n        for inp in inputs:\n            profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)\n        config.add_optimization_profile(profile)\n\n    LOGGER.info(f\"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}\")\n    if builder.platform_has_fast_fp16 and half:\n        config.set_flag(trt.BuilderFlag.FP16)\n\n    build = builder.build_serialized_network if is_trt10 else builder.build_engine\n    with build(network, config) as engine, open(f, \"wb\") as t:\n        t.write(engine if is_trt10 else engine.serialize())\n    if cache:  # save timing cache\n        with open(cache, \"wb\") as c:\n            c.write(config.get_timing_cache().serialize())\n    return f, None\n\n\n@try_export\ndef export_saved_model(\n    model,\n    
im,\n    file,\n    dynamic,\n    tf_nms=False,\n    agnostic_nms=False,\n    topk_per_class=100,\n    topk_all=100,\n    iou_thres=0.45,\n    conf_thres=0.25,\n    keras=False,\n    prefix=colorstr(\"TensorFlow SavedModel:\"),\n):\n    \"\"\"Export a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression\n    (NMS).\n\n    Args:\n        model (torch.nn.Module): The PyTorch model to convert.\n        im (torch.Tensor): Sample input tensor with shape (B, C, H, W) for tracing.\n        file (pathlib.Path): File path to save the exported model.\n        dynamic (bool): Flag to indicate whether dynamic axes should be used.\n        tf_nms (bool, optional): Enable TensorFlow non-maximum suppression (NMS). Default is False.\n        agnostic_nms (bool, optional): Enable class-agnostic NMS. Default is False.\n        topk_per_class (int, optional): Top K detections per class to keep before applying NMS. Default is 100.\n        topk_all (int, optional): Top K detections across all classes to keep before applying NMS. Default is 100.\n        iou_thres (float, optional): IoU threshold for NMS. Default is 0.45.\n        conf_thres (float, optional): Confidence threshold for detections. Default is 0.25.\n        keras (bool, optional): Save the model in Keras format if True. Default is False.\n        prefix (str, optional): Prefix for logging messages. Default is \"TensorFlow SavedModel:\".\n\n    Returns:\n        tuple[str, tf.keras.Model | None]: A tuple containing the path to the saved model folder and the Keras model instance,\n        or None if TensorFlow export fails.\n\n    Examples:\n        ```python\n        model, im = ...  # Initialize your PyTorch model and input tensor\n        export_saved_model(model, im, Path(\"yolov5_saved_model\"), dynamic=True)\n        ```\n\n    Notes:\n        - The method supports TensorFlow versions up to 2.15.1.\n        - TensorFlow NMS may not be supported in older TensorFlow versions.\n        - If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite.\n          Refer to: https://github.com/ultralytics/yolov5/issues/12489\n    \"\"\"\n    # YOLOv5 TensorFlow SavedModel export\n    try:\n        import tensorflow as tf\n    except Exception:\n        check_requirements(f\"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}<=2.15.1\")\n\n        import tensorflow as tf\n    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n\n    from models.tf import TFModel\n\n    LOGGER.info(f\"\\n{prefix} starting export with tensorflow {tf.__version__}...\")\n    if tf.__version__ > \"2.13.1\":\n        helper_url = \"https://github.com/ultralytics/yolov5/issues/12489\"\n        LOGGER.info(\n            f\"WARNING ⚠️ using Tensorflow {tf.__version__} > 2.13.1 might cause issue when exporting the model to tflite {helper_url}\"\n        )  # handling issue https://github.com/ultralytics/yolov5/issues/12489\n    f = str(file).replace(\".pt\", \"_saved_model\")\n    batch_size, ch, *imgsz = list(im.shape)  # BCHW\n\n    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)\n    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow\n    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)\n    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)\n    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, 
topk_per_class, topk_all, iou_thres, conf_thres)\n    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)\n    keras_model.trainable = False\n    keras_model.summary()\n    if keras:\n        keras_model.save(f, save_format=\"tf\")\n    else:\n        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)\n        m = tf.function(lambda x: keras_model(x))  # full model\n        m = m.get_concrete_function(spec)\n        frozen_func = convert_variables_to_constants_v2(m)\n        tfm = tf.Module()\n        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])\n        tfm.__call__(im)\n        tf.saved_model.save(\n            tfm,\n            f,\n            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)\n            if check_version(tf.__version__, \"2.6\")\n            else tf.saved_model.SaveOptions(),\n        )\n    return f, keras_model\n\n\n@try_export\ndef export_pb(keras_model, file, prefix=colorstr(\"TensorFlow GraphDef:\")):\n    \"\"\"Export YOLOv5 model to TensorFlow GraphDef (*.pb) format.\n\n    Args:\n        keras_model (tf.keras.Model): The Keras model to be converted.\n        file (Path): The output file path where the GraphDef will be saved.\n        prefix (str): Optional prefix string; defaults to a colored string indicating TensorFlow GraphDef export status.\n\n    Returns:\n        Tuple[Path, None]: The file path where the GraphDef model was saved and a None placeholder.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        keras_model = ...  # assume an existing Keras model\n        file = Path(\"model.pb\")\n        export_pb(keras_model, file)\n        ```\n\n    Notes:\n        For more details, refer to the guide on frozen graphs: https://github.com/leimao/Frozen_Graph_TensorFlow\n    \"\"\"\n    import tensorflow as tf\n    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n\n    LOGGER.info(f\"\\n{prefix} starting export with tensorflow {tf.__version__}...\")\n    f = file.with_suffix(\".pb\")\n\n    m = tf.function(lambda x: keras_model(x))  # full model\n    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))\n    frozen_func = convert_variables_to_constants_v2(m)\n    frozen_func.graph.as_graph_def()\n    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)\n    return f, None\n\n\n@try_export\ndef export_tflite(\n    keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr(\"TensorFlow Lite:\")\n):\n    # YOLOv5 TensorFlow Lite export\n    \"\"\"Export a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support.\n\n    Args:\n        keras_model (tf.keras.Model): The Keras model to be exported.\n        im (torch.Tensor): An input image tensor for normalization and model tracing.\n        file (Path): The file path to save the TensorFlow Lite model.\n        int8 (bool): Enables INT8 quantization if True.\n        per_tensor (bool): If True, disables per-channel quantization.\n        data (str): Path to the dataset for representative dataset generation in INT8 quantization.\n        nms (bool): Enables Non-Maximum Suppression (NMS) if True.\n        agnostic_nms (bool): Enables class-agnostic NMS if True.\n        prefix (str): Prefix for log messages.\n\n    Returns:\n        (str | None, tflite.Model | None): The file path of the 
exported TFLite model and the TFLite model instance, or\n            None if the export failed.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        import torch\n        import tensorflow as tf\n\n        # Load a Keras model wrapping a YOLOv5 model\n        keras_model = tf.keras.models.load_model('path/to/keras_model.h5')\n\n        # Example input tensor\n        im = torch.zeros(1, 3, 640, 640)\n\n        # Export the model\n        export_tflite(keras_model, im, Path('model.tflite'), int8=True, per_tensor=False, data='data/coco.yaml',\n                      nms=True, agnostic_nms=False)\n        ```\n\n    Notes:\n        - Ensure TensorFlow and TensorFlow Lite dependencies are installed.\n        - INT8 quantization requires a representative dataset to achieve optimal accuracy.\n        - TensorFlow Lite models are suitable for efficient inference on mobile and edge devices.\n    \"\"\"\n    import tensorflow as tf\n\n    LOGGER.info(f\"\\n{prefix} starting export with tensorflow {tf.__version__}...\")\n    _batch_size, _ch, *imgsz = list(im.shape)  # BCHW\n    f = str(file).replace(\".pt\", \"-fp16.tflite\")\n\n    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)\n    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]\n    converter.target_spec.supported_types = [tf.float16]\n    converter.optimizations = [tf.lite.Optimize.DEFAULT]\n    if int8:\n        from models.tf import representative_dataset_gen\n\n        dataset = LoadImages(check_dataset(check_yaml(data))[\"train\"], img_size=imgsz, auto=False)\n        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)\n        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n        converter.target_spec.supported_types = []\n        converter.inference_input_type = tf.uint8  # or tf.int8\n        converter.inference_output_type = tf.uint8  # or tf.int8\n        converter.experimental_new_quantizer = True\n        if per_tensor:\n            converter._experimental_disable_per_channel = True\n        f = str(file).replace(\".pt\", \"-int8.tflite\")\n    if nms or agnostic_nms:\n        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)\n\n    tflite_model = converter.convert()\n    open(f, \"wb\").write(tflite_model)\n    return f, None\n\n\n@try_export\ndef export_edgetpu(file, prefix=colorstr(\"Edge TPU:\")):\n    \"\"\"Exports a YOLOv5 model to Edge TPU compatible TFLite format; requires Linux and Edge TPU compiler.\n\n    Args:\n        file (Path): Path to the YOLOv5 model file to be exported (.pt format).\n        prefix (str, optional): Prefix for logging messages. Defaults to colorstr(\"Edge TPU:\").\n\n    Returns:\n        tuple[Path, None]: Path to the exported Edge TPU compatible TFLite model, None.\n\n    Raises:\n        AssertionError: If the system is not Linux.\n        subprocess.CalledProcessError: If any subprocess call to install or run the Edge TPU compiler fails.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        file = Path('yolov5s.pt')\n        export_edgetpu(file)\n        ```\n\n    Notes:\n        To use this function, ensure you have the Edge TPU compiler installed on your Linux system. 
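If the compiler is not found, the function attempts to install it from the Google Coral apt repository, using sudo when available. 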
You can find\n        installation instructions here: https://coral.ai/docs/edgetpu/compiler/.\n    \"\"\"\n    cmd = \"edgetpu_compiler --version\"\n    help_url = \"https://coral.ai/docs/edgetpu/compiler/\"\n    assert platform.system() == \"Linux\", f\"export only supported on Linux. See {help_url}\"\n    if subprocess.run(f\"{cmd} > /dev/null 2>&1\", shell=True).returncode != 0:\n        LOGGER.info(f\"\\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}\")\n        sudo = subprocess.run(\"sudo --version >/dev/null\", shell=True).returncode == 0  # sudo installed on system\n        for c in (\n            \"curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\",\n            'echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',\n            \"sudo apt-get update\",\n            \"sudo apt-get install edgetpu-compiler\",\n        ):\n            subprocess.run(c if sudo else c.replace(\"sudo \", \"\"), shell=True, check=True)\n    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]\n\n    LOGGER.info(f\"\\n{prefix} starting export with Edge TPU compiler {ver}...\")\n    f = str(file).replace(\".pt\", \"-int8_edgetpu.tflite\")  # Edge TPU model\n    f_tfl = str(file).replace(\".pt\", \"-int8.tflite\")  # TFLite model\n\n    subprocess.run(\n        [\n            \"edgetpu_compiler\",\n            \"-s\",\n            \"-d\",\n            \"-k\",\n            \"10\",\n            \"--out_dir\",\n            str(file.parent),\n            f_tfl,\n        ],\n        check=True,\n    )\n    return f, None\n\n\n@try_export\ndef export_tfjs(file, int8, prefix=colorstr(\"TensorFlow.js:\")):\n    \"\"\"Convert a YOLOv5 model to TensorFlow.js format with optional uint8 quantization.\n\n    Args:\n        file (Path): Path to the YOLOv5 model file to be converted, typically having a \".pt\" or \".onnx\" extension.\n        int8 (bool): If True, applies uint8 quantization during the conversion process.\n        prefix (str): Optional prefix for logging messages, default is 'TensorFlow.js:' with color formatting.\n\n    Returns:\n        (str, None): Tuple containing the output directory path as a string and None.\n\n    Examples:\n        ```python\n        from pathlib import Path\n        file = Path('yolov5.onnx')\n        export_tfjs(file, int8=False)\n        ```\n\n    Notes:\n        - This function requires the `tensorflowjs` package. 
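The conversion itself shells out to the `tensorflowjs_converter` CLI, which must therefore be available on the PATH. 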
Install it using:\n          ```shell\n          pip install tensorflowjs\n          ```\n        - The converted TensorFlow.js model will be saved in a directory with the \"_web_model\" suffix appended to the original file name.\n        - The conversion involves running shell commands that invoke the TensorFlow.js converter tool.\n    \"\"\"\n    check_requirements(\"tensorflowjs\")\n    import tensorflowjs as tfjs\n\n    LOGGER.info(f\"\\n{prefix} starting export with tensorflowjs {tfjs.__version__}...\")\n    f = str(file).replace(\".pt\", \"_web_model\")  # js dir\n    f_pb = file.with_suffix(\".pb\")  # *.pb path\n    f_json = f\"{f}/model.json\"  # *.json path\n\n    args = [\n        \"tensorflowjs_converter\",\n        \"--input_format=tf_frozen_model\",\n        \"--quantize_uint8\" if int8 else \"\",\n        \"--output_node_names=Identity,Identity_1,Identity_2,Identity_3\",\n        str(f_pb),\n        f,\n    ]\n    subprocess.run([arg for arg in args if arg], check=True)\n\n    json = Path(f_json).read_text()\n    with open(f_json, \"w\") as j:  # sort JSON Identity_* in ascending order\n        subst = re.sub(\n            r'{\"outputs\": {\"Identity.?.?\": {\"name\": \"Identity.?.?\"}, '\n            r'\"Identity.?.?\": {\"name\": \"Identity.?.?\"}, '\n            r'\"Identity.?.?\": {\"name\": \"Identity.?.?\"}, '\n            r'\"Identity.?.?\": {\"name\": \"Identity.?.?\"}}}',\n            r'{\"outputs\": {\"Identity\": {\"name\": \"Identity\"}, '\n            r'\"Identity_1\": {\"name\": \"Identity_1\"}, '\n            r'\"Identity_2\": {\"name\": \"Identity_2\"}, '\n            r'\"Identity_3\": {\"name\": \"Identity_3\"}}}',\n            json,\n        )\n        j.write(subst)\n    return f, None\n\n\ndef add_tflite_metadata(file, metadata, num_outputs):\n    \"\"\"Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs according to TensorFlow\n    guidelines.\n\n    Args:\n        file (str): Path to the TFLite model file to which metadata will be added.\n        metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata\n            schema. 
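The dictionary is written to a temporary text file and attached to the model as an associated file. 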
Common keys include \"name\", \"description\", \"version\", \"author\", and \"license\".\n        num_outputs (int): Number of output tensors the model has, used to configure the metadata properly.\n\n    Returns:\n        None\n\n    Examples:\n        ```python\n        metadata = {\n            \"name\": \"yolov5\",\n            \"description\": \"YOLOv5 object detection model\",\n            \"version\": \"1.0\",\n            \"author\": \"Ultralytics\",\n            \"license\": \"Apache License 2.0\"\n        }\n        add_tflite_metadata(\"model.tflite\", metadata, num_outputs=4)\n        ```\n\n    Notes:\n        TFLite metadata can include information such as model name, version, author, and other relevant details.\n        For more details on the structure of the metadata, refer to TensorFlow Lite\n        [metadata guidelines](https://ai.google.dev/edge/litert/models/metadata).\n    \"\"\"\n    with contextlib.suppress(ImportError):\n        # check_requirements('tflite_support')\n        from tflite_support import flatbuffers\n        from tflite_support import metadata as _metadata\n        from tflite_support import metadata_schema_py_generated as _metadata_fb\n\n        tmp_file = Path(\"/tmp/meta.txt\")\n        with open(tmp_file, \"w\") as meta_f:\n            meta_f.write(str(metadata))\n\n        model_meta = _metadata_fb.ModelMetadataT()\n        label_file = _metadata_fb.AssociatedFileT()\n        label_file.name = tmp_file.name\n        model_meta.associatedFiles = [label_file]\n\n        subgraph = _metadata_fb.SubGraphMetadataT()\n        subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]\n        subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs\n        model_meta.subgraphMetadata = [subgraph]\n\n        b = flatbuffers.Builder(0)\n        b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n        metadata_buf = b.Output()\n\n        populator = _metadata.MetadataPopulator.with_model_file(file)\n        populator.load_metadata_buffer(metadata_buf)\n        populator.load_associated_files([str(tmp_file)])\n        populator.populate()\n        tmp_file.unlink()\n\n\ndef pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr(\"CoreML Pipeline:\")):\n    \"\"\"Convert a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different\n    input/output shapes, and saving the model.\n\n    Args:\n        model (torch.nn.Module): The YOLOv5 PyTorch model to be converted.\n        im (torch.Tensor): Example input tensor with shape (N, C, H, W), where N is the batch size, C is the number of\n            channels, H is the height, and W is the width.\n        file (Path): Path to save the converted CoreML model.\n        names (dict[int, str]): Dictionary mapping class indices to class names.\n        y (torch.Tensor): Output tensor from the PyTorch model's forward pass.\n        mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).\n        prefix (str): Custom prefix for logging messages.\n\n    Returns:\n        (Path): Path to the saved CoreML model (.mlmodel).\n\n    Raises:\n        AssertionError: If the number of class names does not match the number of classes in the model.\n\n    Examples:\n        ```python\n        from ultralytics.utils.patches import torch_load\n        from pathlib import Path\n        import torch\n\n        model = torch_load('yolov5s.pt')  # Load YOLOv5 model\n        im = 
torch.zeros((1, 3, 640, 640))  # Example input tensor\n\n        names = {0: \"person\", 1: \"bicycle\", 2: \"car\", ...}  # Define class names\n\n        y = model(im)  # Perform forward pass to get model output\n\n        output_file = Path('yolov5s.mlmodel')  # Convert to CoreML\n        pipeline_coreml(model, im, output_file, names, y)\n        ```\n\n    Notes:\n        - This function requires `coremltools` to be installed.\n        - Running this function on a non-macOS environment might not support some features.\n        - Flexible input shapes and additional NMS options can be customized within the function.\n    \"\"\"\n    import coremltools as ct\n    from PIL import Image\n\n    f = file.with_suffix(\".mlmodel\") if mlmodel else file.with_suffix(\".mlpackage\")\n    print(f\"{prefix} starting pipeline with coremltools {ct.__version__}...\")\n    _batch_size, _ch, h, w = list(im.shape)  # BCHW\n    t = time.time()\n\n    # YOLOv5 Output shapes\n    spec = model.get_spec()\n    out0, out1 = iter(spec.description.output)\n    if platform.system() == \"Darwin\":\n        img = Image.new(\"RGB\", (w, h))  # img(192 width, 320 height)\n        # img = torch.zeros((*opt.img_size, 3)).numpy()  # img size(320,192,3) iDetection\n        out = model.predict({\"image\": img})\n        out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape\n    else:  # linux and windows can not run model.predict(), get sizes from pytorch output y\n        s = tuple(y[0].shape)\n        out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4)  # (3780, 80), (3780, 4)\n\n    # Checks\n    nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height\n    _na, nc = out0_shape\n    # na, nc = out0.type.multiArrayType.shape  # number anchors, classes\n    assert len(names) == nc, f\"{len(names)} names found for nc={nc}\"  # check\n\n    # Define output shapes (missing)\n    out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)\n    out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)\n    # spec.neuralNetwork.preprocessing[0].featureName = '0'\n\n    # Flexible input shapes\n    # from coremltools.models.neural_network import flexible_shape_utils\n    # s = [] # shapes\n    # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))\n    # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384))  # (height, width)\n    # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)\n    # r = flexible_shape_utils.NeuralNetworkImageSizeRange()  # shape ranges\n    # r.add_height_range((192, 640))\n    # r.add_width_range((192, 640))\n    # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)\n\n    # Print\n    print(spec.description)\n\n    # Model from spec\n    weights_dir = None\n    weights_dir = None if mlmodel else str(f / \"Data/com.apple.CoreML/weights\")\n    model = ct.models.MLModel(spec, weights_dir=weights_dir)\n\n    # 3. 
Create NMS protobuf\n    nms_spec = ct.proto.Model_pb2.Model()\n    nms_spec.specificationVersion = 5\n    for i in range(2):\n        decoder_output = model._spec.description.output[i].SerializeToString()\n        nms_spec.description.input.add()\n        nms_spec.description.input[i].ParseFromString(decoder_output)\n        nms_spec.description.output.add()\n        nms_spec.description.output[i].ParseFromString(decoder_output)\n\n    nms_spec.description.output[0].name = \"confidence\"\n    nms_spec.description.output[1].name = \"coordinates\"\n\n    output_sizes = [nc, 4]\n    for i in range(2):\n        ma_type = nms_spec.description.output[i].type.multiArrayType\n        ma_type.shapeRange.sizeRanges.add()\n        ma_type.shapeRange.sizeRanges[0].lowerBound = 0\n        ma_type.shapeRange.sizeRanges[0].upperBound = -1\n        ma_type.shapeRange.sizeRanges.add()\n        ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]\n        ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]\n        del ma_type.shape[:]\n\n    nms = nms_spec.nonMaximumSuppression\n    nms.confidenceInputFeatureName = out0.name  # 1x507x80\n    nms.coordinatesInputFeatureName = out1.name  # 1x507x4\n    nms.confidenceOutputFeatureName = \"confidence\"\n    nms.coordinatesOutputFeatureName = \"coordinates\"\n    nms.iouThresholdInputFeatureName = \"iouThreshold\"\n    nms.confidenceThresholdInputFeatureName = \"confidenceThreshold\"\n    nms.iouThreshold = 0.45\n    nms.confidenceThreshold = 0.25\n    nms.pickTop.perClass = True\n    nms.stringClassLabels.vector.extend(names.values())\n    nms_model = ct.models.MLModel(nms_spec)\n\n    # 4. Pipeline models together\n    pipeline = ct.models.pipeline.Pipeline(\n        input_features=[\n            (\"image\", ct.models.datatypes.Array(3, ny, nx)),\n            (\"iouThreshold\", ct.models.datatypes.Double()),\n            (\"confidenceThreshold\", ct.models.datatypes.Double()),\n        ],\n        output_features=[\"confidence\", \"coordinates\"],\n    )\n    pipeline.add_model(model)\n    pipeline.add_model(nms_model)\n\n    # Correct datatypes\n    pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())\n    pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())\n    pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())\n\n    # Update metadata\n    pipeline.spec.specificationVersion = 5\n    pipeline.spec.description.metadata.versionString = \"https://github.com/ultralytics/yolov5\"\n    pipeline.spec.description.metadata.shortDescription = \"https://github.com/ultralytics/yolov5\"\n    pipeline.spec.description.metadata.author = \"glenn.jocher@ultralytics.com\"\n    pipeline.spec.description.metadata.license = \"https://github.com/ultralytics/yolov5/blob/master/LICENSE\"\n    pipeline.spec.description.metadata.userDefined.update(\n        {\n            \"classes\": \",\".join(names.values()),\n            \"iou_threshold\": str(nms.iouThreshold),\n            \"confidence_threshold\": str(nms.confidenceThreshold),\n        }\n    )\n\n    # Save the model\n    model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)\n    model.input_description[\"image\"] = \"Input image\"\n    model.input_description[\"iouThreshold\"] = f\"(optional) IOU Threshold override (default: {nms.iouThreshold})\"\n    model.input_description[\"confidenceThreshold\"] = (\n        
f\"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})\"\n    )\n    model.output_description[\"confidence\"] = 'Boxes × Class confidence (see user-defined metadata \"classes\")'\n    model.output_description[\"coordinates\"] = \"Boxes × [x, y, width, height] (relative to image size)\"\n    model.save(f)  # pipelined\n    print(f\"{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)\")\n\n\n@smart_inference_mode()\ndef run(\n    data=ROOT / \"data/coco128.yaml\",  # 'dataset.yaml path'\n    weights=ROOT / \"yolov5s.pt\",  # weights path\n    imgsz=(640, 640),  # image (height, width)\n    batch_size=1,  # batch size\n    device=\"cpu\",  # cuda device, i.e. 0 or 0,1,2,3 or cpu\n    include=(\"torchscript\", \"onnx\"),  # include formats\n    half=False,  # FP16 half-precision export\n    inplace=False,  # set YOLOv5 Detect() inplace=True\n    keras=False,  # use Keras\n    optimize=False,  # TorchScript: optimize for mobile\n    int8=False,  # CoreML/TF INT8 quantization\n    per_tensor=False,  # TF per tensor quantization\n    dynamic=False,  # ONNX/TF/TensorRT: dynamic axes\n    cache=\"\",  # TensorRT: timing cache path\n    simplify=False,  # ONNX: simplify model\n    mlmodel=False,  # CoreML: Export in *.mlmodel format\n    opset=12,  # ONNX: opset version\n    verbose=False,  # TensorRT: verbose log\n    workspace=4,  # TensorRT: workspace size (GB)\n    nms=False,  # TF: add NMS to model\n    agnostic_nms=False,  # TF: add agnostic NMS to model\n    topk_per_class=100,  # TF.js NMS: topk per class to keep\n    topk_all=100,  # TF.js NMS: topk for all classes to keep\n    iou_thres=0.45,  # TF.js NMS: IoU threshold\n    conf_thres=0.25,  # TF.js NMS: confidence threshold\n):\n    \"\"\"Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow.\n\n    Args:\n        data (str | Path): Path to the dataset YAML configuration file. Default is 'data/coco128.yaml'.\n        weights (str | Path): Path to the pretrained model weights file. Default is 'yolov5s.pt'.\n        imgsz (tuple): Image size as (height, width). Default is (640, 640).\n        batch_size (int): Batch size for exporting the model. Default is 1.\n        device (str): Device to run the export on, e.g., '0' for GPU, 'cpu' for CPU. Default is 'cpu'.\n        include (tuple): Formats to include in the export. Default is ('torchscript', 'onnx').\n        half (bool): Flag to export model with FP16 half-precision. Default is False.\n        inplace (bool): Set the YOLOv5 Detect() module inplace=True. Default is False.\n        keras (bool): Flag to use Keras for TensorFlow SavedModel export. Default is False.\n        optimize (bool): Optimize TorchScript model for mobile deployment. Default is False.\n        int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False.\n        per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False.\n        dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False.\n        cache (str): TensorRT timing cache path. Default is an empty string.\n        simplify (bool): Simplify the ONNX model during export. Default is False.\n        opset (int): ONNX opset version. Default is 12.\n        verbose (bool): Enable verbose logging for TensorRT export. Default is False.\n        workspace (int): TensorRT workspace size in GB. 
Default is 4.\n        nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. Default is False.\n        agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. Default is False.\n        topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. Default is 100.\n        topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. Default is 100.\n        iou_thres (float): IoU threshold for NMS. Default is 0.45.\n        conf_thres (float): Confidence threshold for NMS. Default is 0.25.\n        mlmodel (bool): Flag to use *.mlmodel for CoreML export. Default is False.\n\n    Returns:\n        list[str]: Paths of the exported model files or directories, one entry per successfully exported format.\n\n    Examples:\n        ```python\n        run(\n            data=\"data/coco128.yaml\",\n            weights=\"yolov5s.pt\",\n            imgsz=(640, 640),\n            batch_size=1,\n            device=\"cpu\",\n            include=(\"torchscript\", \"onnx\"),\n            half=False,\n            inplace=False,\n            keras=False,\n            optimize=False,\n            int8=False,\n            per_tensor=False,\n            dynamic=False,\n            cache=\"\",\n            simplify=False,\n            opset=12,\n            verbose=False,\n            mlmodel=False,\n            workspace=4,\n            nms=False,\n            agnostic_nms=False,\n            topk_per_class=100,\n            topk_all=100,\n            iou_thres=0.45,\n            conf_thres=0.25,\n        )\n        ```\n\n    Notes:\n        - Model export is based on the specified formats in the 'include' argument.\n        - Be cautious of combinations where certain flags are mutually exclusive, such as `--half` and `--dynamic`.\n    \"\"\"\n    t = time.time()\n    include = [x.lower() for x in include]  # to lowercase\n    fmts = tuple(export_formats()[\"Argument\"][1:])  # --include arguments\n    flags = [x in include for x in fmts]\n    assert sum(flags) == len(include), f\"ERROR: Invalid --include {include}, valid --include arguments are {fmts}\"\n    jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags  # export booleans\n    file = Path(url2file(weights) if str(weights).startswith((\"http:/\", \"https:/\")) else weights)  # PyTorch weights\n\n    # Load PyTorch model\n    device = select_device(device)\n    if half:\n        assert device.type != \"cpu\" or coreml, \"--half only compatible with GPU export, i.e. use --device 0\"\n        assert not dynamic, \"--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both\"\n    model = attempt_load(weights, device=device, inplace=True, fuse=True)  # load FP32 model\n\n    # Checks\n    imgsz *= 2 if len(imgsz) == 1 else 1  # expand\n    if optimize:\n        assert device.type == \"cpu\", \"--optimize not compatible with cuda devices, i.e. 
use --device cpu\"\n\n    # Input\n    gs = int(max(model.stride))  # grid size (max stride)\n    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples\n    ch = next(model.parameters()).size(1)  # require input image channels\n    im = torch.zeros(batch_size, ch, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection\n\n    # Update model\n    model.eval()\n    for k, m in model.named_modules():\n        if isinstance(m, Detect):\n            m.inplace = inplace\n            m.dynamic = dynamic\n            m.export = True\n\n    for _ in range(2):\n        y = model(im)  # dry runs\n    if half and not coreml:\n        im, model = im.half(), model.half()  # to FP16\n    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape\n    metadata = {\"stride\": int(max(model.stride)), \"names\": model.names}  # model metadata\n    LOGGER.info(f\"\\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)\")\n\n    # Exports\n    f = [\"\"] * len(fmts)  # exported filenames\n    warnings.filterwarnings(action=\"ignore\", category=torch.jit.TracerWarning)  # suppress TracerWarning\n    if jit:  # TorchScript\n        f[0], _ = export_torchscript(model, im, file, optimize)\n    if engine:  # TensorRT required before ONNX\n        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose, cache)\n    if onnx or xml:  # OpenVINO requires ONNX\n        f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)\n    if xml:  # OpenVINO\n        f[3], _ = export_openvino(file, metadata, half, int8, data)\n    if coreml:  # CoreML\n        f[4], ct_model = export_coreml(model, im, file, int8, half, nms, mlmodel)\n        if nms:\n            pipeline_coreml(ct_model, im, file, model.names, y, mlmodel)\n    if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats\n        assert not tflite or not tfjs, \"TFLite and TF.js models must be exported separately, please pass only one type.\"\n        assert not isinstance(model, ClassificationModel), \"ClassificationModel export to TF formats not yet supported.\"\n        f[5], s_model = export_saved_model(\n            model.cpu(),\n            im,\n            file,\n            dynamic,\n            tf_nms=nms or agnostic_nms or tfjs,\n            agnostic_nms=agnostic_nms or tfjs,\n            topk_per_class=topk_per_class,\n            topk_all=topk_all,\n            iou_thres=iou_thres,\n            conf_thres=conf_thres,\n            keras=keras,\n        )\n        if pb or tfjs:  # pb prerequisite to tfjs\n            f[6], _ = export_pb(s_model, file)\n        if tflite or edgetpu:\n            f[7], _ = export_tflite(\n                s_model, im, file, int8 or edgetpu, per_tensor, data=data, nms=nms, agnostic_nms=agnostic_nms\n            )\n            if edgetpu:\n                f[8], _ = export_edgetpu(file)\n            add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))\n        if tfjs:\n            f[9], _ = export_tfjs(file, int8)\n    if paddle:  # PaddlePaddle\n        f[10], _ = export_paddle(model, im, file, metadata)\n\n    # Finish\n    f = [str(x) for x in f if x]  # filter out '' and None\n    if any(f):\n        cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type\n        det &= not seg  # segmentation models inherit from SegmentationModel(DetectionModel)\n        dir = Path(\"segment\" if seg else 
\"classify\" if cls else \"\")\n        h = \"--half\" if half else \"\"  # --half FP16 inference arg\n        s = (\n            \"# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference\"\n            if cls\n            else \"# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference\"\n            if seg\n            else \"\"\n        )\n        LOGGER.info(\n            f\"\\nExport complete ({time.time() - t:.1f}s)\"\n            f\"\\nResults saved to {colorstr('bold', file.parent.resolve())}\"\n            f\"\\nDetect:          python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}\"\n            f\"\\nValidate:        python {dir / 'val.py'} --weights {f[-1]} {h}\"\n            f\"\\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')  {s}\"\n            f\"\\nVisualize:       https://netron.app\"\n        )\n    return f  # return list of exported files/dirs\n\n\ndef parse_opt(known=False):\n    \"\"\"Parse command-line options for YOLOv5 model export configurations.\n\n    Args:\n        known (bool): If True, uses `argparse.ArgumentParser.parse_known_args`; otherwise, uses\n            `argparse.ArgumentParser.parse_args`. Default is False.\n\n    Returns:\n        argparse.Namespace: Object containing parsed command-line arguments.\n\n    Examples:\n        ```python\n        opts = parse_opt()\n        print(opts.data)\n        print(opts.weights)\n        ```\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s.pt\", help=\"model.pt path(s)\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", nargs=\"+\", type=int, default=[640, 640], help=\"image (h, w)\")\n    parser.add_argument(\"--batch-size\", type=int, default=1, help=\"batch size\")\n    parser.add_argument(\"--device\", default=\"cpu\", help=\"cuda device, i.e. 
0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"FP16 half-precision export\")\n    parser.add_argument(\"--inplace\", action=\"store_true\", help=\"set YOLOv5 Detect() inplace=True\")\n    parser.add_argument(\"--keras\", action=\"store_true\", help=\"TF: use Keras\")\n    parser.add_argument(\"--optimize\", action=\"store_true\", help=\"TorchScript: optimize for mobile\")\n    parser.add_argument(\"--int8\", action=\"store_true\", help=\"CoreML/TF/OpenVINO INT8 quantization\")\n    parser.add_argument(\"--per-tensor\", action=\"store_true\", help=\"TF per-tensor quantization\")\n    parser.add_argument(\"--dynamic\", action=\"store_true\", help=\"ONNX/TF/TensorRT: dynamic axes\")\n    parser.add_argument(\"--cache\", type=str, default=\"\", help=\"TensorRT: timing cache file path\")\n    parser.add_argument(\"--simplify\", action=\"store_true\", help=\"ONNX: simplify model\")\n    parser.add_argument(\"--mlmodel\", action=\"store_true\", help=\"CoreML: Export in *.mlmodel format\")\n    parser.add_argument(\"--opset\", type=int, default=17, help=\"ONNX: opset version\")\n    parser.add_argument(\"--verbose\", action=\"store_true\", help=\"TensorRT: verbose log\")\n    parser.add_argument(\"--workspace\", type=int, default=4, help=\"TensorRT: workspace size (GB)\")\n    parser.add_argument(\"--nms\", action=\"store_true\", help=\"TF: add NMS to model\")\n    parser.add_argument(\"--agnostic-nms\", action=\"store_true\", help=\"TF: add agnostic NMS to model\")\n    parser.add_argument(\"--topk-per-class\", type=int, default=100, help=\"TF.js NMS: topk per class to keep\")\n    parser.add_argument(\"--topk-all\", type=int, default=100, help=\"TF.js NMS: topk for all classes to keep\")\n    parser.add_argument(\"--iou-thres\", type=float, default=0.45, help=\"TF.js NMS: IoU threshold\")\n    parser.add_argument(\"--conf-thres\", type=float, default=0.25, help=\"TF.js NMS: confidence threshold\")\n    parser.add_argument(\n        \"--include\",\n        nargs=\"+\",\n        default=[\"torchscript\"],\n        help=\"torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle\",\n    )\n    opt = parser.parse_known_args()[0] if known else parser.parse_args()\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Run(**vars(opt)), execute the run function with parsed options.\"\"\"\n    for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:\n        run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "hubconf.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nPyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5.\n\nUsage:\n    import torch\n    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # official model\n    model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')  # from branch\n    model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')  # custom/local model\n    model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')  # local repo\n\"\"\"\n\nfrom ultralytics.utils.patches import torch_load\n\n\ndef _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n    \"\"\"Creates or loads a YOLOv5 model, with options for pretrained weights and model customization.\n\n    Args:\n        name (str): Model name (e.g., 'yolov5s') or path to the model checkpoint (e.g., 'path/to/best.pt').\n        pretrained (bool, optional): If True, loads pretrained weights into the model. Defaults to True.\n        channels (int, optional): Number of input channels the model expects. Defaults to 3.\n        classes (int, optional): Number of classes the model is expected to detect. Defaults to 80.\n        autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper for various input formats. Defaults\n            to True.\n        verbose (bool, optional): If True, prints detailed information during the model creation/loading process.\n            Defaults to True.\n        device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If None,\n            selects the best available device. Defaults to None.\n\n    Returns:\n        (DetectMultiBackend | AutoShape): The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.\n\n    Examples:\n        ```python\n        import torch\n        from ultralytics import _create\n\n        # Load an official YOLOv5s model with pretrained weights\n        model = _create('yolov5s')\n\n        # Load a custom model from a local checkpoint\n        model = _create('path/to/custom_model.pt', pretrained=False)\n\n        # Load a model with specific input channels and classes\n        model = _create('yolov5s', channels=1, classes=10)\n        ```\n\n    Notes:\n        For more information on model loading and customization, visit the\n        [YOLOv5 PyTorch Hub Documentation](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/).\n    \"\"\"\n    from pathlib import Path\n\n    from models.common import AutoShape, DetectMultiBackend\n    from models.experimental import attempt_load\n    from models.yolo import ClassificationModel, DetectionModel, SegmentationModel\n    from utils.downloads import attempt_download\n    from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging\n    from utils.torch_utils import select_device\n\n    if not verbose:\n        LOGGER.setLevel(logging.WARNING)\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"opencv-python\", \"tensorboard\", \"thop\"))\n    name = Path(name)\n    path = name.with_suffix(\".pt\") if name.suffix == \"\" and not name.is_dir() else name  # checkpoint path\n    try:\n        device = select_device(device)\n        if pretrained and channels == 3 and classes == 80:\n            try:\n                model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model\n                if autoshape:\n                    if model.pt and 
isinstance(model.model, ClassificationModel):\n                        LOGGER.warning(\n                            \"WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. \"\n                            \"You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).\"\n                        )\n                    elif model.pt and isinstance(model.model, SegmentationModel):\n                        LOGGER.warning(\n                            \"WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. \"\n                            \"You will not be able to run inference with this model.\"\n                        )\n                    else:\n                        model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS\n            except Exception:\n                model = attempt_load(path, device=device, fuse=False)  # arbitrary model\n        else:\n            cfg = next(iter((Path(__file__).parent / \"models\").rglob(f\"{path.stem}.yaml\")))  # model.yaml path\n            model = DetectionModel(cfg, channels, classes)  # create model\n            if pretrained:\n                ckpt = torch_load(attempt_download(path), map_location=device)  # load\n                csd = ckpt[\"model\"].float().state_dict()  # checkpoint state_dict as FP32\n                csd = intersect_dicts(csd, model.state_dict(), exclude=[\"anchors\"])  # intersect\n                model.load_state_dict(csd, strict=False)  # load\n                if len(ckpt[\"model\"].names) == classes:\n                    model.names = ckpt[\"model\"].names  # set class names attribute\n        if not verbose:\n            LOGGER.setLevel(logging.INFO)  # reset to default\n        return model.to(device)\n\n    except Exception as e:\n        help_url = \"https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading\"\n        s = f\"{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.\"\n        raise Exception(s) from e\n\n\ndef custom(path=\"path/to/model.pt\", autoshape=True, _verbose=True, device=None):\n    \"\"\"Loads a custom or local YOLOv5 model from a given path with optional autoshaping and device specification.\n\n    Args:\n        path (str): Path to the custom model file (e.g., 'path/to/model.pt').\n        autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model if True, enabling compatibility with various input\n            types (default is True).\n        _verbose (bool): If True, prints all informational messages to the screen; otherwise, operates silently (default\n            is True).\n        device (str | torch.device | None): Device to load the model on, e.g., 'cpu', 'cuda', torch.device('cuda:0'),\n            etc. 
(default is None, which automatically selects the best available device).\n\n    Returns:\n        torch.nn.Module: A YOLOv5 model loaded with the specified parameters.\n\n    Examples:\n        ```python\n        # Load model from a given path with autoshape enabled on the best available device\n        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')\n\n        # Load model from a local path without autoshape on the CPU device\n        model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local', autoshape=False, device='cpu')\n        ```\n\n    Notes:\n        For more details on loading models from PyTorch Hub:\n        https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading\n    \"\"\"\n    return _create(path, autoshape=autoshape, verbose=_verbose, device=device)\n\n\ndef yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Instantiates the YOLOv5-nano model with options for pretraining, input channels, class count, autoshaping,\n    verbosity, and device.\n\n    Args:\n        pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.\n        channels (int): Number of input channels for the model. Defaults to 3.\n        classes (int): Number of classes for object detection. Defaults to 80.\n        autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper to the model for various formats\n            (file/URI/PIL/cv2/np) and non-maximum suppression (NMS) during inference. Defaults to True.\n        _verbose (bool): If True, prints detailed information to the screen. Defaults to True.\n        device (str | torch.device | None): Specifies the device to use for model computation. If None, uses the best\n            device available (i.e., GPU if available, otherwise CPU). Defaults to None.\n\n    Returns:\n        DetectionModel | ClassificationModel | SegmentationModel: The instantiated YOLOv5-nano model, potentially with\n            pretrained weights and autoshaping applied.\n\n    Examples:\n        ```python\n        import torch\n        from ultralytics import yolov5n\n\n        # Load the YOLOv5-nano model with defaults\n        model = yolov5n()\n\n        # Load the YOLOv5-nano model with a specific device\n        model = yolov5n(device='cuda')\n        ```\n\n    Notes:\n        For further details on loading models from PyTorch Hub, refer to [PyTorch Hub models](https://pytorch.org/hub/\n        ultralytics_yolov5).\n    \"\"\"\n    return _create(\"yolov5n\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Create a YOLOv5-small (yolov5s) model with options for pretraining, input channels, class count, autoshaping,\n    verbosity, and device configuration.\n\n    Args:\n        pretrained (bool, optional): Flag to load pretrained weights into the model. Defaults to True.\n        channels (int, optional): Number of input channels. Defaults to 3.\n        classes (int, optional): Number of model classes. Defaults to 80.\n        autoshape (bool, optional): Whether to wrap the model with YOLOv5's .autoshape() for handling various input\n            formats. Defaults to True.\n        _verbose (bool, optional): Flag to print detailed information regarding model loading. 
Defaults to True.\n        device (str | torch.device | None, optional): Device to use for model computation, can be 'cpu', 'cuda', or\n            torch.device instances. If None, automatically selects the best available device. Defaults to None.\n\n    Returns:\n        torch.nn.Module: The YOLOv5-small model configured and loaded according to the specified parameters.\n\n    Examples:\n        ```python\n        import torch\n\n        # Load the official YOLOv5-small model with pretrained weights\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\n        # Load the YOLOv5-small model from a specific branch\n        model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')\n\n        # Load a custom YOLOv5-small model from a local checkpoint\n        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')\n\n        # Load a local YOLOv5-small model specifying source as local repository\n        model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')\n        ```\n\n    Notes:\n        For more details on model loading and customization, visit\n        the [YOLOv5 PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5/).\n    \"\"\"\n    return _create(\"yolov5s\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Instantiates the YOLOv5-medium model with customizable pretraining, channel count, class count, autoshaping,\n    verbosity, and device.\n\n    Args:\n        pretrained (bool, optional): Whether to load pretrained weights into the model. Default is True.\n        channels (int, optional): Number of input channels. Default is 3.\n        classes (int, optional): Number of model classes. Default is 80.\n        autoshape (bool, optional): Apply YOLOv5 .autoshape() wrapper to the model for handling various input formats.\n            Default is True.\n        _verbose (bool, optional): Whether to print detailed information to the screen. Default is True.\n        device (str | torch.device | None, optional): Device specification to use for model parameters (e.g., 'cpu',\n            'cuda'). Default is None.\n\n    Returns:\n        torch.nn.Module: The instantiated YOLOv5-medium model.\n\n    Examples:\n        ```python\n        import torch\n\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5m')  # Load YOLOv5-medium from Ultralytics repository\n        model = torch.hub.load('ultralytics/yolov5:master', 'yolov5m')  # Load from the master branch\n        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m.pt')  # Load a custom/local YOLOv5-medium model\n        model = torch.hub.load('.', 'custom', 'yolov5m.pt', source='local')  # Load from a local repository\n        ```\n\n    Notes:\n        For more information, visit https://pytorch.org/hub/ultralytics_yolov5.\n    \"\"\"\n    return _create(\"yolov5m\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Creates YOLOv5-large model with options for pretraining, channels, classes, autoshaping, verbosity, and device\n    selection.\n\n    Args:\n        pretrained (bool): Load pretrained weights into the model. Default is True.\n        channels (int): Number of input channels. Default is 3.\n        classes (int): Number of model classes. 
Default is 80.\n        autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model. Default is True.\n        _verbose (bool): Print all information to screen. Default is True.\n        device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device\n            instance. Default is None.\n\n    Returns:\n        YOLOv5 model (torch.nn.Module): The YOLOv5-large model instantiated with specified configurations and possibly\n            pretrained weights.\n\n    Examples:\n        ```python\n        import torch\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5l')\n        ```\n\n    Notes:\n        For additional details, refer to the PyTorch Hub models documentation:\n        https://pytorch.org/hub/ultralytics_yolov5\n    \"\"\"\n    return _create(\"yolov5l\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Perform object detection using the YOLOv5-xlarge model with options for pretraining, input channels, class count,\n    autoshaping, verbosity, and device specification.\n\n    Args:\n        pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.\n        channels (int): Number of input channels for the model. Defaults to 3.\n        classes (int): Number of model classes for object detection. Defaults to 80.\n        autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper for handling different input formats.\n            Defaults to True.\n        _verbose (bool): If True, prints detailed information during model loading. Defaults to True.\n        device (str | torch.device | None): Device specification for computing the model, e.g., 'cpu', 'cuda:0',\n            torch.device('cuda'). Defaults to None.\n\n    Returns:\n        torch.nn.Module: The YOLOv5-xlarge model loaded with the specified parameters, optionally with pretrained\n            weights and autoshaping applied.\n\n    Examples:\n        ```python\n        import torch\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5x')\n        ```\n\n    For additional details, refer to the official YOLOv5 PyTorch Hub models documentation:\n    https://pytorch.org/hub/ultralytics_yolov5\n    \"\"\"\n    return _create(\"yolov5x\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Creates YOLOv5-nano-P6 model with options for pretraining, channels, classes, autoshaping, verbosity, and device.\n\n    Args:\n        pretrained (bool, optional): If True, loads pretrained weights into the model. Default is True.\n        channels (int, optional): Number of input channels. Default is 3.\n        classes (int, optional): Number of model classes. Default is 80.\n        autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper to the model. Default is True.\n        _verbose (bool, optional): If True, prints all information to screen. Default is True.\n        device (str | torch.device | None, optional): Device to use for model parameters. 
Can be 'cpu', 'cuda', or None.\n            Default is None.\n\n    Returns:\n        torch.nn.Module: YOLOv5-nano-P6 model loaded with the specified configurations.\n\n    Examples:\n        ```python\n        import torch\n        model = yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device='cuda')\n        ```\n\n    Notes:\n        For more information on PyTorch Hub models, visit: https://pytorch.org/hub/ultralytics_yolov5\n    \"\"\"\n    return _create(\"yolov5n6\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Instantiate the YOLOv5-small-P6 model with options for pretraining, input channels, number of classes,\n    autoshaping, verbosity, and device selection.\n\n    Args:\n        pretrained (bool): If True, loads pretrained weights. Default is True.\n        channels (int): Number of input channels. Default is 3.\n        classes (int): Number of object detection classes. Default is 80.\n        autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model, allowing for varied input formats.\n            Default is True.\n        _verbose (bool): If True, prints detailed information during model loading. Default is True.\n        device (str | torch.device | None): Device specification for model parameters (e.g., 'cpu', 'cuda', or\n            torch.device). Default is None, which selects an available device automatically.\n\n    Returns:\n        torch.nn.Module: The YOLOv5-small-P6 model instance.\n\n    Raises:\n        Exception: If there is an error during model creation or loading, with a suggestion to visit the YOLOv5\n            tutorials for help.\n\n    Examples:\n        ```python\n        import torch\n\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5s6')\n        model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s6')  # load from a specific branch\n        model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5s6.pt')  # custom/local model\n        model = torch.hub.load('.', 'custom', 'path/to/yolov5s6.pt', source='local')  # local repo model\n        ```\n\n    Notes:\n        - For more information, refer to the PyTorch Hub models documentation at https://pytorch.org/hub/ultralytics_yolov5\n    \"\"\"\n    return _create(\"yolov5s6\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Create YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity,\n    and device.\n\n    Args:\n        pretrained (bool): If True, loads pretrained weights. Default is True.\n        channels (int): Number of input channels. Default is 3.\n        classes (int): Number of model classes. Default is 80.\n        autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS. Default\n            is True.\n        _verbose (bool): If True, prints detailed information to the screen. Default is True.\n        device (str | torch.device | None): Device to use for model parameters. 
Default is None, which uses the best\n            available device.\n\n    Returns:\n        torch.nn.Module: The YOLOv5-medium-P6 model.\n        Refer to the PyTorch Hub models documentation: https://pytorch.org/hub/ultralytics_yolov5 for\n        additional details.\n\n    Examples:\n        ```python\n        import torch\n\n        # Load YOLOv5-medium-P6 model\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5m6')\n        ```\n\n    Notes:\n        - The model can be loaded with pre-trained weights for better performance on specific tasks.\n        - The autoshape feature simplifies input handling by allowing various popular data formats.\n    \"\"\"\n    return _create(\"yolov5m6\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Instantiate the YOLOv5-large-P6 model with options for pretraining, channel and class counts, autoshaping,\n    verbosity, and device selection.\n\n    Args:\n        pretrained (bool, optional): If True, load pretrained weights into the model. Default is True.\n        channels (int, optional): Number of input channels. Default is 3.\n        classes (int, optional): Number of model classes. Default is 80.\n        autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility.\n            Default is True.\n        _verbose (bool, optional): If True, print all information to the screen. Default is True.\n        device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or\n            torch.device. If None, automatically selects the best available device. Default is None.\n\n    Returns:\n        torch.nn.Module: The instantiated YOLOv5-large-P6 model.\n\n    Examples:\n        ```python\n        import torch\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5l6')  # official model\n        model = torch.hub.load('ultralytics/yolov5:master', 'yolov5l6')  # from specific branch\n        model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5l6.pt')  # custom/local model\n        model = torch.hub.load('.', 'custom', 'path/to/yolov5l6.pt', source='local')  # local repository\n        ```\n\n    Notes:\n        Refer to [PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5/) for additional usage instructions.\n    \"\"\"\n    return _create(\"yolov5l6\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\ndef yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):\n    \"\"\"Creates the YOLOv5-xlarge-P6 model with options for pretraining, number of input channels, class count,\n    autoshaping, verbosity, and device selection.\n\n    Args:\n        pretrained (bool): If True, loads pretrained weights into the model. Default is True.\n        channels (int): Number of input channels. Default is 3.\n        classes (int): Number of model classes. Default is 80.\n        autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model. Default is True.\n        _verbose (bool): If True, prints all information to the screen. Default is True.\n        device (str | torch.device | None): Device to use for model parameters, can be a string, torch.device object, or\n            None for default device selection. 
Default is None.\n\n    Returns:\n        torch.nn.Module: The instantiated YOLOv5-xlarge-P6 model.\n\n    Examples:\n        ```python\n        import torch\n        model = torch.hub.load('ultralytics/yolov5', 'yolov5x6')  # load the YOLOv5-xlarge-P6 model\n        ```\n\n    Notes:\n        For more information on YOLOv5 models, visit the official documentation:\n        https://docs.ultralytics.com/yolov5\n    \"\"\"\n    return _create(\"yolov5x6\", pretrained, channels, classes, autoshape, _verbose, device)\n\n\nif __name__ == \"__main__\":\n    import argparse\n    from pathlib import Path\n\n    import numpy as np\n    from PIL import Image\n\n    from utils.general import cv2, print_args\n\n    # Argparser\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--model\", type=str, default=\"yolov5s\", help=\"model name\")\n    opt = parser.parse_args()\n    print_args(vars(opt))\n\n    # Model\n    model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)\n    # model = custom(path='path/to/model.pt')  # custom\n\n    # Images\n    imgs = [\n        \"data/images/zidane.jpg\",  # filename\n        Path(\"data/images/zidane.jpg\"),  # Path\n        \"https://ultralytics.com/images/zidane.jpg\",  # URI\n        cv2.imread(\"data/images/bus.jpg\")[:, :, ::-1],  # OpenCV\n        Image.open(\"data/images/bus.jpg\"),  # PIL\n        np.zeros((320, 640, 3)),\n    ]  # numpy\n\n    # Inference\n    results = model(imgs, size=320)  # batched inference\n\n    # Results\n    results.print()\n    results.save()\n"
  },
  {
    "path": "models/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
  },
  {
    "path": "models/common.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Common modules.\"\"\"\n\nimport ast\nimport contextlib\nimport json\nimport math\nimport platform\nimport warnings\nimport zipfile\nfrom collections import OrderedDict, namedtuple\nfrom copy import copy\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torch.cuda import amp\n\n# Import 'ultralytics' package or install if missing\ntry:\n    import ultralytics\n\n    assert hasattr(ultralytics, \"__version__\")  # verify package is not directory\nexcept (ImportError, AssertionError):\n    import os\n\n    os.system(\"pip install -U ultralytics\")\n    import ultralytics\n\nfrom ultralytics.utils.plotting import Annotator, colors, save_one_box\n\nfrom utils import TryExcept\nfrom utils.dataloaders import exif_transpose, letterbox\nfrom utils.general import (\n    LOGGER,\n    ROOT,\n    Profile,\n    check_requirements,\n    check_suffix,\n    check_version,\n    colorstr,\n    increment_path,\n    is_jupyter,\n    make_divisible,\n    non_max_suppression,\n    scale_boxes,\n    xywh2xyxy,\n    xyxy2xywh,\n    yaml_load,\n)\nfrom utils.torch_utils import copy_attr, smart_inference_mode\n\n\ndef autopad(k, p=None, d=1):\n    \"\"\"Pads kernel to 'same' output shape, adjusting for optional dilation; returns padding size.\n\n    `k`: kernel, `p`: padding, `d`: dilation.\n    \"\"\"\n    if d > 1:\n        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]  # actual kernel-size\n    if p is None:\n        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad\n    return p\n\n\nclass Conv(nn.Module):\n    \"\"\"Applies a convolution, batch normalization, and activation function to an input tensor in a neural network.\"\"\"\n\n    default_act = nn.SiLU()  # default activation\n\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):\n        \"\"\"Initializes a standard convolution layer with optional batch normalization and activation.\"\"\"\n        super().__init__()\n        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)\n        self.bn = nn.BatchNorm2d(c2)\n        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()\n\n    def forward(self, x):\n        \"\"\"Applies a convolution followed by batch normalization and an activation function to the input tensor `x`.\"\"\"\n        return self.act(self.bn(self.conv(x)))\n\n    def forward_fuse(self, x):\n        \"\"\"Applies a fused convolution and activation function to the input tensor `x`.\"\"\"\n        return self.act(self.conv(x))\n\n\nclass DWConv(Conv):\n    \"\"\"Implements a depth-wise convolution layer with optional activation for efficient spatial filtering.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, d=1, act=True):\n        \"\"\"Initializes a depth-wise convolution layer with optional activation; args: input channels (c1), output\n        channels (c2), kernel size (k), stride (s), dilation (d), and activation flag (act).\n        \"\"\"\n        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)\n\n\nclass DWConvTranspose2d(nn.ConvTranspose2d):\n    \"\"\"A depth-wise transpose convolutional layer for upsampling in neural networks, particularly in YOLOv5 models.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):\n        
\"\"\"Initializes a depth-wise transpose convolutional layer for YOLOv5; args: input channels (c1), output channels\n        (c2), kernel size (k), stride (s), input padding (p1), output padding (p2).\n        \"\"\"\n        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))\n\n\nclass TransformerLayer(nn.Module):\n    \"\"\"Transformer layer with multihead attention and linear layers, optimized by removing LayerNorm.\"\"\"\n\n    def __init__(self, c, num_heads):\n        \"\"\"Initializes a transformer layer, sans LayerNorm for performance, with multihead attention and linear layers.\n\n        See  as described in https://arxiv.org/abs/2010.11929.\n        \"\"\"\n        super().__init__()\n        self.q = nn.Linear(c, c, bias=False)\n        self.k = nn.Linear(c, c, bias=False)\n        self.v = nn.Linear(c, c, bias=False)\n        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n        self.fc1 = nn.Linear(c, c, bias=False)\n        self.fc2 = nn.Linear(c, c, bias=False)\n\n    def forward(self, x):\n        \"\"\"Performs forward pass using MultiheadAttention and two linear transformations with residual connections.\"\"\"\n        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n        x = self.fc2(self.fc1(x)) + x\n        return x\n\n\nclass TransformerBlock(nn.Module):\n    \"\"\"A Transformer block for vision tasks with convolution, position embeddings, and Transformer layers.\"\"\"\n\n    def __init__(self, c1, c2, num_heads, num_layers):\n        \"\"\"Initializes a Transformer block for vision tasks, adapting dimensions if necessary and stacking specified\n        layers.\n        \"\"\"\n        super().__init__()\n        self.conv = None\n        if c1 != c2:\n            self.conv = Conv(c1, c2)\n        self.linear = nn.Linear(c2, c2)  # learnable position embedding\n        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))\n        self.c2 = c2\n\n    def forward(self, x):\n        \"\"\"Processes input through an optional convolution, followed by Transformer layers and position embeddings for\n        object detection.\n        \"\"\"\n        if self.conv is not None:\n            x = self.conv(x)\n        b, _, w, h = x.shape\n        p = x.flatten(2).permute(2, 0, 1)\n        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)\n\n\nclass Bottleneck(nn.Module):\n    \"\"\"A bottleneck layer with optional shortcut and group convolution for efficient feature extraction.\"\"\"\n\n    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes a standard bottleneck layer with optional shortcut and group convolution, supporting channel\n        expansion.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c_, c2, 3, 1, g=g)\n        self.add = shortcut and c1 == c2\n\n    def forward(self, x):\n        \"\"\"Processes input through two convolutions, optionally adds shortcut if channel dimensions match; input is a\n        tensor.\n        \"\"\"\n        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n    \"\"\"CSP bottleneck layer for feature extraction with cross-stage partial connections and optional shortcuts.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes CSP bottleneck with optional shortcuts; args: ch_in, ch_out, number of 
repeats, shortcut bool,\n        groups, expansion.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n        self.cv4 = Conv(2 * c_, c2, 1, 1)\n        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)\n        self.act = nn.SiLU()\n        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n    def forward(self, x):\n        \"\"\"Performs forward pass by applying layers, activation, and concatenation on input x, returning feature-\n        enhanced output.\n        \"\"\"\n        y1 = self.cv3(self.m(self.cv1(x)))\n        y2 = self.cv2(x)\n        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))\n\n\nclass CrossConv(nn.Module):\n    \"\"\"Implements a cross convolution layer with downsampling, expansion, and optional shortcut.\"\"\"\n\n    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):\n        \"\"\"Initializes CrossConv with downsampling, expanding, and optionally shortcutting; `c1` input, `c2` output\n        channels.\n\n        Inputs are ch_in, ch_out, kernel, stride, groups, expansion, shortcut.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, (1, k), (1, s))\n        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)\n        self.add = shortcut and c1 == c2\n\n    def forward(self, x):\n        \"\"\"Performs feature sampling, expanding, and applies shortcut if channels match; expects `x` input tensor.\"\"\"\n        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass C3(nn.Module):\n    \"\"\"Implements a CSP Bottleneck module with three convolutions for enhanced feature extraction in neural networks.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes C3 module with options for channel count, bottleneck repetition, shortcut usage, group\n        convolutions, and expansion.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c1, c_, 1, 1)\n        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)\n        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n    def forward(self, x):\n        \"\"\"Performs forward propagation using concatenated outputs from two convolutions and a Bottleneck sequence.\"\"\"\n        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))\n\n\nclass C3x(C3):\n    \"\"\"Extends the C3 module with cross-convolutions for enhanced feature extraction in neural networks.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes C3x module with cross-convolutions, extending C3 with customizable channel dimensions, groups,\n        and expansion.\n        \"\"\"\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)\n        self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))\n\n\nclass C3TR(C3):\n    \"\"\"C3 module with TransformerBlock for enhanced feature extraction in object detection models.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes C3 module with TransformerBlock for enhanced feature extraction, accepts channel sizes, shortcut\n       
 config, group, and expansion.\n        \"\"\"\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)\n        self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass C3SPP(C3):\n    \"\"\"Extends the C3 module with an SPP layer for enhanced spatial feature extraction and customizable channels.\"\"\"\n\n    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes a C3 module with SPP layer for advanced spatial feature extraction, given channel sizes, kernel\n        sizes, shortcut, group, and expansion ratio.\n        \"\"\"\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)\n        self.m = SPP(c_, c_, k)\n\n\nclass C3Ghost(C3):\n    \"\"\"Implements a C3 module with Ghost Bottlenecks for efficient feature extraction in YOLOv5.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        \"\"\"Initializes YOLOv5's C3 module with Ghost Bottlenecks for efficient feature extraction.\"\"\"\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)  # hidden channels\n        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))\n\n\nclass SPP(nn.Module):\n    \"\"\"Implements Spatial Pyramid Pooling (SPP) for feature extraction, ref: https://arxiv.org/abs/1406.4729.\"\"\"\n\n    def __init__(self, c1, c2, k=(5, 9, 13)):\n        \"\"\"Initializes SPP layer with Spatial Pyramid Pooling, ref: https://arxiv.org/abs/1406.4729, args: c1 (input\n        channels), c2 (output channels), k (kernel sizes).\n        \"\"\"\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n    def forward(self, x):\n        \"\"\"Applies convolution and max pooling layers to the input tensor `x`, concatenates results, and returns output\n        tensor.\n        \"\"\"\n        x = self.cv1(x)\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")  # suppress torch 1.9.0 max_pool2d() warning\n            return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass SPPF(nn.Module):\n    \"\"\"Implements a fast Spatial Pyramid Pooling (SPPF) layer for efficient feature extraction in YOLOv5 models.\"\"\"\n\n    def __init__(self, c1, c2, k=5):\n        \"\"\"Initializes YOLOv5 SPPF layer with given channels and kernel size for YOLOv5 model, combining convolution and\n        max pooling.\n\n        Equivalent to SPP(k=(5, 9, 13)).\n        \"\"\"\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c_ * 4, c2, 1, 1)\n        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)\n\n    def forward(self, x):\n        \"\"\"Processes input through a series of convolutions and max pooling operations for feature extraction.\"\"\"\n        x = self.cv1(x)\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")  # suppress torch 1.9.0 max_pool2d() warning\n            y1 = self.m(x)\n            y2 = self.m(y1)\n            return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))\n\n\nclass Focus(nn.Module):\n    \"\"\"Focuses spatial information into channel space using slicing and convolution for efficient feature extraction.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):\n        
\"\"\"Initializes Focus module to concentrate width-height info into channel space with configurable convolution\n        parameters.\n        \"\"\"\n        super().__init__()\n        self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)\n        # self.contract = Contract(gain=2)\n\n    def forward(self, x):\n        \"\"\"Processes input through Focus mechanism, reshaping (b,c,w,h) to (b,4c,w/2,h/2) then applies convolution.\"\"\"\n        return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))\n        # return self.conv(self.contract(x))\n\n\nclass GhostConv(nn.Module):\n    \"\"\"Implements Ghost Convolution for efficient feature extraction, see https://github.com/huawei-noah/ghostnet.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):\n        \"\"\"Initializes GhostConv with in/out channels, kernel size, stride, groups, and activation; halves out channels\n        for efficiency.\n        \"\"\"\n        super().__init__()\n        c_ = c2 // 2  # hidden channels\n        self.cv1 = Conv(c1, c_, k, s, None, g, act=act)\n        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)\n\n    def forward(self, x):\n        \"\"\"Performs forward pass, concatenating outputs of two convolutions on input `x`: shape (B,C,H,W).\"\"\"\n        y = self.cv1(x)\n        return torch.cat((y, self.cv2(y)), 1)\n\n\nclass GhostBottleneck(nn.Module):\n    \"\"\"Efficient bottleneck layer using Ghost Convolutions, see https://github.com/huawei-noah/ghostnet.\"\"\"\n\n    def __init__(self, c1, c2, k=3, s=1):\n        \"\"\"Initializes GhostBottleneck with ch_in `c1`, ch_out `c2`, kernel size `k`, stride `s`; see\n        https://github.com/huawei-noah/ghostnet.\n        \"\"\"\n        super().__init__()\n        c_ = c2 // 2\n        self.conv = nn.Sequential(\n            GhostConv(c1, c_, 1, 1),  # pw\n            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw\n            GhostConv(c_, c2, 1, 1, act=False),\n        )  # pw-linear\n        self.shortcut = (\n            nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()\n        )\n\n    def forward(self, x):\n        \"\"\"Processes input through conv and shortcut layers, returning their summed output.\"\"\"\n        return self.conv(x) + self.shortcut(x)\n\n\nclass Contract(nn.Module):\n    \"\"\"Contracts spatial dimensions into channel dimensions for efficient processing in neural networks.\"\"\"\n\n    def __init__(self, gain=2):\n        \"\"\"Initializes a layer to contract spatial dimensions (width-height) into channels, e.g., input shape\n        (1,64,80,80) to (1,256,40,40).\n        \"\"\"\n        super().__init__()\n        self.gain = gain\n\n    def forward(self, x):\n        \"\"\"Processes input tensor to expand channel dimensions by contracting spatial dimensions, yielding output shape\n        `(b, c*s*s, h//s, w//s)`.\n        \"\"\"\n        b, c, h, w = x.size()  # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'\n        s = self.gain\n        x = x.view(b, c, h // s, s, w // s, s)  # x(1,64,40,2,40,2)\n        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)\n        return x.view(b, c * s * s, h // s, w // s)  # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n    \"\"\"Expands spatial dimensions by redistributing channels, e.g., from (1,64,80,80) to (1,16,160,160).\"\"\"\n\n    def __init__(self, gain=2):\n        \"\"\"Initializes the Expand module to increase 
spatial dimensions by redistributing channels, with an optional\n        gain factor.\n\n        Example: x(1,64,80,80) to x(1,16,160,160).\n        \"\"\"\n        super().__init__()\n        self.gain = gain\n\n    def forward(self, x):\n        \"\"\"Processes input tensor x to expand spatial dims by redistributing channels, requiring C / gain^2 == 0.\"\"\"\n        b, c, h, w = x.size()  # assert C / s ** 2 == 0, 'Indivisible gain'\n        s = self.gain\n        x = x.view(b, s, s, c // s**2, h, w)  # x(1,2,2,16,80,80)\n        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)\n        return x.view(b, c // s**2, h * s, w * s)  # x(1,16,160,160)\n\n\nclass Concat(nn.Module):\n    \"\"\"Concatenates tensors along a specified dimension for efficient tensor manipulation in neural networks.\"\"\"\n\n    def __init__(self, dimension=1):\n        \"\"\"Initializes a Concat module to concatenate tensors along a specified dimension.\"\"\"\n        super().__init__()\n        self.d = dimension\n\n    def forward(self, x):\n        \"\"\"Concatenates a list of tensors along a specified dims; `x` is a list of tensors, `dimension` is an int.\"\"\"\n        return torch.cat(x, self.d)\n\n\nclass DetectMultiBackend(nn.Module):\n    \"\"\"YOLOv5 MultiBackend class for inference on various backends including PyTorch, ONNX, TensorRT, and more.\"\"\"\n\n    def __init__(self, weights=\"yolov5s.pt\", device=torch.device(\"cpu\"), dnn=False, data=None, fp16=False, fuse=True):\n        \"\"\"Initializes DetectMultiBackend with support for various inference backends, including PyTorch and ONNX.\"\"\"\n        #   PyTorch:              weights = *.pt\n        #   TorchScript:                    *.torchscript\n        #   ONNX Runtime:                   *.onnx\n        #   ONNX OpenCV DNN:                *.onnx --dnn\n        #   OpenVINO:                       *_openvino_model\n        #   CoreML:                         *.mlpackage\n        #   TensorRT:                       *.engine\n        #   TensorFlow SavedModel:          *_saved_model\n        #   TensorFlow GraphDef:            *.pb\n        #   TensorFlow Lite:                *.tflite\n        #   TensorFlow Edge TPU:            *_edgetpu.tflite\n        #   PaddlePaddle:                   *_paddle_model\n        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import\n\n        super().__init__()\n        w = str(weights[0] if isinstance(weights, list) else weights)\n        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n        fp16 &= pt or jit or onnx or engine or triton  # FP16\n        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)\n        stride = 32  # default stride\n        cuda = torch.cuda.is_available() and device.type != \"cpu\"  # use CUDA\n        if not (pt or triton):\n            w = attempt_download(w)  # download if not local\n\n        if pt:  # PyTorch\n            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n            stride = max(int(model.stride.max()), 32)  # model stride\n            names = model.module.names if hasattr(model, \"module\") else model.names  # get class names\n            model.half() if fp16 else model.float()\n            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()\n        elif jit:  # TorchScript\n            LOGGER.info(f\"Loading {w} for 
TorchScript inference...\")\n            extra_files = {\"config.txt\": \"\"}  # model metadata\n            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n            model.half() if fp16 else model.float()\n            if extra_files[\"config.txt\"]:  # load metadata dict\n                d = json.loads(\n                    extra_files[\"config.txt\"],\n                    object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()},\n                )\n                stride, names = int(d[\"stride\"]), d[\"names\"]\n        elif dnn:  # ONNX OpenCV DNN\n            LOGGER.info(f\"Loading {w} for ONNX OpenCV DNN inference...\")\n            check_requirements(\"opencv-python>=4.5.4\")\n            net = cv2.dnn.readNetFromONNX(w)\n        elif onnx:  # ONNX Runtime\n            LOGGER.info(f\"Loading {w} for ONNX Runtime inference...\")\n            check_requirements((\"onnx\", \"onnxruntime-gpu\" if cuda else \"onnxruntime\"))\n            import onnxruntime\n\n            providers = [\"CUDAExecutionProvider\", \"CPUExecutionProvider\"] if cuda else [\"CPUExecutionProvider\"]\n            session = onnxruntime.InferenceSession(w, providers=providers)\n            output_names = [x.name for x in session.get_outputs()]\n            meta = session.get_modelmeta().custom_metadata_map  # metadata\n            if \"stride\" in meta:\n                stride, names = int(meta[\"stride\"]), eval(meta[\"names\"])\n        elif xml:  # OpenVINO\n            LOGGER.info(f\"Loading {w} for OpenVINO inference...\")\n            check_requirements(\"openvino>=2023.0\")  # requires openvino-dev: https://pypi.org/project/openvino-dev/\n            from openvino.runtime import Core, Layout, get_batch\n\n            core = Core()\n            if not Path(w).is_file():  # if not *.xml\n                w = next(Path(w).glob(\"*.xml\"))  # get *.xml file from *_openvino_model dir\n            ov_model = core.read_model(model=w, weights=Path(w).with_suffix(\".bin\"))\n            if ov_model.get_parameters()[0].get_layout().empty:\n                ov_model.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n            batch_dim = get_batch(ov_model)\n            if batch_dim.is_static:\n                batch_size = batch_dim.get_length()\n            ov_compiled_model = core.compile_model(ov_model, device_name=\"AUTO\")  # AUTO selects best available device\n            stride, names = self._load_metadata(Path(w).with_suffix(\".yaml\"))  # load metadata\n        elif engine:  # TensorRT\n            LOGGER.info(f\"Loading {w} for TensorRT inference...\")\n            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download\n\n            check_version(trt.__version__, \"7.0.0\", hard=True)  # require tensorrt>=7.0.0\n            if device.type == \"cpu\":\n                device = torch.device(\"cuda:0\")\n            Binding = namedtuple(\"Binding\", (\"name\", \"dtype\", \"shape\", \"data\", \"ptr\"))\n            logger = trt.Logger(trt.Logger.INFO)\n            with open(w, \"rb\") as f, trt.Runtime(logger) as runtime:\n                model = runtime.deserialize_cuda_engine(f.read())\n            context = model.create_execution_context()\n            bindings = OrderedDict()\n            output_names = []\n            fp16 = False  # default updated below\n            dynamic = False\n            is_trt10 = not hasattr(model, \"num_bindings\")\n            num = range(model.num_io_tensors) if is_trt10 else range(model.num_bindings)\n  
          for i in num:\n                if is_trt10:\n                    name = model.get_tensor_name(i)\n                    dtype = trt.nptype(model.get_tensor_dtype(name))\n                    is_input = model.get_tensor_mode(name) == trt.TensorIOMode.INPUT\n                    if is_input:\n                        if -1 in tuple(model.get_tensor_shape(name)):  # dynamic\n                            dynamic = True\n                            context.set_input_shape(name, tuple(model.get_profile_shape(name, 0)[2]))\n                        if dtype == np.float16:\n                            fp16 = True\n                    else:  # output\n                        output_names.append(name)\n                    shape = tuple(context.get_tensor_shape(name))\n                else:\n                    name = model.get_binding_name(i)\n                    dtype = trt.nptype(model.get_binding_dtype(i))\n                    if model.binding_is_input(i):\n                        if -1 in tuple(model.get_binding_shape(i)):  # dynamic\n                            dynamic = True\n                            context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n                        if dtype == np.float16:\n                            fp16 = True\n                    else:  # output\n                        output_names.append(name)\n                    shape = tuple(context.get_binding_shape(i))\n                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n            batch_size = bindings[\"images\"].shape[0]  # if dynamic, this is instead max batch size\n        elif coreml:  # CoreML\n            LOGGER.info(f\"Loading {w} for CoreML inference...\")\n            import coremltools as ct\n\n            model = ct.models.MLModel(w)\n        elif saved_model:  # TF SavedModel\n            LOGGER.info(f\"Loading {w} for TensorFlow SavedModel inference...\")\n            import tensorflow as tf\n\n            keras = False  # assume TF1 saved_model\n            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n            LOGGER.info(f\"Loading {w} for TensorFlow GraphDef inference...\")\n            import tensorflow as tf\n\n            def wrap_frozen_graph(gd, inputs, outputs):\n                \"\"\"Wraps a TensorFlow GraphDef for inference, returning a pruned function.\"\"\"\n                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), [])  # wrapped\n                ge = x.graph.as_graph_element\n                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n            def gd_outputs(gd):\n                \"\"\"Generates a sorted list of graph outputs excluding NoOp nodes and inputs, formatted as '<name>:0'.\"\"\"\n                name_list, input_list = [], []\n                for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef\n                    name_list.append(node.name)\n                    input_list.extend(node.input)\n                return sorted(f\"{x}:0\" for x in list(set(name_list) - set(input_list)) if not x.startswith(\"NoOp\"))\n\n            gd = tf.Graph().as_graph_def()  # TF GraphDef\n            with open(w, \"rb\") as f:\n                
gd.ParseFromString(f.read())\n            frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n                from tflite_runtime.interpreter import Interpreter, load_delegate\n            except ImportError:\n                import tensorflow as tf\n\n                Interpreter, load_delegate = (\n                    tf.lite.Interpreter,\n                    tf.lite.experimental.load_delegate,\n                )\n            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n                LOGGER.info(f\"Loading {w} for TensorFlow Lite Edge TPU inference...\")\n                delegate = {\"Linux\": \"libedgetpu.so.1\", \"Darwin\": \"libedgetpu.1.dylib\", \"Windows\": \"edgetpu.dll\"}[\n                    platform.system()\n                ]\n                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n            else:  # TFLite\n                LOGGER.info(f\"Loading {w} for TensorFlow Lite inference...\")\n                interpreter = Interpreter(model_path=w)  # load TFLite model\n            interpreter.allocate_tensors()  # allocate\n            input_details = interpreter.get_input_details()  # inputs\n            output_details = interpreter.get_output_details()  # outputs\n            # load metadata\n            with contextlib.suppress(zipfile.BadZipFile):\n                with zipfile.ZipFile(w, \"r\") as model:\n                    meta_file = model.namelist()[0]\n                    meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n                    stride, names = int(meta[\"stride\"]), meta[\"names\"]\n        elif tfjs:  # TF.js\n            raise NotImplementedError(\"ERROR: YOLOv5 TF.js inference is not supported\")\n        # PaddlePaddle\n        elif paddle:\n            LOGGER.info(f\"Loading {w} for PaddlePaddle inference...\")\n            check_requirements(\"paddlepaddle-gpu\" if cuda else \"paddlepaddle>=3.0.0\")\n            import paddle.inference as pdi\n\n            w = Path(w)\n            if w.is_dir():\n                model_file = next(w.rglob(\"*.json\"), None)\n                params_file = next(w.rglob(\"*.pdiparams\"), None)\n            elif w.suffix == \".pdiparams\":\n                model_file = w.with_name(\"model.json\")\n                params_file = w\n            else:\n                raise ValueError(f\"Invalid model path {w}. Provide model directory or a .pdiparams file.\")\n\n            if not (model_file and params_file and model_file.is_file() and params_file.is_file()):\n                raise FileNotFoundError(f\"Model files not found in {w}. 
Both .json and .pdiparams files are required.\")\n\n            config = pdi.Config(str(model_file), str(params_file))\n            if cuda:\n                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n            config.disable_mkldnn()  # disable MKL-DNN for PIR compatibility\n            predictor = pdi.create_predictor(config)\n            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n            output_names = predictor.get_output_names()\n\n        elif triton:  # NVIDIA Triton Inference Server\n            LOGGER.info(f\"Using {w} as Triton Inference Server...\")\n            check_requirements(\"tritonclient[all]\")\n            from utils.triton import TritonRemoteModel\n\n            model = TritonRemoteModel(url=w)\n            nhwc = model.runtime.startswith(\"tensorflow\")\n        else:\n            raise NotImplementedError(f\"ERROR: {w} is not a supported format\")\n\n        # class names\n        if \"names\" not in locals():\n            names = yaml_load(data)[\"names\"] if data else {i: f\"class{i}\" for i in range(999)}\n        if names[0] == \"n01440764\" and len(names) == 1000:  # ImageNet\n            names = yaml_load(ROOT / \"data/ImageNet.yaml\")[\"names\"]  # human-readable names\n\n        self.__dict__.update(locals())  # assign all variables to self\n\n    def forward(self, im, augment=False, visualize=False):\n        \"\"\"Performs YOLOv5 inference on input images with options for augmentation and visualization.\"\"\"\n        _b, _ch, h, w = im.shape  # batch, channel, height, width\n        if self.fp16 and im.dtype != torch.float16:\n            im = im.half()  # to FP16\n        if self.nhwc:\n            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n        if self.pt:  # PyTorch\n            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n        elif self.jit:  # TorchScript\n            y = self.model(im)\n        elif self.dnn:  # ONNX OpenCV DNN\n            im = im.cpu().numpy()  # torch to numpy\n            self.net.setInput(im)\n            y = self.net.forward()\n        elif self.onnx:  # ONNX Runtime\n            im = im.cpu().numpy()  # torch to numpy\n            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n        elif self.xml:  # OpenVINO\n            im = im.cpu().numpy()  # FP32\n            y = list(self.ov_compiled_model(im).values())\n        elif self.engine:  # TensorRT\n            if self.dynamic and im.shape != self.bindings[\"images\"].shape:\n                i = self.model.get_binding_index(\"images\")\n                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic\n                self.bindings[\"images\"] = self.bindings[\"images\"]._replace(shape=im.shape)\n                for name in self.output_names:\n                    i = self.model.get_binding_index(name)\n                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n            s = self.bindings[\"images\"].shape\n            assert im.shape == s, f\"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n            self.binding_addrs[\"images\"] = int(im.data_ptr())\n            self.context.execute_v2(list(self.binding_addrs.values()))\n            y = [self.bindings[x].data for x in sorted(self.output_names)]\n        elif self.coreml:  # CoreML\n            im = im.cpu().numpy()\n            im = 
Image.fromarray((im[0] * 255).astype(\"uint8\"))\n            # im = im.resize((192, 320), Image.BILINEAR)\n            y = self.model.predict({\"image\": im})  # coordinates are xywh normalized\n            if \"confidence\" in y:\n                box = xywh2xyxy(y[\"coordinates\"] * [[w, h, w, h]])  # xyxy pixels\n                conf, cls = y[\"confidence\"].max(1), y[\"confidence\"].argmax(1).astype(float)\n                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n            else:\n                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)\n        elif self.paddle:  # PaddlePaddle\n            im = im.cpu().numpy().astype(np.float32)\n            self.input_handle.copy_from_cpu(im)\n            self.predictor.run()\n            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n        elif self.triton:  # NVIDIA Triton Inference Server\n            y = self.model(im)\n        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n            im = im.cpu().numpy()\n            if self.saved_model:  # SavedModel\n                y = self.model(im, training=False) if self.keras else self.model(im)\n            elif self.pb:  # GraphDef\n                y = self.frozen_func(x=self.tf.constant(im))\n            else:  # Lite or Edge TPU\n                input = self.input_details[0]\n                int8 = input[\"dtype\"] == np.uint8  # is TFLite quantized uint8 model\n                if int8:\n                    scale, zero_point = input[\"quantization\"]\n                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale\n                self.interpreter.set_tensor(input[\"index\"], im)\n                self.interpreter.invoke()\n                y = []\n                for output in self.output_details:\n                    x = self.interpreter.get_tensor(output[\"index\"])\n                    if int8:\n                        scale, zero_point = output[\"quantization\"]\n                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale\n                    y.append(x)\n            if len(y) == 2 and len(y[1].shape) != 4:\n                y = list(reversed(y))\n            y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n            y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels\n\n        if isinstance(y, (list, tuple)):\n            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n        else:\n            return self.from_numpy(y)\n\n    def from_numpy(self, x):\n        \"\"\"Converts a NumPy array to a torch tensor, maintaining device compatibility.\"\"\"\n        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n    def warmup(self, imgsz=(1, 3, 640, 640)):\n        \"\"\"Performs a single inference warmup to initialize model weights, accepting an `imgsz` tuple for image size.\"\"\"\n        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n        if any(warmup_types) and (self.device.type != \"cpu\" or self.triton):\n            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input\n            for _ in range(2 if self.jit else 1):  #\n                self.forward(im)  # warmup\n\n    @staticmethod\n    def _model_type(p=\"path/to/model.pt\"):\n        \"\"\"Determines model type from file path or URL, supporting various export formats.\n\n        Example: 
path='path/to/model.onnx' -> type=onnx\n        \"\"\"\n        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n        from export import export_formats\n        from utils.downloads import is_url\n\n        sf = list(export_formats().Suffix)  # export suffixes\n        if not is_url(p, check=False):\n            check_suffix(p, sf)  # checks\n        url = urlparse(p)  # if url may be Triton inference server\n        types = [s in Path(p).name for s in sf]\n        types[8] &= not types[9]  # tflite &= not edgetpu\n        triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n        return [*types, triton]\n\n    @staticmethod\n    def _load_metadata(f=Path(\"path/to/meta.yaml\")):\n        \"\"\"Loads metadata from a YAML file, returning strides and names if the file exists, otherwise `None`.\"\"\"\n        if f.exists():\n            d = yaml_load(f)\n            return d[\"stride\"], d[\"names\"]  # assign stride, names\n        return None, None\n\n\nclass AutoShape(nn.Module):\n    \"\"\"AutoShape class for robust YOLOv5 inference with preprocessing, NMS, and support for various input formats.\"\"\"\n\n    conf = 0.25  # NMS confidence threshold\n    iou = 0.45  # NMS IoU threshold\n    agnostic = False  # NMS class-agnostic\n    multi_label = False  # NMS multiple labels per box\n    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs\n    max_det = 1000  # maximum number of detections per image\n    amp = False  # Automatic Mixed Precision (AMP) inference\n\n    def __init__(self, model, verbose=True):\n        \"\"\"Initializes YOLOv5 model for inference, setting up attributes and preparing model for evaluation.\"\"\"\n        super().__init__()\n        if verbose:\n            LOGGER.info(\"Adding AutoShape... 
\")\n        copy_attr(self, model, include=(\"yaml\", \"nc\", \"hyp\", \"names\", \"stride\", \"abc\"), exclude=())  # copy attributes\n        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance\n        self.pt = not self.dmb or model.pt  # PyTorch model\n        self.model = model.eval()\n        if self.pt:\n            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()\n            m.inplace = False  # Detect.inplace=False for safe multithread inference\n            m.export = True  # do not output loss values\n\n    def _apply(self, fn):\n        \"\"\"Applies to(), cpu(), cuda(), half() etc.\n\n        to model tensors excluding parameters or registered buffers.\n        \"\"\"\n        self = super()._apply(fn)\n        if self.pt:\n            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()\n            m.stride = fn(m.stride)\n            m.grid = list(map(fn, m.grid))\n            if isinstance(m.anchor_grid, list):\n                m.anchor_grid = list(map(fn, m.anchor_grid))\n        return self\n\n    @smart_inference_mode()\n    def forward(self, ims, size=640, augment=False, profile=False):\n        \"\"\"Performs inference on inputs with optional augment & profiling.\n\n        Supports various formats including file, URI, OpenCV, PIL, numpy, torch.\n        \"\"\"\n        # For size(height=640, width=1280), RGB images example inputs are:\n        #   file:        ims = 'data/images/zidane.jpg'  # str or PosixPath\n        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'\n        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)\n        #   PIL:             = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)\n        #   numpy:           = np.zeros((640,1280,3))  # HWC\n        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)\n        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  
# list of images\n\n        dt = (Profile(), Profile(), Profile())\n        with dt[0]:\n            if isinstance(size, int):  # expand\n                size = (size, size)\n            p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param\n            autocast = self.amp and (p.device.type != \"cpu\")  # Automatic Mixed Precision (AMP) inference\n            if isinstance(ims, torch.Tensor):  # torch\n                with amp.autocast(autocast):\n                    return self.model(ims.to(p.device).type_as(p), augment=augment)  # inference\n\n            # Pre-process\n            n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims])  # number, list of images\n            shape0, shape1, files = [], [], []  # image and inference shapes, filenames\n            for i, im in enumerate(ims):\n                f = f\"image{i}\"  # filename\n                if isinstance(im, (str, Path)):  # filename or uri\n                    im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith(\"http\") else im), im\n                    im = np.asarray(exif_transpose(im))\n                elif isinstance(im, Image.Image):  # PIL Image\n                    im, f = np.asarray(exif_transpose(im)), getattr(im, \"filename\", f) or f\n                files.append(Path(f).with_suffix(\".jpg\").name)\n                if im.shape[0] < 5:  # image in CHW\n                    im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)\n                im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # enforce 3ch input\n                s = im.shape[:2]  # HWC\n                shape0.append(s)  # image shape\n                g = max(size) / max(s)  # gain\n                shape1.append([int(y * g) for y in s])\n                ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update\n            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)]  # inf shape\n            x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad\n            x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW\n            x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32\n\n        with amp.autocast(autocast):\n            # Inference\n            with dt[1]:\n                y = self.model(x, augment=augment)  # forward\n\n            # Post-process\n            with dt[2]:\n                y = non_max_suppression(\n                    y if self.dmb else y[0],\n                    self.conf,\n                    self.iou,\n                    self.classes,\n                    self.agnostic,\n                    self.multi_label,\n                    max_det=self.max_det,\n                )  # NMS\n                for i in range(n):\n                    scale_boxes(shape1, y[i][:, :4], shape0[i])\n\n            return Detections(ims, y, files, dt, self.names, x.shape)\n\n\nclass Detections:\n    \"\"\"Manages YOLOv5 detection results with methods for visualization, saving, cropping, and exporting detections.\"\"\"\n\n    def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):\n        \"\"\"Initializes the YOLOv5 Detections class with image info, predictions, filenames, timing and normalization.\"\"\"\n        super().__init__()\n        d = pred[0].device  # device\n        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # 
normalizations\n        self.ims = ims  # list of images as numpy arrays\n        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)\n        self.names = names  # class names\n        self.files = files  # image filenames\n        self.times = times  # profiling times\n        self.xyxy = pred  # xyxy pixels\n        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels\n        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized\n        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized\n        self.n = len(self.pred)  # number of images (batch size)\n        self.t = tuple(x.t / self.n * 1e3 for x in times)  # timestamps (ms)\n        self.s = tuple(shape)  # inference BCHW shape\n\n    def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path(\"\")):\n        \"\"\"Executes model predictions, displaying and/or saving outputs with optional crops and labels.\"\"\"\n        s, crops = \"\", []\n        for i, (im, pred) in enumerate(zip(self.ims, self.pred)):\n            s += f\"\\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} \"  # string\n            if pred.shape[0]:\n                for c in pred[:, -1].unique():\n                    n = (pred[:, -1] == c).sum()  # detections per class\n                    s += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \"  # add to string\n                s = s.rstrip(\", \")\n                if show or save or render or crop:\n                    annotator = Annotator(im, example=str(self.names))\n                    for *box, conf, cls in reversed(pred):  # xyxy, confidence, class\n                        label = f\"{self.names[int(cls)]} {conf:.2f}\"\n                        if crop:\n                            file = save_dir / \"crops\" / self.names[int(cls)] / self.files[i] if save else None\n                            crops.append(\n                                {\n                                    \"box\": box,\n                                    \"conf\": conf,\n                                    \"cls\": cls,\n                                    \"label\": label,\n                                    \"im\": save_one_box(box, im, file=file, save=save),\n                                }\n                            )\n                        else:  # all others\n                            annotator.box_label(box, label if labels else \"\", color=colors(cls))\n                    im = annotator.im\n            else:\n                s += \"(no detections)\"\n\n            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np\n            if show:\n                if is_jupyter():\n                    from IPython.display import display\n\n                    display(im)\n                else:\n                    im.show(self.files[i])\n            if save:\n                f = self.files[i]\n                im.save(save_dir / f)  # save\n                if i == self.n - 1:\n                    LOGGER.info(f\"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}\")\n            if render:\n                self.ims[i] = np.asarray(im)\n        if pprint:\n            s = s.lstrip(\"\\n\")\n            return f\"{s}\\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}\" % self.t\n        if crop:\n            if save:\n                LOGGER.info(f\"Saved results to {save_dir}\\n\")\n            return crops\n\n    
@TryExcept(\"Showing images is not supported in this environment\")\n    def show(self, labels=True):\n        \"\"\"Displays detection results with optional labels.\n\n        Usage: show(labels=True)\n        \"\"\"\n        self._run(show=True, labels=labels)  # show results\n\n    def save(self, labels=True, save_dir=\"runs/detect/exp\", exist_ok=False):\n        \"\"\"Saves detection results with optional labels to a specified directory.\n\n        Usage: save(labels=True, save_dir='runs/detect/exp', exist_ok=False)\n        \"\"\"\n        save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir\n        self._run(save=True, labels=labels, save_dir=save_dir)  # save results\n\n    def crop(self, save=True, save_dir=\"runs/detect/exp\", exist_ok=False):\n        \"\"\"Crops detection results, optionally saves them to a directory.\n\n        Args: save (bool), save_dir (str), exist_ok (bool).\n        \"\"\"\n        save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None\n        return self._run(crop=True, save=save, save_dir=save_dir)  # crop results\n\n    def render(self, labels=True):\n        \"\"\"Renders detection results with optional labels on images; args: labels (bool) indicating label inclusion.\"\"\"\n        self._run(render=True, labels=labels)  # render results\n        return self.ims\n\n    def pandas(self):\n        \"\"\"Returns detections as pandas DataFrames for various box formats (xyxy, xyxyn, xywh, xywhn).\n\n        Example: print(results.pandas().xyxy[0]).\n        \"\"\"\n        new = copy(self)  # return copy\n        ca = \"xmin\", \"ymin\", \"xmax\", \"ymax\", \"confidence\", \"class\", \"name\"  # xyxy columns\n        cb = \"xcenter\", \"ycenter\", \"width\", \"height\", \"confidence\", \"class\", \"name\"  # xywh columns\n        for k, c in zip([\"xyxy\", \"xyxyn\", \"xywh\", \"xywhn\"], [ca, ca, cb, cb]):\n            a = [[[*x[:5], int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update\n            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n        return new\n\n    def tolist(self):\n        \"\"\"Converts a Detections object into a list of individual detection results for iteration.\n\n        Example: for result in results.tolist():\n        \"\"\"\n        r = range(self.n)  # iterable\n        return [\n            Detections(\n                [self.ims[i]],\n                [self.pred[i]],\n                [self.files[i]],\n                self.times,\n                self.names,\n                self.s,\n            )\n            for i in r\n        ]\n\n    def print(self):\n        \"\"\"Logs the string representation of the current object's state via the LOGGER.\"\"\"\n        LOGGER.info(self.__str__())\n\n    def __len__(self):\n        \"\"\"Returns the number of results stored, overrides the default len(results).\"\"\"\n        return self.n\n\n    def __str__(self):\n        \"\"\"Returns a string representation of the model's results, suitable for printing, overrides default\n        print(results).\n        \"\"\"\n        return self._run(pprint=True)  # print results\n\n    def __repr__(self):\n        \"\"\"Returns a string representation of the YOLOv5 object, including its class and formatted results.\"\"\"\n        return f\"YOLOv5 {self.__class__} instance\\n\" + self.__str__()\n\n\nclass Proto(nn.Module):\n    \"\"\"YOLOv5 mask Proto module for segmentation models, performing convolutions and upsampling on input 
tensors.\"\"\"\n\n    def __init__(self, c1, c_=256, c2=32):\n        \"\"\"Initializes YOLOv5 Proto module for segmentation with input, proto, and mask channels configuration.\"\"\"\n        super().__init__()\n        self.cv1 = Conv(c1, c_, k=3)\n        self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n        self.cv2 = Conv(c_, c_, k=3)\n        self.cv3 = Conv(c_, c2)\n\n    def forward(self, x):\n        \"\"\"Performs a forward pass using convolutional layers and upsampling on input tensor `x`.\"\"\"\n        return self.cv3(self.cv2(self.upsample(self.cv1(x))))\n\n\nclass Classify(nn.Module):\n    \"\"\"YOLOv5 classification head with convolution, pooling, and dropout layers for channel transformation.\"\"\"\n\n    def __init__(\n        self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0\n    ):  # ch_in, ch_out, kernel, stride, padding, groups, dropout probability\n        \"\"\"Initializes YOLOv5 classification head with convolution, pooling, and dropout layers for input to output\n        channel transformation.\n        \"\"\"\n        super().__init__()\n        c_ = 1280  # efficientnet_b0 size\n        self.conv = Conv(c1, c_, k, s, autopad(k, p), g)\n        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)\n        self.drop = nn.Dropout(p=dropout_p, inplace=True)\n        self.linear = nn.Linear(c_, c2)  # to x(b,c2)\n\n    def forward(self, x):\n        \"\"\"Processes input through conv, pool, drop, and linear layers; supports list concatenation input.\"\"\"\n        if isinstance(x, list):\n            x = torch.cat(x, 1)\n        return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))\n"
  },
  {
    "path": "models/experimental.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Experimental modules.\"\"\"\n\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom ultralytics.utils.patches import torch_load\n\nfrom utils.downloads import attempt_download\n\n\nclass Sum(nn.Module):\n    \"\"\"Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070.\"\"\"\n\n    def __init__(self, n, weight=False):\n        \"\"\"Initializes a module to sum outputs of layers with number of inputs `n` and optional weighting, supporting 2+\n        inputs.\n        \"\"\"\n        super().__init__()\n        self.weight = weight  # apply weights boolean\n        self.iter = range(n - 1)  # iter object\n        if weight:\n            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights\n\n    def forward(self, x):\n        \"\"\"Processes input through a customizable weighted sum of `n` inputs, optionally applying learned weights.\"\"\"\n        y = x[0]  # no weight\n        if self.weight:\n            w = torch.sigmoid(self.w) * 2\n            for i in self.iter:\n                y = y + x[i + 1] * w[i]\n        else:\n            for i in self.iter:\n                y = y + x[i + 1]\n        return y\n\n\nclass MixConv2d(nn.Module):\n    \"\"\"Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595.\"\"\"\n\n    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):\n        \"\"\"Initializes MixConv2d with mixed depth-wise convolutional layers, taking input and output channels (c1, c2),\n        kernel sizes (k), stride (s), and channel distribution strategy (equal_ch).\n        \"\"\"\n        super().__init__()\n        n = len(k)  # number of convolutions\n        if equal_ch:  # equal c_ per group\n            i = torch.linspace(0, n - 1e-6, c2).floor()  # c2 indices\n            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels\n        else:  # equal weight.numel() per group\n            b = [c2] + [0] * n\n            a = np.eye(n + 1, n, k=-1)\n            a -= np.roll(a, 1, axis=1)\n            a *= np.array(k) ** 2\n            a[0] = 1\n            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b\n\n        self.m = nn.ModuleList(\n            [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]\n        )\n        self.bn = nn.BatchNorm2d(c2)\n        self.act = nn.SiLU()\n\n    def forward(self, x):\n        \"\"\"Performs forward pass by applying SiLU activation on batch-normalized concatenated convolutional layer\n        outputs.\n        \"\"\"\n        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))\n\n\nclass Ensemble(nn.ModuleList):\n    \"\"\"Ensemble of models.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initializes an ensemble of models to be used for aggregated predictions.\"\"\"\n        super().__init__()\n\n    def forward(self, x, augment=False, profile=False, visualize=False):\n        \"\"\"Performs forward pass aggregating outputs from an ensemble of models..\"\"\"\n        y = [module(x, augment, profile, visualize)[0] for module in self]\n        # y = torch.stack(y).max(0)[0]  # max ensemble\n        # y = torch.stack(y).mean(0)  # mean ensemble\n        y = torch.cat(y, 1)  # nms ensemble\n        return y, None  # inference, train output\n\n\ndef attempt_load(weights, device=None, inplace=True, fuse=True):\n    \"\"\"Loads and fuses an ensemble or single YOLOv5 
model from weights, handling device placement and model adjustments.\n\n    Example inputs: weights=[a,b,c] or a single model weights=[a] or weights=a.\n    \"\"\"\n    from models.yolo import Detect, Model\n\n    model = Ensemble()\n    for w in weights if isinstance(weights, list) else [weights]:\n        ckpt = torch_load(attempt_download(w), map_location=\"cpu\")  # load\n        ckpt = (ckpt.get(\"ema\") or ckpt[\"model\"]).to(device).float()  # FP32 model\n\n        # Model compatibility updates\n        if not hasattr(ckpt, \"stride\"):\n            ckpt.stride = torch.tensor([32.0])\n        if hasattr(ckpt, \"names\") and isinstance(ckpt.names, (list, tuple)):\n            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict\n\n        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, \"fuse\") else ckpt.eval())  # model in eval mode\n\n    # Module updates\n    for m in model.modules():\n        t = type(m)\n        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):\n            m.inplace = inplace\n            if t is Detect and not isinstance(m.anchor_grid, list):\n                delattr(m, \"anchor_grid\")\n                setattr(m, \"anchor_grid\", [torch.zeros(1)] * m.nl)\n        elif t is nn.Upsample and not hasattr(m, \"recompute_scale_factor\"):\n            m.recompute_scale_factor = None  # torch 1.11.0 compatibility\n\n    # Return model\n    if len(model) == 1:\n        return model[-1]\n\n    # Return detection ensemble\n    print(f\"Ensemble created with {weights}\\n\")\n    for k in \"names\", \"nc\", \"yaml\":\n        setattr(model, k, getattr(model[0], k))\n    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride\n    assert all(model[0].nc == m.nc for m in model), f\"Models have different class counts: {[m.nc for m in model]}\"\n    return model\n"
  },
  {
    "path": "models/hub/anchors.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Default anchors for COCO data\n\n# P5 -------------------------------------------------------------------------------------------------------------------\n# P5-640:\nanchors_p5_640:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# P6 -------------------------------------------------------------------------------------------------------------------\n# P6-640:  thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11,  21,19,  17,41,  43,32,  39,70,  86,64,  65,131,  134,130,  120,265,  282,180,  247,354,  512,387\nanchors_p6_640:\n  - [9, 11, 21, 19, 17, 41] # P3/8\n  - [43, 32, 39, 70, 86, 64] # P4/16\n  - [65, 131, 134, 130, 120, 265] # P5/32\n  - [282, 180, 247, 354, 512, 387] # P6/64\n\n# P6-1280:  thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792\nanchors_p6_1280:\n  - [19, 27, 44, 40, 38, 94] # P3/8\n  - [96, 68, 86, 152, 180, 137] # P4/16\n  - [140, 301, 303, 264, 238, 542] # P5/32\n  - [436, 615, 739, 380, 925, 792] # P6/64\n\n# P6-1920:  thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41,  67,59,  57,141,  144,103,  129,227,  270,205,  209,452,  455,396,  358,812,  653,922,  1109,570,  1387,1187\nanchors_p6_1920:\n  - [28, 41, 67, 59, 57, 141] # P3/8\n  - [144, 103, 129, 227, 270, 205] # P4/16\n  - [209, 452, 455, 396, 358, 812] # P5/32\n  - [653, 922, 1109, 570, 1387, 1187] # P6/64\n\n# P7 -------------------------------------------------------------------------------------------------------------------\n# P7-640:  thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11,  13,30,  29,20,  30,46,  61,38,  39,92,  78,80,  146,66,  79,163,  149,150,  321,143,  157,303,  257,402,  359,290,  524,372\nanchors_p7_640:\n  - [11, 11, 13, 30, 29, 20] # P3/8\n  - [30, 46, 61, 38, 39, 92] # P4/16\n  - [78, 80, 146, 66, 79, 163] # P5/32\n  - [149, 150, 321, 143, 157, 303] # P6/64\n  - [257, 402, 359, 290, 524, 372] # P7/128\n\n# P7-1280:  thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22,  54,36,  32,77,  70,83,  138,71,  75,173,  165,159,  148,334,  375,151,  334,317,  251,626,  499,474,  750,326,  534,814,  1079,818\nanchors_p7_1280:\n  - [19, 22, 54, 36, 32, 77] # P3/8\n  - [70, 83, 138, 71, 75, 173] # P4/16\n  - [165, 159, 148, 334, 375, 151] # P5/32\n  - [334, 317, 251, 626, 499, 474] # P6/64\n  - [750, 326, 534, 814, 1079, 818] # P7/128\n\n# P7-1920:  thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34,  81,55,  47,115,  105,124,  207,107,  113,259,  247,238,  222,500,  563,227,  501,476,  376,939,  749,711,  1126,489,  801,1222,  1618,1227\nanchors_p7_1920:\n  - [29, 34, 81, 55, 47, 115] # P3/8\n  - [105, 124, 207, 107, 113, 259] # P4/16\n  - [247, 238, 222, 500, 563, 227] # P5/32\n  - [501, 476, 376, 939, 749, 711] # P6/64\n  - [1126, 489, 801, 1222, 1618, 1227] # P7/128\n"
  },
  {
    "path": "models/hub/yolov3-spp.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# darknet53 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [32, 3, 1]], # 0\n    [-1, 1, Conv, [64, 3, 2]], # 1-P1/2\n    [-1, 1, Bottleneck, [64]],\n    [-1, 1, Conv, [128, 3, 2]], # 3-P2/4\n    [-1, 2, Bottleneck, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 5-P3/8\n    [-1, 8, Bottleneck, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 7-P4/16\n    [-1, 8, Bottleneck, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32\n    [-1, 4, Bottleneck, [1024]], # 10\n  ]\n\n# YOLOv3-SPP head\nhead: [\n    [-1, 1, Bottleneck, [1024, False]],\n    [-1, 1, SPP, [512, [5, 9, 13]]],\n    [-1, 1, Conv, [1024, 3, 1]],\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)\n\n    [-2, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P4\n    [-1, 1, Bottleneck, [512, False]],\n    [-1, 1, Bottleneck, [512, False]],\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)\n\n    [-2, 1, Conv, [128, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P3\n    [-1, 1, Bottleneck, [256, False]],\n    [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)\n\n    [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov3-tiny.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 14, 23, 27, 37, 58] # P4/16\n  - [81, 82, 135, 169, 344, 319] # P5/32\n\n# YOLOv3-tiny backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [16, 3, 1]], # 0\n    [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2\n    [-1, 1, Conv, [32, 3, 1]],\n    [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4\n    [-1, 1, Conv, [64, 3, 1]],\n    [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8\n    [-1, 1, Conv, [128, 3, 1]],\n    [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16\n    [-1, 1, Conv, [256, 3, 1]],\n    [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32\n    [-1, 1, Conv, [512, 3, 1]],\n    [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11\n    [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12\n  ]\n\n# YOLOv3-tiny head\nhead: [\n    [-1, 1, Conv, [1024, 3, 1]],\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)\n\n    [-2, 1, Conv, [128, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P4\n    [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)\n\n    [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov3.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# darknet53 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [32, 3, 1]], # 0\n    [-1, 1, Conv, [64, 3, 2]], # 1-P1/2\n    [-1, 1, Bottleneck, [64]],\n    [-1, 1, Conv, [128, 3, 2]], # 3-P2/4\n    [-1, 2, Bottleneck, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 5-P3/8\n    [-1, 8, Bottleneck, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 7-P4/16\n    [-1, 8, Bottleneck, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32\n    [-1, 4, Bottleneck, [1024]], # 10\n  ]\n\n# YOLOv3 head\nhead: [\n    [-1, 1, Bottleneck, [1024, False]],\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, Conv, [1024, 3, 1]],\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)\n\n    [-2, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P4\n    [-1, 1, Bottleneck, [512, False]],\n    [-1, 1, Bottleneck, [512, False]],\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)\n\n    [-2, 1, Conv, [128, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P3\n    [-1, 1, Bottleneck, [256, False]],\n    [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)\n\n    [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-bifpn.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 BiFPN head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-fpn.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 FPN head\nhead: [\n    [-1, 3, C3, [1024, False]], # 10 (P5/32-large)\n\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 3, C3, [512, False]], # 14 (P4/16-medium)\n\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 3, C3, [256, False]], # 18 (P3/8-small)\n\n    [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-p2.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors: 3 # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [128, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 2], 1, Concat, [1]], # cat backbone P2\n    [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)\n\n    [-1, 1, Conv, [128, 3, 2]],\n    [[-1, 18], 1, Concat, [1]], # cat head P3\n    [-1, 3, C3, [256, False]], # 24 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 27 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 30 (P5/32-large)\n\n    [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-p34.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.50 # layer channel multiple\nanchors: 3 # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head with (P3, P4) outputs\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [[17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-p6.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors: 3 # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 11\n  ]\n\n# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs\nhead: [\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 15\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 19\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 23 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 20], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 16], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 29 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 12], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)\n\n    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-p7.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors: 3 # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128\n    [-1, 3, C3, [1280]],\n    [-1, 1, SPPF, [1280, 5]], # 13\n  ]\n\n# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs\nhead: [\n    [-1, 1, Conv, [1024, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 10], 1, Concat, [1]], # cat backbone P6\n    [-1, 3, C3, [1024, False]], # 17\n\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 21\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 25\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 29 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 26], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 32 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 22], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 35 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 18], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)\n\n    [-1, 1, Conv, [1024, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P7\n    [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)\n\n    [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5-panet.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 PANet head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5l6.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [19, 27, 44, 40, 38, 94] # P3/8\n  - [96, 68, 86, 152, 180, 137] # P4/16\n  - [140, 301, 303, 264, 238, 542] # P5/32\n  - [436, 615, 739, 380, 925, 792] # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 15\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 19\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 23 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 20], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 16], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 29 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 12], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)\n\n    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5m6.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.67 # model depth multiple\nwidth_multiple: 0.75 # layer channel multiple\nanchors:\n  - [19, 27, 44, 40, 38, 94] # P3/8\n  - [96, 68, 86, 152, 180, 137] # P4/16\n  - [140, 301, 303, 264, 238, 542] # P5/32\n  - [436, 615, 739, 380, 925, 792] # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 15\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 19\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 23 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 20], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 16], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 29 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 12], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)\n\n    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5n6.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.25 # layer channel multiple\nanchors:\n  - [19, 27, 44, 40, 38, 94] # P3/8\n  - [96, 68, 86, 152, 180, 137] # P4/16\n  - [140, 301, 303, 264, 238, 542] # P5/32\n  - [436, 615, 739, 380, 925, 792] # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 15\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 19\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 23 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 20], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 16], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 29 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 12], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)\n\n    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5s-LeakyReLU.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\nactivation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.50 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5s-ghost.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.50 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3Ghost, [128]],\n    [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3Ghost, [256]],\n    [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3Ghost, [512]],\n    [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3Ghost, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, GhostConv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3Ghost, [512, False]], # 13\n\n    [-1, 1, GhostConv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, GhostConv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, GhostConv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5s-transformer.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.50 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5s6.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.50 # layer channel multiple\nanchors:\n  - [19, 27, 44, 40, 38, 94] # P3/8\n  - [96, 68, 86, 152, 180, 137] # P4/16\n  - [140, 301, 303, 264, 238, 542] # P5/32\n  - [436, 615, 739, 380, 925, 792] # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 15\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 19\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 23 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 20], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 16], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 29 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 12], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)\n\n    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "models/hub/yolov5x6.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.33 # model depth multiple\nwidth_multiple: 1.25 # layer channel multiple\nanchors:\n  - [19, 27, 44, 40, 38, 94] # P3/8\n  - [96, 68, 86, 152, 180, 137] # P4/16\n  - [140, 301, 303, 264, 238, 542] # P5/32\n  - [436, 615, 739, 380, 925, 792] # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [768]],\n    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [768, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 8], 1, Concat, [1]], # cat backbone P5\n    [-1, 3, C3, [768, False]], # 15\n\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 19\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 23 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 20], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 16], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [768, False]], # 29 (P5/32-large)\n\n    [-1, 1, Conv, [768, 3, 2]],\n    [[-1, 12], 1, Concat, [1]], # cat head P6\n    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)\n\n    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "models/segment/yolov5l-seg.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/segment/yolov5m-seg.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.67 # model depth multiple\nwidth_multiple: 0.75 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/segment/yolov5n-seg.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.25 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/segment/yolov5s-seg.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.5 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/segment/yolov5x-seg.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.33 # model depth multiple\nwidth_multiple: 1.25 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/tf.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTensorFlow, Keras and TFLite versions of YOLOv5\nAuthored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127.\n\nUsage:\n    $ python models/tf.py --weights yolov5s.pt\n\nExport:\n    $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs\n\"\"\"\n\nimport argparse\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n# ROOT = ROOT.relative_to(Path.cwd())  # relative\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nfrom tensorflow import keras\n\nfrom models.common import (\n    C3,\n    SPP,\n    SPPF,\n    Bottleneck,\n    BottleneckCSP,\n    C3x,\n    Concat,\n    Conv,\n    CrossConv,\n    DWConv,\n    DWConvTranspose2d,\n    Focus,\n    autopad,\n)\nfrom models.experimental import MixConv2d, attempt_load\nfrom models.yolo import Detect, Segment\nfrom utils.activations import SiLU\nfrom utils.general import LOGGER, make_divisible, print_args\n\n\nclass TFBN(keras.layers.Layer):\n    \"\"\"TensorFlow BatchNormalization wrapper for initializing with optional pretrained weights.\"\"\"\n\n    def __init__(self, w=None):\n        \"\"\"Initializes a TensorFlow BatchNormalization layer with optional pretrained weights.\"\"\"\n        super().__init__()\n        self.bn = keras.layers.BatchNormalization(\n            beta_initializer=keras.initializers.Constant(w.bias.numpy()),\n            gamma_initializer=keras.initializers.Constant(w.weight.numpy()),\n            moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),\n            moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),\n            epsilon=w.eps,\n        )\n\n    def call(self, inputs):\n        \"\"\"Applies batch normalization to the inputs.\"\"\"\n        return self.bn(inputs)\n\n\nclass TFPad(keras.layers.Layer):\n    \"\"\"Pads input tensors in spatial dimensions 1 and 2 with specified integer or tuple padding values.\"\"\"\n\n    def __init__(self, pad):\n        \"\"\"Initializes a padding layer for spatial dimensions 1 and 2 with specified padding, supporting both int and\n        tuple inputs.\n\n        Inputs are\n        \"\"\"\n        super().__init__()\n        if isinstance(pad, int):\n            self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])\n        else:  # tuple/list\n            self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])\n\n    def call(self, inputs):\n        \"\"\"Pads input tensor with zeros using specified padding, suitable for int and tuple pad dimensions.\"\"\"\n        return tf.pad(inputs, self.pad, mode=\"constant\", constant_values=0)\n\n\nclass TFConv(keras.layers.Layer):\n    \"\"\"Implements a standard convolutional layer with optional batch normalization and activation for TensorFlow.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):\n        \"\"\"Initializes a standard convolution layer with optional batch normalization and activation; supports only\n        group=1.\n\n        Inputs are ch_in, ch_out, weights, kernel, stride, padding, groups.\n        \"\"\"\n        super().__init__()\n        assert g == 1, \"TF v2.2 Conv2D does not support 'groups' argument\"\n        # TensorFlow convolution 
padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)\n        # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch\n        conv = keras.layers.Conv2D(\n            filters=c2,\n            kernel_size=k,\n            strides=s,\n            padding=\"SAME\" if s == 1 else \"VALID\",\n            use_bias=not hasattr(w, \"bn\"),\n            kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),\n            bias_initializer=\"zeros\" if hasattr(w, \"bn\") else keras.initializers.Constant(w.conv.bias.numpy()),\n        )\n        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])\n        self.bn = TFBN(w.bn) if hasattr(w, \"bn\") else tf.identity\n        self.act = activations(w.act) if act else tf.identity\n\n    def call(self, inputs):\n        \"\"\"Applies convolution, batch normalization, and activation function to input tensors.\"\"\"\n        return self.act(self.bn(self.conv(inputs)))\n\n\nclass TFDWConv(keras.layers.Layer):\n    \"\"\"Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):\n        \"\"\"Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow\n        models.\n\n        Input are ch_in, ch_out, weights, kernel, stride, padding, groups.\n        \"\"\"\n        super().__init__()\n        assert c2 % c1 == 0, f\"TFDWConv() output={c2} must be a multiple of input={c1} channels\"\n        conv = keras.layers.DepthwiseConv2D(\n            kernel_size=k,\n            depth_multiplier=c2 // c1,\n            strides=s,\n            padding=\"SAME\" if s == 1 else \"VALID\",\n            use_bias=not hasattr(w, \"bn\"),\n            depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),\n            bias_initializer=\"zeros\" if hasattr(w, \"bn\") else keras.initializers.Constant(w.conv.bias.numpy()),\n        )\n        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])\n        self.bn = TFBN(w.bn) if hasattr(w, \"bn\") else tf.identity\n        self.act = activations(w.act) if act else tf.identity\n\n    def call(self, inputs):\n        \"\"\"Applies convolution, batch normalization, and activation function to input tensors.\"\"\"\n        return self.act(self.bn(self.conv(inputs)))\n\n\nclass TFDWConvTranspose2d(keras.layers.Layer):\n    \"\"\"Implements a depthwise ConvTranspose2D layer for TensorFlow with specific settings.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):\n        \"\"\"Initializes depthwise ConvTranspose2D layer with specific channel, kernel, stride, and padding settings.\n\n        Inputs are ch_in, ch_out, weights, kernel, stride, padding, groups.\n        \"\"\"\n        super().__init__()\n        assert c1 == c2, f\"TFDWConv() output={c2} must be equal to input={c1} channels\"\n        assert k == 4 and p1 == 1, \"TFDWConv() only valid for k=4 and p1=1\"\n        weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()\n        self.c1 = c1\n        self.conv = [\n            keras.layers.Conv2DTranspose(\n                filters=1,\n                kernel_size=k,\n                strides=s,\n                padding=\"VALID\",\n                output_padding=p2,\n                use_bias=True,\n                
kernel_initializer=keras.initializers.Constant(weight[..., i : i + 1]),\n                bias_initializer=keras.initializers.Constant(bias[i]),\n            )\n            for i in range(c1)\n        ]\n\n    def call(self, inputs):\n        \"\"\"Processes input through parallel convolutions and concatenates results, trimming border pixels.\"\"\"\n        return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]\n\n\nclass TFFocus(keras.layers.Layer):\n    \"\"\"Focuses spatial information into channel space using pixel shuffling and convolution for TensorFlow models.\"\"\"\n\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):\n        \"\"\"Initializes TFFocus layer to focus width and height information into channel space with custom convolution\n        parameters.\n\n        Inputs are ch_in, ch_out, kernel, stride, padding, groups.\n        \"\"\"\n        super().__init__()\n        self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)\n\n    def call(self, inputs):\n        \"\"\"Performs pixel shuffling and convolution on input tensor, downsampling by 2 and expanding channels by 4.\n\n        Example x(b,w,h,c) -> y(b,w/2,h/2,4c).\n        \"\"\"\n        inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]\n        return self.conv(tf.concat(inputs, 3))\n\n\nclass TFBottleneck(keras.layers.Layer):\n    \"\"\"Implements a TensorFlow bottleneck layer with optional shortcut connections for efficient feature extraction.\"\"\"\n\n    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):\n        \"\"\"Initializes a standard bottleneck layer for TensorFlow models, expanding and contracting channels with\n        optional shortcut.\n\n        Arguments are ch_in, ch_out, shortcut, groups, expansion.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)\n        self.add = shortcut and c1 == c2\n\n    def call(self, inputs):\n        \"\"\"Performs forward pass; if shortcut is True & input/output channels match, adds input to the convolution\n        result.\n        \"\"\"\n        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))\n\n\nclass TFCrossConv(keras.layers.Layer):\n    \"\"\"Implements a cross convolutional layer with optional expansion, grouping, and shortcut for TensorFlow.\"\"\"\n\n    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):\n        \"\"\"Initializes cross convolution layer with optional expansion, grouping, and shortcut addition capabilities.\"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)\n        self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)\n        self.add = shortcut and c1 == c2\n\n    def call(self, inputs):\n        \"\"\"Passes input through two convolutions optionally adding the input if channel dimensions match.\"\"\"\n        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))\n\n\nclass TFConv2d(keras.layers.Layer):\n    \"\"\"Implements a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D for specified filters and stride.\"\"\"\n\n    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):\n        \"\"\"Initializes a TensorFlow 2D convolution layer, mimicking PyTorch's 
nn.Conv2D functionality for given filter\n        sizes and stride.\n        \"\"\"\n        super().__init__()\n        assert g == 1, \"TF v2.2 Conv2D does not support 'groups' argument\"\n        self.conv = keras.layers.Conv2D(\n            filters=c2,\n            kernel_size=k,\n            strides=s,\n            padding=\"VALID\",\n            use_bias=bias,\n            kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()),\n            bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None,\n        )\n\n    def call(self, inputs):\n        \"\"\"Applies a convolution operation to the inputs and returns the result.\"\"\"\n        return self.conv(inputs)\n\n\nclass TFBottleneckCSP(keras.layers.Layer):\n    \"\"\"Implements a CSP bottleneck layer for TensorFlow models to enhance gradient flow and efficiency.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):\n        \"\"\"Initializes CSP bottleneck layer with specified channel sizes, count, shortcut option, groups, and expansion\n        ratio.\n\n        Inputs are ch_in, ch_out, number, shortcut, groups, expansion.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)\n        self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)\n        self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)\n        self.bn = TFBN(w.bn)\n        self.act = lambda x: keras.activations.swish(x)\n        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])\n\n    def call(self, inputs):\n        \"\"\"Processes input through the model layers, concatenates, normalizes, activates, and reduces the output\n        dimensions.\n        \"\"\"\n        y1 = self.cv3(self.m(self.cv1(inputs)))\n        y2 = self.cv2(inputs)\n        return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))\n\n\nclass TFC3(keras.layers.Layer):\n    \"\"\"CSP bottleneck layer with 3 convolutions for TensorFlow, supporting optional shortcuts and group convolutions.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):\n        \"\"\"Initializes CSP Bottleneck with 3 convolutions, supporting optional shortcuts and group convolutions.\n\n        Inputs are ch_in, ch_out, number, shortcut, groups, expansion.\n        \"\"\"\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)\n        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)\n        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])\n\n    def call(self, inputs):\n        \"\"\"Processes input through a sequence of transformations for object detection (YOLOv5).\n\n        See https://github.com/ultralytics/yolov5.\n        \"\"\"\n        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))\n\n\nclass TFC3x(keras.layers.Layer):\n    \"\"\"A TensorFlow layer for enhanced feature extraction using cross-convolutions in object detection models.\"\"\"\n\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):\n        \"\"\"Initializes layer with cross-convolutions for enhanced feature extraction in object detection models.\n\n        Inputs are ch_in, ch_out, number, shortcut, groups, expansion.\n        \"\"\"\n        
super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)\n        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)\n        self.m = keras.Sequential(\n            [TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)]\n        )\n\n    def call(self, inputs):\n        \"\"\"Processes input through cascaded convolutions and merges features, returning the final tensor output.\"\"\"\n        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))\n\n\nclass TFSPP(keras.layers.Layer):\n    \"\"\"Implements spatial pyramid pooling for YOLOv3-SPP with specific channels and kernel sizes.\"\"\"\n\n    def __init__(self, c1, c2, k=(5, 9, 13), w=None):\n        \"\"\"Initializes a YOLOv3-SPP layer with specific input/output channels and kernel sizes for pooling.\"\"\"\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)\n        self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding=\"SAME\") for x in k]\n\n    def call(self, inputs):\n        \"\"\"Processes input through two TFConv layers and concatenates with max-pooled outputs at intermediate stage.\"\"\"\n        x = self.cv1(inputs)\n        return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))\n\n\nclass TFSPPF(keras.layers.Layer):\n    \"\"\"Implements a fast spatial pyramid pooling layer for TensorFlow with optimized feature extraction.\"\"\"\n\n    def __init__(self, c1, c2, k=5, w=None):\n        \"\"\"Initialize a fast spatial pyramid pooling layer with customizable channels, kernel size, and weights.\"\"\"\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)\n        self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding=\"SAME\")\n\n    def call(self, inputs):\n        \"\"\"Executes the model's forward pass, concatenating input features with three max-pooled versions before final\n        convolution.\n        \"\"\"\n        x = self.cv1(inputs)\n        y1 = self.m(x)\n        y2 = self.m(y1)\n        return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))\n\n\nclass TFDetect(keras.layers.Layer):\n    \"\"\"Implements YOLOv5 object detection layer in TensorFlow for predicting bounding boxes and class probabilities.\"\"\"\n\n    def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):\n        \"\"\"Initializes YOLOv5 detection layer for TensorFlow with configurable classes, anchors, channels, and image\n        size.\n        \"\"\"\n        super().__init__()\n        self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)\n        self.nc = nc  # number of classes\n        self.no = nc + 5  # number of outputs per anchor\n        self.nl = len(anchors)  # number of detection layers\n        self.na = len(anchors[0]) // 2  # number of anchors\n        self.grid = [tf.zeros(1)] * self.nl  # init grid\n        self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)\n        self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])\n        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]\n        self.training = False  # set to False after building model\n        
self.imgsz = imgsz\n        for i in range(self.nl):\n            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]\n            self.grid[i] = self._make_grid(nx, ny)\n\n    def call(self, inputs):\n        \"\"\"Performs forward pass through the model layers to predict object bounding boxes and classifications.\"\"\"\n        z = []  # inference output\n        x = []\n        for i in range(self.nl):\n            x.append(self.m[i](inputs[i]))\n            # x(bs,20,20,255) to x(bs,3,20,20,85)\n            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]\n            x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])\n\n            if not self.training:  # inference\n                y = x[i]\n                grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5\n                anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4\n                xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i]  # xy\n                wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid\n                # Normalize xywh to 0-1 to reduce calibration error\n                xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)\n                wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)\n                y = tf.concat([xy, wh, tf.sigmoid(y[..., 4 : 5 + self.nc]), y[..., 5 + self.nc :]], -1)\n                z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))\n\n        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)\n\n    @staticmethod\n    def _make_grid(nx=20, ny=20):\n        \"\"\"Generates a 2D grid of coordinates in (x, y) format with shape [1, 1, ny*nx, 2].\"\"\"\n        # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()\n        xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))\n        return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)\n\n\nclass TFSegment(TFDetect):\n    \"\"\"YOLOv5 segmentation head for TensorFlow, combining detection and segmentation.\"\"\"\n\n    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):\n        \"\"\"Initializes YOLOv5 Segment head with specified channel depths, anchors, and input size for segmentation\n        models.\n        \"\"\"\n        super().__init__(nc, anchors, ch, imgsz, w)\n        self.nm = nm  # number of masks\n        self.npr = npr  # number of protos\n        self.no = 5 + nc + self.nm  # number of outputs per anchor\n        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]  # output conv\n        self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto)  # protos\n        self.detect = TFDetect.call\n\n    def call(self, x):\n        \"\"\"Applies detection and proto layers on input, returning detections and optionally protos if training.\"\"\"\n        p = self.proto(x[0])\n        # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0]))  # (optional) full-size protos\n        p = tf.transpose(p, [0, 3, 1, 2])  # from shape(1,160,160,32) to shape(1,32,160,160)\n        x = self.detect(self, x)\n        return (x, p) if self.training else (x[0], p)\n\n\nclass TFProto(keras.layers.Layer):\n    \"\"\"Implements convolutional and upsampling layers for feature extraction in YOLOv5 segmentation.\"\"\"\n\n    def __init__(self, c1, c_=256, c2=32, w=None):\n        \"\"\"Initialize TFProto layer with convolutional and upsampling for feature extraction and 
transformation.\"\"\"\n        super().__init__()\n        self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)\n        self.upsample = TFUpsample(None, scale_factor=2, mode=\"nearest\")\n        self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)\n        self.cv3 = TFConv(c_, c2, w=w.cv3)\n\n    def call(self, inputs):\n        \"\"\"Performs forward pass through the model, applying convolutions and upscaling on input tensor.\"\"\"\n        return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))\n\n\nclass TFUpsample(keras.layers.Layer):\n    \"\"\"Implements a TensorFlow upsampling layer with specified size, scale factor, and interpolation mode.\"\"\"\n\n    def __init__(self, size, scale_factor, mode, w=None):\n        \"\"\"Initializes a TensorFlow upsampling layer with specified size, scale_factor, and mode, ensuring scale_factor\n        is even.\n\n        Warning: all arguments needed including 'w'\n        \"\"\"\n        super().__init__()\n        assert scale_factor % 2 == 0, \"scale_factor must be multiple of 2\"\n        self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)\n        # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)\n        # with default arguments: align_corners=False, half_pixel_centers=False\n        # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,\n        #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))\n\n    def call(self, inputs):\n        \"\"\"Applies upsample operation to inputs using nearest neighbor interpolation.\"\"\"\n        return self.upsample(inputs)\n\n\nclass TFConcat(keras.layers.Layer):\n    \"\"\"Implements TensorFlow's version of torch.concat() for concatenating tensors along the last dimension.\"\"\"\n\n    def __init__(self, dimension=1, w=None):\n        \"\"\"Initializes a TensorFlow layer for NCHW to NHWC concatenation, requiring dimension=1.\"\"\"\n        super().__init__()\n        assert dimension == 1, \"convert only NCHW to NHWC concat\"\n        self.d = 3\n\n    def call(self, inputs):\n        \"\"\"Concatenates a list of tensors along the last dimension, used for NCHW to NHWC conversion.\"\"\"\n        return tf.concat(inputs, self.d)\n\n\ndef parse_model(d, ch, model, imgsz):\n    \"\"\"Parses a model definition dict `d` to create YOLOv5 model layers, including dynamic channel adjustments.\"\"\"\n    LOGGER.info(f\"\\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}\")\n    anchors, nc, gd, gw, ch_mul = (\n        d[\"anchors\"],\n        d[\"nc\"],\n        d[\"depth_multiple\"],\n        d[\"width_multiple\"],\n        d.get(\"channel_multiple\"),\n    )\n    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors\n    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)\n    if not ch_mul:\n        ch_mul = 8\n\n    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out\n    for i, (f, n, m, args) in enumerate(d[\"backbone\"] + d[\"head\"]):  # from, number, module, args\n        m_str = m\n        m = eval(m) if isinstance(m, str) else m  # eval strings\n        for j, a in enumerate(args):\n            try:\n                args[j] = eval(a) if isinstance(a, str) else a  # eval strings\n            except NameError:\n                pass\n\n        n = max(round(n * gd), 1) if n > 1 else n  # depth gain\n        if m in [\n            nn.Conv2d,\n            Conv,\n            DWConv,\n            
DWConvTranspose2d,\n            Bottleneck,\n            SPP,\n            SPPF,\n            MixConv2d,\n            Focus,\n            CrossConv,\n            BottleneckCSP,\n            C3,\n            C3x,\n        ]:\n            c1, c2 = ch[f], args[0]\n            c2 = make_divisible(c2 * gw, ch_mul) if c2 != no else c2\n\n            args = [c1, c2, *args[1:]]\n            if m in [BottleneckCSP, C3, C3x]:\n                args.insert(2, n)\n                n = 1\n        elif m is nn.BatchNorm2d:\n            args = [ch[f]]\n        elif m is Concat:\n            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)\n        elif m in [Detect, Segment]:\n            args.append([ch[x + 1] for x in f])\n            if isinstance(args[1], int):  # number of anchors\n                args[1] = [list(range(args[1] * 2))] * len(f)\n            if m is Segment:\n                args[3] = make_divisible(args[3] * gw, ch_mul)\n            args.append(imgsz)\n        else:\n            c2 = ch[f]\n\n        tf_m = eval(\"TF\" + m_str.replace(\"nn.\", \"\"))\n        m_ = (\n            keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)])\n            if n > 1\n            else tf_m(*args, w=model.model[i])\n        )  # module\n\n        torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module\n        t = str(m)[8:-2].replace(\"__main__.\", \"\")  # module type\n        np = sum(x.numel() for x in torch_m_.parameters())  # number params\n        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params\n        LOGGER.info(f\"{i:>3}{f!s:>18}{n!s:>3}{np:>10}  {t:<40}{args!s:<30}\")  # print\n        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist\n        layers.append(m_)\n        ch.append(c2)\n    return keras.Sequential(layers), sorted(save)\n\n\nclass TFModel:\n    \"\"\"Implements YOLOv5 model in TensorFlow, supporting TensorFlow, Keras, and TFLite formats for object detection.\"\"\"\n\n    def __init__(self, cfg=\"yolov5s.yaml\", ch=3, nc=None, model=None, imgsz=(640, 640)):\n        \"\"\"Initialize TF YOLOv5 model with specified channels, classes, model instance, and input size.\"\"\"\n        super().__init__()\n        if isinstance(cfg, dict):\n            self.yaml = cfg  # model dict\n        else:  # is *.yaml\n            import yaml  # for torch hub\n\n            self.yaml_file = Path(cfg).name\n            with open(cfg) as f:\n                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict\n\n        # Define model\n        if nc and nc != self.yaml[\"nc\"]:\n            LOGGER.info(f\"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}\")\n            self.yaml[\"nc\"] = nc  # override yaml value\n        self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)\n\n    def predict(\n        self,\n        inputs,\n        tf_nms=False,\n        agnostic_nms=False,\n        topk_per_class=100,\n        topk_all=100,\n        iou_thres=0.45,\n        conf_thres=0.25,\n    ):\n        \"\"\"Runs inference on input data, with an option for TensorFlow NMS.\"\"\"\n        y = []  # outputs\n        x = inputs\n        for m in self.model.layers:\n            if m.f != -1:  # if not from previous layer\n                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers\n\n            x = m(x)  # run\n            y.append(x if m.i in self.savelist 
else None)  # save output\n\n        # Add TensorFlow NMS\n        if tf_nms:\n            boxes = self._xywh2xyxy(x[0][..., :4])\n            probs = x[0][:, :, 4:5]\n            classes = x[0][:, :, 5:]\n            scores = probs * classes\n            if agnostic_nms:\n                nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)\n            else:\n                boxes = tf.expand_dims(boxes, 2)\n                nms = tf.image.combined_non_max_suppression(\n                    boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False\n                )\n            return (nms,)\n        return x  # output [1,6300,85] = [xywh, conf, class0, class1, ...]\n        # x = x[0]  # [x(1,6300,85), ...] to x(6300,85)\n        # xywh = x[..., :4]  # x(6300,4) boxes\n        # conf = x[..., 4:5]  # x(6300,1) confidences\n        # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1)  classes\n        # return tf.concat([conf, cls, xywh], 1)\n\n    @staticmethod\n    def _xywh2xyxy(xywh):\n        \"\"\"Convert box format from [x, y, w, h] to [x1, y1, x2, y2], where xy1=top-left and xy2=bottom- right.\"\"\"\n        x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)\n        return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)\n\n\nclass AgnosticNMS(keras.layers.Layer):\n    \"\"\"Performs agnostic non-maximum suppression (NMS) on detected objects using IoU and confidence thresholds.\"\"\"\n\n    def call(self, input, topk_all, iou_thres, conf_thres):\n        \"\"\"Performs agnostic NMS on input tensors using given thresholds and top-K selection.\"\"\"\n        return tf.map_fn(\n            lambda x: self._nms(x, topk_all, iou_thres, conf_thres),\n            input,\n            fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),\n            name=\"agnostic_nms\",\n        )\n\n    @staticmethod\n    def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):\n        \"\"\"Performs agnostic non-maximum suppression (NMS) on detected objects, filtering based on IoU and confidence\n        thresholds.\n        \"\"\"\n        boxes, classes, scores = x\n        class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)\n        scores_inp = tf.reduce_max(scores, -1)\n        selected_inds = tf.image.non_max_suppression(\n            boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres\n        )\n        selected_boxes = tf.gather(boxes, selected_inds)\n        padded_boxes = tf.pad(\n            selected_boxes,\n            paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],\n            mode=\"CONSTANT\",\n            constant_values=0.0,\n        )\n        selected_scores = tf.gather(scores_inp, selected_inds)\n        padded_scores = tf.pad(\n            selected_scores,\n            paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],\n            mode=\"CONSTANT\",\n            constant_values=-1.0,\n        )\n        selected_classes = tf.gather(class_inds, selected_inds)\n        padded_classes = tf.pad(\n            selected_classes,\n            paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],\n            mode=\"CONSTANT\",\n            constant_values=-1.0,\n        )\n        valid_detections = tf.shape(selected_inds)[0]\n        return padded_boxes, padded_scores, padded_classes, valid_detections\n\n\ndef activations(act=nn.SiLU):\n    \"\"\"Converts PyTorch activations to 
TensorFlow equivalents, supporting LeakyReLU, Hardswish, and SiLU/Swish.\"\"\"\n    if isinstance(act, nn.LeakyReLU):\n        return lambda x: keras.activations.relu(x, alpha=0.1)\n    elif isinstance(act, nn.Hardswish):\n        return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667\n    elif isinstance(act, (nn.SiLU, SiLU)):\n        return lambda x: keras.activations.swish(x)\n    else:\n        raise Exception(f\"no matching TensorFlow activation found for PyTorch activation {act}\")\n\n\ndef representative_dataset_gen(dataset, ncalib=100):\n    \"\"\"Generate representative dataset for calibration by yielding transformed numpy arrays from the input dataset.\"\"\"\n    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):\n        im = np.transpose(img, [1, 2, 0])\n        im = np.expand_dims(im, axis=0).astype(np.float32)\n        im /= 255\n        yield [im]\n        if n >= ncalib:\n            break\n\n\ndef run(\n    weights=ROOT / \"yolov5s.pt\",  # weights path\n    imgsz=(640, 640),  # inference size h,w\n    batch_size=1,  # batch size\n    dynamic=False,  # dynamic batch size\n):\n    # PyTorch model\n    \"\"\"Exports YOLOv5 model from PyTorch to TensorFlow and Keras formats, performing inference for validation.\"\"\"\n    im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image\n    model = attempt_load(weights, device=torch.device(\"cpu\"), inplace=True, fuse=False)\n    _ = model(im)  # inference\n    model.info()\n\n    # TensorFlow model\n    im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image\n    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)\n    _ = tf_model.predict(im)  # inference\n\n    # Keras model\n    im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)\n    keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))\n    keras_model.summary()\n\n    LOGGER.info(\"PyTorch, TensorFlow and Keras models successfully verified.\\nUse export.py for TF model export.\")\n\n\ndef parse_opt():\n    \"\"\"Parses and returns command-line options for model inference, including weights path, image size, batch size, and\n    dynamic batching.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", type=str, default=ROOT / \"yolov5s.pt\", help=\"weights path\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", nargs=\"+\", type=int, default=[640], help=\"inference size h,w\")\n    parser.add_argument(\"--batch-size\", type=int, default=1, help=\"batch size\")\n    parser.add_argument(\"--dynamic\", action=\"store_true\", help=\"dynamic batch size\")\n    opt = parser.parse_args()\n    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes the YOLOv5 model run function with parsed command line options.\"\"\"\n    run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "models/yolo.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nYOLO-specific modules.\n\nUsage:\n    $ python models/yolo.py --cfg yolov5s.yaml\n\"\"\"\n\nimport argparse\nimport contextlib\nimport math\nimport os\nimport platform\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport torch\nimport torch.nn as nn\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nif platform.system() != \"Windows\":\n    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom models.common import (\n    C3,\n    C3SPP,\n    C3TR,\n    SPP,\n    SPPF,\n    Bottleneck,\n    BottleneckCSP,\n    C3Ghost,\n    C3x,\n    Classify,\n    Concat,\n    Contract,\n    Conv,\n    CrossConv,\n    DetectMultiBackend,\n    DWConv,\n    DWConvTranspose2d,\n    Expand,\n    Focus,\n    GhostBottleneck,\n    GhostConv,\n    Proto,\n)\nfrom models.experimental import MixConv2d\nfrom utils.autoanchor import check_anchor_order\nfrom utils.general import LOGGER, check_version, check_yaml, colorstr, make_divisible, print_args\nfrom utils.plots import feature_visualization\nfrom utils.torch_utils import (\n    fuse_conv_and_bn,\n    initialize_weights,\n    model_info,\n    profile,\n    scale_img,\n    select_device,\n    time_sync,\n)\n\ntry:\n    import thop  # for FLOPs computation\nexcept ImportError:\n    thop = None\n\n\nclass Detect(nn.Module):\n    \"\"\"YOLOv5 Detect head for processing input tensors and generating detection outputs in object detection models.\"\"\"\n\n    stride = None  # strides computed during build\n    dynamic = False  # force grid reconstruction\n    export = False  # export mode\n\n    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):\n        \"\"\"Initializes YOLOv5 detection layer with specified classes, anchors, channels, and inplace operations.\"\"\"\n        super().__init__()\n        self.nc = nc  # number of classes\n        self.no = nc + 5  # number of outputs per anchor\n        self.nl = len(anchors)  # number of detection layers\n        self.na = len(anchors[0]) // 2  # number of anchors\n        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid\n        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid\n        self.register_buffer(\"anchors\", torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)\n        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv\n        self.inplace = inplace  # use inplace ops (e.g. 
slice assignment)\n\n    def forward(self, x):\n        \"\"\"Processes input through YOLOv5 layers, altering shape for detection: `x(bs, 3, ny, nx, 85)`.\"\"\"\n        z = []  # inference output\n        for i in range(self.nl):\n            x[i] = self.m[i](x[i])  # conv\n            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)\n            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()\n\n            if not self.training:  # inference\n                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:\n                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)\n\n                if isinstance(self, Segment):  # (boxes + masks)\n                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)\n                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy\n                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh\n                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)\n                else:  # Detect (boxes only)\n                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)\n                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy\n                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh\n                    y = torch.cat((xy, wh, conf), 4)\n                z.append(y.view(bs, self.na * nx * ny, self.no))\n\n        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)\n\n    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, \"1.10.0\")):\n        \"\"\"Generates a mesh grid for anchor boxes with optional compatibility for torch versions < 1.10.\"\"\"\n        d = self.anchors[i].device\n        t = self.anchors[i].dtype\n        shape = 1, self.na, ny, nx, 2  # grid shape\n        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)\n        yv, xv = torch.meshgrid(y, x, indexing=\"ij\") if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility\n        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. 
y = 2.0 * x - 0.5\n        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)\n        return grid, anchor_grid\n\n\nclass Segment(Detect):\n    \"\"\"YOLOv5 Segment head for segmentation models, extending Detect with mask and prototype layers.\"\"\"\n\n    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):\n        \"\"\"Initializes YOLOv5 Segment head with options for mask count, protos, and channel adjustments.\"\"\"\n        super().__init__(nc, anchors, ch, inplace)\n        self.nm = nm  # number of masks\n        self.npr = npr  # number of protos\n        self.no = 5 + nc + self.nm  # number of outputs per anchor\n        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv\n        self.proto = Proto(ch[0], self.npr, self.nm)  # protos\n        self.detect = Detect.forward\n\n    def forward(self, x):\n        \"\"\"Processes input through the network, returning detections and prototypes; adjusts output based on\n        training/export mode.\n        \"\"\"\n        p = self.proto(x[0])\n        x = self.detect(self, x)\n        return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])\n\n\nclass BaseModel(nn.Module):\n    \"\"\"YOLOv5 base model.\"\"\"\n\n    def forward(self, x, profile=False, visualize=False):\n        \"\"\"Executes a single-scale inference or training pass on the YOLOv5 base model, with options for profiling and\n        visualization.\n        \"\"\"\n        return self._forward_once(x, profile, visualize)  # single-scale inference, train\n\n    def _forward_once(self, x, profile=False, visualize=False):\n        \"\"\"Performs a forward pass on the YOLOv5 model, enabling profiling and feature visualization options.\"\"\"\n        y, dt = [], []  # outputs\n        for m in self.model:\n            if m.f != -1:  # if not from previous layer\n                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers\n            if profile:\n                self._profile_one_layer(m, x, dt)\n            x = m(x)  # run\n            y.append(x if m.i in self.save else None)  # save output\n            if visualize:\n                feature_visualization(x, m.type, m.i, save_dir=visualize)\n        return x\n\n    def _profile_one_layer(self, m, x, dt):\n        \"\"\"Profiles a single layer's performance by computing GFLOPs, execution time, and parameters.\"\"\"\n        c = m == self.model[-1]  # is final layer, copy input as inplace fix\n        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1e9 * 2 if thop else 0  # FLOPs\n        t = time_sync()\n        for _ in range(10):\n            m(x.copy() if c else x)\n        dt.append((time_sync() - t) * 100)\n        if m == self.model[0]:\n            LOGGER.info(f\"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module\")\n        LOGGER.info(f\"{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}\")\n        if c:\n            LOGGER.info(f\"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total\")\n\n    def fuse(self):\n        \"\"\"Fuses Conv2d() and BatchNorm2d() layers in the model to improve inference speed.\"\"\"\n        LOGGER.info(\"Fusing layers... 
\")\n        for m in self.model.modules():\n            if isinstance(m, (Conv, DWConv)) and hasattr(m, \"bn\"):\n                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv\n                delattr(m, \"bn\")  # remove batchnorm\n                m.forward = m.forward_fuse  # update forward\n        self.info()\n        return self\n\n    def info(self, verbose=False, img_size=640):\n        \"\"\"Prints model information given verbosity and image size, e.g., `info(verbose=True, img_size=640)`.\"\"\"\n        model_info(self, verbose, img_size)\n\n    def _apply(self, fn):\n        \"\"\"Applies transformations like to(), cpu(), cuda(), half() to model tensors excluding parameters or registered\n        buffers.\n        \"\"\"\n        self = super()._apply(fn)\n        m = self.model[-1]  # Detect()\n        if isinstance(m, (Detect, Segment)):\n            m.stride = fn(m.stride)\n            m.grid = list(map(fn, m.grid))\n            if isinstance(m.anchor_grid, list):\n                m.anchor_grid = list(map(fn, m.anchor_grid))\n        return self\n\n\nclass DetectionModel(BaseModel):\n    \"\"\"YOLOv5 detection model class for object detection tasks, supporting custom configurations and anchors.\"\"\"\n\n    def __init__(self, cfg=\"yolov5s.yaml\", ch=3, nc=None, anchors=None):\n        \"\"\"Initializes YOLOv5 model with configuration file, input channels, number of classes, and custom anchors.\"\"\"\n        super().__init__()\n        if isinstance(cfg, dict):\n            self.yaml = cfg  # model dict\n        else:  # is *.yaml\n            import yaml  # for torch hub\n\n            self.yaml_file = Path(cfg).name\n            with open(cfg, encoding=\"ascii\", errors=\"ignore\") as f:\n                self.yaml = yaml.safe_load(f)  # model dict\n\n        # Define model\n        ch = self.yaml[\"ch\"] = self.yaml.get(\"ch\", ch)  # input channels\n        if nc and nc != self.yaml[\"nc\"]:\n            LOGGER.info(f\"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}\")\n            self.yaml[\"nc\"] = nc  # override yaml value\n        if anchors:\n            LOGGER.info(f\"Overriding model.yaml anchors with anchors={anchors}\")\n            self.yaml[\"anchors\"] = round(anchors)  # override yaml value\n        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist\n        self.names = [str(i) for i in range(self.yaml[\"nc\"])]  # default names\n        self.inplace = self.yaml.get(\"inplace\", True)\n\n        # Build strides, anchors\n        m = self.model[-1]  # Detect()\n        if isinstance(m, (Detect, Segment)):\n\n            def _forward(x):\n                \"\"\"Passes the input 'x' through the model and returns the processed output.\"\"\"\n                return self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)\n\n            s = 256  # 2x min stride\n            m.inplace = self.inplace\n            m.stride = torch.tensor([s / x.shape[-2] for x in _forward(torch.zeros(1, ch, s, s))])  # forward\n            check_anchor_order(m)\n            m.anchors /= m.stride.view(-1, 1, 1)\n            self.stride = m.stride\n            self._initialize_biases()  # only run once\n\n        # Init weights, biases\n        initialize_weights(self)\n        self.info()\n        LOGGER.info(\"\")\n\n    def forward(self, x, augment=False, profile=False, visualize=False):\n        \"\"\"Performs single-scale or augmented inference and may include profiling or visualization.\"\"\"\n        if augment:\n        
    return self._forward_augment(x)  # augmented inference, None\n        return self._forward_once(x, profile, visualize)  # single-scale inference, train\n\n    def _forward_augment(self, x):\n        \"\"\"Performs augmented inference across different scales and flips, returning combined detections.\"\"\"\n        img_size = x.shape[-2:]  # height, width\n        s = [1, 0.83, 0.67]  # scales\n        f = [None, 3, None]  # flips (2-ud, 3-lr)\n        y = []  # outputs\n        for si, fi in zip(s, f):\n            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))\n            yi = self._forward_once(xi)[0]  # forward\n            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save\n            yi = self._descale_pred(yi, fi, si, img_size)\n            y.append(yi)\n        y = self._clip_augmented(y)  # clip augmented tails\n        return torch.cat(y, 1), None  # augmented inference, train\n\n    def _descale_pred(self, p, flips, scale, img_size):\n        \"\"\"De-scales predictions from augmented inference, adjusting for flips and image size.\"\"\"\n        if self.inplace:\n            p[..., :4] /= scale  # de-scale\n            if flips == 2:\n                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud\n            elif flips == 3:\n                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr\n        else:\n            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale\n            if flips == 2:\n                y = img_size[0] - y  # de-flip ud\n            elif flips == 3:\n                x = img_size[1] - x  # de-flip lr\n            p = torch.cat((x, y, wh, p[..., 4:]), -1)\n        return p\n\n    def _clip_augmented(self, y):\n        \"\"\"Clips augmented inference tails for YOLOv5 models, affecting first and last tensors based on grid points and\n        layer counts.\n        \"\"\"\n        nl = self.model[-1].nl  # number of detection layers (P3-P5)\n        g = sum(4**x for x in range(nl))  # grid points\n        e = 1  # exclude layer count\n        i = (y[0].shape[1] // g) * sum(4**x for x in range(e))  # indices\n        y[0] = y[0][:, :-i]  # large\n        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices\n        y[-1] = y[-1][:, i:]  # small\n        return y\n\n    def _initialize_biases(self, cf=None):\n        \"\"\"Initializes biases for YOLOv5's Detect() module, optionally using class frequencies (cf).\n\n        For details see https://arxiv.org/abs/1708.02002 section 3.3.\n        \"\"\"\n        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n        m = self.model[-1]  # Detect() module\n        for mi, s in zip(m.m, m.stride):  # from\n            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)\n            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)\n            b.data[:, 5 : 5 + m.nc] += (\n                math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum())\n            )  # cls\n            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n\nModel = DetectionModel  # retain YOLOv5 'Model' class for backwards compatibility\n\n\nclass SegmentationModel(DetectionModel):\n    \"\"\"YOLOv5 segmentation model for object detection and segmentation tasks with configurable parameters.\"\"\"\n\n    def __init__(self, cfg=\"yolov5s-seg.yaml\", ch=3, nc=None, anchors=None):\n        
\"\"\"Initializes a YOLOv5 segmentation model with configurable params: cfg (str) for configuration, ch (int) for\n        channels, nc (int) for num classes, anchors (list).\n        \"\"\"\n        super().__init__(cfg, ch, nc, anchors)\n\n\nclass ClassificationModel(BaseModel):\n    \"\"\"YOLOv5 classification model for image classification tasks, initialized with a config file or detection model.\"\"\"\n\n    def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):\n        \"\"\"Initializes YOLOv5 model with config file `cfg`, input channels `ch`, number of classes `nc`, and `cuttoff`\n        index.\n        \"\"\"\n        super().__init__()\n        self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)\n\n    def _from_detection_model(self, model, nc=1000, cutoff=10):\n        \"\"\"Creates a classification model from a YOLOv5 detection model, slicing at `cutoff` and adding a classification\n        layer.\n        \"\"\"\n        if isinstance(model, DetectMultiBackend):\n            model = model.model  # unwrap DetectMultiBackend\n        model.model = model.model[:cutoff]  # backbone\n        m = model.model[-1]  # last layer\n        ch = m.conv.in_channels if hasattr(m, \"conv\") else m.cv1.conv.in_channels  # ch into module\n        c = Classify(ch, nc)  # Classify()\n        c.i, c.f, c.type = m.i, m.f, \"models.common.Classify\"  # index, from, type\n        model.model[-1] = c  # replace\n        self.model = model.model\n        self.stride = model.stride\n        self.save = []\n        self.nc = nc\n\n    def _from_yaml(self, cfg):\n        \"\"\"Creates a YOLOv5 classification model from a specified *.yaml configuration file.\"\"\"\n        self.model = None\n\n\ndef parse_model(d, ch):\n    \"\"\"Parses a YOLOv5 model from a dict `d`, configuring layers based on input channels `ch` and model architecture.\"\"\"\n    LOGGER.info(f\"\\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}\")\n    anchors, nc, gd, gw, act, ch_mul = (\n        d[\"anchors\"],\n        d[\"nc\"],\n        d[\"depth_multiple\"],\n        d[\"width_multiple\"],\n        d.get(\"activation\"),\n        d.get(\"channel_multiple\"),\n    )\n    if act:\n        Conv.default_act = eval(act)  # redefine default activation, i.e. 
Conv.default_act = nn.SiLU()\n        LOGGER.info(f\"{colorstr('activation:')} {act}\")  # print\n    if not ch_mul:\n        ch_mul = 8\n    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors\n    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)\n\n    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out\n    for i, (f, n, m, args) in enumerate(d[\"backbone\"] + d[\"head\"]):  # from, number, module, args\n        m = eval(m) if isinstance(m, str) else m  # eval strings\n        for j, a in enumerate(args):\n            with contextlib.suppress(NameError):\n                args[j] = eval(a) if isinstance(a, str) else a  # eval strings\n\n        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain\n        if m in {\n            Conv,\n            GhostConv,\n            Bottleneck,\n            GhostBottleneck,\n            SPP,\n            SPPF,\n            DWConv,\n            MixConv2d,\n            Focus,\n            CrossConv,\n            BottleneckCSP,\n            C3,\n            C3TR,\n            C3SPP,\n            C3Ghost,\n            nn.ConvTranspose2d,\n            DWConvTranspose2d,\n            C3x,\n        }:\n            c1, c2 = ch[f], args[0]\n            if c2 != no:  # if not output\n                c2 = make_divisible(c2 * gw, ch_mul)\n\n            args = [c1, c2, *args[1:]]\n            if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:\n                args.insert(2, n)  # number of repeats\n                n = 1\n        elif m is nn.BatchNorm2d:\n            args = [ch[f]]\n        elif m is Concat:\n            c2 = sum(ch[x] for x in f)\n        # TODO: channel, gw, gd\n        elif m in {Detect, Segment}:\n            args.append([ch[x] for x in f])\n            if isinstance(args[1], int):  # number of anchors\n                args[1] = [list(range(args[1] * 2))] * len(f)\n            if m is Segment:\n                args[3] = make_divisible(args[3] * gw, ch_mul)\n        elif m is Contract:\n            c2 = ch[f] * args[0] ** 2\n        elif m is Expand:\n            c2 = ch[f] // args[0] ** 2\n        else:\n            c2 = ch[f]\n\n        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module\n        t = str(m)[8:-2].replace(\"__main__.\", \"\")  # module type\n        np = sum(x.numel() for x in m_.parameters())  # number params\n        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params\n        LOGGER.info(f\"{i:>3}{f!s:>18}{n_:>3}{np:10.0f}  {t:<40}{args!s:<30}\")  # print\n        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist\n        layers.append(m_)\n        if i == 0:\n            ch = []\n        ch.append(c2)\n    return nn.Sequential(*layers), sorted(save)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--cfg\", type=str, default=\"yolov5s.yaml\", help=\"model.yaml\")\n    parser.add_argument(\"--batch-size\", type=int, default=1, help=\"total batch size for all GPUs\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 
0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--profile\", action=\"store_true\", help=\"profile model speed\")\n    parser.add_argument(\"--line-profile\", action=\"store_true\", help=\"profile model speed layer by layer\")\n    parser.add_argument(\"--test\", action=\"store_true\", help=\"test all yolo*.yaml\")\n    opt = parser.parse_args()\n    opt.cfg = check_yaml(opt.cfg)  # check YAML\n    print_args(vars(opt))\n    device = select_device(opt.device)\n\n    # Create model\n    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)\n    model = Model(opt.cfg).to(device)\n\n    # Options\n    if opt.line_profile:  # profile layer by layer\n        model(im, profile=True)\n\n    elif opt.profile:  # profile forward-backward\n        results = profile(input=im, ops=[model], n=3)\n\n    elif opt.test:  # test all models\n        for cfg in Path(ROOT / \"models\").rglob(\"yolo*.yaml\"):\n            try:\n                _ = Model(cfg)\n            except Exception as e:\n                print(f\"Error in {cfg}: {e}\")\n\n    else:  # report fused model summary\n        model.fuse()\n"
  },
  {
    "path": "models/yolov5l.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth multiple\nwidth_multiple: 1.0 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/yolov5m.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.67 # model depth multiple\nwidth_multiple: 0.75 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/yolov5n.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.25 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/yolov5s.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth multiple\nwidth_multiple: 0.50 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "models/yolov5x.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.33 # model depth multiple\nwidth_multiple: 1.25 # layer channel multiple\nanchors:\n  - [10, 13, 16, 30, 33, 23] # P3/8\n  - [30, 61, 62, 45, 59, 119] # P4/16\n  - [116, 90, 156, 198, 373, 326] # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [\n    [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2\n    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4\n    [-1, 3, C3, [128]],\n    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8\n    [-1, 6, C3, [256]],\n    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16\n    [-1, 9, C3, [512]],\n    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32\n    [-1, 3, C3, [1024]],\n    [-1, 1, SPPF, [1024, 5]], # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead: [\n    [-1, 1, Conv, [512, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 6], 1, Concat, [1]], # cat backbone P4\n    [-1, 3, C3, [512, False]], # 13\n\n    [-1, 1, Conv, [256, 1, 1]],\n    [-1, 1, nn.Upsample, [None, 2, \"nearest\"]],\n    [[-1, 4], 1, Concat, [1]], # cat backbone P3\n    [-1, 3, C3, [256, False]], # 17 (P3/8-small)\n\n    [-1, 1, Conv, [256, 3, 2]],\n    [[-1, 14], 1, Concat, [1]], # cat head P4\n    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)\n\n    [-1, 1, Conv, [512, 3, 2]],\n    [[-1, 10], 1, Concat, [1]], # cat head P5\n    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)\n\n    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "pyproject.toml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Overview:\n# This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library.\n# It defines essential project metadata, dependencies, and settings used to develop and deploy the library.\n\n# Key Sections:\n# - [build-system]: Specifies the build requirements and backend (e.g., setuptools, wheel).\n# - [project]: Includes details like name, version, description, authors, dependencies and more.\n# - [project.optional-dependencies]: Provides additional, optional packages for extended features.\n# - [tool.*]: Configures settings for various tools (pytest, yapf, etc.) used in the project.\n\n# Installation:\n# The Ultralytics library can be installed using the command: 'pip install ultralytics'\n# For development purposes, you can install the package in editable mode with: 'pip install -e .'\n# This approach allows for real-time code modifications without the need for re-installation.\n\n# Documentation:\n# For comprehensive documentation and usage instructions, visit: https://docs.ultralytics.com\n\n[build-system]\nrequires = [\"setuptools>=43.0.0\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\n\n# Project settings -----------------------------------------------------------------------------------------------------\n[project]\nversion = \"7.0.0\"\nname = \"YOLOv5\"\ndescription = \"Ultralytics YOLOv5 for SOTA object detection, instance segmentation and image classification.\"\nreadme = \"README.md\"\nrequires-python = \">=3.8\"\nlicense = { \"text\" = \"AGPL-3.0\" }\nkeywords = [\"machine-learning\", \"deep-learning\", \"computer-vision\", \"ML\", \"DL\", \"AI\", \"YOLO\", \"YOLOv3\", \"YOLOv5\", \"YOLOv8\", \"HUB\", \"Ultralytics\"]\nauthors = [\n    { name = \"Glenn Jocher\" },\n    { name = \"Ayush Chaurasia\" },\n    { name = \"Jing Qiu\" }\n]\nmaintainers = [\n    { name = \"Glenn Jocher\" },\n    { name = \"Ayush Chaurasia\" },\n    { name = \"Jing Qiu\" }\n]\nclassifiers = [\n    \"Development Status :: 4 - Beta\",\n    \"Intended Audience :: Developers\",\n    \"Intended Audience :: Education\",\n    \"Intended Audience :: Science/Research\",\n    \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.8\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Topic :: Software Development\",\n    \"Topic :: Scientific/Engineering\",\n    \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n    \"Topic :: Scientific/Engineering :: Image Recognition\",\n    \"Operating System :: POSIX :: Linux\",\n    \"Operating System :: MacOS\",\n    \"Operating System :: Microsoft :: Windows\",\n]\n\n# Required dependencies ------------------------------------------------------------------------------------------------\ndependencies = [\n    \"matplotlib>=3.3.0\",\n    \"numpy>=1.22.2\",\n    \"opencv-python>=4.6.0\",\n    \"pillow>=7.1.2\",\n    \"pyyaml>=5.3.1\",\n    \"requests>=2.23.0\",\n    \"scipy>=1.4.1\",\n    \"torch>=1.8.0\",\n    \"torchvision>=0.9.0\",\n    \"tqdm>=4.64.0\", # progress bars\n    \"psutil\", # system utilization\n    \"py-cpuinfo\", # display CPU info\n    \"thop>=0.1.1\", # FLOPs computation\n    \"pandas>=1.1.4\",\n    \"seaborn>=0.11.0\", # plotting\n    \"packaging\", # general utilities\n    
\"ultralytics>=8.2.64\"\n]\n\n# Optional dependencies ------------------------------------------------------------------------------------------------\n[project.optional-dependencies]\ndev = [\n    \"ipython\",\n    \"check-manifest\",\n    \"pytest\",\n    \"pytest-cov\",\n    \"coverage[toml]\",\n    \"mkdocs-material\",\n    \"mkdocstrings[python]\",\n    \"mkdocs-ultralytics-plugin>=0.0.34\", # for meta descriptions and images, dates and authors\n]\nexport = [\n    \"onnx>=1.12.0\", # ONNX export\n    \"coremltools>=7.0; platform_system != 'Windows'\", # CoreML only supported on macOS and Linux\n    \"openvino-dev>=2023.0\", # OpenVINO export\n    \"tensorflow>=2.0.0,<=2.19.0\", # TF bug https://github.com/ultralytics/ultralytics/issues/5161\n    \"tensorflowjs>=3.9.0\", # TF.js export, automatically installs tensorflow\n    \"keras>=3.5.0,<=3.12.0\", # pin to avoid XNNPACK errors\n]\n# tensorflow>=2.4.1,<=2.13.1  # TF exports (-cpu, -aarch64, -macos)\n# tflite-support  # for TFLite model metadata\n# scikit-learn==0.19.2  # CoreML quantization\n# nvidia-pyindex  # TensorRT export\n# nvidia-tensorrt  # TensorRT export\nlogging = [\n    \"comet\", # https://docs.ultralytics.com/integrations/comet/\n    \"tensorboard>=2.13.0\",\n    \"dvclive>=2.12.0\",\n]\nextra = [\n    \"ipython\", # interactive notebook\n    \"albumentations>=1.0.3\", # training augmentations\n    \"pycocotools>=2.0.6\", # COCO mAP\n]\n\n[project.urls]\n\"Bug Reports\" = \"https://github.com/ultralytics/yolov5/issues\"\n\"Funding\" = \"https://ultralytics.com\"\n\"Source\" = \"https://github.com/ultralytics/yolov5/\"\n\n# Tools settings -------------------------------------------------------------------------------------------------------\n[tool.pytest]\nnorecursedirs = [\".git\", \"dist\", \"build\"]\naddopts = \"--doctest-modules --durations=30 --color=yes\"\n\n[tool.isort]\nline_length = 120\nmulti_line_output = 0\n\n[tool.ruff]\nline-length = 120\n\n[tool.docformatter]\nwrap-summaries = 120\nwrap-descriptions = 120\nin-place = true\npre-summary-newline = true\nclose-quotes-on-newline = true\n\n[tool.codespell]\nignore-words-list = \"crate,nd,strack,dota,ane,segway,fo,gool,winn,commend\"\nskip = '*.csv,*venv*,docs/??/,docs/mkdocs_??.yml'\n"
  },
  {
    "path": "requirements.txt",
    "content": "# YOLOv5 requirements\n# Usage: pip install -r requirements.txt\n\n# Base ------------------------------------------------------------------------\ngitpython>=3.1.30\nmatplotlib>=3.3\nnumpy>=1.23.5\nopencv-python>=4.1.1\npillow>=10.3.0\npsutil  # system resources\nPyYAML>=5.3.1\nrequests>=2.32.2\nscipy>=1.4.1\nthop>=0.1.1  # FLOPs computation\ntorch>=1.8.0  # see https://pytorch.org/get-started/locally (recommended)\ntorchvision>=0.9.0\ntqdm>=4.66.3\nultralytics>=8.2.64  # https://ultralytics.com\n# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012\n\n# Logging ---------------------------------------------------------------------\n# tensorboard>=2.4.1\n# clearml>=1.2.0\n# comet\n\n# Plotting --------------------------------------------------------------------\npandas>=1.1.4\nseaborn>=0.11.0\n\n# Export ----------------------------------------------------------------------\n# coremltools>=6.0  # CoreML export\n# onnx>=1.10.0  # ONNX export\n# onnx-simplifier>=0.4.1  # ONNX simplifier\n# nvidia-pyindex  # TensorRT export\n# nvidia-tensorrt  # TensorRT export\n# scikit-learn<=1.1.2  # CoreML quantization\n# tensorflow>=2.4.0,<=2.13.1  # TF exports (-cpu, -aarch64, -macos)\n# tensorflowjs>=3.9.0  # TF.js export\n# openvino-dev>=2023.0  # OpenVINO export\n\n# Deploy ----------------------------------------------------------------------\npackaging  # Migration of deprecated pkg_resources packages\nsetuptools>=70.0.0 # Snyk vulnerability fix\n# tritonclient[all]~=2.24.0\n\n# Extras ----------------------------------------------------------------------\n# ipython  # interactive notebook\n# mss  # screenshots\n# albumentations>=1.0.3\n# pycocotools>=2.0.6  # COCO mAP\nurllib3>=2.6.0 ; python_version > \"3.8\" # not directly required, pinned by Snyk to avoid a vulnerability\n"
  },
  {
    "path": "segment/predict.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 segmentation inference on images, videos, directories, streams, etc.\n\nUsage - sources:\n    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                               # webcam\n                                                                  img.jpg                         # image\n                                                                  vid.mp4                         # video\n                                                                  screen                          # screenshot\n                                                                  path/                           # directory\n                                                                  list.txt                        # list of images\n                                                                  list.streams                    # list of streams\n                                                                  'path/*.jpg'                    # glob\n                                                                  'https://youtu.be/LNwODJXcvt4'  # YouTube\n                                                                  'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n\nUsage - formats:\n    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch\n                                          yolov5s-seg.torchscript        # TorchScript\n                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                                          yolov5s-seg_openvino_model     # OpenVINO\n                                          yolov5s-seg.engine             # TensorRT\n                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)\n                                          yolov5s-seg_saved_model        # TensorFlow SavedModel\n                                          yolov5s-seg.pb                 # TensorFlow GraphDef\n                                          yolov5s-seg.tflite             # TensorFlow Lite\n                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU\n                                          yolov5s-seg_paddle_model       # PaddlePaddle\n\"\"\"\n\nimport argparse\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport torch\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom ultralytics.utils.plotting import Annotator, colors, save_one_box\n\nfrom models.common import DetectMultiBackend\nfrom utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams\nfrom utils.general import (\n    LOGGER,\n    Profile,\n    check_file,\n    check_img_size,\n    check_imshow,\n    check_requirements,\n    colorstr,\n    cv2,\n    increment_path,\n    non_max_suppression,\n    print_args,\n    scale_boxes,\n    scale_segments,\n    strip_optimizer,\n)\nfrom utils.segment.general import masks2segments, process_mask, process_mask_native\nfrom utils.torch_utils import select_device, smart_inference_mode\n\n\n@smart_inference_mode()\ndef run(\n    weights=ROOT / \"yolov5s-seg.pt\",  # model.pt path(s)\n    source=ROOT / \"data/images\",  # file/dir/URL/glob/screen/0(webcam)\n    data=ROOT / 
\"data/coco128.yaml\",  # dataset.yaml path\n    imgsz=(640, 640),  # inference size (height, width)\n    conf_thres=0.25,  # confidence threshold\n    iou_thres=0.45,  # NMS IOU threshold\n    max_det=1000,  # maximum detections per image\n    device=\"\",  # cuda device, i.e. 0 or 0,1,2,3 or cpu\n    view_img=False,  # show results\n    save_txt=False,  # save results to *.txt\n    save_conf=False,  # save confidences in --save-txt labels\n    save_crop=False,  # save cropped prediction boxes\n    nosave=False,  # do not save images/videos\n    classes=None,  # filter by class: --class 0, or --class 0 2 3\n    agnostic_nms=False,  # class-agnostic NMS\n    augment=False,  # augmented inference\n    visualize=False,  # visualize features\n    update=False,  # update all models\n    project=ROOT / \"runs/predict-seg\",  # save results to project/name\n    name=\"exp\",  # save results to project/name\n    exist_ok=False,  # existing project/name ok, do not increment\n    line_thickness=3,  # bounding box thickness (pixels)\n    hide_labels=False,  # hide labels\n    hide_conf=False,  # hide confidences\n    half=False,  # use FP16 half-precision inference\n    dnn=False,  # use OpenCV DNN for ONNX inference\n    vid_stride=1,  # video frame-rate stride\n    retina_masks=False,\n):\n    \"\"\"Run YOLOv5 segmentation inference on diverse sources including images, videos, directories, and streams.\"\"\"\n    source = str(source)\n    save_img = not nosave and not source.endswith(\".txt\")  # save inference images\n    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)\n    is_url = source.lower().startswith((\"rtsp://\", \"rtmp://\", \"http://\", \"https://\"))\n    webcam = source.isnumeric() or source.endswith(\".streams\") or (is_url and not is_file)\n    screenshot = source.lower().startswith(\"screen\")\n    if is_url and is_file:\n        source = check_file(source)  # download\n\n    # Directories\n    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run\n    (save_dir / \"labels\" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir\n\n    # Load model\n    device = select_device(device)\n    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n    stride, names, pt = model.stride, model.names, model.pt\n    imgsz = check_img_size(imgsz, s=stride)  # check image size\n\n    # Dataloader\n    bs = 1  # batch_size\n    if webcam:\n        view_img = check_imshow(warn=True)\n        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n        bs = len(dataset)\n    elif screenshot:\n        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)\n    else:\n        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n    vid_path, vid_writer = [None] * bs, [None] * bs\n\n    # Run inference\n    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup\n    seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))\n    for path, im, im0s, vid_cap, s in dataset:\n        with dt[0]:\n            im = torch.from_numpy(im).to(model.device)\n            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32\n            im /= 255  # 0 - 255 to 0.0 - 1.0\n            if len(im.shape) == 3:\n                im = im[None]  # expand for batch dim\n\n        # Inference\n        with dt[1]:\n            visualize = increment_path(save_dir / 
Path(path).stem, mkdir=True) if visualize else False\n            pred, proto = model(im, augment=augment, visualize=visualize)[:2]\n\n        # NMS\n        with dt[2]:\n            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)\n\n        # Second-stage classifier (optional)\n        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)\n\n        # Process predictions\n        for i, det in enumerate(pred):  # per image\n            seen += 1\n            if webcam:  # batch_size >= 1\n                p, im0, frame = path[i], im0s[i].copy(), dataset.count\n                s += f\"{i}: \"\n            else:\n                p, im0, frame = path, im0s.copy(), getattr(dataset, \"frame\", 0)\n\n            p = Path(p)  # to Path\n            save_path = str(save_dir / p.name)  # im.jpg\n            txt_path = str(save_dir / \"labels\" / p.stem) + (\"\" if dataset.mode == \"image\" else f\"_{frame}\")  # im.txt\n            s += \"{:g}x{:g} \".format(*im.shape[2:])  # print string\n            imc = im0.copy() if save_crop else im0  # for save_crop\n            annotator = Annotator(im0, line_width=line_thickness, example=str(names))\n            if len(det):\n                if retina_masks:\n                    # scale bbox first the crop masks\n                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size\n                    masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2])  # HWC\n                else:\n                    masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC\n                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size\n\n                # Segments\n                if save_txt:\n                    segments = [\n                        scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)\n                        for x in reversed(masks2segments(masks))\n                    ]\n\n                # Print results\n                for c in det[:, 5].unique():\n                    n = (det[:, 5] == c).sum()  # detections per class\n                    s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \"  # add to string\n\n                # Mask plotting\n                annotator.masks(\n                    masks,\n                    colors=[colors(x, True) for x in det[:, 5]],\n                    im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous()\n                    / 255\n                    if retina_masks\n                    else im[i],\n                )\n\n                # Write results\n                for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):\n                    if save_txt:  # Write to file\n                        seg = segments[j].reshape(-1)  # (n,2) to (n*2)\n                        line = (cls, *seg, conf) if save_conf else (cls, *seg)  # label format\n                        with open(f\"{txt_path}.txt\", \"a\") as f:\n                            f.write((\"%g \" * len(line)).rstrip() % line + \"\\n\")\n\n                    if save_img or save_crop or view_img:  # Add bbox to image\n                        c = int(cls)  # integer class\n                        label = None if hide_labels else (names[c] if hide_conf else f\"{names[c]} {conf:.2f}\")\n                        annotator.box_label(xyxy, label, 
color=colors(c, True))\n                        # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)\n                    if save_crop:\n                        save_one_box(xyxy, imc, file=save_dir / \"crops\" / names[c] / f\"{p.stem}.jpg\", BGR=True)\n\n            # Stream results\n            im0 = annotator.result()\n            if view_img:\n                if platform.system() == \"Linux\" and p not in windows:\n                    windows.append(p)\n                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)\n                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])\n                cv2.imshow(str(p), im0)\n                if cv2.waitKey(1) == ord(\"q\"):  # 1 millisecond\n                    exit()\n\n            # Save results (image with detections)\n            if save_img:\n                if dataset.mode == \"image\":\n                    cv2.imwrite(save_path, im0)\n                else:  # 'video' or 'stream'\n                    if vid_path[i] != save_path:  # new video\n                        vid_path[i] = save_path\n                        if isinstance(vid_writer[i], cv2.VideoWriter):\n                            vid_writer[i].release()  # release previous video writer\n                        if vid_cap:  # video\n                            fps = vid_cap.get(cv2.CAP_PROP_FPS)\n                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n                        else:  # stream\n                            fps, w, h = 30, im0.shape[1], im0.shape[0]\n                        save_path = str(Path(save_path).with_suffix(\".mp4\"))  # force *.mp4 suffix on results videos\n                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n                    vid_writer[i].write(im0)\n\n        # Print time (inference-only)\n        LOGGER.info(f\"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms\")\n\n    # Print results\n    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image\n    LOGGER.info(f\"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}\" % t)\n    if save_txt or save_img:\n        s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else \"\"\n        LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n    if update:\n        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)\n\n\ndef parse_opt():\n    \"\"\"Parses command-line options for YOLOv5 inference including model paths, data sources, inference settings, and\n    output preferences.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s-seg.pt\", help=\"model path(s)\")\n    parser.add_argument(\"--source\", type=str, default=ROOT / \"data/images\", help=\"file/dir/URL/glob/screen/0(webcam)\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"(optional) dataset.yaml path\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", nargs=\"+\", type=int, default=[640], help=\"inference size h,w\")\n    parser.add_argument(\"--conf-thres\", type=float, default=0.25, help=\"confidence threshold\")\n    parser.add_argument(\"--iou-thres\", type=float, default=0.45, help=\"NMS IoU threshold\")\n    
parser.add_argument(\"--max-det\", type=int, default=1000, help=\"maximum detections per image\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--view-img\", action=\"store_true\", help=\"show results\")\n    parser.add_argument(\"--save-txt\", action=\"store_true\", help=\"save results to *.txt\")\n    parser.add_argument(\"--save-conf\", action=\"store_true\", help=\"save confidences in --save-txt labels\")\n    parser.add_argument(\"--save-crop\", action=\"store_true\", help=\"save cropped prediction boxes\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"do not save images/videos\")\n    parser.add_argument(\"--classes\", nargs=\"+\", type=int, help=\"filter by class: --classes 0, or --classes 0 2 3\")\n    parser.add_argument(\"--agnostic-nms\", action=\"store_true\", help=\"class-agnostic NMS\")\n    parser.add_argument(\"--augment\", action=\"store_true\", help=\"augmented inference\")\n    parser.add_argument(\"--visualize\", action=\"store_true\", help=\"visualize features\")\n    parser.add_argument(\"--update\", action=\"store_true\", help=\"update all models\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/predict-seg\", help=\"save results to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save results to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--line-thickness\", default=3, type=int, help=\"bounding box thickness (pixels)\")\n    parser.add_argument(\"--hide-labels\", default=False, action=\"store_true\", help=\"hide labels\")\n    parser.add_argument(\"--hide-conf\", default=False, action=\"store_true\", help=\"hide confidences\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--dnn\", action=\"store_true\", help=\"use OpenCV DNN for ONNX inference\")\n    parser.add_argument(\"--vid-stride\", type=int, default=1, help=\"video frame-rate stride\")\n    parser.add_argument(\"--retina-masks\", action=\"store_true\", help=\"whether to plot masks in native resolution\")\n    opt = parser.parse_args()\n    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 model inference with given options, checking for requirements before launching.\"\"\"\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"tensorboard\", \"thop\"))\n    run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "segment/train.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTrain a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5\nrelease.\n\nUsage - Single-GPU training:\n    $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640  # from pretrained (recommended)\n    $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640  # from scratch\n\nUsage - Multi-GPU DDP training:\n    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3\n\nModels:     https://github.com/ultralytics/yolov5/tree/master/models\nDatasets:   https://github.com/ultralytics/yolov5/tree/master/data\nTutorial:   https://docs.ultralytics.com/yolov5/tutorials/train_custom_data\n\"\"\"\n\nimport argparse\nimport math\nimport os\nimport random\nimport subprocess\nimport sys\nimport time\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport yaml\nfrom torch.optim import lr_scheduler\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom ultralytics.utils.patches import torch_load\n\nimport segment.val as validate  # for end-of-epoch mAP\nfrom models.experimental import attempt_load\nfrom models.yolo import SegmentationModel\nfrom utils.autoanchor import check_anchors\nfrom utils.autobatch import check_train_batch_size\nfrom utils.callbacks import Callbacks\nfrom utils.downloads import attempt_download, is_url\nfrom utils.general import (\n    LOGGER,\n    TQDM_BAR_FORMAT,\n    check_amp,\n    check_dataset,\n    check_file,\n    check_git_info,\n    check_git_status,\n    check_img_size,\n    check_requirements,\n    check_suffix,\n    check_yaml,\n    colorstr,\n    get_latest_run,\n    increment_path,\n    init_seeds,\n    intersect_dicts,\n    labels_to_class_weights,\n    labels_to_image_weights,\n    one_cycle,\n    print_args,\n    print_mutation,\n    strip_optimizer,\n    yaml_save,\n)\nfrom utils.loggers import GenericLogger\nfrom utils.plots import plot_evolve, plot_labels\nfrom utils.segment.dataloaders import create_dataloader\nfrom utils.segment.loss import ComputeLoss\nfrom utils.segment.metrics import KEYS, fitness\nfrom utils.segment.plots import plot_images_and_masks, plot_results_with_masks\nfrom utils.torch_utils import (\n    EarlyStopping,\n    ModelEMA,\n    de_parallel,\n    select_device,\n    smart_DDP,\n    smart_optimizer,\n    smart_resume,\n    torch_distributed_zero_first,\n)\n\nLOCAL_RANK = int(os.getenv(\"LOCAL_RANK\", -1))  # https://pytorch.org/docs/stable/elastic/run.html\nRANK = int(os.getenv(\"RANK\", -1))\nWORLD_SIZE = int(os.getenv(\"WORLD_SIZE\", 1))\nGIT_INFO = check_git_info()\n\n\ndef train(hyp, opt, device, callbacks):\n    \"\"\"Trains the YOLOv5 model on a dataset, managing hyperparameters, model optimization, logging, and validation.\n\n    `hyp` is path/to/hyp.yaml or hyp dictionary.\n    \"\"\"\n    (\n        save_dir,\n        epochs,\n        batch_size,\n        weights,\n        single_cls,\n        evolve,\n        data,\n        cfg,\n        resume,\n        noval,\n        nosave,\n        
workers,\n        freeze,\n        mask_ratio,\n    ) = (\n        Path(opt.save_dir),\n        opt.epochs,\n        opt.batch_size,\n        opt.weights,\n        opt.single_cls,\n        opt.evolve,\n        opt.data,\n        opt.cfg,\n        opt.resume,\n        opt.noval,\n        opt.nosave,\n        opt.workers,\n        opt.freeze,\n        opt.mask_ratio,\n    )\n    # callbacks.run('on_pretrain_routine_start')\n\n    # Directories\n    w = save_dir / \"weights\"  # weights dir\n    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir\n    last, best = w / \"last.pt\", w / \"best.pt\"\n\n    # Hyperparameters\n    if isinstance(hyp, str):\n        with open(hyp, errors=\"ignore\") as f:\n            hyp = yaml.safe_load(f)  # load hyps dict\n    LOGGER.info(colorstr(\"hyperparameters: \") + \", \".join(f\"{k}={v}\" for k, v in hyp.items()))\n    opt.hyp = hyp.copy()  # for saving hyps to checkpoints\n\n    # Save run settings\n    if not evolve:\n        yaml_save(save_dir / \"hyp.yaml\", hyp)\n        yaml_save(save_dir / \"opt.yaml\", vars(opt))\n\n    # Loggers\n    data_dict = None\n    if RANK in {-1, 0}:\n        logger = GenericLogger(opt=opt, console_logger=LOGGER)\n\n    # Config\n    plots = not evolve and not opt.noplots  # create plots\n    overlap = not opt.no_overlap\n    cuda = device.type != \"cpu\"\n    init_seeds(opt.seed + 1 + RANK, deterministic=True)\n    with torch_distributed_zero_first(LOCAL_RANK):\n        data_dict = data_dict or check_dataset(data)  # check if None\n    train_path, val_path = data_dict[\"train\"], data_dict[\"val\"]\n    nc = 1 if single_cls else int(data_dict[\"nc\"])  # number of classes\n    names = {0: \"item\"} if single_cls and len(data_dict[\"names\"]) != 1 else data_dict[\"names\"]  # class names\n    is_coco = isinstance(val_path, str) and val_path.endswith(\"coco/val2017.txt\")  # COCO dataset\n\n    # Model\n    check_suffix(weights, \".pt\")  # check weights\n    pretrained = weights.endswith(\".pt\")\n    if pretrained:\n        with torch_distributed_zero_first(LOCAL_RANK):\n            weights = attempt_download(weights)  # download if not found locally\n        ckpt = torch_load(weights, map_location=\"cpu\")  # load checkpoint to CPU to avoid CUDA memory leak\n        model = SegmentationModel(cfg or ckpt[\"model\"].yaml, ch=3, nc=nc, anchors=hyp.get(\"anchors\")).to(device)\n        exclude = [\"anchor\"] if (cfg or hyp.get(\"anchors\")) and not resume else []  # exclude keys\n        csd = ckpt[\"model\"].float().state_dict()  # checkpoint state_dict as FP32\n        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect\n        model.load_state_dict(csd, strict=False)  # load\n        LOGGER.info(f\"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}\")  # report\n    else:\n        model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get(\"anchors\")).to(device)  # create\n    amp = check_amp(model)  # check AMP\n\n    # Freeze\n    freeze = [f\"model.{x}.\" for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze\n    for k, v in model.named_parameters():\n        v.requires_grad = True  # train all layers\n        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)\n        if any(x in k for x in freeze):\n            LOGGER.info(f\"freezing {k}\")\n            v.requires_grad = False\n\n    # Image size\n    gs = max(int(model.stride.max()), 32)  # grid size (max stride)\n    
imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple\n\n    # Batch size\n    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size\n        batch_size = check_train_batch_size(model, imgsz, amp)\n        logger.update_params({\"batch_size\": batch_size})\n        # loggers.on_params_update({\"batch_size\": batch_size})\n\n    # Optimizer\n    nbs = 64  # nominal batch size\n    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing\n    hyp[\"weight_decay\"] *= batch_size * accumulate / nbs  # scale weight_decay\n    optimizer = smart_optimizer(model, opt.optimizer, hyp[\"lr0\"], hyp[\"momentum\"], hyp[\"weight_decay\"])\n\n    # Scheduler\n    if opt.cos_lr:\n        lf = one_cycle(1, hyp[\"lrf\"], epochs)  # cosine 1->hyp['lrf']\n    else:\n\n        def lf(x):\n            \"\"\"Linear learning rate scheduler decreasing from 1 to hyp['lrf'] over 'epochs'.\"\"\"\n            return (1 - x / epochs) * (1.0 - hyp[\"lrf\"]) + hyp[\"lrf\"]  # linear\n\n    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)\n\n    # EMA\n    ema = ModelEMA(model) if RANK in {-1, 0} else None\n\n    # Resume\n    best_fitness, start_epoch = 0.0, 0\n    if pretrained:\n        if resume:\n            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)\n        del ckpt, csd\n\n    # DP mode\n    if cuda and RANK == -1 and torch.cuda.device_count() > 1:\n        LOGGER.warning(\n            \"WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\\n\"\n            \"See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.\"\n        )\n        model = torch.nn.DataParallel(model)\n\n    # SyncBatchNorm\n    if opt.sync_bn and cuda and RANK != -1:\n        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)\n        LOGGER.info(\"Using SyncBatchNorm()\")\n\n    # Trainloader\n    train_loader, dataset = create_dataloader(\n        train_path,\n        imgsz,\n        batch_size // WORLD_SIZE,\n        gs,\n        single_cls,\n        hyp=hyp,\n        augment=True,\n        cache=None if opt.cache == \"val\" else opt.cache,\n        rect=opt.rect,\n        rank=LOCAL_RANK,\n        workers=workers,\n        image_weights=opt.image_weights,\n        quad=opt.quad,\n        prefix=colorstr(\"train: \"),\n        shuffle=True,\n        mask_downsample_ratio=mask_ratio,\n        overlap_mask=overlap,\n    )\n    labels = np.concatenate(dataset.labels, 0)\n    mlc = int(labels[:, 0].max())  # max label class\n    assert mlc < nc, f\"Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}\"\n\n    # Process 0\n    if RANK in {-1, 0}:\n        val_loader = create_dataloader(\n            val_path,\n            imgsz,\n            batch_size // WORLD_SIZE * 2,\n            gs,\n            single_cls,\n            hyp=hyp,\n            cache=None if noval else opt.cache,\n            rect=True,\n            rank=-1,\n            workers=workers * 2,\n            pad=0.5,\n            mask_downsample_ratio=mask_ratio,\n            overlap_mask=overlap,\n            prefix=colorstr(\"val: \"),\n        )[0]\n\n        if not resume:\n            if not opt.noautoanchor:\n                check_anchors(dataset, model=model, thr=hyp[\"anchor_t\"], imgsz=imgsz)  # run AutoAnchor\n            model.half().float()  # pre-reduce anchor precision\n\n            if plots:\n                plot_labels(labels, names, save_dir)\n        # callbacks.run('on_pretrain_routine_end', labels, names)\n\n    # DDP mode\n    if cuda and RANK != -1:\n        model = smart_DDP(model)\n\n    # Model attributes\n    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)\n    hyp[\"box\"] *= 3 / nl  # scale to layers\n    hyp[\"cls\"] *= nc / 80 * 3 / nl  # scale to classes and layers\n    hyp[\"obj\"] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers\n    hyp[\"label_smoothing\"] = opt.label_smoothing\n    model.nc = nc  # attach number of classes to model\n    model.hyp = hyp  # attach hyperparameters to model\n    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights\n    model.names = names\n\n    # Start training\n    t0 = time.time()\n    nb = len(train_loader)  # number of batches\n    nw = max(round(hyp[\"warmup_epochs\"] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)\n    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training\n    last_opt_step = -1\n    maps = np.zeros(nc)  # mAP per class\n    results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)\n    scheduler.last_epoch = start_epoch - 1  # do not move\n    scaler = torch.cuda.amp.GradScaler(enabled=amp)\n    stopper, stop = EarlyStopping(patience=opt.patience), False\n    compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class\n    # callbacks.run('on_train_start')\n    LOGGER.info(\n        f\"Image sizes {imgsz} train, {imgsz} val\\n\"\n        f\"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\\n\"\n        f\"Logging results to {colorstr('bold', save_dir)}\\n\"\n        f\"Starting training for {epochs} epochs...\"\n    )\n    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------\n        # callbacks.run('on_train_epoch_start')\n        model.train()\n\n        # Update image weights (optional, single-GPU only)\n        if opt.image_weights:\n            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights\n            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights\n            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx\n\n        # Update mosaic border (optional)\n        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)\n        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders\n\n        mloss = torch.zeros(4, device=device)  # mean losses\n        if RANK 
!= -1:\n            train_loader.sampler.set_epoch(epoch)\n        pbar = enumerate(train_loader)\n        LOGGER.info(\n            (\"\\n\" + \"%11s\" * 8)\n            % (\"Epoch\", \"GPU_mem\", \"box_loss\", \"seg_loss\", \"obj_loss\", \"cls_loss\", \"Instances\", \"Size\")\n        )\n        if RANK in {-1, 0}:\n            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar\n        optimizer.zero_grad()\n        for i, (imgs, targets, paths, _, masks) in pbar:  # batch ------------------------------------------------------\n            # callbacks.run('on_train_batch_start')\n            ni = i + nb * epoch  # number integrated batches (since train start)\n            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0\n\n            # Warmup\n            if ni <= nw:\n                xi = [0, nw]  # x interp\n                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)\n                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())\n                for j, x in enumerate(optimizer.param_groups):\n                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0\n                    x[\"lr\"] = np.interp(ni, xi, [hyp[\"warmup_bias_lr\"] if j == 0 else 0.0, x[\"initial_lr\"] * lf(epoch)])\n                    if \"momentum\" in x:\n                        x[\"momentum\"] = np.interp(ni, xi, [hyp[\"warmup_momentum\"], hyp[\"momentum\"]])\n\n            # Multi-scale\n            if opt.multi_scale:\n                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size\n                sf = sz / max(imgs.shape[2:])  # scale factor\n                if sf != 1:\n                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)\n                    imgs = nn.functional.interpolate(imgs, size=ns, mode=\"bilinear\", align_corners=False)\n\n            # Forward\n            with torch.cuda.amp.autocast(amp):\n                pred = model(imgs)  # forward\n                loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())\n                if RANK != -1:\n                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode\n                if opt.quad:\n                    loss *= 4.0\n\n            # Backward\n            scaler.scale(loss).backward()\n\n            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html\n            if ni - last_opt_step >= accumulate:\n                scaler.unscale_(optimizer)  # unscale gradients\n                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients\n                scaler.step(optimizer)  # optimizer.step\n                scaler.update()\n                optimizer.zero_grad()\n                if ema:\n                    ema.update(model)\n                last_opt_step = ni\n\n            # Log\n            if RANK in {-1, 0}:\n                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses\n                mem = f\"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G\"  # (GB)\n                pbar.set_description(\n                    (\"%11s\" * 2 + \"%11.4g\" * 6)\n                    % (f\"{epoch}/{epochs - 1}\", mem, *mloss, targets.shape[0], imgs.shape[-1])\n                )\n                # callbacks.run('on_train_batch_end', model, ni, imgs, targets, 
paths)\n                # if callbacks.stop_training:\n                #    return\n\n                # Mosaic plots\n                if plots:\n                    if ni < 3:\n                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f\"train_batch{ni}.jpg\")\n                    if ni == 10:\n                        files = sorted(save_dir.glob(\"train*.jpg\"))\n                        logger.log_images(files, \"Mosaics\", epoch)\n            # end batch ------------------------------------------------------------------------------------------------\n\n        # Scheduler\n        lr = [x[\"lr\"] for x in optimizer.param_groups]  # for loggers\n        scheduler.step()\n\n        if RANK in {-1, 0}:\n            # mAP\n            # callbacks.run('on_train_epoch_end', epoch=epoch)\n            ema.update_attr(model, include=[\"yaml\", \"nc\", \"hyp\", \"names\", \"stride\", \"class_weights\"])\n            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop\n            if not noval or final_epoch:  # Calculate mAP\n                results, maps, _ = validate.run(\n                    data_dict,\n                    batch_size=batch_size // WORLD_SIZE * 2,\n                    imgsz=imgsz,\n                    half=amp,\n                    model=ema.ema,\n                    single_cls=single_cls,\n                    dataloader=val_loader,\n                    save_dir=save_dir,\n                    plots=False,\n                    callbacks=callbacks,\n                    compute_loss=compute_loss,\n                    mask_downsample_ratio=mask_ratio,\n                    overlap=overlap,\n                )\n\n            # Update best mAP\n            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]\n            stop = stopper(epoch=epoch, fitness=fi)  # early stop check\n            if fi > best_fitness:\n                best_fitness = fi\n            log_vals = list(mloss) + list(results) + lr\n            # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)\n            # Log val metrics and media\n            metrics_dict = dict(zip(KEYS, log_vals))\n            logger.log_metrics(metrics_dict, epoch)\n\n            # Save model\n            if (not nosave) or (final_epoch and not evolve):  # if save\n                ckpt = {\n                    \"epoch\": epoch,\n                    \"best_fitness\": best_fitness,\n                    \"model\": deepcopy(de_parallel(model)).half(),\n                    \"ema\": deepcopy(ema.ema).half(),\n                    \"updates\": ema.updates,\n                    \"optimizer\": optimizer.state_dict(),\n                    \"opt\": vars(opt),\n                    \"git\": GIT_INFO,  # {remote, branch, commit} if a git repo\n                    \"date\": datetime.now().isoformat(),\n                }\n\n                # Save last, best and delete\n                torch.save(ckpt, last)\n                if best_fitness == fi:\n                    torch.save(ckpt, best)\n                if opt.save_period > 0 and epoch % opt.save_period == 0:\n                    torch.save(ckpt, w / f\"epoch{epoch}.pt\")\n                    logger.log_model(w / f\"epoch{epoch}.pt\")\n                del ckpt\n                # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)\n\n        # EarlyStopping\n        if RANK != -1:  # if DDP training\n            broadcast_list = [stop if RANK == 0 else None]\n            
dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks\n            if RANK != 0:\n                stop = broadcast_list[0]\n        if stop:\n            break  # must break all DDP ranks\n\n        # end epoch ----------------------------------------------------------------------------------------------------\n    # end training -----------------------------------------------------------------------------------------------------\n    if RANK in {-1, 0}:\n        LOGGER.info(f\"\\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\")\n        for f in last, best:\n            if f.exists():\n                strip_optimizer(f)  # strip optimizers\n                if f is best:\n                    LOGGER.info(f\"\\nValidating {f}...\")\n                    results, _, _ = validate.run(\n                        data_dict,\n                        batch_size=batch_size // WORLD_SIZE * 2,\n                        imgsz=imgsz,\n                        model=attempt_load(f, device).half(),\n                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools at iou 0.65\n                        single_cls=single_cls,\n                        dataloader=val_loader,\n                        save_dir=save_dir,\n                        save_json=is_coco,\n                        verbose=True,\n                        plots=plots,\n                        callbacks=callbacks,\n                        compute_loss=compute_loss,\n                        mask_downsample_ratio=mask_ratio,\n                        overlap=overlap,\n                    )  # val best model with plots\n                    if is_coco:\n                        # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)\n                        metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))\n                        logger.log_metrics(metrics_dict, epoch)\n\n        # callbacks.run('on_train_end', last, best, epoch, results)\n        # on train end callback using genericLogger\n        logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)\n        if not opt.evolve:\n            logger.log_model(best, epoch)\n        if plots:\n            plot_results_with_masks(file=save_dir / \"results.csv\")  # save results.png\n            files = [\"results.png\", \"confusion_matrix.png\", *(f\"{x}_curve.png\" for x in (\"F1\", \"PR\", \"P\", \"R\"))]\n            files = [(save_dir / f) for f in files if (save_dir / f).exists()]  # filter\n            LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}\")\n            logger.log_images(files, \"Results\", epoch + 1)\n            logger.log_images(sorted(save_dir.glob(\"val*.jpg\")), \"Validation\", epoch + 1)\n    torch.cuda.empty_cache()\n    return results\n\n\ndef parse_opt(known=False):\n    \"\"\"Parses command line arguments for training configurations, returning parsed arguments.\n\n    Supports both known and unknown args.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", type=str, default=ROOT / \"yolov5s-seg.pt\", help=\"initial weights path\")\n    parser.add_argument(\"--cfg\", type=str, default=\"\", help=\"model.yaml path\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128-seg.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--hyp\", type=str, default=ROOT / \"data/hyps/hyp.scratch-low.yaml\", help=\"hyperparameters path\")\n    
parser.add_argument(\"--epochs\", type=int, default=100, help=\"total training epochs\")\n    parser.add_argument(\"--batch-size\", type=int, default=16, help=\"total batch size for all GPUs, -1 for autobatch\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=640, help=\"train, val image size (pixels)\")\n    parser.add_argument(\"--rect\", action=\"store_true\", help=\"rectangular training\")\n    parser.add_argument(\"--resume\", nargs=\"?\", const=True, default=False, help=\"resume most recent training\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"only save final checkpoint\")\n    parser.add_argument(\"--noval\", action=\"store_true\", help=\"only validate final epoch\")\n    parser.add_argument(\"--noautoanchor\", action=\"store_true\", help=\"disable AutoAnchor\")\n    parser.add_argument(\"--noplots\", action=\"store_true\", help=\"save no plot files\")\n    parser.add_argument(\"--evolve\", type=int, nargs=\"?\", const=300, help=\"evolve hyperparameters for x generations\")\n    parser.add_argument(\"--bucket\", type=str, default=\"\", help=\"gsutil bucket\")\n    parser.add_argument(\"--cache\", type=str, nargs=\"?\", const=\"ram\", help=\"image --cache ram/disk\")\n    parser.add_argument(\"--image-weights\", action=\"store_true\", help=\"use weighted image selection for training\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--multi-scale\", action=\"store_true\", help=\"vary img-size +/- 50%%\")\n    parser.add_argument(\"--single-cls\", action=\"store_true\", help=\"train multi-class data as single-class\")\n    parser.add_argument(\"--optimizer\", type=str, choices=[\"SGD\", \"Adam\", \"AdamW\"], default=\"SGD\", help=\"optimizer\")\n    parser.add_argument(\"--sync-bn\", action=\"store_true\", help=\"use SyncBatchNorm, only available in DDP mode\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/train-seg\", help=\"save to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--quad\", action=\"store_true\", help=\"quad dataloader\")\n    parser.add_argument(\"--cos-lr\", action=\"store_true\", help=\"cosine LR scheduler\")\n    parser.add_argument(\"--label-smoothing\", type=float, default=0.0, help=\"Label smoothing epsilon\")\n    parser.add_argument(\"--patience\", type=int, default=100, help=\"EarlyStopping patience (epochs without improvement)\")\n    parser.add_argument(\"--freeze\", nargs=\"+\", type=int, default=[0], help=\"Freeze layers: backbone=10, first3=0 1 2\")\n    parser.add_argument(\"--save-period\", type=int, default=-1, help=\"Save checkpoint every x epochs (disabled if < 1)\")\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"Global training seed\")\n    parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"Automatic DDP Multi-GPU argument, do not modify\")\n\n    # Instance Segmentation Args\n    parser.add_argument(\"--mask-ratio\", type=int, default=4, help=\"Downsample the truth masks to save memory\")\n    parser.add_argument(\"--no-overlap\", action=\"store_true\", help=\"Disable mask overlap (overlapping masks train faster at slightly lower mAP)\")\n\n    return 
parser.parse_known_args()[0] if known else parser.parse_args()\n\n\ndef main(opt, callbacks=Callbacks()):\n    \"\"\"Initializes training or evolution of YOLOv5 models based on provided configuration and options.\"\"\"\n    if RANK in {-1, 0}:\n        print_args(vars(opt))\n        check_git_status()\n        check_requirements(ROOT / \"requirements.txt\")\n\n    # Resume\n    if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt\n        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())\n        opt_yaml = last.parent.parent / \"opt.yaml\"  # train options yaml\n        opt_data = opt.data  # original dataset\n        if opt_yaml.is_file():\n            with open(opt_yaml, errors=\"ignore\") as f:\n                d = yaml.safe_load(f)\n        else:\n            d = torch_load(last, map_location=\"cpu\")[\"opt\"]\n        opt = argparse.Namespace(**d)  # replace\n        opt.cfg, opt.weights, opt.resume = \"\", str(last), True  # reinstate\n        if is_url(opt_data):\n            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout\n    else:\n        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (\n            check_file(opt.data),\n            check_yaml(opt.cfg),\n            check_yaml(opt.hyp),\n            str(opt.weights),\n            str(opt.project),\n        )  # checks\n        assert len(opt.cfg) or len(opt.weights), \"either --cfg or --weights must be specified\"\n        if opt.evolve:\n            if opt.project == str(ROOT / \"runs/train-seg\"):  # if default project name, rename to runs/evolve-seg\n                opt.project = str(ROOT / \"runs/evolve-seg\")\n            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume\n        if opt.name == \"cfg\":\n            opt.name = Path(opt.cfg).stem  # use model.yaml as name\n        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))\n\n    # DDP mode\n    device = select_device(opt.device, batch_size=opt.batch_size)\n    if LOCAL_RANK != -1:\n        msg = \"is not compatible with YOLOv5 Multi-GPU DDP training\"\n        assert not opt.image_weights, f\"--image-weights {msg}\"\n        assert not opt.evolve, f\"--evolve {msg}\"\n        assert opt.batch_size != -1, f\"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size\"\n        assert opt.batch_size % WORLD_SIZE == 0, f\"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE\"\n        assert torch.cuda.device_count() > LOCAL_RANK, \"insufficient CUDA devices for DDP command\"\n        torch.cuda.set_device(LOCAL_RANK)\n        device = torch.device(\"cuda\", LOCAL_RANK)\n        dist.init_process_group(backend=\"nccl\" if dist.is_nccl_available() else \"gloo\")\n\n    # Train\n    if not opt.evolve:\n        train(opt.hyp, opt, device, callbacks)\n\n    # Evolve hyperparameters (optional)\n    else:\n        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)\n        meta = {\n            \"lr0\": (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)\n            \"lrf\": (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)\n            \"momentum\": (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1\n            \"weight_decay\": (1, 0.0, 0.001),  # optimizer weight decay\n            \"warmup_epochs\": (1, 0.0, 5.0),  # warmup epochs (fractions ok)\n            \"warmup_momentum\": (1, 0.0, 0.95),  # warmup initial momentum\n         
   \"warmup_bias_lr\": (1, 0.0, 0.2),  # warmup initial bias lr\n            \"box\": (1, 0.02, 0.2),  # box loss gain\n            \"cls\": (1, 0.2, 4.0),  # cls loss gain\n            \"cls_pw\": (1, 0.5, 2.0),  # cls BCELoss positive_weight\n            \"obj\": (1, 0.2, 4.0),  # obj loss gain (scale with pixels)\n            \"obj_pw\": (1, 0.5, 2.0),  # obj BCELoss positive_weight\n            \"iou_t\": (0, 0.1, 0.7),  # IoU training threshold\n            \"anchor_t\": (1, 2.0, 8.0),  # anchor-multiple threshold\n            \"anchors\": (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)\n            \"fl_gamma\": (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)\n            \"hsv_h\": (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)\n            \"hsv_s\": (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)\n            \"hsv_v\": (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)\n            \"degrees\": (1, 0.0, 45.0),  # image rotation (+/- deg)\n            \"translate\": (1, 0.0, 0.9),  # image translation (+/- fraction)\n            \"scale\": (1, 0.0, 0.9),  # image scale (+/- gain)\n            \"shear\": (1, 0.0, 10.0),  # image shear (+/- deg)\n            \"perspective\": (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001\n            \"flipud\": (1, 0.0, 1.0),  # image flip up-down (probability)\n            \"fliplr\": (0, 0.0, 1.0),  # image flip left-right (probability)\n            \"mosaic\": (1, 0.0, 1.0),  # image mosaic (probability)\n            \"mixup\": (1, 0.0, 1.0),  # image mixup (probability)\n            \"copy_paste\": (1, 0.0, 1.0),  # segment copy-paste (probability)\n        }\n\n        with open(opt.hyp, errors=\"ignore\") as f:\n            hyp = yaml.safe_load(f)  # load hyps dict\n            if \"anchors\" not in hyp:  # anchors commented in hyp.yaml\n                hyp[\"anchors\"] = 3\n        if opt.noautoanchor:\n            del hyp[\"anchors\"], meta[\"anchors\"]\n        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch\n        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices\n        evolve_yaml, evolve_csv = save_dir / \"hyp_evolve.yaml\", save_dir / \"evolve.csv\"\n        if opt.bucket:\n            # download evolve.csv if exists\n            subprocess.run(\n                [\n                    \"gsutil\",\n                    \"cp\",\n                    f\"gs://{opt.bucket}/evolve.csv\",\n                    str(evolve_csv),\n                ]\n            )\n\n        for _ in range(opt.evolve):  # generations to evolve\n            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate\n                # Select parent(s)\n                parent = \"single\"  # parent selection method: 'single' or 'weighted'\n                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=\",\", skiprows=1)\n                n = min(5, len(x))  # number of previous results to consider\n                x = x[np.argsort(-fitness(x))][:n]  # top n mutations\n                w = fitness(x) - fitness(x).min() + 1e-6  # weights (sum > 0)\n                if parent == \"single\" or len(x) == 1:\n                    # x = x[random.randint(0, n - 1)]  # random selection\n                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection\n                elif parent == \"weighted\":\n                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination\n\n 
               # Mutate\n                mp, s = 0.8, 0.2  # mutation probability, sigma\n                npr = np.random\n                npr.seed(int(time.time()))\n                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1\n                ng = len(meta)\n                v = np.ones(ng)\n                while all(v == 1):  # mutate until a change occurs (prevent duplicates)\n                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)\n                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)\n                    hyp[k] = float(x[i + 12] * v[i])  # mutate\n\n            # Constrain to limits\n            for k, v in meta.items():\n                hyp[k] = max(hyp[k], v[1])  # lower limit\n                hyp[k] = min(hyp[k], v[2])  # upper limit\n                hyp[k] = round(hyp[k], 5)  # significant digits\n\n            # Train mutation\n            results = train(hyp.copy(), opt, device, callbacks)\n            callbacks = Callbacks()\n            # Write mutation results\n            print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)\n\n        # Plot results\n        plot_evolve(evolve_csv)\n        LOGGER.info(\n            f\"Hyperparameter evolution finished {opt.evolve} generations\\n\"\n            f\"Results saved to {colorstr('bold', save_dir)}\\n\"\n            f\"Usage example: $ python train.py --hyp {evolve_yaml}\"\n        )\n\n\ndef run(**kwargs):\n    \"\"\"Executes YOLOv5 training with given parameters, altering options programmatically; returns updated options.\n\n    Example: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')\n    \"\"\"\n    opt = parse_opt(True)\n    for k, v in kwargs.items():\n        setattr(opt, k, v)\n    main(opt)\n    return opt\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "segment/tutorial.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"t6MPjfT5NrKQ\"\n   },\n   \"source\": [\n    \"<div align=\\\"center\\\">\\n\",\n    \"  <a href=\\\"https://ultralytics.com/yolo\\\" target=\\\"_blank\\\">\\n\",\n    \"    <img width=\\\"1024\\\" src=\\\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\\\">\\n\",\n    \"  </a>\\n\",\n    \"\\n\",\n    \"  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\\n\",\n    \"\\n\",\n    \"  <a href=\\\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\\\"><img src=\\\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\\\" alt=\\\"Ultralytics CI\\\"></a>\\n\",\n    \"  <a href=\\\"https://console.paperspace.com/github/ultralytics/ultralytics\\\"><img src=\\\"https://assets.paperspace.io/img/gradient-badge.svg\\\" alt=\\\"Run on Gradient\\\"/></a>\\n\",\n    \"  <a href=\\\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\\\"><img src=\\\"https://colab.research.google.com/assets/colab-badge.svg\\\" alt=\\\"Open In Colab\\\"></a>\\n\",\n    \"  <a href=\\\"https://www.kaggle.com/models/ultralytics/yolo11\\\"><img src=\\\"https://kaggle.com/static/images/open-in-kaggle.svg\\\" alt=\\\"Open In Kaggle\\\"></a>\\n\",\n    \"\\n\",\n    \"  <a href=\\\"https://ultralytics.com/discord\\\"><img alt=\\\"Discord\\\" src=\\\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\\\"></a>\\n\",\n    \"  <a href=\\\"https://community.ultralytics.com\\\"><img alt=\\\"Ultralytics Forums\\\" src=\\\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\\\"></a>\\n\",\n    \"  <a href=\\\"https://reddit.com/r/ultralytics\\\"><img alt=\\\"Ultralytics Reddit\\\" src=\\\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\\\"></a>\\n\",\n    \"</div>\\n\",\n    \"\\n\",\n    \"This **Ultralytics YOLOv5 Segmentation Colab Notebook** is the easiest way to get started with [YOLO models](https://www.ultralytics.com/yolo)—no installation needed. Built by [Ultralytics](https://www.ultralytics.com/), the creators of YOLO, this notebook walks you through running **state-of-the-art** models directly in your browser.\\n\",\n    \"\\n\",\n    \"Ultralytics models are constantly updated for performance and flexibility. They're **fast**, **accurate**, and **easy to use**, and they excel at [object detection](https://docs.ultralytics.com/tasks/detect/), [tracking](https://docs.ultralytics.com/modes/track/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [image classification](https://docs.ultralytics.com/tasks/classify/), and [pose estimation](https://docs.ultralytics.com/tasks/pose/).\\n\",\n    \"\\n\",\n    \"Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). 
Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!\\n\",\n    \"\\n\",\n    \"Request an Enterprise License for commercial use at [Ultralytics Licensing](https://www.ultralytics.com/license).\\n\",\n    \"\\n\",\n    \"<br>\\n\",\n    \"<div>\\n\",\n    \"  <a href=\\\"https://www.youtube.com/watch?v=ZN3nRZT7b24\\\" target=\\\"_blank\\\">\\n\",\n    \"    <img src=\\\"https://img.youtube.com/vi/ZN3nRZT7b24/maxresdefault.jpg\\\" alt=\\\"Ultralytics Video\\\" width=\\\"640\\\" style=\\\"border-radius: 10px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);\\\">\\n\",\n    \"  </a>\\n\",\n    \"\\n\",\n    \"  <p style=\\\"font-size: 16px; font-family: Arial, sans-serif; color: #555;\\\">\\n\",\n    \"    <strong>Watch: </strong> How to Train\\n\",\n    \"    <a href=\\\"https://github.com/ultralytics/ultralytics\\\">Ultralytics</a>\\n\",\n    \"    <a href=\\\"https://docs.ultralytics.com/models/yolo11/\\\">YOLO11</a> Model on Custom Dataset using Google Colab Notebook 🚀\\n\",\n    \"  </p>\\n\",\n    \"</div>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"7mGmQbAO5pQb\"\n   },\n   \"source\": [\n    \"# Setup\\n\",\n    \"\\n\",\n    \"Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"wbvMlHd_QwMG\",\n    \"outputId\": \"171b23f0-71b9-4cbf-b666-6fa2ecef70c8\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"!git clone https://github.com/ultralytics/yolov5  # clone\\n\",\n    \"%cd yolov5\\n\",\n    \"%pip install -qr requirements.txt comet_ml  # install\\n\",\n    \"\\n\",\n    \"import torch\\n\",\n    \"\\n\",\n    \"import utils\\n\",\n    \"\\n\",\n    \"display = utils.notebook_init()  # checks\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"4JnkELT0cIJg\"\n   },\n   \"source\": [\n    \"# 1. Predict\\n\",\n    \"\\n\",\n    \"`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. 
Example inference sources are:\\n\",\n    \"\\n\",\n    \"```shell\\n\",\n    \"python segment/predict.py --source 0  # webcam\\n\",\n    \"                             img.jpg  # image \\n\",\n    \"                             vid.mp4  # video\\n\",\n    \"                             screen  # screenshot\\n\",\n    \"                             path/  # directory\\n\",\n    \"                             'path/*.jpg'  # glob\\n\",\n    \"                             'https://youtu.be/LNwODJXcvt4'  # YouTube\\n\",\n    \"                             'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"zR9ZbuQCH7FX\",\n    \"outputId\": \"3f67f1c7-f15e-4fa5-d251-967c3b77eaad\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1msegment/predict: \\u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\\n\",\n      \"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\",\n      \"\\n\",\n      \"Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\\n\",\n      \"100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\\n\",\n      \"\\n\",\n      \"Fusing layers... \\n\",\n      \"YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\\n\",\n      \"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\\n\",\n      \"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\\n\",\n      \"Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\\n\",\n      \"Results saved to \\u001b[1mruns/predict-seg/exp\\u001b[0m\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\\n\",\n    \"# display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"hkAzDWJ7cWTr\"\n   },\n   \"source\": [\n    \"&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\\n\",\n    \"<img align=\\\"left\\\" src=\\\"https://user-images.githubusercontent.com/26833433/199030123-08c72f8d-6871-4116-8ed3-c373642cf28e.jpg\\\" width=\\\"600\\\">\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"0eq1SMWl6Sfn\"\n   },\n   \"source\": [\n    \"# 2. Validate\\n\",\n    \"Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). 
To show results by class use the `--verbose` flag.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"WQPtK1QYVaD_\",\n    \"outputId\": \"9d751d8c-bee8-4339-cf30-9854ca530449\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels-segments.zip  ...\\n\",\n      \"Downloading http://images.cocodataset.org/zips/val2017.zip ...\\n\",\n      \"######################################################################## 100.0%\\n\",\n      \"######################################################################## 100.0%\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Download COCO val\\n\",\n    \"!bash data/scripts/get_coco.sh --val --segments  # download (780M - 5000 images)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"X58w8JLpMnjH\",\n    \"outputId\": \"a140d67a-02da-479e-9ddb-7d54bf9e407a\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1msegment/val: \\u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\\n\",\n      \"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\",\n      \"\\n\",\n      \"Fusing layers... \\n\",\n      \"YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mNew cache created: /content/datasets/coco/val2017.cache\\n\",\n      \"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 157/157 [01:54<00:00,  1.37it/s]\\n\",\n      \"                   all       5000      36335      0.673      0.517      0.566      0.373      0.672       0.49      0.532      0.319\\n\",\n      \"Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\\n\",\n      \"Results saved to \\u001b[1mruns/val-seg/exp\\u001b[0m\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Validate YOLOv5s-seg on COCO val\\n\",\n    \"!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"ZY2VXXXu74w5\"\n   },\n   \"source\": \"# 3. 
Train\\n\\n<p align=\\\"\\\"><a href=\\\"https://platform.ultralytics.com\\\"><img width=\\\"1000\\\" src=\\\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\\\"/></a></p>\\n\\nTrain a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\\n\\n- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\\nautomatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\\n- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\\n- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\\n<br><br>\\n\\nA **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\"\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"id\": \"i3oKtE4g-aNn\"\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# @title Select YOLOv5 🚀 logger {run: 'auto'}\\n\",\n    \"logger = \\\"Comet\\\"  # @param ['Comet', 'ClearML', 'TensorBoard']\\n\",\n    \"\\n\",\n    \"if logger == \\\"Comet\\\":\\n\",\n    \"    %pip install -q comet_ml\\n\",\n    \"    import comet_ml\\n\",\n    \"\\n\",\n    \"    comet_ml.init()\\n\",\n    \"elif logger == \\\"ClearML\\\":\\n\",\n    \"    %pip install -q clearml\\n\",\n    \"    import clearml\\n\",\n    \"\\n\",\n    \"    clearml.browser_login()\\n\",\n    \"elif logger == \\\"TensorBoard\\\":\\n\",\n    \"    %load_ext tensorboard\\n\",\n    \"    %tensorboard --logdir runs/train\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"id\": \"1NcFxRcFdJ_O\",\n    \"outputId\": \"3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988\"\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1msegment/train: \\u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\\n\",\n      
\"\\u001b[34m\\u001b[1mgithub: \\u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\\n\",\n      \"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\\n\",\n      \"\\n\",\n      \"\\u001b[34m\\u001b[1mhyperparameters: \\u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\\n\",\n      \"\\u001b[34m\\u001b[1mTensorBoard: \\u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\\n\",\n      \"\\n\",\n      \"Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\\n\",\n      \"Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip to coco128-seg.zip...\\n\",\n      \"100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\\n\",\n      \"Dataset download success ✅ (1.9s), saved to \\u001b[1m/content/datasets\\u001b[0m\\n\",\n      \"\\n\",\n      \"                 from  n    params  module                                  arguments                     \\n\",\n      \"  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \\n\",\n      \"  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \\n\",\n      \"  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \\n\",\n      \"  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \\n\",\n      \"  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \\n\",\n      \"  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \\n\",\n      \"  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \\n\",\n      \"  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \\n\",\n      \"  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \\n\",\n      \"  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \\n\",\n      \" 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \\n\",\n      \" 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \\n\",\n      \" 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \\n\",\n      \" 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \\n\",\n      \" 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \\n\",\n      \" 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \\n\",\n      \" 18               
 -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \\n\",\n      \" 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \\n\",\n      \" 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \\n\",\n      \" 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \\n\",\n      \" 24      [17, 20, 23]  1    615133  models.yolo.Segment                     [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\\n\",\n      \"Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\\n\",\n      \"\\n\",\n      \"Transferred 367/367 items from yolov5s-seg.pt\\n\",\n      \"\\u001b[34m\\u001b[1mAMP: \\u001b[0mchecks passed ✅\\n\",\n      \"\\u001b[34m\\u001b[1moptimizer:\\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\\n\",\n      \"\\u001b[34m\\u001b[1malbumentations: \\u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 98.90it/s]\\n\",\n      \"\\n\",\n      \"\\u001b[34m\\u001b[1mAutoAnchor: \\u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\\n\",\n      \"Plotting labels to runs/train-seg/exp/labels.jpg... 
\\n\",\n      \"Image sizes 640 train, 640 val\\n\",\n      \"Using 2 dataloader workers\\n\",\n      \"Logging results to \\u001b[1mruns/train-seg/exp\\u001b[0m\\n\",\n      \"Starting training for 3 epochs...\\n\",\n      \"\\n\",\n      \"      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\\n\",\n      \"        0/2      4.92G     0.0417    0.04646    0.06066    0.02126        192        640: 100% 8/8 [00:08<00:00,  1.10s/it]\\n\",\n      \"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.81it/s]\\n\",\n      \"                   all        128        929      0.737      0.649      0.715      0.492      0.719      0.617      0.658      0.408\\n\",\n      \"\\n\",\n      \"      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\\n\",\n      \"        1/2      6.29G    0.04157    0.04503    0.05772    0.01777        208        640: 100% 8/8 [00:09<00:00,  1.21s/it]\\n\",\n      \"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.87it/s]\\n\",\n      \"                   all        128        929      0.756      0.674      0.738      0.506      0.725       0.64       0.68      0.422\\n\",\n      \"\\n\",\n      \"      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\\n\",\n      \"        2/2      6.29G     0.0425    0.04793    0.06784    0.01863        161        640: 100% 8/8 [00:03<00:00,  2.02it/s]\\n\",\n      \"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.88it/s]\\n\",\n      \"                   all        128        929      0.736      0.694      0.747      0.522      0.769      0.622      0.683      0.427\\n\",\n      \"\\n\",\n      \"3 epochs completed in 0.009 hours.\\n\",\n      \"Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\\n\",\n      \"Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\\n\",\n      \"\\n\",\n      \"Validating runs/train-seg/exp/weights/best.pt...\\n\",\n      \"Fusing layers... 
\\n\",\n      \"Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\\n\",\n      \"                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:06<00:00,  1.59s/it]\\n\",\n      \"                   all        128        929      0.738      0.694      0.746      0.522      0.759      0.625      0.682      0.426\\n\",\n      \"                person        128        254      0.845      0.756      0.836       0.55      0.861      0.669      0.759      0.407\\n\",\n      \"               bicycle        128          6      0.475      0.333      0.549      0.341      0.711      0.333      0.526      0.322\\n\",\n      \"                   car        128         46      0.612      0.565      0.539      0.257      0.555      0.435      0.477      0.171\\n\",\n      \"            motorcycle        128          5       0.73        0.8      0.752      0.571      0.747        0.8      0.752       0.42\\n\",\n      \"              airplane        128          6          1      0.943      0.995      0.732       0.92      0.833      0.839      0.555\\n\",\n      \"                   bus        128          7      0.677      0.714      0.722      0.653      0.711      0.714      0.722      0.593\\n\",\n      \"                 train        128          3          1      0.951      0.995      0.551          1      0.884      0.995      0.781\\n\",\n      \"                 truck        128         12      0.555      0.417      0.457      0.285      0.624      0.417      0.397      0.277\\n\",\n      \"                  boat        128          6      0.624        0.5      0.584      0.186          1      0.326      0.412      0.133\\n\",\n      \"         traffic light        128         14      0.513      0.302      0.411      0.247      0.435      0.214      0.376      0.251\\n\",\n      \"             stop sign        128          2      0.824          1      0.995      0.796      0.906          1      0.995      0.747\\n\",\n      \"                 bench        128          9       0.75      0.667      0.763      0.367      0.724      0.585      0.698      0.209\\n\",\n      \"                  bird        128         16      0.961          1      0.995      0.686      0.918      0.938       0.91      0.525\\n\",\n      \"                   cat        128          4      0.771      0.857      0.945      0.752       0.76        0.8      0.945      0.728\\n\",\n      \"                   dog        128          9      0.987      0.778      0.963      0.681          1      0.705       0.89      0.574\\n\",\n      \"                 horse        128          2      0.703          1      0.995      0.697      0.759          1      0.995      0.249\\n\",\n      \"              elephant        128         17      0.916      0.882       0.93      0.691      0.811      0.765      0.829      0.537\\n\",\n      \"                  bear        128          1      0.664          1      0.995      0.995      0.701          1      0.995      0.895\\n\",\n      \"                 zebra        128          4      0.864          1      0.995      0.921      0.879          1      0.995      0.804\\n\",\n      \"               giraffe        128          9      0.883      0.889       0.94      0.683      0.845      0.778       0.78      0.463\\n\",\n      \"              backpack        128          6          1       0.59      0.701      0.372          1      0.474       0.52      0.252\\n\",\n      \"             
 umbrella        128         18      0.654      0.839      0.887       0.52      0.517      0.556      0.427      0.229\\n\",\n      \"               handbag        128         19       0.54      0.211      0.408      0.221      0.796      0.206      0.396      0.196\\n\",\n      \"                   tie        128          7      0.864      0.857      0.857      0.577      0.925      0.857      0.857      0.534\\n\",\n      \"              suitcase        128          4      0.716          1      0.945      0.647      0.767          1      0.945      0.634\\n\",\n      \"               frisbee        128          5      0.708        0.8      0.761      0.643      0.737        0.8      0.761      0.501\\n\",\n      \"                  skis        128          1      0.691          1      0.995      0.796      0.761          1      0.995      0.199\\n\",\n      \"             snowboard        128          7      0.918      0.857      0.904      0.604       0.32      0.286      0.235      0.137\\n\",\n      \"           sports ball        128          6      0.902      0.667      0.701      0.466      0.727        0.5      0.497      0.471\\n\",\n      \"                  kite        128         10      0.586        0.4      0.511      0.231      0.663      0.394      0.417      0.139\\n\",\n      \"          baseball bat        128          4      0.359        0.5      0.401      0.169      0.631        0.5      0.526      0.133\\n\",\n      \"        baseball glove        128          7          1      0.519       0.58      0.327      0.687      0.286      0.455      0.328\\n\",\n      \"            skateboard        128          5      0.729        0.8      0.862      0.631      0.599        0.6      0.604      0.379\\n\",\n      \"         tennis racket        128          7       0.57      0.714      0.645      0.448      0.608      0.714      0.645      0.412\\n\",\n      \"                bottle        128         18      0.469      0.393      0.537      0.357      0.661      0.389      0.543      0.349\\n\",\n      \"            wine glass        128         16      0.677      0.938      0.866      0.441       0.53      0.625       0.67      0.334\\n\",\n      \"                   cup        128         36      0.777      0.722      0.812      0.466      0.725      0.583      0.762      0.467\\n\",\n      \"                  fork        128          6      0.948      0.333      0.425       0.27      0.527      0.167       0.18      0.102\\n\",\n      \"                 knife        128         16      0.757      0.587      0.669      0.458       0.79        0.5      0.552       0.34\\n\",\n      \"                 spoon        128         22       0.74      0.364      0.559      0.269      0.925      0.364      0.513      0.213\\n\",\n      \"                  bowl        128         28      0.766      0.714      0.725      0.559      0.803      0.584      0.665      0.353\\n\",\n      \"                banana        128          1      0.408          1      0.995      0.398      0.539          1      0.995      0.497\\n\",\n      \"              sandwich        128          2          1          0      0.695      0.536          1          0      0.498      0.448\\n\",\n      \"                orange        128          4      0.467          1      0.995      0.693      0.518          1      0.995      0.663\\n\",\n      \"              broccoli        128         11      0.462      0.455      0.383      0.259      0.548      0.455      0.384      0.256\\n\",\n      \"                
carrot        128         24      0.631      0.875       0.77      0.533      0.757      0.909      0.853      0.499\\n\",\n      \"               hot dog        128          2      0.555          1      0.995      0.995      0.578          1      0.995      0.796\\n\",\n      \"                 pizza        128          5       0.89        0.8      0.962      0.796          1      0.778      0.962      0.766\\n\",\n      \"                 donut        128         14      0.695          1      0.893      0.772      0.704          1      0.893      0.696\\n\",\n      \"                  cake        128          4      0.826          1      0.995       0.92      0.862          1      0.995      0.846\\n\",\n      \"                 chair        128         35       0.53      0.571      0.613      0.336       0.67        0.6      0.538      0.271\\n\",\n      \"                 couch        128          6      0.972      0.667      0.833      0.627          1       0.62      0.696      0.394\\n\",\n      \"          potted plant        128         14        0.7      0.857      0.883      0.552      0.836      0.857      0.883      0.473\\n\",\n      \"                   bed        128          3      0.979      0.667       0.83      0.366          1          0       0.83      0.373\\n\",\n      \"          dining table        128         13      0.775      0.308      0.505      0.364      0.644      0.231       0.25     0.0804\\n\",\n      \"                toilet        128          2      0.836          1      0.995      0.846      0.887          1      0.995      0.797\\n\",\n      \"                    tv        128          2        0.6          1      0.995      0.846      0.655          1      0.995      0.896\\n\",\n      \"                laptop        128          3      0.822      0.333      0.445      0.307          1          0      0.392       0.12\\n\",\n      \"                 mouse        128          2          1          0          0          0          1          0          0          0\\n\",\n      \"                remote        128          8      0.745        0.5       0.62      0.459      0.821        0.5      0.624      0.449\\n\",\n      \"            cell phone        128          8      0.686      0.375      0.502      0.272      0.488       0.25       0.28      0.132\\n\",\n      \"             microwave        128          3      0.831          1      0.995      0.722      0.867          1      0.995      0.592\\n\",\n      \"                  oven        128          5      0.439        0.4      0.435      0.294      0.823        0.6      0.645      0.418\\n\",\n      \"                  sink        128          6      0.677        0.5      0.565      0.448      0.722        0.5       0.46      0.362\\n\",\n      \"          refrigerator        128          5      0.533        0.8      0.783      0.524      0.558        0.8      0.783      0.527\\n\",\n      \"                  book        128         29      0.732      0.379      0.423      0.196       0.69      0.207       0.38      0.131\\n\",\n      \"                 clock        128          9      0.889      0.778      0.917      0.677      0.908      0.778      0.875      0.604\\n\",\n      \"                  vase        128          2      0.375          1      0.995      0.995      0.455          1      0.995      0.796\\n\",\n      \"              scissors        128          1          1          0     0.0166    0.00166          1          0          0          0\\n\",\n      \"            teddy 
bear        128         21      0.813      0.829      0.841      0.457      0.826      0.678      0.786      0.422\\n\",\n      \"            toothbrush        128          5      0.806          1      0.995      0.733      0.991          1      0.995      0.628\\n\",\n      \"Results saved to \\u001b[1mruns/train-seg/exp\\u001b[0m\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# Train YOLOv5s on COCO128 for 3 epochs\\n\",\n    \"!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"15glLzbQx5u0\"\n   },\n   \"source\": [\n    \"# 4. Visualize\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"nWOsI5wJR1o3\"\n   },\n   \"source\": [\n    \"## Comet Logging and Visualization 🌟 NEW\\n\",\n    \"\\n\",\n    \"[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\\n\",\n    \"\\n\",\n    \"Getting started is easy:\\n\",\n    \"```shell\\n\",\n    \"pip install comet_ml  # 1. install\\n\",\n    \"export COMET_API_KEY=<Your API Key>  # 2. paste API key\\n\",\n    \"python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\\n\",\n    \"```\\n\",\n    \"To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\\n\",\n    \"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\\n\",\n    \"\\n\",\n    \"<a href=\\\"https://bit.ly/yolov5-readme-comet2\\\">\\n\",\n    \"<img alt=\\\"Comet Dashboard\\\" src=\\\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\\\" width=\\\"1280\\\"/></a>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"Lay2WsTjNJzP\"\n   },\n   \"source\": [\n    \"## ClearML Logging and Automation 🌟 NEW\\n\",\n    \"\\n\",\n    \"[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\\n\",\n    \"\\n\",\n    \"- `pip install clearml`\\n\",\n    \"- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\\n\",\n    \"\\n\",\n    \"You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\\n\",\n    \"\\n\",\n    \"You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\\n\",\n    \"\\n\",\n    \"<a href=\\\"https://cutt.ly/yolov5-notebook-clearml\\\">\\n\",\n    \"<img alt=\\\"ClearML Experiment Management UI\\\" src=\\\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\\\" width=\\\"1280\\\"/></a>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"-WPvRbS5Swl6\"\n   },\n   \"source\": [\n    \"## Local Logging\\n\",\n    \"\\n\",\n    \"Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\\n\",\n    \"\\n\",\n    \"This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \\n\",\n    \"\\n\",\n    \"<img alt=\\\"Local logging results\\\" src=\\\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\\\" width=\\\"1280\\\"/>\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"Zelyeqbyt3GD\"\n   },\n   \"source\": [\n    \"# Environments\\n\",\n    \"\\n\",\n    \"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\\n\",\n    \"\\n\",\n    \"- **Notebooks** with free GPU: <a href=\\\"https://bit.ly/yolov5-paperspace-notebook\\\"><img src=\\\"https://assets.paperspace.io/img/gradient-badge.svg\\\" alt=\\\"Run on Gradient\\\"></a> <a href=\\\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\\\"><img src=\\\"https://colab.research.google.com/assets/colab-badge.svg\\\" alt=\\\"Open In Colab\\\"></a> <a href=\\\"https://www.kaggle.com/models/ultralytics/yolov5\\\"><img src=\\\"https://kaggle.com/static/images/open-in-kaggle.svg\\\" alt=\\\"Open In Kaggle\\\"></a>\\n\",\n    \"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\\n\",\n    \"- **Amazon** Deep Learning AMI. 
See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\\n\",\n    \"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\\\"https://hub.docker.com/r/ultralytics/yolov5\\\"><img src=\\\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\\\" alt=\\\"Docker Pulls\\\"></a>\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"6Qu7Iesl0p54\"\n   },\n   \"source\": [\n    \"# Status\\n\",\n    \"\\n\",\n    \"![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\\n\",\n    \"\\n\",\n    \"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"IEijrePND_2I\"\n   },\n   \"source\": [\n    \"# Appendix\\n\",\n    \"\\n\",\n    \"Additional content below.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"id\": \"GMusP4OAxFu6\"\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# YOLOv5 PyTorch HUB Inference (DetectionModels only)\\n\",\n    \"\\n\",\n    \"model = torch.hub.load(\\n\",\n    \"    \\\"ultralytics/yolov5\\\", \\\"yolov5s-seg\\\", force_reload=True, trust_repo=True\\n\",\n    \")  # or yolov5n - yolov5x6 or custom\\n\",\n    \"im = \\\"https://ultralytics.com/images/zidane.jpg\\\"  # file, Path, PIL.Image, OpenCV, nparray, list\\n\",\n    \"results = model(im)  # inference\\n\",\n    \"results.print()  # or .show(), .save(), .crop(), .pandas(), etc.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"accelerator\": \"GPU\",\n  \"colab\": {\n   \"name\": \"YOLOv5 Segmentation Tutorial\",\n   \"provenance\": [],\n   \"toc_visible\": true\n  },\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.7.12\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}"
  },
  {
    "path": "segment/val.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nValidate a trained YOLOv5 segment model on a segment dataset.\n\nUsage:\n    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)\n    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments\n\nUsage - formats:\n    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch\n                                      yolov5s-seg.torchscript        # TorchScript\n                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                                      yolov5s-seg_openvino_label     # OpenVINO\n                                      yolov5s-seg.engine             # TensorRT\n                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)\n                                      yolov5s-seg_saved_model        # TensorFlow SavedModel\n                                      yolov5s-seg.pb                 # TensorFlow GraphDef\n                                      yolov5s-seg.tflite             # TensorFlow Lite\n                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU\n                                      yolov5s-seg_paddle_model       # PaddlePaddle\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport subprocess\nimport sys\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nimport torch.nn.functional as F\n\nfrom models.common import DetectMultiBackend\nfrom models.yolo import SegmentationModel\nfrom utils.callbacks import Callbacks\nfrom utils.general import (\n    LOGGER,\n    NUM_THREADS,\n    TQDM_BAR_FORMAT,\n    Profile,\n    check_dataset,\n    check_img_size,\n    check_requirements,\n    check_yaml,\n    coco80_to_coco91_class,\n    colorstr,\n    increment_path,\n    non_max_suppression,\n    print_args,\n    scale_boxes,\n    xywh2xyxy,\n    xyxy2xywh,\n)\nfrom utils.metrics import ConfusionMatrix, box_iou\nfrom utils.plots import output_to_target, plot_val_study\nfrom utils.segment.dataloaders import create_dataloader\nfrom utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image\nfrom utils.segment.metrics import Metrics, ap_per_class_box_and_mask\nfrom utils.segment.plots import plot_images_and_masks\nfrom utils.torch_utils import de_parallel, select_device, smart_inference_mode\n\n\ndef save_one_txt(predn, save_conf, shape, file):\n    \"\"\"Saves detection results in txt format; includes class, xywh (normalized), optionally confidence if `save_conf` is\n    True.\n    \"\"\"\n    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh\n    for *xyxy, conf, cls in predn.tolist():\n        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh\n        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format\n        with open(file, \"a\") as f:\n            f.write((\"%g \" * len(line)).rstrip() % line + \"\\n\")\n\n\ndef save_one_json(predn, jdict, path, class_map, pred_masks):\n    \"\"\"Saves a JSON file with detection results including bounding boxes, category IDs, 
scores, and segmentation masks.\n\n    Example JSON result: {\"image_id\": 42, \"category_id\": 18, \"bbox\": [258.15, 41.29, 348.26, 243.78], \"score\": 0.236}.\n    \"\"\"\n    from pycocotools.mask import encode\n\n    def single_encode(x):\n        \"\"\"Encodes binary mask arrays into RLE (Run-Length Encoding) format for JSON serialization.\"\"\"\n        rle = encode(np.asarray(x[:, :, None], order=\"F\", dtype=\"uint8\"))[0]\n        rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")\n        return rle\n\n    image_id = int(path.stem) if path.stem.isnumeric() else path.stem\n    box = xyxy2xywh(predn[:, :4])  # xywh\n    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner\n    pred_masks = np.transpose(pred_masks, (2, 0, 1))\n    with ThreadPool(NUM_THREADS) as pool:\n        rles = pool.map(single_encode, pred_masks)\n    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):\n        jdict.append(\n            {\n                \"image_id\": image_id,\n                \"category_id\": class_map[int(p[5])],\n                \"bbox\": [round(x, 3) for x in b],\n                \"score\": round(p[4], 5),\n                \"segmentation\": rles[i],\n            }\n        )\n\n\ndef process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):\n    \"\"\"Return correct prediction matrix.\n\n    Args:\n        detections (array[N, 6]): x1, y1, x2, y2, conf, class\n        labels (array[M, 5]): class, x1, y1, x2, y2\n\n    Returns:\n        correct (array[N, 10]), for 10 IoU levels.\n    \"\"\"\n    if masks:\n        if overlap:\n            nl = len(labels)\n            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1\n            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)\n            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)\n        if gt_masks.shape[1:] != pred_masks.shape[1:]:\n            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode=\"bilinear\", align_corners=False)[0]\n            gt_masks = gt_masks.gt_(0.5)\n        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))\n    else:  # boxes\n        iou = box_iou(labels[:, 1:], detections[:, :4])\n\n    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)\n    correct_class = labels[:, 0:1] == detections[:, 5]\n    for i in range(len(iouv)):\n        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match\n        if x[0].shape[0]:\n            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]\n            if x[0].shape[0] > 1:\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n                # matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n            correct[matches[:, 1].astype(int), i] = True\n    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)\n\n\n@smart_inference_mode()\ndef run(\n    data,\n    weights=None,  # model.pt path(s)\n    batch_size=32,  # batch size\n    imgsz=640,  # inference size (pixels)\n    conf_thres=0.001,  # confidence threshold\n    iou_thres=0.6,  # NMS IoU threshold\n    max_det=300,  # maximum detections per image\n    task=\"val\",  # train, val, test, speed or study\n    device=\"\",  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n    workers=8,  # max dataloader workers (per RANK in DDP mode)\n    single_cls=False,  # treat as single-class dataset\n    augment=False,  # augmented inference\n    verbose=False,  # verbose output\n    save_txt=False,  # save results to *.txt\n    save_hybrid=False,  # save label+prediction hybrid results to *.txt\n    save_conf=False,  # save confidences in --save-txt labels\n    save_json=False,  # save a COCO-JSON results file\n    project=ROOT / \"runs/val-seg\",  # save to project/name\n    name=\"exp\",  # save to project/name\n    exist_ok=False,  # existing project/name ok, do not increment\n    half=True,  # use FP16 half-precision inference\n    dnn=False,  # use OpenCV DNN for ONNX inference\n    model=None,\n    dataloader=None,\n    save_dir=Path(\"\"),\n    plots=True,\n    overlap=False,\n    mask_downsample_ratio=1,\n    compute_loss=None,\n    callbacks=Callbacks(),\n):\n    \"\"\"Validate a YOLOv5 segmentation model on specified dataset, producing metrics, plots, and optional JSON output.\"\"\"\n    if save_json:\n        check_requirements(\"pycocotools>=2.0.6\")\n        process = process_mask_native  # more accurate\n    else:\n        process = process_mask  # faster\n\n    # Initialize/load model and set device\n    training = model is not None\n    if training:  # called by train.py\n        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model\n        half &= device.type != \"cpu\"  # half precision only supported on CUDA\n        model.half() if half else model.float()\n        nm = de_parallel(model).model[-1].nm  # number of masks\n    else:  # called directly\n        device = select_device(device, batch_size=batch_size)\n\n        # Directories\n        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run\n        (save_dir / \"labels\" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir\n\n        # Load model\n        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine\n        imgsz = check_img_size(imgsz, s=stride)  # check image size\n        half = model.fp16  # FP16 supported on limited backends with CUDA\n        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks\n        if engine:\n            batch_size = model.batch_size\n        else:\n            device = model.device\n            if not (pt or jit):\n                batch_size = 1  # export.py models default to batch-size 1\n                LOGGER.info(f\"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models\")\n\n        # Data\n        data = check_dataset(data)  # check\n\n    # Configure\n    model.eval()\n    cuda = device.type != \"cpu\"\n    is_coco = isinstance(data.get(\"val\"), str) and data[\"val\"].endswith(f\"coco{os.sep}val2017.txt\")  # COCO dataset\n    nc = 1 if single_cls else int(data[\"nc\"])  # number of classes\n    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95\n    niou = iouv.numel()\n\n    # Dataloader\n    if not training:\n        if pt and not single_cls:  # check --weights are trained on --data\n            ncm = model.model.nc\n            assert ncm == nc, (\n                f\"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} \"\n                f\"classes). 
Pass correct combination of --weights and --data that are trained together.\"\n            )\n        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup\n        pad, rect = (0.0, False) if task == \"speed\" else (0.5, pt)  # square inference for benchmarks\n        task = task if task in (\"train\", \"val\", \"test\") else \"val\"  # path to train/val/test images\n        dataloader = create_dataloader(\n            data[task],\n            imgsz,\n            batch_size,\n            stride,\n            single_cls,\n            pad=pad,\n            rect=rect,\n            workers=workers,\n            prefix=colorstr(f\"{task}: \"),\n            overlap_mask=overlap,\n            mask_downsample_ratio=mask_downsample_ratio,\n        )[0]\n\n    seen = 0\n    confusion_matrix = ConfusionMatrix(nc=nc)\n    names = model.names if hasattr(model, \"names\") else model.module.names  # get class names\n    if isinstance(names, (list, tuple)):  # old format\n        names = dict(enumerate(names))\n    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))\n    s = (\"%22s\" + \"%11s\" * 10) % (\n        \"Class\",\n        \"Images\",\n        \"Instances\",\n        \"Box(P\",\n        \"R\",\n        \"mAP50\",\n        \"mAP50-95)\",\n        \"Mask(P\",\n        \"R\",\n        \"mAP50\",\n        \"mAP50-95)\",\n    )\n    dt = Profile(device=device), Profile(device=device), Profile(device=device)\n    metrics = Metrics()\n    loss = torch.zeros(4, device=device)\n    jdict, stats = [], []\n    # callbacks.run('on_val_start')\n    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar\n    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):\n        # callbacks.run('on_val_batch_start')\n        with dt[0]:\n            if cuda:\n                im = im.to(device, non_blocking=True)\n                targets = targets.to(device)\n                masks = masks.to(device)\n            masks = masks.float()\n            im = im.half() if half else im.float()  # uint8 to fp16/32\n            im /= 255  # 0 - 255 to 0.0 - 1.0\n            nb, _, height, width = im.shape  # batch size, channels, height, width\n\n        # Inference\n        with dt[1]:\n            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)\n\n        # Loss\n        if compute_loss:\n            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls\n\n        # NMS\n        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels\n        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling\n        with dt[2]:\n            preds = non_max_suppression(\n                preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm\n            )\n\n        # Metrics\n        plot_masks = []  # masks for plotting\n        for si, (pred, proto) in enumerate(zip(preds, protos)):\n            labels = targets[targets[:, 0] == si, 1:]\n            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions\n            path, shape = Path(paths[si]), shapes[si][0]\n            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init\n            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init\n            seen += 1\n\n            if npr == 0:\n                if nl:\n                    
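# no predictions for this image: append empty conf/class tensors with the target classes so these labels still count as missed detections\n                    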
stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))\n                    if plots:\n                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])\n                continue\n\n            # Masks\n            midx = [si] if overlap else targets[:, 0] == si\n            gt_masks = masks[midx]\n            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])\n\n            # Predictions\n            if single_cls:\n                pred[:, 5] = 0\n            predn = pred.clone()\n            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred\n\n            # Evaluate\n            if nl:\n                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes\n                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels\n                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels\n                correct_bboxes = process_batch(predn, labelsn, iouv)\n                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)\n                if plots:\n                    confusion_matrix.process_batch(predn, labelsn)\n            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)\n\n            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)\n            if plots and batch_i < 3:\n                plot_masks.append(pred_masks[:15])  # filter top 15 to plot\n\n            # Save/log\n            if save_txt:\n                save_one_txt(predn, save_conf, shape, file=save_dir / \"labels\" / f\"{path.stem}.txt\")\n            if save_json:\n                pred_masks = scale_image(\n                    im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]\n                )\n                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary\n            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])\n\n        # Plot images\n        if plots and batch_i < 3:\n            if len(plot_masks):\n                plot_masks = torch.cat(plot_masks, dim=0)\n            plot_images_and_masks(im, targets, masks, paths, save_dir / f\"val_batch{batch_i}_labels.jpg\", names)\n            plot_images_and_masks(\n                im,\n                output_to_target(preds, max_det=15),\n                plot_masks,\n                paths,\n                save_dir / f\"val_batch{batch_i}_pred.jpg\",\n                names,\n            )  # pred\n\n        # callbacks.run('on_val_batch_end')\n\n    # Compute metrics\n    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy\n    if len(stats) and stats[0].any():\n        results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)\n        metrics.update(results)\n    nt = np.bincount(stats[4].astype(int), minlength=nc)  # number of targets per class\n\n    # Print results\n    pf = \"%22s\" + \"%11i\" * 2 + \"%11.3g\" * 8  # print format\n    LOGGER.info(pf % (\"all\", seen, nt.sum(), *metrics.mean_results()))\n    if nt.sum() == 0:\n        LOGGER.warning(f\"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels\")\n\n    # Print results per class\n    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):\n        for i, c in enumerate(metrics.ap_class_index):\n            
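# per-class row: class name, total images, instance count, then box P/R/mAP50/mAP50-95 and mask P/R/mAP50/mAP50-95\n            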
LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))\n\n    # Print speeds\n    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image\n    if not training:\n        shape = (batch_size, 3, imgsz, imgsz)\n        LOGGER.info(f\"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}\" % t)\n\n    # Plots\n    if plots:\n        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))\n    # callbacks.run('on_val_end')\n\n    mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()\n\n    # Save JSON\n    if save_json and len(jdict):\n        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else \"\"  # weights\n        anno_json = str(Path(\"../datasets/coco/annotations/instances_val2017.json\"))  # annotations\n        pred_json = str(save_dir / f\"{w}_predictions.json\")  # predictions\n        LOGGER.info(f\"\\nEvaluating pycocotools mAP... saving {pred_json}...\")\n        with open(pred_json, \"w\") as f:\n            json.dump(jdict, f)\n\n        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb\n            from pycocotools.coco import COCO\n            from pycocotools.cocoeval import COCOeval\n\n            anno = COCO(anno_json)  # init annotations api\n            pred = anno.loadRes(pred_json)  # init predictions api\n            results = []\n            for eval in COCOeval(anno, pred, \"bbox\"), COCOeval(anno, pred, \"segm\"):\n                if is_coco:\n                    eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # img ID to evaluate\n                eval.evaluate()\n                eval.accumulate()\n                eval.summarize()\n                results.extend(eval.stats[:2])  # update results (mAP@0.5:0.95, mAP@0.5)\n            map_bbox, map50_bbox, map_mask, map50_mask = results\n        except Exception as e:\n            LOGGER.info(f\"pycocotools unable to run: {e}\")\n\n    # Return results\n    model.float()  # for training\n    if not training:\n        s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else \"\"\n        LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n    final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask\n    return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t\n\n\ndef parse_opt():\n    \"\"\"Parses command line arguments for configuring YOLOv5 options like dataset path, weights, batch size, and\n    inference settings.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128-seg.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s-seg.pt\", help=\"model path(s)\")\n    parser.add_argument(\"--batch-size\", type=int, default=32, help=\"batch size\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=640, help=\"inference size (pixels)\")\n    parser.add_argument(\"--conf-thres\", type=float, default=0.001, help=\"confidence threshold\")\n    parser.add_argument(\"--iou-thres\", type=float, default=0.6, help=\"NMS IoU threshold\")\n    parser.add_argument(\"--max-det\", type=int, default=300, help=\"maximum detections per image\")\n    parser.add_argument(\"--task\", default=\"val\", 
help=\"train, val, test, speed or study\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--single-cls\", action=\"store_true\", help=\"treat as single-class dataset\")\n    parser.add_argument(\"--augment\", action=\"store_true\", help=\"augmented inference\")\n    parser.add_argument(\"--verbose\", action=\"store_true\", help=\"report mAP by class\")\n    parser.add_argument(\"--save-txt\", action=\"store_true\", help=\"save results to *.txt\")\n    parser.add_argument(\"--save-hybrid\", action=\"store_true\", help=\"save label+prediction hybrid results to *.txt\")\n    parser.add_argument(\"--save-conf\", action=\"store_true\", help=\"save confidences in --save-txt labels\")\n    parser.add_argument(\"--save-json\", action=\"store_true\", help=\"save a COCO-JSON results file\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/val-seg\", help=\"save results to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--dnn\", action=\"store_true\", help=\"use OpenCV DNN for ONNX inference\")\n    opt = parser.parse_args()\n    opt.data = check_yaml(opt.data)  # check YAML\n    # opt.save_json |= opt.data.endswith('coco.yaml')\n    opt.save_txt |= opt.save_hybrid\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 tasks including training, validation, testing, speed, and study with configurable options.\"\"\"\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"tensorboard\", \"thop\"))\n\n    if opt.task in (\"train\", \"val\", \"test\"):  # run normally\n        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466\n            LOGGER.warning(f\"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results\")\n        if opt.save_hybrid:\n            LOGGER.warning(\"WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone\")\n        run(**vars(opt))\n\n    else:\n        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]\n        opt.half = torch.cuda.is_available() and opt.device != \"cpu\"  # FP16 for fastest results\n        if opt.task == \"speed\":  # speed benchmarks\n            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...\n            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False\n            for opt.weights in weights:\n                run(**vars(opt), plots=False)\n\n        elif opt.task == \"study\":  # speed vs mAP benchmarks\n            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...\n            for opt.weights in weights:\n                f = f\"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt\"  # filename to save to\n                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis\n                for opt.imgsz in x:  # img-size\n                    LOGGER.info(f\"\\nRunning {f} --imgsz {opt.imgsz}...\")\n                    r, _, t = run(**vars(opt), plots=False)\n                    
y.append(r + t)  # results and times\n                np.savetxt(f, y, fmt=\"%10.4g\")  # save\n            subprocess.run([\"zip\", \"-r\", \"study.zip\", \"study_*.txt\"])\n            plot_val_study(x=x)  # plot\n        else:\n            raise NotImplementedError(f'--task {opt.task} not in (\"train\", \"val\", \"test\", \"speed\", \"study\")')\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "train.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTrain a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release.\n\nUsage - Single-GPU training:\n    $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)\n    $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch\n\nUsage - Multi-GPU DDP training:\n    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3\n\nModels:     https://github.com/ultralytics/yolov5/tree/master/models\nDatasets:   https://github.com/ultralytics/yolov5/tree/master/data\nTutorial:   https://docs.ultralytics.com/yolov5/tutorials/train_custom_data\n\"\"\"\n\nimport argparse\nimport math\nimport os\nimport random\nimport subprocess\nimport sys\nimport time\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\ntry:\n    import comet_ml  # must be imported before torch (if installed)\nexcept ImportError:\n    comet_ml = None\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport yaml\nfrom torch.optim import lr_scheduler\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom ultralytics.utils.patches import torch_load\n\nimport val as validate  # for end-of-epoch mAP\nfrom models.experimental import attempt_load\nfrom models.yolo import Model\nfrom utils.autoanchor import check_anchors\nfrom utils.autobatch import check_train_batch_size\nfrom utils.callbacks import Callbacks\nfrom utils.dataloaders import create_dataloader\nfrom utils.downloads import attempt_download, is_url\nfrom utils.general import (\n    LOGGER,\n    TQDM_BAR_FORMAT,\n    check_amp,\n    check_dataset,\n    check_file,\n    check_git_info,\n    check_git_status,\n    check_img_size,\n    check_requirements,\n    check_suffix,\n    check_yaml,\n    colorstr,\n    get_latest_run,\n    increment_path,\n    init_seeds,\n    intersect_dicts,\n    labels_to_class_weights,\n    labels_to_image_weights,\n    methods,\n    one_cycle,\n    print_args,\n    print_mutation,\n    strip_optimizer,\n    yaml_save,\n)\nfrom utils.loggers import LOGGERS, Loggers\nfrom utils.loggers.comet.comet_utils import check_comet_resume\nfrom utils.loss import ComputeLoss\nfrom utils.metrics import fitness\nfrom utils.plots import plot_evolve\nfrom utils.torch_utils import (\n    EarlyStopping,\n    ModelEMA,\n    de_parallel,\n    select_device,\n    smart_DDP,\n    smart_optimizer,\n    smart_resume,\n    torch_distributed_zero_first,\n)\n\nLOCAL_RANK = int(os.getenv(\"LOCAL_RANK\", -1))  # https://pytorch.org/docs/stable/elastic/run.html\nRANK = int(os.getenv(\"RANK\", -1))\nWORLD_SIZE = int(os.getenv(\"WORLD_SIZE\", 1))\nGIT_INFO = check_git_info()\n\n\ndef train(hyp, opt, device, callbacks):\n    \"\"\"Train a YOLOv5 model on a custom dataset using specified hyperparameters, options, and device, managing datasets,\n    model architecture, loss computation, and optimizer steps.\n\n    Args:\n        hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.\n        opt (argparse.Namespace): Parsed command-line arguments containing 
training options.\n        device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.\n        callbacks (Callbacks): Callback functions for various training events.\n\n    Returns:\n        None\n\n    Examples:\n        Single-GPU training:\n        ```bash\n        $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)\n        $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch\n        ```\n\n        Multi-GPU DDP training:\n        ```bash\n        $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights\n        yolov5s.pt --img 640 --device 0,1,2,3\n        ```\n\n        For more usage details, refer to:\n        - Models: https://github.com/ultralytics/yolov5/tree/master/models\n        - Datasets: https://github.com/ultralytics/yolov5/tree/master/data\n        - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data\n\n    Notes:\n        Models and datasets download automatically from the latest YOLOv5 release.\n    \"\"\"\n    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (\n        Path(opt.save_dir),\n        opt.epochs,\n        opt.batch_size,\n        opt.weights,\n        opt.single_cls,\n        opt.evolve,\n        opt.data,\n        opt.cfg,\n        opt.resume,\n        opt.noval,\n        opt.nosave,\n        opt.workers,\n        opt.freeze,\n    )\n    callbacks.run(\"on_pretrain_routine_start\")\n\n    # Directories\n    w = save_dir / \"weights\"  # weights dir\n    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir\n    last, best = w / \"last.pt\", w / \"best.pt\"\n\n    # Hyperparameters\n    if isinstance(hyp, str):\n        with open(hyp, errors=\"ignore\") as f:\n            hyp = yaml.safe_load(f)  # load hyps dict\n    LOGGER.info(colorstr(\"hyperparameters: \") + \", \".join(f\"{k}={v}\" for k, v in hyp.items()))\n    opt.hyp = hyp.copy()  # for saving hyps to checkpoints\n\n    # Save run settings\n    if not evolve:\n        yaml_save(save_dir / \"hyp.yaml\", hyp)\n        yaml_save(save_dir / \"opt.yaml\", vars(opt))\n\n    # Loggers\n    data_dict = None\n    if RANK in {-1, 0}:\n        include_loggers = list(LOGGERS)\n        if getattr(opt, \"ndjson_console\", False):\n            include_loggers.append(\"ndjson_console\")\n        if getattr(opt, \"ndjson_file\", False):\n            include_loggers.append(\"ndjson_file\")\n\n        loggers = Loggers(\n            save_dir=save_dir,\n            weights=weights,\n            opt=opt,\n            hyp=hyp,\n            logger=LOGGER,\n            include=tuple(include_loggers),\n        )\n\n        # Register actions\n        for k in methods(loggers):\n            callbacks.register_action(k, callback=getattr(loggers, k))\n\n        # Process custom dataset artifact link\n        data_dict = loggers.remote_dataset\n        if resume:  # If resuming runs from remote artifact\n            weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size\n\n    # Config\n    plots = not evolve and not opt.noplots  # create plots\n    cuda = device.type != \"cpu\"\n    init_seeds(opt.seed + 1 + RANK, deterministic=True)\n    with torch_distributed_zero_first(LOCAL_RANK):\n        data_dict = data_dict or check_dataset(data)  # check if None\n    train_path, val_path = data_dict[\"train\"], data_dict[\"val\"]\n    
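# derive class count and names from the dataset dict; --single-cls collapses everything to a single class\n    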
nc = 1 if single_cls else int(data_dict[\"nc\"])  # number of classes\n    names = {0: \"item\"} if single_cls and len(data_dict[\"names\"]) != 1 else data_dict[\"names\"]  # class names\n    is_coco = isinstance(val_path, str) and val_path.endswith(\"coco/val2017.txt\")  # COCO dataset\n\n    # Model\n    check_suffix(weights, \".pt\")  # check weights\n    pretrained = weights.endswith(\".pt\")\n    if pretrained:\n        with torch_distributed_zero_first(LOCAL_RANK):\n            weights = attempt_download(weights)  # download if not found locally\n        ckpt = torch_load(weights, map_location=\"cpu\")  # load checkpoint to CPU to avoid CUDA memory leak\n        model = Model(cfg or ckpt[\"model\"].yaml, ch=3, nc=nc, anchors=hyp.get(\"anchors\")).to(device)  # create\n        exclude = [\"anchor\"] if (cfg or hyp.get(\"anchors\")) and not resume else []  # exclude keys\n        csd = ckpt[\"model\"].float().state_dict()  # checkpoint state_dict as FP32\n        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect\n        model.load_state_dict(csd, strict=False)  # load\n        LOGGER.info(f\"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}\")  # report\n    else:\n        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get(\"anchors\")).to(device)  # create\n    amp = check_amp(model)  # check AMP\n\n    # Freeze\n    freeze = [f\"model.{x}.\" for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze\n    for k, v in model.named_parameters():\n        v.requires_grad = True  # train all layers\n        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)\n        if any(x in k for x in freeze):\n            LOGGER.info(f\"freezing {k}\")\n            v.requires_grad = False\n\n    # Image size\n    gs = max(int(model.stride.max()), 32)  # grid size (max stride)\n    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple\n\n    # Batch size\n    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size\n        batch_size = check_train_batch_size(model, imgsz, amp)\n        loggers.on_params_update({\"batch_size\": batch_size})\n\n    # Optimizer\n    nbs = 64  # nominal batch size\n    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing\n    hyp[\"weight_decay\"] *= batch_size * accumulate / nbs  # scale weight_decay\n    optimizer = smart_optimizer(model, opt.optimizer, hyp[\"lr0\"], hyp[\"momentum\"], hyp[\"weight_decay\"])\n\n    # Scheduler\n    if opt.cos_lr:\n        lf = one_cycle(1, hyp[\"lrf\"], epochs)  # cosine 1->hyp['lrf']\n    else:\n\n        def lf(x):\n            \"\"\"Linear learning rate scheduler function with decay calculated by epoch proportion.\"\"\"\n            return (1 - x / epochs) * (1.0 - hyp[\"lrf\"]) + hyp[\"lrf\"]  # linear\n\n    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)\n\n    # EMA\n    ema = ModelEMA(model) if RANK in {-1, 0} else None\n\n    # Resume\n    best_fitness, start_epoch = 0.0, 0\n    if pretrained:\n        if resume:\n            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)\n        del ckpt, csd\n\n    # DP mode\n    if cuda and RANK == -1 and torch.cuda.device_count() > 1:\n        LOGGER.warning(\n            \"WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\\n\"\n            \"See 
Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.\"\n        )\n        model = torch.nn.DataParallel(model)\n\n    # SyncBatchNorm\n    if opt.sync_bn and cuda and RANK != -1:\n        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)\n        LOGGER.info(\"Using SyncBatchNorm()\")\n\n    # Trainloader\n    train_loader, dataset = create_dataloader(\n        train_path,\n        imgsz,\n        batch_size // WORLD_SIZE,\n        gs,\n        single_cls,\n        hyp=hyp,\n        augment=True,\n        cache=None if opt.cache == \"val\" else opt.cache,\n        rect=opt.rect,\n        rank=LOCAL_RANK,\n        workers=workers,\n        image_weights=opt.image_weights,\n        quad=opt.quad,\n        prefix=colorstr(\"train: \"),\n        shuffle=True,\n        seed=opt.seed,\n    )\n    labels = np.concatenate(dataset.labels, 0)\n    mlc = int(labels[:, 0].max())  # max label class\n    assert mlc < nc, f\"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}\"\n\n    # Process 0\n    if RANK in {-1, 0}:\n        val_loader = create_dataloader(\n            val_path,\n            imgsz,\n            batch_size // WORLD_SIZE * 2,\n            gs,\n            single_cls,\n            hyp=hyp,\n            cache=None if noval else opt.cache,\n            rect=True,\n            rank=-1,\n            workers=workers * 2,\n            pad=0.5,\n            prefix=colorstr(\"val: \"),\n        )[0]\n\n        if not resume:\n            if not opt.noautoanchor:\n                check_anchors(dataset, model=model, thr=hyp[\"anchor_t\"], imgsz=imgsz)  # run AutoAnchor\n            model.half().float()  # pre-reduce anchor precision\n\n        callbacks.run(\"on_pretrain_routine_end\", labels, names)\n\n    # DDP mode\n    if cuda and RANK != -1:\n        model = smart_DDP(model)\n\n    # Model attributes\n    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)\n    hyp[\"box\"] *= 3 / nl  # scale to layers\n    hyp[\"cls\"] *= nc / 80 * 3 / nl  # scale to classes and layers\n    hyp[\"obj\"] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers\n    hyp[\"label_smoothing\"] = opt.label_smoothing\n    model.nc = nc  # attach number of classes to model\n    model.hyp = hyp  # attach hyperparameters to model\n    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights\n    model.names = names\n\n    # Start training\n    t0 = time.time()\n    nb = len(train_loader)  # number of batches\n    nw = max(round(hyp[\"warmup_epochs\"] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)\n    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training\n    last_opt_step = -1\n    maps = np.zeros(nc)  # mAP per class\n    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)\n    scheduler.last_epoch = start_epoch - 1  # do not move\n    scaler = torch.cuda.amp.GradScaler(enabled=amp)\n    stopper, stop = EarlyStopping(patience=opt.patience), False\n    compute_loss = ComputeLoss(model)  # init loss class\n    callbacks.run(\"on_train_start\")\n    LOGGER.info(\n        f\"Image sizes {imgsz} train, {imgsz} val\\n\"\n        f\"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\\n\"\n        f\"Logging results to {colorstr('bold', save_dir)}\\n\"\n        f\"Starting training for {epochs} epochs...\"\n    )\n    for epoch in 
range(start_epoch, epochs):  # epoch ------------------------------------------------------------------\n        callbacks.run(\"on_train_epoch_start\")\n        model.train()\n\n        # Update image weights (optional, single-GPU only)\n        if opt.image_weights:\n            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights\n            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights\n            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx\n\n        # Update mosaic border (optional)\n        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)\n        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders\n\n        mloss = torch.zeros(3, device=device)  # mean losses\n        if RANK != -1:\n            train_loader.sampler.set_epoch(epoch)\n        pbar = enumerate(train_loader)\n        LOGGER.info((\"\\n\" + \"%11s\" * 7) % (\"Epoch\", \"GPU_mem\", \"box_loss\", \"obj_loss\", \"cls_loss\", \"Instances\", \"Size\"))\n        if RANK in {-1, 0}:\n            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar\n        optimizer.zero_grad()\n        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------\n            callbacks.run(\"on_train_batch_start\")\n            ni = i + nb * epoch  # number integrated batches (since train start)\n            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0\n\n            # Warmup\n            if ni <= nw:\n                xi = [0, nw]  # x interp\n                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)\n                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())\n                for j, x in enumerate(optimizer.param_groups):\n                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0\n                    x[\"lr\"] = np.interp(ni, xi, [hyp[\"warmup_bias_lr\"] if j == 0 else 0.0, x[\"initial_lr\"] * lf(epoch)])\n                    if \"momentum\" in x:\n                        x[\"momentum\"] = np.interp(ni, xi, [hyp[\"warmup_momentum\"], hyp[\"momentum\"]])\n\n            # Multi-scale\n            if opt.multi_scale:\n                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size\n                sf = sz / max(imgs.shape[2:])  # scale factor\n                if sf != 1:\n                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)\n                    imgs = nn.functional.interpolate(imgs, size=ns, mode=\"bilinear\", align_corners=False)\n\n            # Forward\n            with torch.cuda.amp.autocast(amp):\n                pred = model(imgs)  # forward\n                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size\n                if RANK != -1:\n                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode\n                if opt.quad:\n                    loss *= 4.0\n\n            # Backward\n            scaler.scale(loss).backward()\n\n            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html\n            if ni - last_opt_step >= accumulate:\n                scaler.unscale_(optimizer)  # unscale gradients\n                torch.nn.utils.clip_grad_norm_(model.parameters(), 
max_norm=10.0)  # clip gradients\n                scaler.step(optimizer)  # optimizer.step\n                scaler.update()\n                optimizer.zero_grad()\n                if ema:\n                    ema.update(model)\n                last_opt_step = ni\n\n            # Log\n            if RANK in {-1, 0}:\n                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses\n                mem = f\"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G\"  # (GB)\n                pbar.set_description(\n                    (\"%11s\" * 2 + \"%11.4g\" * 5)\n                    % (f\"{epoch}/{epochs - 1}\", mem, *mloss, targets.shape[0], imgs.shape[-1])\n                )\n                callbacks.run(\"on_train_batch_end\", model, ni, imgs, targets, paths, list(mloss))\n                if callbacks.stop_training:\n                    return\n            # end batch ------------------------------------------------------------------------------------------------\n\n        # Scheduler\n        lr = [x[\"lr\"] for x in optimizer.param_groups]  # for loggers\n        scheduler.step()\n\n        if RANK in {-1, 0}:\n            # mAP\n            callbacks.run(\"on_train_epoch_end\", epoch=epoch)\n            ema.update_attr(model, include=[\"yaml\", \"nc\", \"hyp\", \"names\", \"stride\", \"class_weights\"])\n            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop\n            if not noval or final_epoch:  # Calculate mAP\n                results, maps, _ = validate.run(\n                    data_dict,\n                    batch_size=batch_size // WORLD_SIZE * 2,\n                    imgsz=imgsz,\n                    half=amp,\n                    model=ema.ema,\n                    single_cls=single_cls,\n                    dataloader=val_loader,\n                    save_dir=save_dir,\n                    plots=False,\n                    callbacks=callbacks,\n                    compute_loss=compute_loss,\n                )\n\n            # Update best mAP\n            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]\n            stop = stopper(epoch=epoch, fitness=fi)  # early stop check\n            if fi > best_fitness:\n                best_fitness = fi\n            log_vals = list(mloss) + list(results) + lr\n            callbacks.run(\"on_fit_epoch_end\", log_vals, epoch, best_fitness, fi)\n\n            # Save model\n            if (not nosave) or (final_epoch and not evolve):  # if save\n                ckpt = {\n                    \"epoch\": epoch,\n                    \"best_fitness\": best_fitness,\n                    \"model\": deepcopy(de_parallel(model)).half(),\n                    \"ema\": deepcopy(ema.ema).half(),\n                    \"updates\": ema.updates,\n                    \"optimizer\": optimizer.state_dict(),\n                    \"opt\": vars(opt),\n                    \"git\": GIT_INFO,  # {remote, branch, commit} if a git repo\n                    \"date\": datetime.now().isoformat(),\n                }\n\n                # Save last, best and delete\n                torch.save(ckpt, last)\n                if best_fitness == fi:\n                    torch.save(ckpt, best)\n                if opt.save_period > 0 and epoch % opt.save_period == 0:\n                    torch.save(ckpt, w / f\"epoch{epoch}.pt\")\n                del ckpt\n                callbacks.run(\"on_model_save\", last, epoch, final_epoch, best_fitness, fi)\n\n        # 
EarlyStopping\n        if RANK != -1:  # if DDP training\n            broadcast_list = [stop if RANK == 0 else None]\n            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks\n            if RANK != 0:\n                stop = broadcast_list[0]\n        if stop:\n            break  # must break all DDP ranks\n\n        # end epoch ----------------------------------------------------------------------------------------------------\n    # end training -----------------------------------------------------------------------------------------------------\n    if RANK in {-1, 0}:\n        LOGGER.info(f\"\\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\")\n        for f in last, best:\n            if f.exists():\n                strip_optimizer(f)  # strip optimizers\n                if f is best:\n                    LOGGER.info(f\"\\nValidating {f}...\")\n                    results, _, _ = validate.run(\n                        data_dict,\n                        batch_size=batch_size // WORLD_SIZE * 2,\n                        imgsz=imgsz,\n                        model=attempt_load(f, device).half(),\n                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools at iou 0.65\n                        single_cls=single_cls,\n                        dataloader=val_loader,\n                        save_dir=save_dir,\n                        save_json=is_coco,\n                        verbose=True,\n                        plots=plots,\n                        callbacks=callbacks,\n                        compute_loss=compute_loss,\n                    )  # val best model with plots\n                    if is_coco:\n                        callbacks.run(\"on_fit_epoch_end\", list(mloss) + list(results) + lr, epoch, best_fitness, fi)\n\n        callbacks.run(\"on_train_end\", last, best, epoch, results)\n\n    torch.cuda.empty_cache()\n    return results\n\n\ndef parse_opt(known=False):\n    \"\"\"Parse command-line arguments for YOLOv5 training, validation, and testing.\n\n    Args:\n        known (bool, optional): If True, parses known arguments, ignoring the unknown. 
Defaults to False.\n\n    Returns:\n        (argparse.Namespace): Parsed command-line arguments containing options for YOLOv5 execution.\n\n    Examples:\n        ```python\n        from train import parse_opt\n        opt = parse_opt()\n        print(opt)\n        ```\n\n    Links:\n        - Models: https://github.com/ultralytics/yolov5/tree/master/models\n        - Datasets: https://github.com/ultralytics/yolov5/tree/master/data\n        - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", type=str, default=ROOT / \"yolov5s.pt\", help=\"initial weights path\")\n    parser.add_argument(\"--cfg\", type=str, default=\"\", help=\"model.yaml path\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--hyp\", type=str, default=ROOT / \"data/hyps/hyp.scratch-low.yaml\", help=\"hyperparameters path\")\n    parser.add_argument(\"--epochs\", type=int, default=100, help=\"total training epochs\")\n    parser.add_argument(\"--batch-size\", type=int, default=16, help=\"total batch size for all GPUs, -1 for autobatch\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=640, help=\"train, val image size (pixels)\")\n    parser.add_argument(\"--rect\", action=\"store_true\", help=\"rectangular training\")\n    parser.add_argument(\"--resume\", nargs=\"?\", const=True, default=False, help=\"resume most recent training\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"only save final checkpoint\")\n    parser.add_argument(\"--noval\", action=\"store_true\", help=\"only validate final epoch\")\n    parser.add_argument(\"--noautoanchor\", action=\"store_true\", help=\"disable AutoAnchor\")\n    parser.add_argument(\"--noplots\", action=\"store_true\", help=\"save no plot files\")\n    parser.add_argument(\"--evolve\", type=int, nargs=\"?\", const=300, help=\"evolve hyperparameters for x generations\")\n    parser.add_argument(\n        \"--evolve_population\", type=str, default=ROOT / \"data/hyps\", help=\"location for loading population\"\n    )\n    parser.add_argument(\"--resume_evolve\", type=str, default=None, help=\"resume evolve from last generation\")\n    parser.add_argument(\"--bucket\", type=str, default=\"\", help=\"gsutil bucket\")\n    parser.add_argument(\"--cache\", type=str, nargs=\"?\", const=\"ram\", help=\"image --cache ram/disk\")\n    parser.add_argument(\"--image-weights\", action=\"store_true\", help=\"use weighted image selection for training\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 
0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--multi-scale\", action=\"store_true\", help=\"vary img-size +/- 50%%\")\n    parser.add_argument(\"--single-cls\", action=\"store_true\", help=\"train multi-class data as single-class\")\n    parser.add_argument(\"--optimizer\", type=str, choices=[\"SGD\", \"Adam\", \"AdamW\"], default=\"SGD\", help=\"optimizer\")\n    parser.add_argument(\"--sync-bn\", action=\"store_true\", help=\"use SyncBatchNorm, only available in DDP mode\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/train\", help=\"save to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--quad\", action=\"store_true\", help=\"quad dataloader\")\n    parser.add_argument(\"--cos-lr\", action=\"store_true\", help=\"cosine LR scheduler\")\n    parser.add_argument(\"--label-smoothing\", type=float, default=0.0, help=\"Label smoothing epsilon\")\n    parser.add_argument(\"--patience\", type=int, default=100, help=\"EarlyStopping patience (epochs without improvement)\")\n    parser.add_argument(\"--freeze\", nargs=\"+\", type=int, default=[0], help=\"Freeze layers: backbone=10, first3=0 1 2\")\n    parser.add_argument(\"--save-period\", type=int, default=-1, help=\"Save checkpoint every x epochs (disabled if < 1)\")\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"Global training seed\")\n    parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"Automatic DDP Multi-GPU argument, do not modify\")\n\n    # Logger arguments\n    parser.add_argument(\"--entity\", default=None, help=\"Entity\")\n    parser.add_argument(\"--upload_dataset\", nargs=\"?\", const=True, default=False, help='Upload data, \"val\" option')\n    parser.add_argument(\"--bbox_interval\", type=int, default=-1, help=\"Set bounding-box image logging interval\")\n    parser.add_argument(\"--artifact_alias\", type=str, default=\"latest\", help=\"Version of dataset artifact to use\")\n\n    # NDJSON logging\n    parser.add_argument(\"--ndjson-console\", action=\"store_true\", help=\"Log ndjson to console\")\n    parser.add_argument(\"--ndjson-file\", action=\"store_true\", help=\"Log ndjson to file\")\n\n    return parser.parse_known_args()[0] if known else parser.parse_args()\n\n\ndef main(opt, callbacks=Callbacks()):\n    \"\"\"Runs the main entry point for training or hyperparameter evolution with specified options and optional callbacks.\n\n    Args:\n        opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution.\n        callbacks (ultralytics.utils.callbacks.Callbacks, optional): Callback functions for various training stages.\n            Defaults to Callbacks().\n\n    Returns:\n        None\n\n    Notes:\n        For detailed usage, refer to:\n        https://github.com/ultralytics/yolov5/tree/master/models\n    \"\"\"\n    if RANK in {-1, 0}:\n        print_args(vars(opt))\n        check_git_status()\n        check_requirements(ROOT / \"requirements.txt\")\n\n    # Resume (from specified or most recent last.pt)\n    if opt.resume and not check_comet_resume(opt) and not opt.evolve:\n        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())\n        opt_yaml = last.parent.parent / 
\"opt.yaml\"  # train options yaml\n        opt_data = opt.data  # original dataset\n        if opt_yaml.is_file():\n            with open(opt_yaml, errors=\"ignore\") as f:\n                d = yaml.safe_load(f)\n        else:\n            d = torch_load(last, map_location=\"cpu\")[\"opt\"]\n        opt = argparse.Namespace(**d)  # replace\n        opt.cfg, opt.weights, opt.resume = \"\", str(last), True  # reinstate\n        if is_url(opt_data):\n            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout\n    else:\n        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (\n            check_file(opt.data),\n            check_yaml(opt.cfg),\n            check_yaml(opt.hyp),\n            str(opt.weights),\n            str(opt.project),\n        )  # checks\n        assert len(opt.cfg) or len(opt.weights), \"either --cfg or --weights must be specified\"\n        if opt.evolve:\n            if opt.project == str(ROOT / \"runs/train\"):  # if default project name, rename to runs/evolve\n                opt.project = str(ROOT / \"runs/evolve\")\n            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume\n        if opt.name == \"cfg\":\n            opt.name = Path(opt.cfg).stem  # use model.yaml as name\n        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))\n\n    # DDP mode\n    device = select_device(opt.device, batch_size=opt.batch_size)\n    if LOCAL_RANK != -1:\n        msg = \"is not compatible with YOLOv5 Multi-GPU DDP training\"\n        assert not opt.image_weights, f\"--image-weights {msg}\"\n        assert not opt.evolve, f\"--evolve {msg}\"\n        assert opt.batch_size != -1, f\"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size\"\n        assert opt.batch_size % WORLD_SIZE == 0, f\"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE\"\n        assert torch.cuda.device_count() > LOCAL_RANK, \"insufficient CUDA devices for DDP command\"\n        torch.cuda.set_device(LOCAL_RANK)\n        device = torch.device(\"cuda\", LOCAL_RANK)\n        dist.init_process_group(\n            backend=\"nccl\" if dist.is_nccl_available() else \"gloo\", timeout=timedelta(seconds=10800)\n        )\n\n    # Train\n    if not opt.evolve:\n        train(opt.hyp, opt, device, callbacks)\n\n    # Evolve hyperparameters (optional)\n    else:\n        # Hyperparameter evolution metadata (including this hyperparameter True-False, lower_limit, upper_limit)\n        meta = {\n            \"lr0\": (False, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)\n            \"lrf\": (False, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)\n            \"momentum\": (False, 0.6, 0.98),  # SGD momentum/Adam beta1\n            \"weight_decay\": (False, 0.0, 0.001),  # optimizer weight decay\n            \"warmup_epochs\": (False, 0.0, 5.0),  # warmup epochs (fractions ok)\n            \"warmup_momentum\": (False, 0.0, 0.95),  # warmup initial momentum\n            \"warmup_bias_lr\": (False, 0.0, 0.2),  # warmup initial bias lr\n            \"box\": (False, 0.02, 0.2),  # box loss gain\n            \"cls\": (False, 0.2, 4.0),  # cls loss gain\n            \"cls_pw\": (False, 0.5, 2.0),  # cls BCELoss positive_weight\n            \"obj\": (False, 0.2, 4.0),  # obj loss gain (scale with pixels)\n            \"obj_pw\": (False, 0.5, 2.0),  # obj BCELoss positive_weight\n            \"iou_t\": (False, 0.1, 0.7),  # IoU training threshold\n            \"anchor_t\": 
(False, 2.0, 8.0),  # anchor-multiple threshold\n            \"anchors\": (False, 2.0, 10.0),  # anchors per output grid (0 to ignore)\n            \"fl_gamma\": (False, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)\n            \"hsv_h\": (True, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)\n            \"hsv_s\": (True, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)\n            \"hsv_v\": (True, 0.0, 0.9),  # image HSV-Value augmentation (fraction)\n            \"degrees\": (True, 0.0, 45.0),  # image rotation (+/- deg)\n            \"translate\": (True, 0.0, 0.9),  # image translation (+/- fraction)\n            \"scale\": (True, 0.0, 0.9),  # image scale (+/- gain)\n            \"shear\": (True, 0.0, 10.0),  # image shear (+/- deg)\n            \"perspective\": (True, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001\n            \"flipud\": (True, 0.0, 1.0),  # image flip up-down (probability)\n            \"fliplr\": (True, 0.0, 1.0),  # image flip left-right (probability)\n            \"mosaic\": (True, 0.0, 1.0),  # image mosaic (probability)\n            \"mixup\": (True, 0.0, 1.0),  # image mixup (probability)\n            \"copy_paste\": (True, 0.0, 1.0),  # segment copy-paste (probability)\n        }\n\n        # GA configs\n        pop_size = 50\n        mutation_rate_min = 0.01\n        mutation_rate_max = 0.5\n        crossover_rate_min = 0.5\n        crossover_rate_max = 1\n        min_elite_size = 2\n        max_elite_size = 5\n        tournament_size_min = 2\n        tournament_size_max = 10\n\n        with open(opt.hyp, errors=\"ignore\") as f:\n            hyp = yaml.safe_load(f)  # load hyps dict\n            if \"anchors\" not in hyp:  # anchors commented in hyp.yaml\n                hyp[\"anchors\"] = 3\n        if opt.noautoanchor:\n            del hyp[\"anchors\"], meta[\"anchors\"]\n        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch\n        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices\n        evolve_yaml, evolve_csv = save_dir / \"hyp_evolve.yaml\", save_dir / \"evolve.csv\"\n        if opt.bucket:\n            # download evolve.csv if exists\n            subprocess.run(\n                [\n                    \"gsutil\",\n                    \"cp\",\n                    f\"gs://{opt.bucket}/evolve.csv\",\n                    str(evolve_csv),\n                ]\n            )\n\n        # Delete the items in meta dictionary whose first value is False\n        del_ = [item for item, value_ in meta.items() if value_[0] is False]\n        hyp_GA = hyp.copy()  # Make a copy of hyp dictionary\n        for item in del_:\n            del meta[item]  # Remove the item from meta dictionary\n            del hyp_GA[item]  # Remove the item from hyp_GA dictionary\n\n        # Set lower_limit and upper_limit arrays to hold the search space boundaries\n        lower_limit = np.array([meta[k][1] for k in hyp_GA.keys()])\n        upper_limit = np.array([meta[k][2] for k in hyp_GA.keys()])\n\n        # Create gene_ranges list to hold the range of values for each gene in the population\n        gene_ranges = [(lower_limit[i], upper_limit[i]) for i in range(len(upper_limit))]\n\n        # Initialize the population with initial_values or random values\n        initial_values = []\n\n        # If resuming evolution from a previous checkpoint\n        if opt.resume_evolve is not None:\n            assert os.path.isfile(ROOT / opt.resume_evolve), 
\"evolve population path is wrong!\"\n            with open(ROOT / opt.resume_evolve, errors=\"ignore\") as f:\n                evolve_population = yaml.safe_load(f)\n                for value in evolve_population.values():\n                    value = np.array([value[k] for k in hyp_GA.keys()])\n                    initial_values.append(list(value))\n\n        # If not resuming from a previous checkpoint, generate initial values from .yaml files in opt.evolve_population\n        else:\n            yaml_files = [f for f in os.listdir(opt.evolve_population) if f.endswith(\".yaml\")]\n            for file_name in yaml_files:\n                with open(os.path.join(opt.evolve_population, file_name)) as yaml_file:\n                    value = yaml.safe_load(yaml_file)\n                    value = np.array([value[k] for k in hyp_GA.keys()])\n                    initial_values.append(list(value))\n\n        # Generate random values within the search space for the rest of the population\n        if initial_values is None:\n            population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size)]\n        elif pop_size > 1:\n            population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size - len(initial_values))]\n            for initial_value in initial_values:\n                population = [initial_value, *population]\n\n        # Run the genetic algorithm for a fixed number of generations\n        list_keys = list(hyp_GA.keys())\n        for generation in range(opt.evolve):\n            if generation >= 1:\n                save_dict = {}\n                for i in range(len(population)):\n                    little_dict = {list_keys[j]: float(population[i][j]) for j in range(len(population[i]))}\n                    save_dict[f\"gen{generation!s}number{i!s}\"] = little_dict\n\n                with open(save_dir / \"evolve_population.yaml\", \"w\") as outfile:\n                    yaml.dump(save_dict, outfile, default_flow_style=False)\n\n            # Adaptive elite size\n            elite_size = min_elite_size + int((max_elite_size - min_elite_size) * (generation / opt.evolve))\n            # Evaluate the fitness of each individual in the population\n            fitness_scores = []\n            for individual in population:\n                for key, value in zip(hyp_GA.keys(), individual):\n                    hyp_GA[key] = value\n                hyp.update(hyp_GA)\n                results = train(hyp.copy(), opt, device, callbacks)\n                callbacks = Callbacks()\n                # Write mutation results\n                keys = (\n                    \"metrics/precision\",\n                    \"metrics/recall\",\n                    \"metrics/mAP_0.5\",\n                    \"metrics/mAP_0.5:0.95\",\n                    \"val/box_loss\",\n                    \"val/obj_loss\",\n                    \"val/cls_loss\",\n                )\n                print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)\n                fitness_scores.append(results[2])\n\n            # Select the fittest individuals for reproduction using adaptive tournament selection\n            selected_indices = []\n            for _ in range(pop_size - elite_size):\n                # Adaptive tournament size\n                tournament_size = max(\n                    max(2, tournament_size_min),\n                    int(min(tournament_size_max, pop_size) - (generation / (opt.evolve / 10))),\n                )\n                # Perform tournament 
selection to choose the best individual\n                tournament_indices = random.sample(range(pop_size), tournament_size)\n                tournament_fitness = [fitness_scores[j] for j in tournament_indices]\n                winner_index = tournament_indices[tournament_fitness.index(max(tournament_fitness))]\n                selected_indices.append(winner_index)\n\n            # Add the elite individuals to the selected indices\n            elite_indices = [i for i in range(pop_size) if fitness_scores[i] in sorted(fitness_scores)[-elite_size:]]\n            selected_indices.extend(elite_indices)\n            # Create the next generation through crossover and mutation\n            next_generation = []\n            for _ in range(pop_size):\n                parent1_index = selected_indices[random.randint(0, pop_size - 1)]\n                parent2_index = selected_indices[random.randint(0, pop_size - 1)]\n                # Adaptive crossover rate\n                crossover_rate = max(\n                    crossover_rate_min, min(crossover_rate_max, crossover_rate_max - (generation / opt.evolve))\n                )\n                if random.uniform(0, 1) < crossover_rate:\n                    crossover_point = random.randint(1, len(hyp_GA) - 1)\n                    child = population[parent1_index][:crossover_point] + population[parent2_index][crossover_point:]\n                else:\n                    child = population[parent1_index]\n                # Adaptive mutation rate\n                mutation_rate = max(\n                    mutation_rate_min, min(mutation_rate_max, mutation_rate_max - (generation / opt.evolve))\n                )\n                for j in range(len(hyp_GA)):\n                    if random.uniform(0, 1) < mutation_rate:\n                        child[j] += random.uniform(-0.1, 0.1)\n                        child[j] = min(max(child[j], gene_ranges[j][0]), gene_ranges[j][1])\n                next_generation.append(child)\n            # Replace the old population with the new generation\n            population = next_generation\n        # Print the best solution found\n        best_index = fitness_scores.index(max(fitness_scores))\n        best_individual = population[best_index]\n        print(\"Best solution found:\", best_individual)\n        # Plot results\n        plot_evolve(evolve_csv)\n        LOGGER.info(\n            f\"Hyperparameter evolution finished {opt.evolve} generations\\n\"\n            f\"Results saved to {colorstr('bold', save_dir)}\\n\"\n            f\"Usage example: $ python train.py --hyp {evolve_yaml}\"\n        )\n\n\ndef generate_individual(input_ranges, individual_length):\n    \"\"\"Generate an individual with random hyperparameters within specified ranges.\n\n    Args:\n        input_ranges (list[tuple[float, float]]): List of tuples where each tuple contains the lower and upper bounds\n            for the corresponding gene (hyperparameter).\n        individual_length (int): The number of genes (hyperparameters) in the individual.\n\n    Returns:\n        list[float]: A list representing a generated individual with random gene values within the specified ranges.\n\n    Examples:\n        ```python\n        input_ranges = [(0.01, 0.1), (0.1, 1.0), (0.9, 2.0)]\n        individual_length = 3\n        individual = generate_individual(input_ranges, individual_length)\n        print(individual)  # Output: [0.035, 0.678, 1.456] (example output)\n        ```\n\n    Notes:\n        The individual returned will have a length equal to 
`individual_length`, with each gene value being a floating-point\n        number within its specified range in `input_ranges`.\n    \"\"\"\n    individual = []\n    for i in range(individual_length):\n        lower_bound, upper_bound = input_ranges[i]\n        individual.append(random.uniform(lower_bound, upper_bound))\n    return individual\n\n\ndef run(**kwargs):\n    \"\"\"Execute YOLOv5 training with specified options, allowing optional overrides through keyword arguments.\n\n    Args:\n        weights (str, optional): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'.\n        cfg (str, optional): Path to model YAML configuration. Defaults to an empty string.\n        data (str, optional): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'.\n        hyp (str, optional): Path to hyperparameters YAML configuration. Defaults to ROOT /\n            'data/hyps/hyp.scratch-low.yaml'.\n        epochs (int, optional): Total number of training epochs. Defaults to 100.\n        batch_size (int, optional): Total batch size for all GPUs. Use -1 for automatic batch size determination.\n            Defaults to 16.\n        imgsz (int, optional): Image size (pixels) for training and validation. Defaults to 640.\n        rect (bool, optional): Use rectangular training. Defaults to False.\n        resume (bool | str, optional): Resume most recent training with an optional path. Defaults to False.\n        nosave (bool, optional): Only save the final checkpoint. Defaults to False.\n        noval (bool, optional): Only validate at the final epoch. Defaults to False.\n        noautoanchor (bool, optional): Disable AutoAnchor. Defaults to False.\n        noplots (bool, optional): Do not save plot files. Defaults to False.\n        evolve (int, optional): Evolve hyperparameters for a specified number of generations. Use 300 if provided\n            without a value.\n        evolve_population (str, optional): Directory for loading population during evolution. Defaults to ROOT / 'data/\n            hyps'.\n        resume_evolve (str, optional): Resume hyperparameter evolution from the last generation. Defaults to None.\n        bucket (str, optional): gsutil bucket for saving checkpoints. Defaults to an empty string.\n        cache (str, optional): Cache image data in 'ram' or 'disk'. Defaults to None.\n        image_weights (bool, optional): Use weighted image selection for training. Defaults to False.\n        device (str, optional): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. Defaults to an empty string.\n        multi_scale (bool, optional): Use multi-scale training, varying image size by ±50%. Defaults to False.\n        single_cls (bool, optional): Train with multi-class data as single-class. Defaults to False.\n        optimizer (str, optional): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'.\n        sync_bn (bool, optional): Use synchronized BatchNorm, only available in DDP mode. Defaults to False.\n        workers (int, optional): Maximum dataloader workers per rank in DDP mode. Defaults to 8.\n        project (str, optional): Directory for saving training runs. Defaults to ROOT / 'runs/train'.\n        name (str, optional): Name for saving the training run. Defaults to 'exp'.\n        exist_ok (bool, optional): Allow existing project/name without incrementing. Defaults to False.\n        quad (bool, optional): Use quad dataloader. Defaults to False.\n        cos_lr (bool, optional): Use cosine learning rate scheduler. 
Defaults to False.\n        label_smoothing (float, optional): Label smoothing epsilon value. Defaults to 0.0.\n        patience (int, optional): Patience for early stopping, measured in epochs without improvement. Defaults to 100.\n        freeze (list, optional): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0].\n        save_period (int, optional): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1.\n        seed (int, optional): Global training random seed. Defaults to 0.\n        local_rank (int, optional): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1.\n\n    Returns:\n        None: The function initiates YOLOv5 training or hyperparameter evolution based on the provided options.\n\n    Examples:\n        ```python\n        import train\n        train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')\n        ```\n\n    Notes:\n        - Models: https://github.com/ultralytics/yolov5/tree/master/models\n        - Datasets: https://github.com/ultralytics/yolov5/tree/master/data\n        - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data\n    \"\"\"\n    opt = parse_opt(True)\n    for k, v in kwargs.items():\n        setattr(opt, k, v)\n    main(opt)\n    return opt\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "tutorial.ipynb",
    "content": "{\n \"nbformat\": 4,\n \"nbformat_minor\": 0,\n \"metadata\": {\n  \"colab\": {\n   \"name\": \"YOLOv5 Tutorial\",\n   \"provenance\": []\n  },\n  \"kernelspec\": {\n   \"name\": \"python3\",\n   \"display_name\": \"Python 3\"\n  },\n  \"accelerator\": \"GPU\"\n },\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"t6MPjfT5NrKQ\"\n   },\n   \"source\": [\n    \"<div align=\\\"center\\\">\\n\",\n    \"  <a href=\\\"https://ultralytics.com/yolo\\\" target=\\\"_blank\\\">\\n\",\n    \"    <img width=\\\"1024\\\" src=\\\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\\\">\\n\",\n    \"  </a>\\n\",\n    \"\\n\",\n    \"  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\\n\",\n    \"\\n\",\n    \"  <a href=\\\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\\\"><img src=\\\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\\\" alt=\\\"Ultralytics CI\\\"></a>\\n\",\n    \"  <a href=\\\"https://console.paperspace.com/github/ultralytics/ultralytics\\\"><img src=\\\"https://assets.paperspace.io/img/gradient-badge.svg\\\" alt=\\\"Run on Gradient\\\"/></a>\\n\",\n    \"  <a href=\\\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\\\"><img src=\\\"https://colab.research.google.com/assets/colab-badge.svg\\\" alt=\\\"Open In Colab\\\"></a>\\n\",\n    \"  <a href=\\\"https://www.kaggle.com/models/ultralytics/yolo11\\\"><img src=\\\"https://kaggle.com/static/images/open-in-kaggle.svg\\\" alt=\\\"Open In Kaggle\\\"></a>\\n\",\n    \"\\n\",\n    \"  <a href=\\\"https://ultralytics.com/discord\\\"><img alt=\\\"Discord\\\" src=\\\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\\\"></a>\\n\",\n    \"  <a href=\\\"https://community.ultralytics.com\\\"><img alt=\\\"Ultralytics Forums\\\" src=\\\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\\\"></a>\\n\",\n    \"  <a href=\\\"https://reddit.com/r/ultralytics\\\"><img alt=\\\"Ultralytics Reddit\\\" src=\\\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\\\"></a>\\n\",\n    \"</div>\\n\",\n    \"\\n\",\n    \"This **Ultralytics YOLOv5 Colab Notebook** is the easiest way to get started with [YOLO models](https://www.ultralytics.com/yolo)—no installation needed. Built by [Ultralytics](https://www.ultralytics.com/), the creators of YOLO, this notebook walks you through running **state-of-the-art** models directly in your browser.\\n\",\n    \"\\n\",\n    \"Ultralytics models are constantly updated for performance and flexibility. 
They're **fast**, **accurate**, and **easy to use**, and they excel at [object detection](https://docs.ultralytics.com/tasks/detect/), [tracking](https://docs.ultralytics.com/modes/track/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [image classification](https://docs.ultralytics.com/tasks/classify/), and [pose estimation](https://docs.ultralytics.com/tasks/pose/).\\n\",\n    \"\\n\",\n    \"Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!\\n\",\n    \"\\n\",\n    \"Request an Enterprise License for commercial use at [Ultralytics Licensing](https://www.ultralytics.com/license).\\n\",\n    \"\\n\",\n    \"<br>\\n\",\n    \"<div>\\n\",\n    \"  <a href=\\\"https://www.youtube.com/watch?v=ZN3nRZT7b24\\\" target=\\\"_blank\\\">\\n\",\n    \"    <img src=\\\"https://img.youtube.com/vi/ZN3nRZT7b24/maxresdefault.jpg\\\" alt=\\\"Ultralytics Video\\\" width=\\\"640\\\" style=\\\"border-radius: 10px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);\\\">\\n\",\n    \"  </a>\\n\",\n    \"\\n\",\n    \"  <p style=\\\"font-size: 16px; font-family: Arial, sans-serif; color: #555;\\\">\\n\",\n    \"    <strong>Watch: </strong> How to Train\\n\",\n    \"    <a href=\\\"https://github.com/ultralytics/ultralytics\\\">Ultralytics</a>\\n\",\n    \"    <a href=\\\"https://docs.ultralytics.com/models/yolo11/\\\">YOLO11</a> Model on Custom Dataset using Google Colab Notebook 🚀\\n\",\n    \"  </p>\\n\",\n    \"</div>\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"7mGmQbAO5pQb\"\n   },\n   \"source\": [\n    \"# Setup\\n\",\n    \"\\n\",\n    \"Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"id\": \"wbvMlHd_QwMG\",\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"outputId\": \"e8225db4-e61d-4640-8b1f-8bfce3331cea\"\n   },\n   \"source\": [\n    \"!git clone https://github.com/ultralytics/yolov5  # clone\\n\",\n    \"%cd yolov5\\n\",\n    \"%pip install -qr requirements.txt comet_ml  # install\\n\",\n    \"\\n\",\n    \"import torch\\n\",\n    \"import utils\\n\",\n    \"display = utils.notebook_init()  # checks\"\n   ],\n   \"execution_count\": null,\n   \"outputs\": [\n    {\n     \"output_type\": \"stream\",\n     \"name\": \"stderr\",\n     \"text\": [\n      \"YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\\n\"\n     ]\n    },\n    {\n     \"output_type\": \"stream\",\n     \"name\": \"stdout\",\n     \"text\": [\n      \"Setup complete ✅ (2 CPUs, 12.7 GB RAM, 23.3/166.8 GB disk)\\n\"\n     ]\n    }\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"4JnkELT0cIJg\"\n   },\n   \"source\": [\n    \"# 1. Detect\\n\",\n    \"\\n\",\n    \"`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. 
Example inference sources are:\\n\",\n    \"\\n\",\n    \"```shell\\n\",\n    \"python detect.py --source 0  # webcam\\n\",\n    \"                          img.jpg  # image\\n\",\n    \"                          vid.mp4  # video\\n\",\n    \"                          screen  # screenshot\\n\",\n    \"                          path/  # directory\\n\",\n    \"                         'path/*.jpg'  # glob\\n\",\n    \"                         'https://youtu.be/LNwODJXcvt4'  # YouTube\\n\",\n    \"                         'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"id\": \"zR9ZbuQCH7FX\",\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"outputId\": \"284ef04b-1596-412f-88f6-948828dd2b49\"\n   },\n   \"source\": [\n    \"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\\n\",\n    \"# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)\"\n   ],\n   \"execution_count\": null,\n   \"outputs\": [\n    {\n     \"output_type\": \"stream\",\n     \"name\": \"stdout\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1mdetect: \\u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\\n\",\n      \"YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\\n\",\n      \"\\n\",\n      \"Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\\n\",\n      \"100% 14.1M/14.1M [00:00<00:00, 24.5MB/s]\\n\",\n      \"\\n\",\n      \"Fusing layers... \\n\",\n      \"YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\\n\",\n      \"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 41.5ms\\n\",\n      \"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 60.0ms\\n\",\n      \"Speed: 0.5ms pre-process, 50.8ms inference, 37.7ms NMS per image at shape (1, 3, 640, 640)\\n\",\n      \"Results saved to \\u001b[1mruns/detect/exp\\u001b[0m\\n\"\n     ]\n    }\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"hkAzDWJ7cWTr\"\n   },\n   \"source\": [\n    \"&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\\n\",\n    \"<img align=\\\"left\\\" src=\\\"https://user-images.githubusercontent.com/26833433/127574988-6a558aa1-d268-44b9-bf6b-62d4c605cc72.jpg\\\" width=\\\"600\\\">\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"0eq1SMWl6Sfn\"\n   },\n   \"source\": [\n    \"# 2. Validate\\n\",\n    \"Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). 
To show results by class use the `--verbose` flag.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"id\": \"WQPtK1QYVaD_\",\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"outputId\": \"cf7d52f0-281c-4c96-a488-79f5908f8426\"\n   },\n   \"source\": [\n    \"# Download COCO val\\n\",\n    \"torch.hub.download_url_to_file('https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017val.zip', 'tmp.zip')  # download (780M - 5000 images)\\n\",\n    \"!unzip -q tmp.zip -d ../datasets && rm tmp.zip  # unzip\"\n   ],\n   \"execution_count\": null,\n   \"outputs\": [\n    {\n     \"output_type\": \"stream\",\n     \"name\": \"stderr\",\n     \"text\": [\n      \"100%|██████████| 780M/780M [00:12<00:00, 66.6MB/s]\\n\"\n     ]\n    }\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"id\": \"X58w8JLpMnjH\",\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"outputId\": \"3e234e05-ee8b-4ad1-b1a4-f6a55d5e4f3d\"\n   },\n   \"source\": [\n    \"# Validate YOLOv5s on COCO val\\n\",\n    \"!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half\"\n   ],\n   \"execution_count\": null,\n   \"outputs\": [\n    {\n     \"output_type\": \"stream\",\n     \"name\": \"stdout\",\n     \"text\": [\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\\n\",\n      \"YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\\n\",\n      \"\\n\",\n      \"Fusing layers... \\n\",\n      \"YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2024.59it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mNew cache created: /content/datasets/coco/val2017.cache\\n\",\n      \"                 Class     Images  Instances          P          R      mAP50   mAP50-95: 100% 157/157 [01:25<00:00,  1.84it/s]\\n\",\n      \"                   all       5000      36335      0.671      0.519      0.566      0.371\\n\",\n      \"Speed: 0.1ms pre-process, 3.1ms inference, 2.3ms NMS per image at shape (32, 3, 640, 640)\\n\",\n      \"\\n\",\n      \"Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\\n\",\n      \"loading annotations into memory...\\n\",\n      \"Done (t=0.43s)\\n\",\n      \"creating index...\\n\",\n      \"index created!\\n\",\n      \"Loading and preparing results...\\n\",\n      \"DONE (t=5.32s)\\n\",\n      \"creating index...\\n\",\n      \"index created!\\n\",\n      \"Running per image evaluation...\\n\",\n      \"Evaluate annotation type *bbox*\\n\",\n      \"DONE (t=78.89s).\\n\",\n      \"Accumulating evaluation results...\\n\",\n      \"DONE (t=14.51s).\\n\",\n      \" Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.374\\n\",\n      \" Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.572\\n\",\n      \" Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.402\\n\",\n      \" Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.211\\n\",\n      \" Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.423\\n\",\n      \" Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.489\\n\",\n      \" Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.311\\n\",\n      \" Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.516\\n\",\n      \" Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.566\\n\",\n      \" Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\\n\",\n      \" Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\\n\",\n      \" Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.722\\n\",\n      \"Results saved to \\u001b[1mruns/val/exp\\u001b[0m\\n\"\n     ]\n    }\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"ZY2VXXXu74w5\"\n   },\n   \"source\": \"# 3. 
Train\\n\\n<p align=\\\"\\\"><a href=\\\"https://platform.ultralytics.com\\\"><img width=\\\"1000\\\" src=\\\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\\\"/></a></p>\\nClose the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\\n<br><br>\\n\\nTrain a YOLOv5s model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\\n\\n- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\\nautomatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\\n- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\\n- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\\n<br>\\n\\nA **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\"\n  },\n  {\n   \"cell_type\": \"code\",\n   \"source\": [\n    \"#@title Select YOLOv5 🚀 logger {run: 'auto'}\\n\",\n    \"logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\\n\",\n    \"\\n\",\n    \"if logger == 'Comet':\\n\",\n    \"  %pip install -q comet_ml\\n\",\n    \"  import comet_ml; comet_ml.init()\\n\",\n    \"elif logger == 'ClearML':\\n\",\n    \"  %pip install -q clearml\\n\",\n    \"  import clearml; clearml.browser_login()\\n\",\n    \"elif logger == 'TensorBoard':\\n\",\n    \"  %load_ext tensorboard\\n\",\n    \"  %tensorboard --logdir runs/train\"\n   ],\n   \"metadata\": {\n    \"id\": \"i3oKtE4g-aNn\"\n   },\n   \"execution_count\": null,\n   \"outputs\": []\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"id\": \"1NcFxRcFdJ_O\",\n    \"colab\": {\n     \"base_uri\": \"https://localhost:8080/\"\n    },\n    \"outputId\": \"bbeeea2b-04fc-4185-aa64-258690495b5a\"\n   },\n   \"source\": [\n    \"# Train YOLOv5s on COCO128 for 3 epochs\\n\",\n    \"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache\"\n   ],\n   \"execution_count\": null,\n   \"outputs\": [\n    {\n     \"output_type\": \"stream\",\n     \"name\": \"stdout\",\n     \"text\": [\n      \"2023-04-09 14:11:38.063605: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\\n\",\n      \"To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\\n\",\n      \"2023-04-09 14:11:39.026661: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: 
Could not find TensorRT\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\\n\",\n      \"\\u001b[34m\\u001b[1mgithub: \\u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\\n\",\n      \"YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\\n\",\n      \"\\n\",\n      \"\\u001b[34m\\u001b[1mhyperparameters: \\u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\\n\",\n      \"\\u001b[34m\\u001b[1mClearML: \\u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\\n\",\n      \"\\u001b[34m\\u001b[1mComet: \\u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\\n\",\n      \"\\u001b[34m\\u001b[1mTensorBoard: \\u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\\n\",\n      \"\\n\",\n      \"Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\\n\",\n      \"Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip to coco128.zip...\\n\",\n      \"100% 6.66M/6.66M [00:00<00:00, 75.6MB/s]\\n\",\n      \"Dataset download success ✅ (0.6s), saved to \\u001b[1m/content/datasets\\u001b[0m\\n\",\n      \"\\n\",\n      \"                 from  n    params  module                                  arguments                     \\n\",\n      \"  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \\n\",\n      \"  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \\n\",\n      \"  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \\n\",\n      \"  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \\n\",\n      \"  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \\n\",\n      \"  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \\n\",\n      \"  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \\n\",\n      \"  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \\n\",\n      \"  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \\n\",\n      \"  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \\n\",\n      \" 10                -1  1    131584  
models.common.Conv                      [512, 256, 1, 1]              \\n\",\n      \" 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \\n\",\n      \" 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \\n\",\n      \" 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \\n\",\n      \" 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \\n\",\n      \" 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \\n\",\n      \" 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \\n\",\n      \" 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \\n\",\n      \" 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \\n\",\n      \" 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \\n\",\n      \" 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \\n\",\n      \" 24      [17, 20, 23]  1    229245  models.yolo.Detect                      [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\\n\",\n      \"Model summary: 214 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\\n\",\n      \"\\n\",\n      \"Transferred 349/349 items from yolov5s.pt\\n\",\n      \"\\u001b[34m\\u001b[1mAMP: \\u001b[0mchecks passed ✅\\n\",\n      \"\\u001b[34m\\u001b[1moptimizer:\\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\\n\",\n      \"\\u001b[34m\\u001b[1malbumentations: \\u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1709.36it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\\n\",\n      \"\\u001b[34m\\u001b[1mtrain: \\u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 264.35it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\\n\",\n      \"\\u001b[34m\\u001b[1mval: \\u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 107.05it/s]\\n\",\n      \"\\n\",\n      \"\\u001b[34m\\u001b[1mAutoAnchor: \\u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\\n\",\n      \"Plotting labels to runs/train/exp/labels.jpg... 
\\n\",\n      \"Image sizes 640 train, 640 val\\n\",\n      \"Using 2 dataloader workers\\n\",\n      \"Logging results to \\u001b[1mruns/train/exp\\u001b[0m\\n\",\n      \"Starting training for 3 epochs...\\n\",\n      \"\\n\",\n      \"      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\\n\",\n      \"        0/2      3.91G    0.04618    0.07209    0.01703        232        640: 100% 8/8 [00:09<00:00,  1.17s/it]\\n\",\n      \"                 Class     Images  Instances          P          R      mAP50   mAP50-95: 100% 4/4 [00:01<00:00,  2.01it/s]\\n\",\n      \"                   all        128        929      0.667      0.602       0.68       0.45\\n\",\n      \"\\n\",\n      \"      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\\n\",\n      \"        1/2      4.76G    0.04622    0.06891    0.01817        201        640: 100% 8/8 [00:02<00:00,  3.78it/s]\\n\",\n      \"                 Class     Images  Instances          P          R      mAP50   mAP50-95: 100% 4/4 [00:01<00:00,  2.16it/s]\\n\",\n      \"                   all        128        929      0.709      0.645      0.722      0.478\\n\",\n      \"\\n\",\n      \"      Epoch    GPU_mem   box_loss   obj_loss   cls_loss  Instances       Size\\n\",\n      \"        2/2      4.76G     0.0436     0.0647    0.01698        227        640: 100% 8/8 [00:01<00:00,  4.19it/s]\\n\",\n      \"                 Class     Images  Instances          P          R      mAP50   mAP50-95: 100% 4/4 [00:01<00:00,  2.95it/s]\\n\",\n      \"                   all        128        929      0.761      0.647      0.735       0.49\\n\",\n      \"\\n\",\n      \"3 epochs completed in 0.006 hours.\\n\",\n      \"Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\\n\",\n      \"Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\\n\",\n      \"\\n\",\n      \"Validating runs/train/exp/weights/best.pt...\\n\",\n      \"Fusing layers... 
\\n\",\n      \"Model summary: 157 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs\\n\",\n      \"                 Class     Images  Instances          P          R      mAP50   mAP50-95: 100% 4/4 [00:06<00:00,  1.56s/it]\\n\",\n      \"                   all        128        929      0.759      0.646      0.734       0.49\\n\",\n      \"                person        128        254      0.857      0.706      0.805      0.525\\n\",\n      \"               bicycle        128          6      0.773      0.577      0.725      0.414\\n\",\n      \"                   car        128         46      0.664      0.435      0.551       0.24\\n\",\n      \"            motorcycle        128          5      0.587        0.8      0.837      0.635\\n\",\n      \"              airplane        128          6          1      0.989      0.995      0.715\\n\",\n      \"                   bus        128          7      0.635      0.714      0.753      0.651\\n\",\n      \"                 train        128          3      0.686      0.333       0.72      0.504\\n\",\n      \"                 truck        128         12      0.604      0.333      0.472      0.259\\n\",\n      \"                  boat        128          6      0.938      0.333      0.449      0.177\\n\",\n      \"         traffic light        128         14      0.778      0.255      0.401      0.217\\n\",\n      \"             stop sign        128          2      0.826          1      0.995      0.895\\n\",\n      \"                 bench        128          9      0.711      0.556      0.661      0.313\\n\",\n      \"                  bird        128         16      0.962          1      0.995      0.642\\n\",\n      \"                   cat        128          4      0.868          1      0.995      0.754\\n\",\n      \"                   dog        128          9          1      0.652      0.899      0.651\\n\",\n      \"                 horse        128          2      0.853          1      0.995      0.622\\n\",\n      \"              elephant        128         17      0.909      0.882      0.934      0.698\\n\",\n      \"                  bear        128          1      0.696          1      0.995      0.995\\n\",\n      \"                 zebra        128          4      0.855          1      0.995      0.905\\n\",\n      \"               giraffe        128          9      0.788      0.828      0.912      0.701\\n\",\n      \"              backpack        128          6      0.835        0.5      0.738      0.311\\n\",\n      \"              umbrella        128         18      0.785      0.814      0.859       0.48\\n\",\n      \"               handbag        128         19      0.759      0.263      0.366      0.205\\n\",\n      \"                   tie        128          7      0.983      0.714       0.77      0.492\\n\",\n      \"              suitcase        128          4      0.656          1      0.945      0.631\\n\",\n      \"               frisbee        128          5      0.721        0.8      0.759      0.724\\n\",\n      \"                  skis        128          1      0.737          1      0.995        0.3\\n\",\n      \"             snowboard        128          7      0.829      0.696       0.83      0.537\\n\",\n      \"           sports ball        128          6      0.637      0.667      0.602      0.311\\n\",\n      \"                  kite        128         10      0.636        0.6      0.599      0.226\\n\",\n      \"          baseball bat        128          4      0.501       0.25      0.468      
0.205\\n\",\n      \"        baseball glove        128          7      0.483      0.429      0.465      0.292\\n\",\n      \"            skateboard        128          5      0.932        0.6      0.687      0.493\\n\",\n      \"         tennis racket        128          7       0.77      0.429      0.547      0.332\\n\",\n      \"                bottle        128         18      0.577      0.379      0.554      0.276\\n\",\n      \"            wine glass        128         16      0.704      0.875       0.89       0.51\\n\",\n      \"                   cup        128         36      0.841      0.667      0.837      0.533\\n\",\n      \"                  fork        128          6      0.992      0.333       0.45      0.315\\n\",\n      \"                 knife        128         16      0.768      0.688      0.695      0.403\\n\",\n      \"                 spoon        128         22      0.838       0.47      0.639      0.384\\n\",\n      \"                  bowl        128         28      0.764       0.58      0.716      0.513\\n\",\n      \"                banana        128          1      0.902          1      0.995      0.301\\n\",\n      \"              sandwich        128          2          1          0      0.359      0.326\\n\",\n      \"                orange        128          4      0.722       0.75      0.912      0.581\\n\",\n      \"              broccoli        128         11      0.547      0.364      0.432      0.317\\n\",\n      \"                carrot        128         24      0.619      0.625      0.724      0.495\\n\",\n      \"               hot dog        128          2      0.409          1      0.828      0.762\\n\",\n      \"                 pizza        128          5      0.833      0.995      0.962      0.727\\n\",\n      \"                 donut        128         14      0.631          1       0.96      0.839\\n\",\n      \"                  cake        128          4       0.87          1      0.995       0.83\\n\",\n      \"                 chair        128         35      0.583        0.6      0.608      0.317\\n\",\n      \"                 couch        128          6      0.907      0.667      0.815      0.544\\n\",\n      \"          potted plant        128         14      0.739      0.786      0.823       0.48\\n\",\n      \"                   bed        128          3      0.985      0.333       0.83      0.441\\n\",\n      \"          dining table        128         13      0.821      0.357      0.578      0.342\\n\",\n      \"                toilet        128          2          1      0.988      0.995      0.846\\n\",\n      \"                    tv        128          2       0.57          1      0.995      0.796\\n\",\n      \"                laptop        128          3          1          0      0.593      0.312\\n\",\n      \"                 mouse        128          2          1          0      0.089     0.0445\\n\",\n      \"                remote        128          8          1      0.624      0.634      0.538\\n\",\n      \"            cell phone        128          8      0.622      0.417      0.421      0.187\\n\",\n      \"             microwave        128          3      0.711          1      0.995      0.766\\n\",\n      \"                  oven        128          5      0.329        0.4       0.43      0.282\\n\",\n      \"                  sink        128          6      0.437      0.333      0.338      0.265\\n\",\n      \"          refrigerator        128          5      0.567        0.8      0.799      0.536\\n\",\n      
\"                  book        128         29      0.597      0.257      0.349      0.154\\n\",\n      \"                 clock        128          9      0.765      0.889      0.932      0.736\\n\",\n      \"                  vase        128          2       0.33          1      0.995      0.895\\n\",\n      \"              scissors        128          1          1          0      0.497     0.0498\\n\",\n      \"            teddy bear        128         21      0.856      0.569      0.841      0.547\\n\",\n      \"            toothbrush        128          5        0.8          1      0.928      0.574\\n\",\n      \"Results saved to \\u001b[1mruns/train/exp\\u001b[0m\\n\"\n     ]\n    }\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"15glLzbQx5u0\"\n   },\n   \"source\": [\n    \"# 4. Visualize\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Comet Logging and Visualization 🌟 NEW\\n\",\n    \"\\n\",\n    \"[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\\n\",\n    \"\\n\",\n    \"Getting started is easy:\\n\",\n    \"```shell\\n\",\n    \"pip install comet_ml  # 1. install\\n\",\n    \"export COMET_API_KEY=<Your API Key>  # 2. paste API key\\n\",\n    \"python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\\n\",\n    \"```\\n\",\n    \"To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\\n\",\n    \"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\\n\",\n    \"\\n\",\n    \"<a href=\\\"https://bit.ly/yolov5-readme-comet2\\\">\\n\",\n    \"<img alt=\\\"Comet Dashboard\\\" src=\\\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\\\" width=\\\"1280\\\"/></a>\"\n   ],\n   \"metadata\": {\n    \"id\": \"nWOsI5wJR1o3\"\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## ClearML Logging and Automation 🌟 NEW\\n\",\n    \"\\n\",\n    \"[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n",\n    "\n",\n    "- `pip install clearml`\n",\n    "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",\n    "\n",\n    "You'll get all the features you expect from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks, for example, uncommitted changes and installed packages. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only one extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",\n    "\n",\n    "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",\n    "\n",\n    "<a href=\\"https://cutt.ly/yolov5-notebook-clearml\\">\n",\n    "<img alt=\\"ClearML Experiment Management UI\\" src=\\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\\" width=\\"1280\\"/></a>"\n   ],\n   "metadata": {\n    "id": "Lay2WsTjNJzP"\n   }\n  },\n  {\n   "cell_type": "markdown",\n   "metadata": {\n    "id": "-WPvRbS5Swl6"\n   },\n   "source": [\n    "## Local Logging\\n",\n    "\n",\n    "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",\n    "\n",\n    "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",\n    "\n",\n    "<img alt=\\"Local logging results\\" src=\\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\\" width=\\"1280\\"/>\n"\n   ]\n  },\n  {\n   "cell_type": "markdown",\n   "metadata": {\n    "id": "Zelyeqbyt3GD"\n   },\n   "source": [\n    "# Environments\\n",\n    "\n",\n    "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",\n    "\n",\n    "- **Notebooks** with free GPU: <a href=\\"https://bit.ly/yolov5-paperspace-notebook\\"><img src=\\"https://assets.paperspace.io/img/gradient-badge.svg\\" alt=\\"Run on Gradient\\"></a> <a href=\\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\\"><img src=\\"https://colab.research.google.com/assets/colab-badge.svg\\" alt=\\"Open In Colab\\"></a> <a href=\\"https://www.kaggle.com/models/ultralytics/yolov5\\"><img src=\\"https://kaggle.com/static/images/open-in-kaggle.svg\\" alt=\\"Open In Kaggle\\"></a>\n",\n    "- **Google Cloud** Deep Learning VM. 
See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\\n\",\n    \"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\\n\",\n    \"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\\\"https://hub.docker.com/r/ultralytics/yolov5\\\"><img src=\\\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\\\" alt=\\\"Docker Pulls\\\"></a>\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"6Qu7Iesl0p54\"\n   },\n   \"source\": [\n    \"# Status\\n\",\n    \"\\n\",\n    \"![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\\n\",\n    \"\\n\",\n    \"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"id\": \"IEijrePND_2I\"\n   },\n   \"source\": [\n    \"# Appendix\\n\",\n    \"\\n\",\n    \"Additional content below.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"metadata\": {\n    \"id\": \"GMusP4OAxFu6\"\n   },\n   \"source\": [\n    \"# YOLOv5 PyTorch HUB Inference (DetectionModels only)\\n\",\n    \"import torch\\n\",\n    \"\\n\",\n    \"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True)  # or yolov5n - yolov5x6 or custom\\n\",\n    \"im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\\n\",\n    \"results = model(im)  # inference\\n\",\n    \"results.print()  # or .show(), .save(), .crop(), .pandas(), etc.\"\n   ],\n   \"execution_count\": null,\n   \"outputs\": []\n  }\n ]\n}"
  },
  {
    "path": "utils/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"utils/initialization.\"\"\"\n\nimport contextlib\nimport platform\nimport threading\n\n\ndef emojis(str=\"\"):\n    \"\"\"Returns an emoji-safe version of a string, stripped of emojis on Windows platforms.\"\"\"\n    return str.encode().decode(\"ascii\", \"ignore\") if platform.system() == \"Windows\" else str\n\n\nclass TryExcept(contextlib.ContextDecorator):\n    \"\"\"A context manager and decorator for error handling that prints an optional message with emojis on exception.\"\"\"\n\n    def __init__(self, msg=\"\"):\n        \"\"\"Initializes TryExcept with an optional message, used as a decorator or context manager for error handling.\"\"\"\n        self.msg = msg\n\n    def __enter__(self):\n        \"\"\"Enter the runtime context related to this object for error handling with an optional message.\"\"\"\n        pass\n\n    def __exit__(self, exc_type, value, traceback):\n        \"\"\"Context manager exit method that prints an error message with emojis if an exception occurred, always returns\n        True.\n        \"\"\"\n        if value:\n            print(emojis(f\"{self.msg}{': ' if self.msg else ''}{value}\"))\n        return True\n\n\ndef threaded(func):\n    \"\"\"Decorator @threaded to run a function in a separate thread, returning the thread instance.\"\"\"\n\n    def wrapper(*args, **kwargs):\n        \"\"\"Runs the decorated function in a separate daemon thread and returns the thread instance.\"\"\"\n        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)\n        thread.start()\n        return thread\n\n    return wrapper\n\n\ndef join_threads(verbose=False):\n    \"\"\"Joins all daemon threads, optionally printing their names if verbose is True.\n\n    Example: atexit.register(lambda: join_threads())\n    \"\"\"\n    main_thread = threading.current_thread()\n    for t in threading.enumerate():\n        if t is not main_thread:\n            if verbose:\n                print(f\"Joining thread {t.name}\")\n            t.join()\n\n\ndef notebook_init(verbose=True):\n    \"\"\"Initializes notebook environment by checking requirements, cleaning up, and displaying system info.\"\"\"\n    print(\"Checking setup...\")\n\n    import os\n    import shutil\n\n    from ultralytics.utils.checks import check_requirements\n\n    from utils.general import check_font, is_colab\n    from utils.torch_utils import select_device  # imports\n\n    check_font()\n\n    import psutil\n\n    if check_requirements(\"wandb\", install=False):\n        os.system(\"pip uninstall -y wandb\")  # eliminate unexpected account creation prompt with infinite hang\n    if is_colab():\n        shutil.rmtree(\"/content/sample_data\", ignore_errors=True)  # remove colab /sample_data directory\n\n    # System info\n    display = None\n    if verbose:\n        gb = 1 << 30  # bytes to GiB (1024 ** 3)\n        ram = psutil.virtual_memory().total\n        total, _used, free = shutil.disk_usage(\"/\")\n        with contextlib.suppress(Exception):  # clear display if ipython is installed\n            from IPython import display\n\n            display.clear_output()\n        s = f\"({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)\"\n    else:\n        s = \"\"\n\n    select_device(newline=False)\n    print(emojis(f\"Setup complete ✅ {s}\"))\n    return display\n"
  },
  {
    "path": "utils/activations.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Activation functions.\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SiLU(nn.Module):\n    \"\"\"Applies the Sigmoid-weighted Linear Unit (SiLU) activation function, also known as Swish.\"\"\"\n\n    @staticmethod\n    def forward(x):\n        \"\"\"Applies the Sigmoid-weighted Linear Unit (SiLU) activation function.\n\n        https://arxiv.org/pdf/1606.08415.pdf.\n        \"\"\"\n        return x * torch.sigmoid(x)\n\n\nclass Hardswish(nn.Module):\n    \"\"\"Applies the Hardswish activation function, which is efficient for mobile and embedded devices.\"\"\"\n\n    @staticmethod\n    def forward(x):\n        \"\"\"Applies the Hardswish activation function, compatible with TorchScript, CoreML, and ONNX.\n\n        Equivalent to x * F.hardsigmoid(x)\n        \"\"\"\n        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX\n\n\nclass Mish(nn.Module):\n    \"\"\"Mish activation https://github.com/digantamisra98/Mish.\"\"\"\n\n    @staticmethod\n    def forward(x):\n        \"\"\"Applies the Mish activation function, a smooth alternative to ReLU.\"\"\"\n        return x * F.softplus(x).tanh()\n\n\nclass MemoryEfficientMish(nn.Module):\n    \"\"\"Efficiently applies the Mish activation function using custom autograd for reduced memory usage.\"\"\"\n\n    class F(torch.autograd.Function):\n        \"\"\"Implements a custom autograd function for memory-efficient Mish activation.\"\"\"\n\n        @staticmethod\n        def forward(ctx, x):\n            \"\"\"Applies the Mish activation function, a smooth ReLU alternative, to the input tensor `x`.\"\"\"\n            ctx.save_for_backward(x)\n            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            \"\"\"Computes the gradient of the Mish activation function with respect to input `x`.\"\"\"\n            x = ctx.saved_tensors[0]\n            sx = torch.sigmoid(x)\n            fx = F.softplus(x).tanh()\n            return grad_output * (fx + x * sx * (1 - fx * fx))\n\n    def forward(self, x):\n        \"\"\"Applies the Mish activation function to the input tensor `x`.\"\"\"\n        return self.F.apply(x)\n\n\nclass FReLU(nn.Module):\n    \"\"\"FReLU activation https://arxiv.org/abs/2007.11824.\"\"\"\n\n    def __init__(self, c1, k=3):  # ch_in, kernel\n        \"\"\"Initializes FReLU activation with channel `c1` and kernel size `k`.\"\"\"\n        super().__init__()\n        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)\n        self.bn = nn.BatchNorm2d(c1)\n\n    def forward(self, x):\n        \"\"\"Applies FReLU activation with max operation between input and BN-convolved input.\n\n        https://arxiv.org/abs/2007.11824\n        \"\"\"\n        return torch.max(x, self.bn(self.conv(x)))\n\n\nclass AconC(nn.Module):\n    \"\"\"ACON activation (activate or not) function.\n\n    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter See \"Activate or Not: Learning\n    Customized Activation\" https://arxiv.org/pdf/2009.04759.pdf.\n    \"\"\"\n\n    def __init__(self, c1):\n        \"\"\"Initializes AconC with learnable parameters p1, p2, and beta for channel-wise activation control.\"\"\"\n        super().__init__()\n        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))\n\n    def forward(self, x):\n        \"\"\"Applies AconC activation function with learnable parameters for channel-wise control on input tensor x.\"\"\"\n        dpx = (self.p1 - self.p2) * x\n        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x\n\n\nclass MetaAconC(nn.Module):\n    \"\"\"ACON activation (activate or not) function.\n\n    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter See \"Activate or Not: Learning\n    Customized Activation\" https://arxiv.org/pdf/2009.04759.pdf.\n    \"\"\"\n\n    def __init__(self, c1, k=1, s=1, r=16):\n        \"\"\"Initializes MetaAconC with params: channel_in (c1), kernel size (k=1), stride (s=1), reduction (r=16).\"\"\"\n        super().__init__()\n        c2 = max(r, c1 // r)\n        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)\n        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)\n        # self.bn1 = nn.BatchNorm2d(c2)\n        # self.bn2 = nn.BatchNorm2d(c1)\n\n    def forward(self, x):\n        \"\"\"Applies a forward pass transforming input `x` using learnable parameters and sigmoid activation.\"\"\"\n        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)\n        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891\n        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable\n        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed\n        dpx = (self.p1 - self.p2) * x\n        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x\n"
  },
  {
    "path": "utils/augmentations.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Image augmentation functions.\"\"\"\n\nimport math\nimport random\n\nimport cv2\nimport numpy as np\nimport torch\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as TF\n\nfrom utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy\nfrom utils.metrics import bbox_ioa\n\nIMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean\nIMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation\n\n\nclass Albumentations:\n    \"\"\"Provides optional data augmentation for YOLOv5 using Albumentations library if installed.\"\"\"\n\n    def __init__(self, size=640):\n        \"\"\"Initializes Albumentations class for optional data augmentation in YOLOv5 with specified input size.\"\"\"\n        self.transform = None\n        prefix = colorstr(\"albumentations: \")\n        try:\n            import albumentations as A\n\n            check_version(A.__version__, \"1.0.3\", hard=True)  # version requirement\n\n            T = [\n                A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),\n                A.Blur(p=0.01),\n                A.MedianBlur(p=0.01),\n                A.ToGray(p=0.01),\n                A.CLAHE(p=0.01),\n                A.RandomBrightnessContrast(p=0.0),\n                A.RandomGamma(p=0.0),\n                A.ImageCompression(quality_lower=75, p=0.0),\n            ]  # transforms\n            self.transform = A.Compose(T, bbox_params=A.BboxParams(format=\"yolo\", label_fields=[\"class_labels\"]))\n\n            LOGGER.info(prefix + \", \".join(f\"{x}\".replace(\"always_apply=False, \", \"\") for x in T if x.p))\n        except ImportError:  # package not installed, skip\n            pass\n        except Exception as e:\n            LOGGER.info(f\"{prefix}{e}\")\n\n    def __call__(self, im, labels, p=1.0):\n        \"\"\"Applies transformations to an image and labels with probability `p`, returning updated image and labels.\"\"\"\n        if self.transform and random.random() < p:\n            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed\n            im, labels = new[\"image\"], np.array([[c, *b] for c, b in zip(new[\"class_labels\"], new[\"bboxes\"])])\n        return im, labels\n\n\ndef normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):\n    \"\"\"Applies ImageNet normalization to RGB images in BCHW format, modifying them in-place if specified.\n\n    Example: y = (x - mean) / std\n    \"\"\"\n    return TF.normalize(x, mean, std, inplace=inplace)\n\n\ndef denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):\n    \"\"\"Reverses ImageNet normalization for BCHW format RGB images by applying `x = x * std + mean`.\"\"\"\n    for i in range(3):\n        x[:, i] = x[:, i] * std[i] + mean[i]\n    return x\n\n\ndef augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n    \"\"\"Applies HSV color-space augmentation to an image with random gains for hue, saturation, and value.\"\"\"\n    if hgain or sgain or vgain:\n        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains\n        hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n        dtype = im.dtype  # uint8\n\n        x = np.arange(0, 256, dtype=r.dtype)\n        lut_hue = ((x * r[0]) % 180).astype(dtype)\n        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n        
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n        cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im)  # no return needed\n\n\ndef hist_equalize(im, clahe=True, bgr=False):\n    \"\"\"Equalizes image histogram, with optional CLAHE, for BGR or RGB image with shape (n,m,3) and range 0-255.\"\"\"\n    yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)\n    if clahe:\n        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n        yuv[:, :, 0] = c.apply(yuv[:, :, 0])\n    else:\n        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram\n    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB\n\n\ndef replicate(im, labels):\n    \"\"\"Replicates half of the smallest object labels in an image for data augmentation.\n\n    Returns augmented image and labels.\n    \"\"\"\n    h, w = im.shape[:2]\n    boxes = labels[:, 1:].astype(int)\n    x1, y1, x2, y2 = boxes.T\n    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)\n    for i in s.argsort()[: round(s.size * 0.5)]:  # smallest indices\n        x1b, y1b, x2b, y2b = boxes[i]\n        bh, bw = y2b - y1b, x2b - x1b\n        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y\n        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]\n        im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]  # im4[ymin:ymax, xmin:xmax]\n        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)\n\n    return im, labels\n\n\ndef letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n    \"\"\"Resizes and pads image to new_shape with stride-multiple constraints, returns resized image, ratio, padding.\"\"\"\n    shape = im.shape[:2]  # current shape [height, width]\n    if isinstance(new_shape, int):\n        new_shape = (new_shape, new_shape)\n\n    # Scale ratio (new / old)\n    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n    if not scaleup:  # only scale down, do not scale up (for better val mAP)\n        r = min(r, 1.0)\n\n    # Compute padding\n    ratio = r, r  # width, height ratios\n    new_unpad = round(shape[1] * r), round(shape[0] * r)\n    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding\n    if auto:  # minimum rectangle\n        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding\n    elif scaleFill:  # stretch\n        dw, dh = 0.0, 0.0\n        new_unpad = (new_shape[1], new_shape[0])\n        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios\n\n    dw /= 2  # divide padding into 2 sides\n    dh /= 2\n\n    if shape[::-1] != new_unpad:  # resize\n        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n    top, bottom = round(dh - 0.1), round(dh + 0.1)\n    left, right = round(dw - 0.1), round(dw + 0.1)\n    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border\n    return im, ratio, (dw, dh)\n\n\ndef random_perspective(\n    im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0)\n):\n    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n    # targets = [cls, xyxy]\n    \"\"\"Applies random perspective transformation to an image, modifying the image and corresponding labels.\"\"\"\n    height = im.shape[0] + border[0] * 2  # 
shape(h,w,c)\n    width = im.shape[1] + border[1] * 2\n\n    # Center\n    C = np.eye(3)\n    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)\n    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)\n\n    # Perspective\n    P = np.eye(3)\n    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)\n    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)\n\n    # Rotation and Scale\n    R = np.eye(3)\n    a = random.uniform(-degrees, degrees)\n    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations\n    s = random.uniform(1 - scale, 1 + scale)\n    # s = 2 ** random.uniform(-scale, scale)\n    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n    # Shear\n    S = np.eye(3)\n    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)\n    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)\n\n    # Translation\n    T = np.eye(3)\n    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)\n    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)\n\n    # Combined rotation matrix\n    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT\n    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed\n        if perspective:\n            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n        else:  # affine\n            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n    if n := len(targets):\n        use_segments = any(x.any() for x in segments) and len(segments) == n\n        new = np.zeros((n, 4))\n        if use_segments:  # warp segments\n            segments = resample_segments(segments)  # upsample\n            for i, segment in enumerate(segments):\n                xy = np.ones((len(segment), 3))\n                xy[:, :2] = segment\n                xy = xy @ M.T  # transform\n                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine\n\n                # clip\n                new[i] = segment2box(xy, width, height)\n\n        else:  # warp boxes\n            xy = np.ones((n * 4, 3))\n            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1\n            xy = xy @ M.T  # transform\n            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine\n\n            # create new boxes\n            x = xy[:, [0, 2, 4, 6]]\n            y = xy[:, [1, 3, 5, 7]]\n            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n            # clip\n            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n        # filter candidates\n        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n        targets = targets[i]\n        targets[:, 1:5] = new[i]\n\n    return im, targets\n\n\ndef copy_paste(im, labels, segments, p=0.5):\n    \"\"\"Applies Copy-Paste augmentation by flipping and merging segments and labels on an image.\n\n    Details at https://arxiv.org/abs/2012.07177.\n    \"\"\"\n    n = len(segments)\n    if p and n:\n        _h, w, _c = im.shape  # height, width, channels\n        im_new = np.zeros(im.shape, np.uint8)\n        for j in 
random.sample(range(n), k=round(p * n)):\n            l, s = labels[j], segments[j]\n            box = w - l[3], l[2], w - l[1], l[4]\n            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area\n            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels\n                labels = np.concatenate((labels, [[l[0], *box]]), 0)\n                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)\n\n        result = cv2.flip(im, 1)  # augment segments (flip left-right)\n        i = cv2.flip(im_new, 1).astype(bool)\n        im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug\n\n    return im, labels, segments\n\n\ndef cutout(im, labels, p=0.5):\n    \"\"\"Applies cutout augmentation to an image with optional label adjustment, using random masks of varying sizes.\n\n    Details at https://arxiv.org/abs/1708.04552.\n    \"\"\"\n    if random.random() < p:\n        h, w = im.shape[:2]\n        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction\n        for s in scales:\n            mask_h = random.randint(1, int(h * s))  # create random masks\n            mask_w = random.randint(1, int(w * s))\n\n            # box\n            xmin = max(0, random.randint(0, w) - mask_w // 2)\n            ymin = max(0, random.randint(0, h) - mask_h // 2)\n            xmax = min(w, xmin + mask_w)\n            ymax = min(h, ymin + mask_h)\n\n            # apply random color mask\n            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n            # return unobscured labels\n            if len(labels) and s > 0.03:\n                box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n                ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))  # intersection over area\n                labels = labels[ioa < 0.60]  # remove >60% obscured labels\n\n    return labels\n\n\ndef mixup(im, labels, im2, labels2):\n    \"\"\"Applies MixUp augmentation by blending images and labels.\n\n    See https://arxiv.org/pdf/1710.09412.pdf for details.\n    \"\"\"\n    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0\n    im = (im * r + im2 * (1 - r)).astype(np.uint8)\n    labels = np.concatenate((labels, labels2), 0)\n    return im, labels\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):\n    \"\"\"Filters bounding box candidates by minimum width-height threshold `wh_thr` (pixels), aspect ratio threshold\n    `ar_thr`, and area ratio threshold `area_thr`.\n\n    box1(4,n) is before augmentation, box2(4,n) is after augmentation.\n    \"\"\"\n    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio\n    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates\n\n\ndef classify_albumentations(\n    augment=True,\n    size=224,\n    scale=(0.08, 1.0),\n    ratio=(0.75, 1.0 / 0.75),  # 0.75, 1.33\n    hflip=0.5,\n    vflip=0.0,\n    jitter=0.4,\n    mean=IMAGENET_MEAN,\n    std=IMAGENET_STD,\n    auto_aug=False,\n):\n    # YOLOv5 classification Albumentations (optional, only used if package is installed)\n    \"\"\"Sets up Albumentations transforms for YOLOv5 classification tasks depending on augmentation settings.\"\"\"\n    prefix = colorstr(\"albumentations: \")\n    try:\n        
import albumentations as A\n        from albumentations.pytorch import ToTensorV2\n\n        check_version(A.__version__, \"1.0.3\", hard=True)  # version requirement\n        if augment:  # Resize and crop\n            T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]\n            if auto_aug:\n                # TODO: implement AugMix, AutoAug & RandAug in albumentation\n                LOGGER.info(f\"{prefix}auto augmentations are currently not supported\")\n            else:\n                if hflip > 0:\n                    T += [A.HorizontalFlip(p=hflip)]\n                if vflip > 0:\n                    T += [A.VerticalFlip(p=vflip)]\n                if jitter > 0:\n                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue\n                    T += [A.ColorJitter(*color_jitter, 0)]\n        else:  # Use fixed crop for eval set (reproducibility)\n            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]\n        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor\n        LOGGER.info(prefix + \", \".join(f\"{x}\".replace(\"always_apply=False, \", \"\") for x in T if x.p))\n        return A.Compose(T)\n\n    except ImportError:  # package not installed, skip\n        LOGGER.warning(f\"{prefix}⚠️ not found, install with `pip install albumentations` (recommended)\")\n    except Exception as e:\n        LOGGER.info(f\"{prefix}{e}\")\n\n\ndef classify_transforms(size=224):\n    \"\"\"Applies a series of transformations including center crop, ToTensor, and normalization for classification.\"\"\"\n    assert isinstance(size, int), f\"ERROR: classify_transforms size {size} must be integer, not (list, tuple)\"\n    # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])\n    return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])\n\n\nclass LetterBox:\n    \"\"\"Resizes and pads images to specified dimensions while maintaining aspect ratio for YOLOv5 preprocessing.\"\"\"\n\n    def __init__(self, size=(640, 640), auto=False, stride=32):\n        \"\"\"Initializes a LetterBox object for YOLOv5 image preprocessing with optional auto sizing and stride\n        adjustment.\n        \"\"\"\n        super().__init__()\n        self.h, self.w = (size, size) if isinstance(size, int) else size\n        self.auto = auto  # pass max size integer, automatically solve for short side using stride\n        self.stride = stride  # used with auto\n\n    def __call__(self, im):\n        \"\"\"Resizes and pads input image `im` (HWC format) to specified dimensions, maintaining aspect ratio.\n\n        im = np.array HWC\n        \"\"\"\n        imh, imw = im.shape[:2]\n        r = min(self.h / imh, self.w / imw)  # ratio of new/old\n        h, w = round(imh * r), round(imw * r)  # resized image\n        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w\n        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)\n        im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)\n        im_out[top : top + h, left : left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)\n        return im_out\n\n\nclass CenterCrop:\n    \"\"\"Applies center crop to an image, resizing it to the specified size while maintaining aspect ratio.\"\"\"\n\n    def __init__(self, size=640):\n        \"\"\"Initializes CenterCrop 
for image preprocessing, accepting single int or tuple for size, defaults to 640.\"\"\"\n        super().__init__()\n        self.h, self.w = (size, size) if isinstance(size, int) else size\n\n    def __call__(self, im):\n        \"\"\"Applies center crop to the input image and resizes it to a specified size, maintaining aspect ratio.\n\n        im = np.array HWC\n        \"\"\"\n        imh, imw = im.shape[:2]\n        m = min(imh, imw)  # min dimension\n        top, left = (imh - m) // 2, (imw - m) // 2\n        return cv2.resize(im[top : top + m, left : left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)\n\n\nclass ToTensor:\n    \"\"\"Converts BGR np.array image from HWC to RGB CHW format, normalizes to [0, 1], and supports FP16 if half=True.\"\"\"\n\n    def __init__(self, half=False):\n        \"\"\"Initializes ToTensor for YOLOv5 image preprocessing, with optional half precision (half=True for FP16).\"\"\"\n        super().__init__()\n        self.half = half\n\n    def __call__(self, im):\n        \"\"\"Converts BGR np.array image from HWC to RGB CHW format, and normalizes to [0, 1], with support for FP16 if\n        `half=True`.\n\n        im = np.array HWC in BGR order\n        \"\"\"\n        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous\n        im = torch.from_numpy(im)  # to torch\n        im = im.half() if self.half else im.float()  # uint8 to fp16/32\n        im /= 255.0  # 0-255 to 0.0-1.0\n        return im\n"
  },
  {
    "path": "utils/autoanchor.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"AutoAnchor utils.\"\"\"\n\nimport random\n\nimport numpy as np\nimport torch\nimport yaml\nfrom tqdm import tqdm\n\nfrom utils import TryExcept\nfrom utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr\n\nPREFIX = colorstr(\"AutoAnchor: \")\n\n\ndef check_anchor_order(m):\n    \"\"\"Checks and corrects anchor order against stride in YOLOv5 Detect() module if necessary.\"\"\"\n    a = m.anchors.prod(-1).mean(-1).view(-1)  # mean anchor area per output layer\n    da = a[-1] - a[0]  # delta a\n    ds = m.stride[-1] - m.stride[0]  # delta s\n    if da and (da.sign() != ds.sign()):  # same order\n        LOGGER.info(f\"{PREFIX}Reversing anchor order\")\n        m.anchors[:] = m.anchors.flip(0)\n\n\n@TryExcept(f\"{PREFIX}ERROR\")\ndef check_anchors(dataset, model, thr=4.0, imgsz=640):\n    \"\"\"Evaluates anchor fit to dataset and adjusts if necessary, supporting customizable threshold and image size.\"\"\"\n    m = model.module.model[-1] if hasattr(model, \"module\") else model.model[-1]  # Detect()\n    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale\n    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh\n\n    def metric(k):  # compute metric\n        \"\"\"Computes ratio metric, anchors above threshold, and best possible recall for YOLOv5 anchor evaluation.\"\"\"\n        r = wh[:, None] / k[None]\n        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric\n        best = x.max(1)[0]  # best_x\n        aat = (x > 1 / thr).float().sum(1).mean()  # anchors above threshold\n        bpr = (best > 1 / thr).float().mean()  # best possible recall\n        return bpr, aat\n\n    stride = m.stride.to(m.anchors.device).view(-1, 1, 1)  # model strides\n    anchors = m.anchors.clone() * stride  # current anchors\n    bpr, aat = metric(anchors.cpu().view(-1, 2))\n    s = f\"\\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
\"\n    if bpr > 0.98:  # threshold to recompute\n        LOGGER.info(f\"{s}Current anchors are a good fit to dataset ✅\")\n    else:\n        LOGGER.info(f\"{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...\")\n        na = m.anchors.numel() // 2  # number of anchors\n        anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n        new_bpr = metric(anchors)[0]\n        if new_bpr > bpr:  # replace anchors\n            anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n            m.anchors[:] = anchors.clone().view_as(m.anchors)\n            check_anchor_order(m)  # must be in pixel-space (not grid-space)\n            m.anchors /= stride\n            s = f\"{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)\"\n        else:\n            s = f\"{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)\"\n        LOGGER.info(s)\n\n\ndef kmean_anchors(dataset=\"./data/coco128.yaml\", n=9, img_size=640, thr=4.0, gen=1000, verbose=True):\n    \"\"\"Creates kmeans-evolved anchors from training dataset.\n\n    Args:\n        dataset: path to data.yaml, or a loaded dataset\n        n: number of anchors\n        img_size: image size used for training\n        thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0\n        gen: generations to evolve anchors using genetic algorithm\n        verbose: print all results\n\n    Returns:\n        k: kmeans evolved anchors\n\n    Examples:\n        from utils.autoanchor import *; _ = kmean_anchors()\n    \"\"\"\n    from scipy.cluster.vq import kmeans\n\n    npr = np.random\n    thr = 1 / thr\n\n    def metric(k, wh):  # compute metrics\n        \"\"\"Computes ratio metric, anchors above threshold, and best possible recall for YOLOv5 anchor evaluation.\"\"\"\n        r = wh[:, None] / k[None]\n        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric\n        # x = wh_iou(wh, torch.tensor(k))  # iou metric\n        return x, x.max(1)[0]  # x, best_x\n\n    def anchor_fitness(k):  # mutation fitness\n        \"\"\"Evaluates fitness of YOLOv5 anchors by computing recall and ratio metrics for an anchor evolution process.\"\"\"\n        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)\n        return (best * (best > thr).float()).mean()  # fitness\n\n    def print_results(k, verbose=True):\n        \"\"\"Sorts and logs kmeans-evolved anchor metrics and best possible recall values for YOLOv5 anchor evaluation.\"\"\"\n        k = k[np.argsort(k.prod(1))]  # sort small to large\n        x, best = metric(k, wh0)\n        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr\n        s = (\n            f\"{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\\n\"\n            f\"{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, \"\n            f\"past_thr={x[x > thr].mean():.3f}-mean: \"\n        )\n        for x in k:\n            s += \"%i,%i, \" % (round(x[0]), round(x[1]))\n        if verbose:\n            LOGGER.info(s[:-2])\n        return k\n\n    if isinstance(dataset, str):  # *.yaml file\n        with open(dataset, errors=\"ignore\") as f:\n            data_dict = yaml.safe_load(f)  # model dict\n        from utils.dataloaders import LoadImagesAndLabels\n\n        dataset = LoadImagesAndLabels(data_dict[\"train\"], augment=True, 
rect=True)\n\n    # Get label wh\n    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh\n\n    # Filter\n    i = (wh0 < 3.0).any(1).sum()\n    if i:\n        LOGGER.info(f\"{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size\")\n    wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32)  # filter > 2 pixels\n    # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1\n\n    # Kmeans init\n    try:\n        LOGGER.info(f\"{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...\")\n        assert n <= len(wh)  # apply overdetermined constraint\n        s = wh.std(0)  # sigmas for whitening\n        k = kmeans(wh / s, n, iter=30)[0] * s  # points\n        assert n == len(k)  # kmeans may return fewer points than requested if wh is insufficient or too similar\n    except Exception:\n        LOGGER.warning(f\"{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init\")\n        k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size  # random init\n    wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))\n    k = print_results(k, verbose=False)\n\n    # Plot\n    # k, d = [None] * 20, [None] * 20\n    # for i in tqdm(range(1, 21)):\n    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance\n    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)\n    # ax = ax.ravel()\n    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')\n    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh\n    # ax[0].hist(wh[wh[:, 0]<100, 0],400)\n    # ax[1].hist(wh[wh[:, 1]<100, 1],400)\n    # fig.savefig('wh.png', dpi=200)\n\n    # Evolve\n    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma\n    pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT)  # progress bar\n    for _ in pbar:\n        v = np.ones(sh)\n        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)\n            v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)\n        kg = (k.copy() * v).clip(min=2.0)\n        fg = anchor_fitness(kg)\n        if fg > f:\n            f, k = fg, kg.copy()\n            pbar.desc = f\"{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}\"\n            if verbose:\n                print_results(k, verbose)\n\n    return print_results(k).astype(np.float32)\n"
  },
  {
    "path": "utils/autobatch.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Auto-batch utils.\"\"\"\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\n\nfrom utils.general import LOGGER, colorstr\nfrom utils.torch_utils import profile\n\n\ndef check_train_batch_size(model, imgsz=640, amp=True):\n    \"\"\"Checks and computes optimal training batch size for YOLOv5 model, given image size and AMP setting.\"\"\"\n    with torch.cuda.amp.autocast(amp):\n        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size\n\n\ndef autobatch(model, imgsz=640, fraction=0.8, batch_size=16):\n    \"\"\"Estimates optimal YOLOv5 batch size using `fraction` of CUDA memory.\"\"\"\n    # Usage:\n    #     import torch\n    #     from utils.autobatch import autobatch\n    #     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)\n    #     print(autobatch(model))\n\n    # Check device\n    prefix = colorstr(\"AutoBatch: \")\n    LOGGER.info(f\"{prefix}Computing optimal batch size for --imgsz {imgsz}\")\n    device = next(model.parameters()).device  # get model device\n    if device.type == \"cpu\":\n        LOGGER.info(f\"{prefix}CUDA not detected, using default CPU batch-size {batch_size}\")\n        return batch_size\n    if torch.backends.cudnn.benchmark:\n        LOGGER.info(f\"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}\")\n        return batch_size\n\n    # Inspect CUDA memory\n    gb = 1 << 30  # bytes to GiB (1024 ** 3)\n    d = str(device).upper()  # 'CUDA:0'\n    properties = torch.cuda.get_device_properties(device)  # device properties\n    t = properties.total_memory / gb  # GiB total\n    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved\n    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated\n    f = t - (r + a)  # GiB free\n    LOGGER.info(f\"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free\")\n\n    # Profile batch sizes\n    batch_sizes = [1, 2, 4, 8, 16]\n    try:\n        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]\n        results = profile(img, model, n=3, device=device)\n    except Exception as e:\n        LOGGER.warning(f\"{prefix}{e}\")\n\n    # Fit a solution\n    y = [x[2] for x in results if x]  # memory [2]\n    p = np.polyfit(batch_sizes[: len(y)], y, deg=1)  # first degree polynomial fit\n    b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)\n    if None in results:  # some sizes failed\n        i = results.index(None)  # first fail index\n        if b >= batch_sizes[i]:  # y intercept above failure point\n            b = batch_sizes[max(i - 1, 0)]  # select prior safe point\n    if b < 1 or b > 1024:  # b outside of safe range\n        b = batch_size\n        LOGGER.warning(f\"{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.\")\n\n    fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted\n    LOGGER.info(f\"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅\")\n    return b\n"
  },
  {
    "path": "utils/aws/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
  },
  {
    "path": "utils/aws/mime.sh",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/\n# This script will run on every instance restart, not only on first start\n# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---\n\nContent-Type: multipart/mixed\nboundary=\"//\"\nMIME-Version: 1.0\n\n--//\nContent-Type: text/cloud-config\ncharset=\"us-ascii\"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nContent-Disposition: attachment\nfilename=\"cloud-config.txt\"\n\n#cloud-config\ncloud_final_modules:\n- [scripts-user, always]\n\n--//\nContent-Type: text/x-shellscript\ncharset=\"us-ascii\"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nContent-Disposition: attachment\nfilename=\"userdata.txt\"\n\n#!/bin/bash\n# --- paste contents of userdata.sh here ---\n--//\n"
  },
  {
    "path": "utils/aws/resume.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Resume all interrupted trainings in yolov5/ dir including DDP trainings\n# Usage: $ python utils/aws/resume.py\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport torch\nimport yaml\nfrom ultralytics.utils.patches import torch_load\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[2]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n\nport = 0  # --master_port\npath = Path(\"\").resolve()\nfor last in path.rglob(\"*/**/last.pt\"):\n    ckpt = torch_load(last)\n    if ckpt[\"optimizer\"] is None:\n        continue\n\n    # Load opt.yaml\n    with open(last.parent.parent / \"opt.yaml\", errors=\"ignore\") as f:\n        opt = yaml.safe_load(f)\n\n    # Get device count\n    d = opt[\"device\"].split(\",\")  # devices\n    nd = len(d)  # number of devices\n    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel\n\n    if ddp:  # multi-GPU\n        port += 1\n        cmd = f\"python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}\"\n    else:  # single-GPU\n        cmd = f\"python train.py --resume {last}\"\n\n    cmd += \" > /dev/null 2>&1 &\"  # redirect output to dev/null and run in daemon thread\n    print(cmd)\n    os.system(cmd)\n"
  },
  {
    "path": "utils/aws/userdata.sh",
    "content": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n# This script will run only once on first instance start (for a re-start script see mime.sh)\n# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir\n# Use >300 GB SSD\n\ncd home/ubuntu\nif [ ! -d yolov5 ]; then\n  echo \"Running first-time script.\" # install dependencies, download COCO, pull Docker\n  git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5\n  cd yolov5\n  bash data/scripts/get_coco.sh && echo \"COCO done.\" &\n  sudo docker pull ultralytics/yolov5:latest && echo \"Docker done.\" &\n  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo \"Requirements done.\" &\n  wait && echo \"All tasks done.\" # finish background tasks\nelse\n  echo \"Running re-start script.\" # resume interrupted runs\n  i=0\n  list=$(sudo docker ps -qa) # container list i.e. $'one\\ntwo\\nthree\\nfour'\n  while IFS= read -r id; do\n    ((i++))\n    echo \"restarting container $i: $id\"\n    sudo docker start $id\n    # sudo docker exec -it $id python train.py --resume # single-GPU\n    sudo docker exec -d $id python utils/aws/resume.py # multi-scenario\n  done <<< \"$list\"\nfi\n"
  },
  {
    "path": "utils/callbacks.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Callback utils.\"\"\"\n\nimport threading\n\n\nclass Callbacks:\n    \"\"\"Handles all registered callbacks for YOLOv5 Hooks.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initializes a Callbacks object to manage registered YOLOv5 training event hooks.\"\"\"\n        self._callbacks = {\n            \"on_pretrain_routine_start\": [],\n            \"on_pretrain_routine_end\": [],\n            \"on_train_start\": [],\n            \"on_train_epoch_start\": [],\n            \"on_train_batch_start\": [],\n            \"optimizer_step\": [],\n            \"on_before_zero_grad\": [],\n            \"on_train_batch_end\": [],\n            \"on_train_epoch_end\": [],\n            \"on_val_start\": [],\n            \"on_val_batch_start\": [],\n            \"on_val_image_end\": [],\n            \"on_val_batch_end\": [],\n            \"on_val_end\": [],\n            \"on_fit_epoch_end\": [],  # fit = train + val\n            \"on_model_save\": [],\n            \"on_train_end\": [],\n            \"on_params_update\": [],\n            \"teardown\": [],\n        }\n        self.stop_training = False  # set True to interrupt training\n\n    def register_action(self, hook, name=\"\", callback=None):\n        \"\"\"Register a new action to a callback hook.\n\n        Args:\n            hook: The callback hook name to register the action to\n            name: The name of the action for later reference\n            callback: The callback to fire\n        \"\"\"\n        assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n        assert callable(callback), f\"callback '{callback}' is not callable\"\n        self._callbacks[hook].append({\"name\": name, \"callback\": callback})\n\n    def get_registered_actions(self, hook=None):\n        \"\"\"Returns all the registered actions by callback hook.\n\n        Args:\n            hook: The name of the hook to check, defaults to all\n        \"\"\"\n        return self._callbacks[hook] if hook else self._callbacks\n\n    def run(self, hook, *args, thread=False, **kwargs):\n        \"\"\"Loop through the registered actions and fire all callbacks on main thread.\n\n        Args:\n            hook: The name of the hook to check, defaults to all\n            args: Arguments to receive from YOLOv5\n            thread: (boolean) Run callbacks in daemon thread\n            kwargs: Keyword Arguments to receive from YOLOv5\n        \"\"\"\n        assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n        for logger in self._callbacks[hook]:\n            if thread:\n                threading.Thread(target=logger[\"callback\"], args=args, kwargs=kwargs, daemon=True).start()\n            else:\n                logger[\"callback\"](*args, **kwargs)\n"
  },
  {
    "path": "utils/dataloaders.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Dataloaders and dataset utils.\"\"\"\n\nimport contextlib\nimport glob\nimport hashlib\nimport json\nimport math\nimport os\nimport random\nimport shutil\nimport time\nfrom itertools import repeat\nfrom multiprocessing.pool import Pool, ThreadPool\nfrom pathlib import Path\nfrom threading import Thread\nfrom urllib.parse import urlparse\n\nimport numpy as np\nimport psutil\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nimport yaml\nfrom PIL import ExifTags, Image, ImageOps\nfrom torch.utils.data import DataLoader, Dataset, dataloader, distributed\nfrom tqdm import tqdm\n\nfrom utils.augmentations import (\n    Albumentations,\n    augment_hsv,\n    classify_albumentations,\n    classify_transforms,\n    copy_paste,\n    letterbox,\n    mixup,\n    random_perspective,\n)\nfrom utils.general import (\n    DATASETS_DIR,\n    LOGGER,\n    NUM_THREADS,\n    TQDM_BAR_FORMAT,\n    check_dataset,\n    check_requirements,\n    check_yaml,\n    clean_str,\n    cv2,\n    is_colab,\n    is_kaggle,\n    segments2boxes,\n    unzip_file,\n    xyn2xy,\n    xywh2xyxy,\n    xywhn2xyxy,\n    xyxy2xywhn,\n)\nfrom utils.torch_utils import torch_distributed_zero_first\n\n# Parameters\nHELP_URL = \"See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data\"\nIMG_FORMATS = \"bmp\", \"dng\", \"jpeg\", \"jpg\", \"mpo\", \"png\", \"tif\", \"tiff\", \"webp\", \"pfm\"  # include image suffixes\nVID_FORMATS = \"asf\", \"avi\", \"gif\", \"m4v\", \"mkv\", \"mov\", \"mp4\", \"mpeg\", \"mpg\", \"ts\", \"wmv\"  # include video suffixes\nLOCAL_RANK = int(os.getenv(\"LOCAL_RANK\", -1))  # https://pytorch.org/docs/stable/elastic/run.html\nRANK = int(os.getenv(\"RANK\", -1))\nWORLD_SIZE = int(os.getenv(\"WORLD_SIZE\", 1))\nPIN_MEMORY = str(os.getenv(\"PIN_MEMORY\", True)).lower() == \"true\"  # global pin_memory for dataloaders\n\n# Get orientation exif tag\nfor orientation in ExifTags.TAGS.keys():\n    if ExifTags.TAGS[orientation] == \"Orientation\":\n        break\n\n\ndef get_hash(paths):\n    \"\"\"Generates a single SHA256 hash for a list of file or directory paths by combining their sizes and paths.\"\"\"\n    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes\n    h = hashlib.sha256(str(size).encode())  # hash sizes\n    h.update(\"\".join(paths).encode())  # hash paths\n    return h.hexdigest()  # return hash\n\n\ndef exif_size(img):\n    \"\"\"Returns corrected PIL image size (width, height) considering EXIF orientation.\"\"\"\n    s = img.size  # (width, height)\n    with contextlib.suppress(Exception):\n        rotation = dict(img._getexif().items())[orientation]\n        if rotation in [6, 8]:  # rotation 270 or 90\n            s = (s[1], s[0])\n    return s\n\n\ndef exif_transpose(image):\n    \"\"\"\n    Transpose a PIL image accordingly if it has an EXIF Orientation tag.\n    Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose().\n\n    :param image: The image to transpose.\n    :return: An image.\n    \"\"\"\n    exif = image.getexif()\n    orientation = exif.get(0x0112, 1)  # default 1\n    if orientation > 1:\n        method = {\n            2: Image.FLIP_LEFT_RIGHT,\n            3: Image.ROTATE_180,\n            4: Image.FLIP_TOP_BOTTOM,\n            5: Image.TRANSPOSE,\n            6: Image.ROTATE_270,\n            7: Image.TRANSVERSE,\n            8: Image.ROTATE_90,\n        }.get(orientation)\n        if 
method is not None:\n            image = image.transpose(method)\n            del exif[0x0112]\n            image.info[\"exif\"] = exif.tobytes()\n    return image\n\n\ndef seed_worker(worker_id):\n    \"\"\"Sets the seed for a dataloader worker to ensure reproducibility, based on PyTorch's randomness notes.\n\n    See https://pytorch.org/docs/stable/notes/randomness.html#dataloader.\n    \"\"\"\n    worker_seed = torch.initial_seed() % 2**32\n    np.random.seed(worker_seed)\n    random.seed(worker_seed)\n\n\n# Inherit from DistributedSampler and override iterator\n# https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py\nclass SmartDistributedSampler(distributed.DistributedSampler):\n    \"\"\"A distributed sampler ensuring deterministic shuffling and balanced data distribution across GPUs.\"\"\"\n\n    def __iter__(self):\n        \"\"\"Yields indices for distributed data sampling, shuffled deterministically based on epoch and seed.\"\"\"\n        g = torch.Generator()\n        g.manual_seed(self.seed + self.epoch)\n\n        # determine the eventual size (n) of self.indices (DDP indices)\n        n = int((len(self.dataset) - self.rank - 1) / self.num_replicas) + 1  # num_replicas == WORLD_SIZE\n        idx = torch.randperm(n, generator=g)\n        if not self.shuffle:\n            idx = idx.sort()[0]\n\n        idx = idx.tolist()\n        if self.drop_last:\n            idx = idx[: self.num_samples]\n        else:\n            padding_size = self.num_samples - len(idx)\n            if padding_size <= len(idx):\n                idx += idx[:padding_size]\n            else:\n                idx += (idx * math.ceil(padding_size / len(idx)))[:padding_size]\n\n        return iter(idx)\n\n\ndef create_dataloader(\n    path,\n    imgsz,\n    batch_size,\n    stride,\n    single_cls=False,\n    hyp=None,\n    augment=False,\n    cache=False,\n    pad=0.0,\n    rect=False,\n    rank=-1,\n    workers=8,\n    image_weights=False,\n    quad=False,\n    prefix=\"\",\n    shuffle=False,\n    seed=0,\n):\n    \"\"\"Creates and returns a configured DataLoader instance for loading and processing image datasets.\"\"\"\n    if rect and shuffle:\n        LOGGER.warning(\"WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False\")\n        shuffle = False\n    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP\n        dataset = LoadImagesAndLabels(\n            path,\n            imgsz,\n            batch_size,\n            augment=augment,  # augmentation\n            hyp=hyp,  # hyperparameters\n            rect=rect,  # rectangular batches\n            cache_images=cache,\n            single_cls=single_cls,\n            stride=int(stride),\n            pad=pad,\n            image_weights=image_weights,\n            prefix=prefix,\n            rank=rank,\n        )\n\n    batch_size = min(batch_size, len(dataset))\n    nd = torch.cuda.device_count()  # number of CUDA devices\n    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers\n    sampler = None if rank == -1 else SmartDistributedSampler(dataset, shuffle=shuffle)\n    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates\n    generator = torch.Generator()\n    generator.manual_seed(6148914691236517205 + seed + RANK)\n    return loader(\n        dataset,\n        batch_size=batch_size,\n        shuffle=shuffle and sampler is None,\n        num_workers=nw,\n        
sampler=sampler,\n        drop_last=quad,\n        pin_memory=PIN_MEMORY,\n        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,\n        worker_init_fn=seed_worker,\n        generator=generator,\n    ), dataset\n\n\nclass InfiniteDataLoader(dataloader.DataLoader):\n    \"\"\"Dataloader that reuses workers.\n\n    Uses same syntax as vanilla DataLoader\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initializes an InfiniteDataLoader that reuses workers with standard DataLoader syntax, augmenting with a\n        repeating sampler.\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        object.__setattr__(self, \"batch_sampler\", _RepeatSampler(self.batch_sampler))\n        self.iterator = super().__iter__()\n\n    def __len__(self):\n        \"\"\"Returns the length of the batch sampler's sampler in the InfiniteDataLoader.\"\"\"\n        return len(self.batch_sampler.sampler)\n\n    def __iter__(self):\n        \"\"\"Yields batches of data indefinitely in a loop by resetting the sampler when exhausted.\"\"\"\n        for _ in range(len(self)):\n            yield next(self.iterator)\n\n\nclass _RepeatSampler:\n    \"\"\"Sampler that repeats forever.\n\n    Args:\n        sampler (Sampler)\n    \"\"\"\n\n    def __init__(self, sampler):\n        \"\"\"Initializes a perpetual sampler wrapping a provided `Sampler` instance for endless data iteration.\"\"\"\n        self.sampler = sampler\n\n    def __iter__(self):\n        \"\"\"Returns an infinite iterator over the dataset by repeatedly yielding from the given sampler.\"\"\"\n        while True:\n            yield from iter(self.sampler)\n\n\nclass LoadScreenshots:\n    \"\"\"Loads and processes screenshots for YOLOv5 detection from specified screen regions using mss.\"\"\"\n\n    def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):\n        \"\"\"Initializes a screenshot dataloader for YOLOv5 with specified source region, image size, stride, auto, and\n        transforms.\n\n        Source = [screen_number left top width height] (pixels)\n        \"\"\"\n        check_requirements(\"mss\")\n        import mss\n\n        source, *params = source.split()\n        self.screen, left, top, width, height = 0, None, None, None, None  # default to full screen 0\n        if len(params) == 1:\n            self.screen = int(params[0])\n        elif len(params) == 4:\n            left, top, width, height = (int(x) for x in params)\n        elif len(params) == 5:\n            self.screen, left, top, width, height = (int(x) for x in params)\n        self.img_size = img_size\n        self.stride = stride\n        self.transforms = transforms\n        self.auto = auto\n        self.mode = \"stream\"\n        self.frame = 0\n        self.sct = mss.mss()\n\n        # Parse monitor shape\n        monitor = self.sct.monitors[self.screen]\n        self.top = monitor[\"top\"] if top is None else (monitor[\"top\"] + top)\n        self.left = monitor[\"left\"] if left is None else (monitor[\"left\"] + left)\n        self.width = width or monitor[\"width\"]\n        self.height = height or monitor[\"height\"]\n        self.monitor = {\"left\": self.left, \"top\": self.top, \"width\": self.width, \"height\": self.height}\n\n    def __iter__(self):\n        \"\"\"Iterates over itself, enabling use in loops and iterable contexts.\"\"\"\n        return self\n\n    def __next__(self):\n        \"\"\"Captures and returns the next screen frame as a BGR numpy array, 
cropping to only the first three channels\n        from BGRA.\n        \"\"\"\n        im0 = np.array(self.sct.grab(self.monitor))[:, :, :3]  # [:, :, :3] BGRA to BGR\n        s = f\"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: \"\n\n        if self.transforms:\n            im = self.transforms(im0)  # transforms\n        else:\n            im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize\n            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n            im = np.ascontiguousarray(im)  # contiguous\n        self.frame += 1\n        return str(self.screen), im, im0, None, s  # screen, img, original img, im0s, s\n\n\nclass LoadImages:\n    \"\"\"YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`.\"\"\"\n\n    def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n        \"\"\"Initializes YOLOv5 loader for images/videos, supporting glob patterns, directories, and lists of paths.\"\"\"\n        if isinstance(path, str) and Path(path).suffix == \".txt\":  # *.txt file with img/vid/dir on each line\n            path = Path(path).read_text().rsplit()\n        files = []\n        for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n            p = str(Path(p).resolve())\n            if \"*\" in p:\n                files.extend(sorted(glob.glob(p, recursive=True)))  # glob\n            elif os.path.isdir(p):\n                files.extend(sorted(glob.glob(os.path.join(p, \"*.*\"))))  # dir\n            elif os.path.isfile(p):\n                files.append(p)  # files\n            else:\n                raise FileNotFoundError(f\"{p} does not exist\")\n\n        images = [x for x in files if x.split(\".\")[-1].lower() in IMG_FORMATS]\n        videos = [x for x in files if x.split(\".\")[-1].lower() in VID_FORMATS]\n        ni, nv = len(images), len(videos)\n\n        self.img_size = img_size\n        self.stride = stride\n        self.files = images + videos\n        self.nf = ni + nv  # number of files\n        self.video_flag = [False] * ni + [True] * nv\n        self.mode = \"image\"\n        self.auto = auto\n        self.transforms = transforms  # optional\n        self.vid_stride = vid_stride  # video frame-rate stride\n        if any(videos):\n            self._new_video(videos[0])  # new video\n        else:\n            self.cap = None\n        assert self.nf > 0, (\n            f\"No images or videos found in {p}. 
Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}\"\n        )\n\n    def __iter__(self):\n        \"\"\"Initializes iterator by resetting count and returns the iterator object itself.\"\"\"\n        self.count = 0\n        return self\n\n    def __next__(self):\n        \"\"\"Advances to the next file in the dataset, raising StopIteration if at the end.\"\"\"\n        if self.count == self.nf:\n            raise StopIteration\n        path = self.files[self.count]\n\n        if self.video_flag[self.count]:\n            # Read video\n            self.mode = \"video\"\n            for _ in range(self.vid_stride):\n                self.cap.grab()\n            ret_val, im0 = self.cap.retrieve()\n            while not ret_val:\n                self.count += 1\n                self.cap.release()\n                if self.count == self.nf:  # last video\n                    raise StopIteration\n                path = self.files[self.count]\n                self._new_video(path)\n                ret_val, im0 = self.cap.read()\n\n            self.frame += 1\n            # im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False\n            s = f\"video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: \"\n\n        else:\n            # Read image\n            self.count += 1\n            im0 = cv2.imread(path)  # BGR\n            assert im0 is not None, f\"Image Not Found {path}\"\n            s = f\"image {self.count}/{self.nf} {path}: \"\n\n        if self.transforms:\n            im = self.transforms(im0)  # transforms\n        else:\n            im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize\n            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n            im = np.ascontiguousarray(im)  # contiguous\n\n        return path, im, im0, self.cap, s\n\n    def _new_video(self, path):\n        \"\"\"Initialize a new video capture object with path, frame count adjusted by stride, and orientation metadata.\"\"\"\n        self.frame = 0\n        self.cap = cv2.VideoCapture(path)\n        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)\n        self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META))  # rotation degrees\n        # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0)  # disable https://github.com/ultralytics/yolov5/issues/8493\n\n    def _cv2_rotate(self, im):\n        \"\"\"Rotates a cv2 image based on its orientation; supports 0, 90, and 180 degrees rotations.\"\"\"\n        if self.orientation == 0:\n            return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)\n        elif self.orientation == 180:\n            return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)\n        elif self.orientation == 90:\n            return cv2.rotate(im, cv2.ROTATE_180)\n        return im\n\n    def __len__(self):\n        \"\"\"Returns the number of files in the dataset.\"\"\"\n        return self.nf  # number of files\n\n\nclass LoadStreams:\n    \"\"\"Loads and processes video streams for YOLOv5, supporting various sources including YouTube and IP cameras.\"\"\"\n\n    def __init__(self, sources=\"file.streams\", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n        \"\"\"Initializes a stream loader for processing video streams with YOLOv5, supporting various sources including\n        YouTube.\n        \"\"\"\n        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference\n        self.mode = \"stream\"\n        
self.img_size = img_size\n        self.stride = stride\n        self.vid_stride = vid_stride  # video frame-rate stride\n        sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]\n        n = len(sources)\n        self.sources = [clean_str(x) for x in sources]  # clean source names for later\n        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n        for i, s in enumerate(sources):  # index, source\n            # Start thread to read frames from video stream\n            st = f\"{i + 1}/{n}: {s}... \"\n            if urlparse(s).hostname in (\"www.youtube.com\", \"youtube.com\", \"youtu.be\"):  # if source is YouTube video\n                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'\n                check_requirements((\"pafy\", \"youtube_dl==2020.12.2\"))\n                import pafy\n\n                s = pafy.new(s).getbest(preftype=\"mp4\").url  # YouTube URL\n            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam\n            if s == 0:\n                assert not is_colab(), \"--source 0 webcam unsupported on Colab. Rerun command in a local environment.\"\n                assert not is_kaggle(), \"--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.\"\n            cap = cv2.VideoCapture(s)\n            assert cap.isOpened(), f\"{st}Failed to open {s}\"\n            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n            fps = cap.get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan\n            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float(\"inf\")  # infinite stream fallback\n            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback\n\n            _, self.imgs[i] = cap.read()  # guarantee first frame\n            self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n            LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n            self.threads[i].start()\n        LOGGER.info(\"\")  # newline\n\n        # check for common shapes\n        s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])\n        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal\n        self.auto = auto and self.rect\n        self.transforms = transforms  # optional\n        if not self.rect:\n            LOGGER.warning(\"WARNING ⚠️ Stream shapes differ. 
For optimal performance supply similarly-shaped streams.\")\n\n    def update(self, i, cap, stream):\n        \"\"\"Reads frames from stream `i`, updating imgs array; handles stream reopening on signal loss.\"\"\"\n        n, f = 0, self.frames[i]  # frame number, frame array\n        while cap.isOpened() and n < f:\n            n += 1\n            cap.grab()  # .read() = .grab() followed by .retrieve()\n            if n % self.vid_stride == 0:\n                success, im = cap.retrieve()\n                if success:\n                    self.imgs[i] = im\n                else:\n                    LOGGER.warning(\"WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.\")\n                    self.imgs[i] = np.zeros_like(self.imgs[i])\n                    cap.open(stream)  # re-open stream if signal was lost\n            time.sleep(0.0)  # wait time\n\n    def __iter__(self):\n        \"\"\"Resets and returns the iterator for iterating over video frames or images in a dataset.\"\"\"\n        self.count = -1\n        return self\n\n    def __next__(self):\n        \"\"\"Iterates over video frames or images, halting on thread stop or 'q' key press, raising `StopIteration` when\n        done.\n        \"\"\"\n        self.count += 1\n        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord(\"q\"):  # q to quit\n            cv2.destroyAllWindows()\n            raise StopIteration\n\n        im0 = self.imgs.copy()\n        if self.transforms:\n            im = np.stack([self.transforms(x) for x in im0])  # transforms\n        else:\n            im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0])  # resize\n            im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW\n            im = np.ascontiguousarray(im)  # contiguous\n\n        return self.sources, im, im0, None, \"\"\n\n    def __len__(self):\n        \"\"\"Returns the number of sources in the dataset, supporting up to 32 streams at 30 FPS over 30 years.\"\"\"\n        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years\n\n\ndef img2label_paths(img_paths):\n    \"\"\"Generates label file paths from corresponding image file paths by replacing `/images/` with `/labels/` and\n    extension with `.txt`.\n    \"\"\"\n    sa, sb = f\"{os.sep}images{os.sep}\", f\"{os.sep}labels{os.sep}\"  # /images/, /labels/ substrings\n    return [sb.join(x.rsplit(sa, 1)).rsplit(\".\", 1)[0] + \".txt\" for x in img_paths]\n\n\nclass LoadImagesAndLabels(Dataset):\n    \"\"\"Loads images and their corresponding labels for training and validation in YOLOv5.\"\"\"\n\n    cache_version = 0.6  # dataset labels *.cache version\n    rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]\n\n    def __init__(\n        self,\n        path,\n        img_size=640,\n        batch_size=16,\n        augment=False,\n        hyp=None,\n        rect=False,\n        image_weights=False,\n        cache_images=False,\n        single_cls=False,\n        stride=32,\n        pad=0.0,\n        min_items=0,\n        prefix=\"\",\n        rank=-1,\n        seed=0,\n    ):\n        \"\"\"Initializes the YOLOv5 dataset loader, handling images and their labels, caching, and preprocessing.\"\"\"\n        self.img_size = img_size\n        self.augment = augment\n        self.hyp = hyp\n        self.image_weights = image_weights\n        self.rect = False if image_weights else rect\n        
self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)\n        self.mosaic_border = [-img_size // 2, -img_size // 2]\n        self.stride = stride\n        self.path = path\n        self.albumentations = Albumentations(size=img_size) if augment else None\n\n        try:\n            f = []  # image files\n            for p in path if isinstance(path, list) else [path]:\n                p = Path(p)  # os-agnostic\n                if p.is_dir():  # dir\n                    f += glob.glob(str(p / \"**\" / \"*.*\"), recursive=True)\n                    # f = list(p.rglob('*.*'))  # pathlib\n                elif p.is_file():  # file\n                    with open(p) as t:\n                        t = t.read().strip().splitlines()\n                        parent = str(p.parent) + os.sep\n                        f += [x.replace(\"./\", parent, 1) if x.startswith(\"./\") else x for x in t]  # to global path\n                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # to global path (pathlib)\n                else:\n                    raise FileNotFoundError(f\"{prefix}{p} does not exist\")\n            self.im_files = sorted(x.replace(\"/\", os.sep) for x in f if x.split(\".\")[-1].lower() in IMG_FORMATS)\n            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib\n            assert self.im_files, f\"{prefix}No images found\"\n        except Exception as e:\n            raise Exception(f\"{prefix}Error loading data from {path}: {e}\\n{HELP_URL}\") from e\n\n        # Check cache\n        self.label_files = img2label_paths(self.im_files)  # labels\n        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix(\".cache\")\n        try:\n            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict\n            assert cache[\"version\"] == self.cache_version  # matches current version\n            assert cache[\"hash\"] == get_hash(self.label_files + self.im_files)  # identical hash\n        except Exception:\n            cache, exists = self.cache_labels(cache_path, prefix), False  # run cache ops\n\n        # Display cache\n        nf, nm, ne, nc, n = cache.pop(\"results\")  # found, missing, empty, corrupt, total\n        if exists and LOCAL_RANK in {-1, 0}:\n            d = f\"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt\"\n            tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT)  # display cache results\n            if cache[\"msgs\"]:\n                LOGGER.info(\"\\n\".join(cache[\"msgs\"]))  # display warnings\n        assert nf > 0 or not augment, f\"{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}\"\n\n        # Read cache\n        [cache.pop(k) for k in (\"hash\", \"version\", \"msgs\")]  # remove items\n        labels, shapes, self.segments = zip(*cache.values())\n        nl = len(np.concatenate(labels, 0))  # number of labels\n        assert nl > 0 or not augment, f\"{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}\"\n        self.labels = list(labels)\n        self.shapes = np.array(shapes)\n        self.im_files = list(cache.keys())  # update\n        self.label_files = img2label_paths(cache.keys())  # update\n\n        # Filter images\n        if min_items:\n            include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)\n            LOGGER.info(f\"{prefix}{n - len(include)}/{n} images filtered from dataset\")\n            self.im_files = [self.im_files[i] for i in include]\n            self.label_files = [self.label_files[i] for i in include]\n            self.labels = [self.labels[i] for i in include]\n            self.segments = [self.segments[i] for i in include]\n            self.shapes = self.shapes[include]  # wh\n\n        # Create indices\n        n = len(self.shapes)  # number of images\n        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index\n        nb = bi[-1] + 1  # number of batches\n        self.batch = bi  # batch index of image\n        self.n = n\n        self.indices = np.arange(n)\n        if rank > -1:  # DDP indices (see: SmartDistributedSampler)\n            # force each rank (i.e. GPU process) to sample the same subset of data on every epoch\n            self.indices = self.indices[np.random.RandomState(seed=seed).permutation(n) % WORLD_SIZE == RANK]\n\n        # Update labels\n        include_class = []  # filter labels to include only these classes (optional)\n        self.segments = list(self.segments)\n        include_class_array = np.array(include_class).reshape(1, -1)\n        for i, (label, segment) in enumerate(zip(self.labels, self.segments)):\n            if include_class:\n                j = (label[:, 0:1] == include_class_array).any(1)\n                self.labels[i] = label[j]\n                if segment:\n                    self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]\n            if single_cls:  # single-class training, merge all classes into 0\n                self.labels[i][:, 0] = 0\n\n        # Rectangular Training\n        if self.rect:\n            # Sort by aspect ratio\n            s = self.shapes  # wh\n            ar = s[:, 1] / s[:, 0]  # aspect ratio\n            irect = ar.argsort()\n            self.im_files = [self.im_files[i] for i in irect]\n            self.label_files = [self.label_files[i] for i in irect]\n            self.labels = [self.labels[i] for i in irect]\n            self.segments = [self.segments[i] for i in irect]\n            self.shapes = s[irect]  # wh\n            ar = ar[irect]\n\n            # Set training image shapes\n            shapes = [[1, 1]] * nb\n            for i in range(nb):\n                ari = ar[bi == i]\n                mini, maxi = ari.min(), ari.max()\n                if maxi < 1:\n                    shapes[i] = [maxi, 1]\n                elif mini > 1:\n                    shapes[i] = [1, 1 / mini]\n\n            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride\n\n        # Cache images into RAM/disk for faster training\n        if cache_images == \"ram\" and not self.check_cache_ram(prefix=prefix):\n            cache_images = False\n        self.ims = [None] * n\n        self.npy_files = [Path(f).with_suffix(\".npy\") for f in self.im_files]\n        if cache_images:\n            b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes\n            self.im_hw0, self.im_hw = [None] * n, [None] * n\n            fcn = self.cache_images_to_disk if 
cache_images == \"disk\" else self.load_image\n            with ThreadPool(NUM_THREADS) as pool:\n                results = pool.imap(lambda i: (i, fcn(i)), self.indices)\n                pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)\n                for i, x in pbar:\n                    if cache_images == \"disk\":\n                        b += self.npy_files[i].stat().st_size\n                    else:  # 'ram'\n                        self.ims[i], self.im_hw0[i], self.im_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)\n                        b += self.ims[i].nbytes * WORLD_SIZE\n                    pbar.desc = f\"{prefix}Caching images ({b / gb:.1f}GB {cache_images})\"\n                pbar.close()\n\n    def check_cache_ram(self, safety_margin=0.1, prefix=\"\"):\n        \"\"\"Checks if available RAM is sufficient for caching images, adjusting for a safety margin.\"\"\"\n        b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes\n        n = min(self.n, 30)  # extrapolate from 30 random images\n        for _ in range(n):\n            im = cv2.imread(random.choice(self.im_files))  # sample image\n            ratio = self.img_size / max(im.shape[0], im.shape[1])  # max(h, w)  # ratio\n            b += im.nbytes * ratio**2\n        mem_required = b * self.n / n  # GB required to cache dataset into RAM\n        mem = psutil.virtual_memory()\n        cache = mem_required * (1 + safety_margin) < mem.available  # to cache or not to cache, that is the question\n        if not cache:\n            LOGGER.info(\n                f\"{prefix}{mem_required / gb:.1f}GB RAM required, \"\n                f\"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, \"\n                f\"{'caching images ✅' if cache else 'not caching images ⚠️'}\"\n            )\n        return cache\n\n    def cache_labels(self, path=Path(\"./labels.cache\"), prefix=\"\"):\n        \"\"\"Caches dataset labels, verifies images, reads shapes, and tracks dataset integrity.\"\"\"\n        x = {}  # dict\n        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages\n        desc = f\"{prefix}Scanning {path.parent / path.stem}...\"\n        with Pool(NUM_THREADS) as pool:\n            pbar = tqdm(\n                pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),\n                desc=desc,\n                total=len(self.im_files),\n                bar_format=TQDM_BAR_FORMAT,\n            )\n            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:\n                nm += nm_f\n                nf += nf_f\n                ne += ne_f\n                nc += nc_f\n                if im_file:\n                    x[im_file] = [lb, shape, segments]\n                if msg:\n                    msgs.append(msg)\n                pbar.desc = f\"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt\"\n\n        pbar.close()\n        if msgs:\n            LOGGER.info(\"\\n\".join(msgs))\n        if nf == 0:\n            LOGGER.warning(f\"{prefix}WARNING ⚠️ No labels found in {path}. 
{HELP_URL}\")\n        x[\"hash\"] = get_hash(self.label_files + self.im_files)\n        x[\"results\"] = nf, nm, ne, nc, len(self.im_files)\n        x[\"msgs\"] = msgs  # warnings\n        x[\"version\"] = self.cache_version  # cache version\n        try:\n            np.save(path, x)  # save cache for next time\n            path.with_suffix(\".cache.npy\").rename(path)  # remove .npy suffix\n            LOGGER.info(f\"{prefix}New cache created: {path}\")\n        except Exception as e:\n            LOGGER.warning(f\"{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}\")  # not writeable\n        return x\n\n    def __len__(self):\n        \"\"\"Returns the number of images in the dataset.\"\"\"\n        return len(self.im_files)\n\n    # def __iter__(self):\n    #     self.count = -1\n    #     print('ran dataset iter')\n    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n    #     return self\n\n    def __getitem__(self, index):\n        \"\"\"Fetches the dataset item at the given index, considering linear, shuffled, or weighted sampling.\"\"\"\n        index = self.indices[index]  # linear, shuffled, or image_weights\n\n        hyp = self.hyp\n        if mosaic := self.mosaic and random.random() < hyp[\"mosaic\"]:\n            # Load mosaic\n            img, labels = self.load_mosaic(index)\n            shapes = None\n\n            # MixUp augmentation\n            if random.random() < hyp[\"mixup\"]:\n                img, labels = mixup(img, labels, *self.load_mosaic(random.choice(self.indices)))\n\n        else:\n            # Load image\n            img, (h0, w0), (h, w) = self.load_image(index)\n\n            # Letterbox\n            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape\n            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling\n\n            labels = self.labels[index].copy()\n            if labels.size:  # normalized xywh to pixel xyxy format\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n            if self.augment:\n                img, labels = random_perspective(\n                    img,\n                    labels,\n                    degrees=hyp[\"degrees\"],\n                    translate=hyp[\"translate\"],\n                    scale=hyp[\"scale\"],\n                    shear=hyp[\"shear\"],\n                    perspective=hyp[\"perspective\"],\n                )\n\n        nl = len(labels)  # number of labels\n        if nl:\n            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3)\n\n        if self.augment:\n            # Albumentations\n            img, labels = self.albumentations(img, labels)\n            nl = len(labels)  # update after albumentations\n\n            # HSV color-space\n            augment_hsv(img, hgain=hyp[\"hsv_h\"], sgain=hyp[\"hsv_s\"], vgain=hyp[\"hsv_v\"])\n\n            # Flip up-down\n            if random.random() < hyp[\"flipud\"]:\n                img = np.flipud(img)\n                if nl:\n                    labels[:, 2] = 1 - labels[:, 2]\n\n            # Flip left-right\n            if random.random() < hyp[\"fliplr\"]:\n                img = np.fliplr(img)\n                if nl:\n                    labels[:, 1] = 1 - labels[:, 1]\n\n            # Cutouts\n        
    # labels = cutout(img, labels, p=0.5)\n            # nl = len(labels)  # update after cutout\n\n        labels_out = torch.zeros((nl, 6))\n        if nl:\n            labels_out[:, 1:] = torch.from_numpy(labels)\n\n        # Convert\n        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n        img = np.ascontiguousarray(img)\n\n        return torch.from_numpy(img), labels_out, self.im_files[index], shapes\n\n    def load_image(self, i):\n        \"\"\"Loads an image by index, returning the image, its original dimensions, and resized dimensions.\n\n        Returns (im, original hw, resized hw)\n        \"\"\"\n        im, f, fn = (\n            self.ims[i],\n            self.im_files[i],\n            self.npy_files[i],\n        )\n        if im is None:  # not cached in RAM\n            if fn.exists():  # load npy\n                im = np.load(fn)\n            else:  # read image\n                im = cv2.imread(f)  # BGR\n                assert im is not None, f\"Image Not Found {f}\"\n            h0, w0 = im.shape[:2]  # orig hw\n            r = self.img_size / max(h0, w0)  # ratio\n            if r != 1:  # if sizes are not equal\n                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA\n                im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)\n            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized\n        return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized\n\n    def cache_images_to_disk(self, i):\n        \"\"\"Saves an image to disk as an *.npy file for quicker loading, identified by index `i`.\"\"\"\n        f = self.npy_files[i]\n        if not f.exists():\n            np.save(f.as_posix(), cv2.imread(self.im_files[i]))\n\n    def load_mosaic(self, index):\n        \"\"\"Loads a 4-image mosaic for YOLOv5, combining 1 selected and 3 random images, with labels and segments.\"\"\"\n        labels4, segments4 = [], []\n        s = self.img_size\n        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y\n        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices\n        random.shuffle(indices)\n        for i, index in enumerate(indices):\n            # Load image\n            img, _, (h, w) = self.load_image(index)\n\n            # place img in img4\n            if i == 0:  # top left\n                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles\n                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)\n                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)\n            elif i == 1:  # top right\n                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n            elif i == 2:  # bottom left\n                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n            elif i == 3:  # bottom right\n                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]\n            padw = x1a - x1b\n            padh = y1a - 
y1b\n\n            # Labels\n            labels, segments = self.labels[index].copy(), self.segments[index].copy()\n            if labels.size:\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format\n                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n            labels4.append(labels)\n            segments4.extend(segments)\n\n        # Concat/clip labels\n        labels4 = np.concatenate(labels4, 0)\n        for x in (labels4[:, 1:], *segments4):\n            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()\n        # img4, labels4 = replicate(img4, labels4)  # replicate\n\n        # Augment\n        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp[\"copy_paste\"])\n        img4, labels4 = random_perspective(\n            img4,\n            labels4,\n            segments4,\n            degrees=self.hyp[\"degrees\"],\n            translate=self.hyp[\"translate\"],\n            scale=self.hyp[\"scale\"],\n            shear=self.hyp[\"shear\"],\n            perspective=self.hyp[\"perspective\"],\n            border=self.mosaic_border,\n        )  # border to remove\n\n        return img4, labels4\n\n    def load_mosaic9(self, index):\n        \"\"\"Loads 1 image + 8 random images into a 9-image mosaic for augmented YOLOv5 training, returning labels and\n        segments.\n        \"\"\"\n        labels9, segments9 = [], []\n        s = self.img_size\n        indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices\n        random.shuffle(indices)\n        hp, wp = -1, -1  # height, width previous\n        for i, index in enumerate(indices):\n            # Load image\n            img, _, (h, w) = self.load_image(index)\n\n            # place img in img9\n            if i == 0:  # center\n                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles\n                h0, w0 = h, w\n                c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates\n            elif i == 1:  # top\n                c = s, s - h, s + w, s\n            elif i == 2:  # top right\n                c = s + wp, s - h, s + wp + w, s\n            elif i == 3:  # right\n                c = s + w0, s, s + w0 + w, s + h\n            elif i == 4:  # bottom right\n                c = s + w0, s + hp, s + w0 + w, s + hp + h\n            elif i == 5:  # bottom\n                c = s + w0 - w, s + h0, s + w0, s + h0 + h\n            elif i == 6:  # bottom left\n                c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h\n            elif i == 7:  # left\n                c = s - w, s + h0 - h, s, s + h0\n            elif i == 8:  # top left\n                c = s - w, s + h0 - hp - h, s, s + h0 - hp\n\n            padx, pady = c[:2]\n            x1, y1, x2, y2 = (max(x, 0) for x in c)  # allocate coords\n\n            # Labels\n            labels, segments = self.labels[index].copy(), self.segments[index].copy()\n            if labels.size:\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format\n                segments = [xyn2xy(x, w, h, padx, pady) for x in segments]\n            labels9.append(labels)\n            segments9.extend(segments)\n\n            # Image\n            img9[y1:y2, x1:x2] = img[y1 - pady :, x1 - padx :]  # img9[ymin:ymax, xmin:xmax]\n            hp, wp = h, w  # height, width previous\n\n        # Offset\n        yc, 
xc = (int(random.uniform(0, s)) for _ in self.mosaic_border)  # mosaic center x, y\n        img9 = img9[yc : yc + 2 * s, xc : xc + 2 * s]\n\n        # Concat/clip labels\n        labels9 = np.concatenate(labels9, 0)\n        labels9[:, [1, 3]] -= xc\n        labels9[:, [2, 4]] -= yc\n        c = np.array([xc, yc])  # centers\n        segments9 = [x - c for x in segments9]\n\n        for x in (labels9[:, 1:], *segments9):\n            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()\n        # img9, labels9 = replicate(img9, labels9)  # replicate\n\n        # Augment\n        img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp[\"copy_paste\"])\n        img9, labels9 = random_perspective(\n            img9,\n            labels9,\n            segments9,\n            degrees=self.hyp[\"degrees\"],\n            translate=self.hyp[\"translate\"],\n            scale=self.hyp[\"scale\"],\n            shear=self.hyp[\"shear\"],\n            perspective=self.hyp[\"perspective\"],\n            border=self.mosaic_border,\n        )  # border to remove\n\n        return img9, labels9\n\n    @staticmethod\n    def collate_fn(batch):\n        \"\"\"Batches images, labels, paths, and shapes, assigning unique indices to targets in merged label tensor.\"\"\"\n        im, label, path, shapes = zip(*batch)  # transposed\n        for i, lb in enumerate(label):\n            lb[:, 0] = i  # add target image index for build_targets()\n        return torch.stack(im, 0), torch.cat(label, 0), path, shapes\n\n    @staticmethod\n    def collate_fn4(batch):\n        \"\"\"Bundles a batch's data by quartering the number of shapes and paths, preparing it for model input.\"\"\"\n        im, label, path, shapes = zip(*batch)  # transposed\n        n = len(shapes) // 4\n        im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]\n\n        ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])\n        wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])\n        s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale\n        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW\n            i *= 4\n            if random.random() < 0.5:\n                im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode=\"bilinear\", align_corners=False)[\n                    0\n                ].type(im[i].type())\n                lb = label[i]\n            else:\n                im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)\n                lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s\n            im4.append(im1)\n            label4.append(lb)\n\n        for i, lb in enumerate(label4):\n            lb[:, 0] = i  # add target image index for build_targets()\n\n        return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4\n\n\n# Ancillary functions --------------------------------------------------------------------------------------------------\ndef flatten_recursive(path=DATASETS_DIR / \"coco128\"):\n    \"\"\"Flatten a directory by copying all files from subdirs to a new top-level directory, preserving filenames.\"\"\"\n    new_path = Path(f\"{path!s}_flat\")\n    if os.path.exists(new_path):\n        shutil.rmtree(new_path)  # delete output folder\n    os.makedirs(new_path)  # make new output folder\n    for file in tqdm(glob.glob(f\"{Path(path)!s}/**/*.*\", recursive=True)):\n        shutil.copyfile(file, new_path / Path(file).name)\n\n\ndef extract_boxes(path=DATASETS_DIR 
/ \"coco128\"):\n    \"\"\"Converts a detection dataset to a classification dataset, creating a directory for each class and extracting\n    bounding boxes.\n\n    Example: from utils.dataloaders import *; extract_boxes()\n    \"\"\"\n    path = Path(path)  # images dir\n    shutil.rmtree(path / \"classification\") if (path / \"classification\").is_dir() else None  # remove existing\n    files = list(path.rglob(\"*.*\"))\n    n = len(files)  # number of files\n    for im_file in tqdm(files, total=n):\n        if im_file.suffix[1:] in IMG_FORMATS:\n            # image\n            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB\n            h, w = im.shape[:2]\n\n            # labels\n            lb_file = Path(img2label_paths([str(im_file)])[0])\n            if Path(lb_file).exists():\n                with open(lb_file) as f:\n                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels\n\n                for j, x in enumerate(lb):\n                    c = int(x[0])  # class\n                    f = (path / \"classification\") / f\"{c}\" / f\"{path.stem}_{im_file.stem}_{j}.jpg\"  # new filename\n                    if not f.parent.is_dir():\n                        f.parent.mkdir(parents=True)\n\n                    b = x[1:] * [w, h, w, h]  # box\n                    # b[2:] = b[2:].max()  # rectangle to square\n                    b[2:] = b[2:] * 1.2 + 3  # pad\n                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)\n\n                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image\n                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)\n                    assert cv2.imwrite(str(f), im[b[1] : b[3], b[0] : b[2]]), f\"box failure in {f}\"\n\n\ndef autosplit(path=DATASETS_DIR / \"coco128/images\", weights=(0.9, 0.1, 0.0), annotated_only=False):\n    \"\"\"Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders\n    import *; autosplit().\n\n    Args:\n        path: Path to images directory\n        weights: Train, val, test weights (list, tuple)\n        annotated_only: Only use images with an annotated txt file\n    \"\"\"\n    path = Path(path)  # images dir\n    files = sorted(x for x in path.rglob(\"*.*\") if x.suffix[1:].lower() in IMG_FORMATS)  # image files only\n    n = len(files)  # number of files\n    random.seed(0)  # for reproducibility\n    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split\n\n    txt = [\"autosplit_train.txt\", \"autosplit_val.txt\", \"autosplit_test.txt\"]  # 3 txt files\n    for x in txt:\n        if (path.parent / x).exists():\n            (path.parent / x).unlink()  # remove existing\n\n    print(f\"Autosplitting images from {path}\" + \", using *.txt labeled images only\" * annotated_only)\n    for i, img in tqdm(zip(indices, files), total=n):\n        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label\n            with open(path.parent / txt[i], \"a\") as f:\n                f.write(f\"./{img.relative_to(path.parent).as_posix()}\" + \"\\n\")  # add image to txt file\n\n\ndef verify_image_label(args):\n    \"\"\"Verifies a single image-label pair, ensuring image format, size, and legal label values.\"\"\"\n    im_file, lb_file, prefix = args\n    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, \"\", []  # number (missing, found, empty, corrupt), message, segments\n    try:\n        # verify images\n        im = 
Image.open(im_file)\n        im.verify()  # PIL verify\n        shape = exif_size(im)  # image size\n        assert (shape[0] > 9) & (shape[1] > 9), f\"image size {shape} <10 pixels\"\n        assert im.format.lower() in IMG_FORMATS, f\"invalid image format {im.format}\"\n        if im.format.lower() in (\"jpg\", \"jpeg\"):\n            with open(im_file, \"rb\") as f:\n                f.seek(-2, 2)\n                if f.read() != b\"\\xff\\xd9\":  # corrupt JPEG\n                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, \"JPEG\", subsampling=0, quality=100)\n                    msg = f\"{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved\"\n\n        # verify labels\n        if os.path.isfile(lb_file):\n            nf = 1  # label found\n            with open(lb_file) as f:\n                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]\n                if any(len(x) > 6 for x in lb):  # is segment\n                    classes = np.array([x[0] for x in lb], dtype=np.float32)\n                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb]  # (cls, xy1...)\n                    lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)\n                lb = np.array(lb, dtype=np.float32)\n            if nl := len(lb):\n                assert lb.shape[1] == 5, f\"labels require 5 columns, {lb.shape[1]} columns detected\"\n                assert (lb >= 0).all(), f\"negative label values {lb[lb < 0]}\"\n                assert (lb[:, 1:] <= 1).all(), f\"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}\"\n                _, i = np.unique(lb, axis=0, return_index=True)\n                if len(i) < nl:  # duplicate row check\n                    lb = lb[i]  # remove duplicates\n                    if segments:\n                        segments = [segments[x] for x in i]\n                    msg = f\"{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed\"\n            else:\n                ne = 1  # label empty\n                lb = np.zeros((0, 5), dtype=np.float32)\n        else:\n            nm = 1  # label missing\n            lb = np.zeros((0, 5), dtype=np.float32)\n        return im_file, lb, shape, segments, nm, nf, ne, nc, msg\n    except Exception as e:\n        nc = 1\n        msg = f\"{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}\"\n        return [None, None, None, None, nm, nf, ne, nc, msg]\n\n\nclass HUBDatasetStats:\n    \"\"\"Class for generating HUB dataset JSON and `-hub` dataset directory.\n\n    Args:\n        path: Path to data.yaml or data.zip (with data.yaml inside data.zip)\n        autodownload: Attempt to download dataset if not found locally\n\n            Usage\n        from utils.dataloaders import HUBDatasetStats\n        stats = HUBDatasetStats('coco128.yaml', autodownload=True)  # usage 1\n        stats = HUBDatasetStats('path/to/coco128.zip')  # usage 2\n        stats.get_json(save=False)\n        stats.process_images()\n    \"\"\"\n\n    def __init__(self, path=\"coco128.yaml\", autodownload=False):\n        \"\"\"Initializes HUBDatasetStats with optional auto-download for datasets, given a path to dataset YAML or ZIP\n        file.\n        \"\"\"\n        zipped, data_dir, yaml_path = self._unzip(Path(path))\n        try:\n            with open(check_yaml(yaml_path), errors=\"ignore\") as f:\n                data = yaml.safe_load(f)  # data dict\n                if zipped:\n                    
data[\"path\"] = data_dir\n        except Exception as e:\n            raise Exception(\"error/HUB/dataset_stats/yaml_load\") from e\n\n        check_dataset(data, autodownload)  # download dataset if missing\n        self.hub_dir = Path(data[\"path\"] + \"-hub\")\n        self.im_dir = self.hub_dir / \"images\"\n        self.im_dir.mkdir(parents=True, exist_ok=True)  # makes /images\n        self.stats = {\"nc\": data[\"nc\"], \"names\": list(data[\"names\"].values())}  # statistics dictionary\n        self.data = data\n\n    @staticmethod\n    def _find_yaml(dir):\n        \"\"\"Finds and returns the path to a single '.yaml' file in the specified directory, preferring files that match\n        the directory name.\n        \"\"\"\n        files = list(dir.glob(\"*.yaml\")) or list(dir.rglob(\"*.yaml\"))  # try root level first and then recursive\n        assert files, f\"No *.yaml file found in {dir}\"\n        if len(files) > 1:\n            files = [f for f in files if f.stem == dir.stem]  # prefer *.yaml files that match dir name\n            assert files, f\"Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed\"\n        assert len(files) == 1, f\"Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}\"\n        return files[0]\n\n    def _unzip(self, path):\n        \"\"\"Unzips a .zip file at 'path', returning success status, unzipped directory, and path to YAML file within.\"\"\"\n        if not str(path).endswith(\".zip\"):  # path is data.yaml\n            return False, None, path\n        assert Path(path).is_file(), f\"Error unzipping {path}, file not found\"\n        unzip_file(path, path=path.parent)\n        dir = path.with_suffix(\"\")  # dataset directory == zip name\n        assert dir.is_dir(), f\"Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/\"\n        return True, str(dir), self._find_yaml(dir)  # zipped, data_dir, yaml_path\n\n    def _hub_ops(self, f, max_dim=1920):\n        \"\"\"Resizes and saves an image at reduced quality for web/app viewing, supporting both PIL and OpenCV.\"\"\"\n        f_new = self.im_dir / Path(f).name  # dataset-hub image filename\n        try:  # use PIL\n            im = Image.open(f)\n            r = max_dim / max(im.height, im.width)  # ratio\n            if r < 1.0:  # image too large\n                im = im.resize((int(im.width * r), int(im.height * r)))\n            im.save(f_new, \"JPEG\", quality=50, optimize=True)  # save\n        except Exception as e:  # use OpenCV\n            LOGGER.info(f\"WARNING ⚠️ HUB ops PIL failure {f}: {e}\")\n            im = cv2.imread(f)\n            im_height, im_width = im.shape[:2]\n            r = max_dim / max(im_height, im_width)  # ratio\n            if r < 1.0:  # image too large\n                im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)\n            cv2.imwrite(str(f_new), im)\n\n    def get_json(self, save=False, verbose=False):\n        \"\"\"Generates dataset JSON for Ultralytics Platform, optionally saves or prints it; save=bool, verbose=bool.\"\"\"\n\n        def _round(labels):\n            \"\"\"Rounds class labels to integers and coordinates to 4 decimal places for improved label accuracy.\"\"\"\n            return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]\n\n        for split in \"train\", \"val\", \"test\":\n            if self.data.get(split) is None:\n                self.stats[split] = None  # i.e. 
no test set\n                continue\n            dataset = LoadImagesAndLabels(self.data[split])  # load dataset\n            x = np.array(\n                [\n                    np.bincount(label[:, 0].astype(int), minlength=self.data[\"nc\"])\n                    for label in tqdm(dataset.labels, total=dataset.n, desc=\"Statistics\")\n                ]\n            )  # shape(128x80)\n            self.stats[split] = {\n                \"instance_stats\": {\"total\": int(x.sum()), \"per_class\": x.sum(0).tolist()},\n                \"image_stats\": {\n                    \"total\": dataset.n,\n                    \"unlabelled\": int(np.all(x == 0, 1).sum()),\n                    \"per_class\": (x > 0).sum(0).tolist(),\n                },\n                \"labels\": [{str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)],\n            }\n\n        # Save, print and return\n        if save:\n            stats_path = self.hub_dir / \"stats.json\"\n            print(f\"Saving {stats_path.resolve()}...\")\n            with open(stats_path, \"w\") as f:\n                json.dump(self.stats, f)  # save stats.json\n        if verbose:\n            print(json.dumps(self.stats, indent=2, sort_keys=False))\n        return self.stats\n\n    def process_images(self):\n        \"\"\"Compress images across 'train', 'val', 'test' splits and saves to specified directory.\"\"\"\n        for split in \"train\", \"val\", \"test\":\n            if self.data.get(split) is None:\n                continue\n            dataset = LoadImagesAndLabels(self.data[split])  # load dataset\n            desc = f\"{split} images\"\n            for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc):\n                pass\n        print(f\"Done. 
All images saved to {self.im_dir}\")\n        return self.im_dir\n\n\n# Classification dataloaders -------------------------------------------------------------------------------------------\nclass ClassificationDataset(torchvision.datasets.ImageFolder):\n    \"\"\"YOLOv5 Classification Dataset.\n\n    Args:\n        root: Dataset path\n        transform: torchvision transforms, used by default\n        album_transform: Albumentations transforms, used if installed\n    \"\"\"\n\n    def __init__(self, root, augment, imgsz, cache=False):\n        \"\"\"Initializes YOLOv5 Classification Dataset with optional caching, augmentations, and transforms for image\n        classification.\n        \"\"\"\n        super().__init__(root=root)\n        self.torch_transforms = classify_transforms(imgsz)\n        self.album_transforms = classify_albumentations(augment, imgsz) if augment else None\n        self.cache_ram = cache is True or cache == \"ram\"\n        self.cache_disk = cache == \"disk\"\n        self.samples = [[*list(x), Path(x[0]).with_suffix(\".npy\"), None] for x in self.samples]  # file, index, npy, im\n\n    def __getitem__(self, i):\n        \"\"\"Fetches and transforms an image sample by index, supporting RAM/disk caching and Augmentations.\"\"\"\n        f, j, fn, im = self.samples[i]  # filename, index, filename.with_suffix('.npy'), image\n        if self.cache_ram and im is None:\n            im = self.samples[i][3] = cv2.imread(f)\n        elif self.cache_disk:\n            if not fn.exists():  # load npy\n                np.save(fn.as_posix(), cv2.imread(f))\n            im = np.load(fn)\n        else:  # read image\n            im = cv2.imread(f)  # BGR\n        if self.album_transforms:\n            sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))[\"image\"]\n        else:\n            sample = self.torch_transforms(im)\n        return sample, j\n\n\ndef create_classification_dataloader(\n    path, imgsz=224, batch_size=16, augment=True, cache=False, rank=-1, workers=8, shuffle=True\n):\n    # Returns Dataloader object to be used with YOLOv5 Classifier\n    \"\"\"Creates a DataLoader for image classification, supporting caching, augmentation, and distributed training.\"\"\"\n    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP\n        dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)\n    batch_size = min(batch_size, len(dataset))\n    nd = torch.cuda.device_count()\n    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])\n    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n    generator = torch.Generator()\n    generator.manual_seed(6148914691236517205 + RANK)\n    return InfiniteDataLoader(\n        dataset,\n        batch_size=batch_size,\n        shuffle=shuffle and sampler is None,\n        num_workers=nw,\n        sampler=sampler,\n        pin_memory=PIN_MEMORY,\n        worker_init_fn=seed_worker,\n        generator=generator,\n    )  # or DataLoader(persistent_workers=True)\n"
  },
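The classification utilities above pair `ClassificationDataset` with `create_classification_dataloader`, which caps the worker count, seeds a per-rank generator, and wraps everything in an `InfiniteDataLoader`. A minimal usage sketch, assuming the standard repo layout (these helpers importable from `utils.dataloaders` at the repo root) and a hypothetical ImageFolder-style directory with one sub-folder per class:

```python
from utils.dataloaders import create_classification_dataloader

# Hypothetical dataset path: an ImageFolder-style tree such as datasets/imagenette160/train/<class>/*.jpg
loader = create_classification_dataloader(
    path="datasets/imagenette160/train",
    imgsz=224,
    batch_size=16,
    augment=True,   # Albumentations transforms if installed, torchvision transforms otherwise
    cache=False,    # True/"ram" keeps decoded images in memory, "disk" writes .npy copies
    rank=-1,        # -1 = single process, so no DistributedSampler is attached
    workers=4,
    shuffle=True,
)

images, labels = next(iter(loader))
print(images.shape, labels[:4])  # e.g. torch.Size([16, 3, 224, 224]) and the first class indices
```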
  {
    "path": "utils/docker/Dockerfile",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference\n\n# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch\nFROM pytorch/pytorch:2.8.0-cuda12.8-cudnn9-runtime\n\n# Downloads to user config dir\nADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/\n\n# Install linux packages\nENV DEBIAN_FRONTEND noninteractive\nRUN apt update\nRUN TZ=Etc/UTC apt install -y tzdata\nRUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg\n# RUN alias python=python3\n\n# Security updates\n# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796\nRUN apt upgrade --no-install-recommends -y openssl\n\n# Create working directory\nRUN rm -rf /usr/src/app && mkdir -p /usr/src/app\nWORKDIR /usr/src/app\n\n# Copy contents\nCOPY . /usr/src/app\n\n# Install pip packages\nCOPY requirements.txt .\nRUN python3 -m pip install --upgrade pip wheel\nRUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \\\n    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'\n    # tensorflow tensorflowjs \\\n\n# Set environment variables\nENV OMP_NUM_THREADS=1\n\n# Cleanup\nENV DEBIAN_FRONTEND teletype\n\n\n# Usage Examples -------------------------------------------------------------------------------------------------------\n\n# Build and Push\n# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t\n\n# Pull and Run\n# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t\n\n# Pull and Run with local directory access\n# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v \"$(pwd)\"/datasets:/usr/src/datasets $t\n\n# Kill all\n# sudo docker kill $(sudo docker ps -q)\n\n# Kill all image-based\n# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)\n\n# DockerHub tag update\n# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew\n\n# Clean up\n# sudo docker system prune -a --volumes\n\n# Update Ubuntu drivers\n# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/\n\n# DDP test\n# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3\n\n# GCP VM from Image\n# docker.io/ultralytics/yolov5:latest\n"
  },
  {
    "path": "utils/docker/Dockerfile-arm64",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi\n\n# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu\nFROM arm64v8/ubuntu:22.10\n\n# Downloads to user config dir\nADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/\n\n# Install linux packages\nENV DEBIAN_FRONTEND noninteractive\nRUN apt update\nRUN TZ=Etc/UTC apt install -y tzdata\nRUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev\n# RUN alias python=python3\n\n# Install pip packages\nCOPY requirements.txt .\nRUN python3 -m pip install --upgrade pip wheel\nRUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \\\n    coremltools onnx onnxruntime\n    # tensorflow-aarch64 tensorflowjs \\\n\n# Create working directory\nRUN mkdir -p /usr/src/app\nWORKDIR /usr/src/app\n\n# Copy contents\nCOPY . /usr/src/app\nENV DEBIAN_FRONTEND teletype\n\n\n# Usage Examples -------------------------------------------------------------------------------------------------------\n\n# Build and Push\n# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t\n\n# Pull and Run\n# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v \"$(pwd)\"/datasets:/usr/src/datasets $t\n"
  },
  {
    "path": "utils/docker/Dockerfile-cpu",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments\n\n# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu\nFROM ubuntu:23.10\n\n# Downloads to user config dir\nADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/\n\n# Install linux packages\n# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package\nRUN apt update \\\n    && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0\n# RUN alias python=python3\n\n# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error\nRUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED\n\n# Install pip packages\nCOPY requirements.txt .\nRUN python3 -m pip install --upgrade pip wheel\nRUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \\\n    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \\\n    # tensorflow tensorflowjs \\\n    --extra-index-url https://download.pytorch.org/whl/cpu\n\n# Create working directory\nRUN mkdir -p /usr/src/app\nWORKDIR /usr/src/app\n\n# Copy contents\nCOPY . /usr/src/app\n\n\n# Usage Examples -------------------------------------------------------------------------------------------------------\n\n# Build and Push\n# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t\n\n# Pull and Run\n# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v \"$(pwd)\"/datasets:/usr/src/datasets $t\n"
  },
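The CPU image installs CPU-only PyTorch wheels plus ONNX/OpenVINO extras, so it targets inference rather than training. A quick smoke test that could be run inside the container (a sketch, assuming network access for the first PyTorch Hub download):

```python
import torch

# Load the small YOLOv5 model from PyTorch Hub onto the CPU; the first call downloads code and weights
model = torch.hub.load("ultralytics/yolov5", "yolov5s", device="cpu")

# Run inference on a sample image URL and summarize the detections
results = model("https://ultralytics.com/images/zidane.jpg")
print(results.pandas().xyxy[0][["name", "confidence"]])
```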
  {
    "path": "utils/downloads.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Download utils.\"\"\"\n\nimport logging\nimport subprocess\nimport urllib\nfrom pathlib import Path\n\nimport requests\nimport torch\n\n\ndef is_url(url, check=True):\n    \"\"\"Determines if a string is a URL and optionally checks its existence online, returning a boolean.\"\"\"\n    try:\n        url = str(url)\n        result = urllib.parse.urlparse(url)\n        assert all([result.scheme, result.netloc])  # check if is url\n        return (urllib.request.urlopen(url).getcode() == 200) if check else True  # check if exists online\n    except (AssertionError, urllib.request.HTTPError):\n        return False\n\n\ndef gsutil_getsize(url=\"\"):\n    \"\"\"Returns the size in bytes of a file at a Google Cloud Storage URL using `gsutil du`.\n\n    Returns 0 if the command fails or output is empty.\n    \"\"\"\n    output = subprocess.check_output([\"gsutil\", \"du\", url], shell=True, encoding=\"utf-8\")\n    return int(output.split()[0]) if output else 0\n\n\ndef url_getsize(url=\"https://ultralytics.com/images/bus.jpg\"):\n    \"\"\"Returns the size in bytes of a downloadable file at a given URL; defaults to -1 if not found.\"\"\"\n    response = requests.head(url, allow_redirects=True)\n    return int(response.headers.get(\"content-length\", -1))\n\n\ndef curl_download(url, filename, *, silent: bool = False) -> bool:\n    \"\"\"Download a file from a url to a filename using curl.\"\"\"\n    silent_option = \"sS\" if silent else \"\"  # silent\n    proc = subprocess.run(\n        [\n            \"curl\",\n            \"-#\",\n            f\"-{silent_option}L\",\n            url,\n            \"--output\",\n            filename,\n            \"--retry\",\n            \"9\",\n            \"-C\",\n            \"-\",\n        ]\n    )\n    return proc.returncode == 0\n\n\ndef safe_download(file, url, url2=None, min_bytes=1e0, error_msg=\"\"):\n    \"\"\"Downloads a file from a URL (or alternate URL) to a specified path if file is above a minimum size.\n\n    Removes incomplete downloads.\n    \"\"\"\n    from utils.general import LOGGER\n\n    file = Path(file)\n    assert_msg = f\"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}\"\n    try:  # url1\n        LOGGER.info(f\"Downloading {url} to {file}...\")\n        torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)\n        assert file.exists() and file.stat().st_size > min_bytes, assert_msg  # check\n    except Exception as e:  # url2\n        if file.exists():\n            file.unlink()  # remove partial downloads\n        LOGGER.info(f\"ERROR: {e}\\nRe-attempting {url2 or url} to {file}...\")\n        # curl download, retry and resume on fail\n        curl_download(url2 or url, file)\n    finally:\n        if not file.exists() or file.stat().st_size < min_bytes:  # check\n            if file.exists():\n                file.unlink()  # remove partial downloads\n            LOGGER.info(f\"ERROR: {assert_msg}\\n{error_msg}\")\n        LOGGER.info(\"\")\n\n\ndef attempt_download(file, repo=\"ultralytics/yolov5\", release=\"v7.0\"):\n    \"\"\"Download a file from GitHub release assets or via direct URL if not found locally.\"\"\"\n    from utils.general import LOGGER\n\n    def github_assets(repository, version=\"latest\"):\n        \"\"\"Fetches GitHub repository release tag and asset names using the GitHub API.\"\"\"\n        if version != \"latest\":\n            version = f\"tags/{version}\"  
# i.e. tags/v7.0\n        response = requests.get(f\"https://api.github.com/repos/{repository}/releases/{version}\").json()  # github api\n        return response[\"tag_name\"], [x[\"name\"] for x in response[\"assets\"]]  # tag, assets\n\n    file = Path(str(file).strip().replace(\"'\", \"\"))\n    if not file.exists():\n        # URL specified\n        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.\n        if str(file).startswith((\"http:/\", \"https:/\")):  # download\n            url = str(file).replace(\":/\", \"://\")  # Pathlib turns :// -> :/\n            file = name.split(\"?\")[0]  # parse authentication https://url.com/file.txt?auth...\n            if Path(file).is_file():\n                LOGGER.info(f\"Found {url} locally at {file}\")  # file already exists\n            else:\n                safe_download(file=file, url=url, min_bytes=1e5)\n            return file\n\n        # GitHub assets\n        assets = [f\"yolov5{size}{suffix}.pt\" for size in \"nsmlx\" for suffix in (\"\", \"6\", \"-cls\", \"-seg\")]  # default\n        try:\n            tag, assets = github_assets(repo, release)\n        except Exception:\n            try:\n                tag, assets = github_assets(repo)  # latest release\n            except Exception:\n                try:\n                    tag = subprocess.check_output(\"git tag\", shell=True, stderr=subprocess.STDOUT).decode().split()[-1]\n                except Exception:\n                    tag = release\n\n        if name in assets:\n            file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)\n            safe_download(\n                file,\n                url=f\"https://github.com/{repo}/releases/download/{tag}/{name}\",\n                min_bytes=1e5,\n                error_msg=f\"{file} missing, try downloading from https://github.com/{repo}/releases/{tag}\",\n            )\n\n    return str(file)\n"
  },
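`attempt_download` first treats its argument as a direct URL and otherwise matches it against the GitHub release assets built from the `yolov5{n,s,m,l,x}` names, falling back through the latest release and local git tags. A short sketch of the intended call pattern, assuming network access and the published `v7.0` release assets:

```python
from pathlib import Path

from utils.downloads import attempt_download, is_url

# Fetch a named checkpoint from the ultralytics/yolov5 release assets unless it already exists locally
weights = attempt_download("yolov5s.pt")  # returns the file path as a string
print(Path(weights).resolve(), Path(weights).stat().st_size, "bytes")

# Direct URLs are accepted too; is_url() can validate them first (check=True also probes the URL online)
url = "https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt"
if is_url(url, check=False):
    attempt_download(url)
```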
  {
    "path": "utils/flask_rest_api/README.md",
    "content": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg\" width=\"320\" alt=\"Ultralytics logo\"></a>\n\n# Flask REST API for YOLOv5\n\n[Representational State Transfer (REST)](https://en.wikipedia.org/wiki/Representational_state_transfer) [Application Programming Interfaces (APIs)](https://en.wikipedia.org/wiki/API) provide a standardized way to expose [Machine Learning (ML)](https://www.ultralytics.com/glossary/machine-learning-ml) models for use by other services or applications. This directory contains an example REST API built with the [Flask](https://flask.palletsprojects.com/en/stable/) web framework to serve the [Ultralytics YOLOv5s](https://docs.ultralytics.com/models/yolov5/) model, loaded directly from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). This setup allows you to easily integrate YOLOv5 [object detection](https://docs.ultralytics.com/tasks/detect/) capabilities into your web applications or microservices, aligning with common [model deployment options](https://docs.ultralytics.com/guides/model-deployment-options/).\n\n## 💻 Requirements\n\nThe primary requirement is the [Flask](https://flask.palletsprojects.com/en/stable/) web framework. You can install it using pip:\n\n```shell\npip install Flask\n```\n\nYou will also need `torch` and `yolov5`. These are implicitly handled by the script when it loads the model from PyTorch Hub. Ensure you have a functioning Python environment set up.\n\n## ▶️ Run the API\n\nOnce Flask is installed, you can start the API server using the following command:\n\n```shell\npython restapi.py --port 5000\n```\n\nThe server will begin listening on the specified port (defaulting to 5000). You can then send inference requests to the API endpoint using tools like [curl](https://curl.se/) or any other HTTP client.\n\nTo test the API with a local image file (e.g., `zidane.jpg` located in the `yolov5/data/images` directory relative to the script):\n\n```shell\ncurl -X POST -F image=@../data/images/zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'\n```\n\nThe API processes the submitted image using the YOLOv5s model and returns the detection results in [JSON](https://www.json.org/json-en.html) format. 
Each object within the JSON array represents a detected item, including its class ID, confidence score, normalized [bounding box](https://www.ultralytics.com/glossary/bounding-box) coordinates (`xcenter`, `ycenter`, `width`, `height`), and class name.\n\n```json\n[\n  {\n    \"class\": 0,\n    \"confidence\": 0.8900438547,\n    \"height\": 0.9318675399,\n    \"name\": \"person\",\n    \"width\": 0.3264600933,\n    \"xcenter\": 0.7438579798,\n    \"ycenter\": 0.5207948685\n  },\n  {\n    \"class\": 0,\n    \"confidence\": 0.8440024257,\n    \"height\": 0.7155083418,\n    \"name\": \"person\",\n    \"width\": 0.6546785235,\n    \"xcenter\": 0.427829951,\n    \"ycenter\": 0.6334488392\n  },\n  {\n    \"class\": 27,\n    \"confidence\": 0.3771208823,\n    \"height\": 0.3902671337,\n    \"name\": \"tie\",\n    \"width\": 0.0696444362,\n    \"xcenter\": 0.3675483763,\n    \"ycenter\": 0.7991207838\n  },\n  {\n    \"class\": 27,\n    \"confidence\": 0.3527112305,\n    \"height\": 0.1540903747,\n    \"name\": \"tie\",\n    \"width\": 0.0336618312,\n    \"xcenter\": 0.7814827561,\n    \"ycenter\": 0.5065554976\n  }\n]\n```\n\nAn example Python script, `example_request.py`, is included to demonstrate how to perform inference using the popular [requests](https://requests.readthedocs.io/en/latest/) library. This script offers a straightforward method for interacting with the running API programmatically.\n\n## 🤝 Contribute\n\nContributions to enhance this Flask API example are highly encouraged! Whether you're interested in adding support for different YOLO models, improving error handling, or implementing new features, please feel free to fork the repository, apply your changes, and submit a pull request. For more comprehensive contribution guidelines, please refer to the main [Ultralytics YOLOv5 repository](https://github.com/ultralytics/yolov5) and the general [Ultralytics documentation](https://docs.ultralytics.com/).\n"
  },
  {
    "path": "utils/flask_rest_api/example_request.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Perform test request.\"\"\"\n\nimport pprint\n\nimport requests\n\nDETECTION_URL = \"http://localhost:5000/v1/object-detection/yolov5s\"\nIMAGE = \"zidane.jpg\"\n\n# Read image\nwith open(IMAGE, \"rb\") as f:\n    image_data = f.read()\n\nresponse = requests.post(DETECTION_URL, files={\"image\": image_data}).json()\n\npprint.pprint(response)\n"
  },
  {
    "path": "utils/flask_rest_api/restapi.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Run a Flask REST API exposing one or more YOLOv5s models.\"\"\"\n\nimport argparse\nimport io\n\nimport torch\nfrom flask import Flask, request\nfrom PIL import Image\n\napp = Flask(__name__)\nmodels = {}\n\nDETECTION_URL = \"/v1/object-detection/<model>\"\n\n\n@app.route(DETECTION_URL, methods=[\"POST\"])\ndef predict(model):\n    \"\"\"Predict and return object detections in JSON format given an image and model name via a Flask REST API POST\n    request.\n    \"\"\"\n    if request.method != \"POST\":\n        return\n\n    if request.files.get(\"image\"):\n        # Method 1\n        # with request.files[\"image\"] as f:\n        #     im = Image.open(io.BytesIO(f.read()))\n\n        # Method 2\n        im_file = request.files[\"image\"]\n        im_bytes = im_file.read()\n        im = Image.open(io.BytesIO(im_bytes))\n\n        if model in models:\n            results = models[model](im, size=640)  # reduce size=320 for faster inference\n            return results.pandas().xyxy[0].to_json(orient=\"records\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Flask API exposing YOLOv5 model\")\n    parser.add_argument(\"--port\", default=5000, type=int, help=\"port number\")\n    parser.add_argument(\"--model\", nargs=\"+\", default=[\"yolov5s\"], help=\"model(s) to run, i.e. --model yolov5n yolov5s\")\n    opt = parser.parse_args()\n\n    for m in opt.model:\n        models[m] = torch.hub.load(\"ultralytics/yolov5\", m, force_reload=True, skip_validation=True)\n\n    app.run(host=\"0.0.0.0\", port=opt.port)  # debug=True causes Restarting with stat\n"
  },
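Because `restapi.py` keys every loaded model by name in the `models` dict and binds them all to `/v1/object-detection/<model>`, one process can serve several variants side by side. A client-side sketch, assuming the server was started with `python restapi.py --model yolov5n yolov5s` and that `zidane.jpg` exists locally; each response is the record list from `results.pandas().xyxy[0]`, i.e. pixel-space `xmin/ymin/xmax/ymax` plus `confidence`, `class` and `name`:

```python
import requests

with open("zidane.jpg", "rb") as f:  # assumed local test image
    image_data = f.read()

for name in ("yolov5n", "yolov5s"):  # one endpoint per model loaded at server start
    url = f"http://localhost:5000/v1/object-detection/{name}"
    for det in requests.post(url, files={"image": image_data}).json():
        print(name, det["name"], round(det["confidence"], 2),
              int(det["xmin"]), int(det["ymin"]), int(det["xmax"]), int(det["ymax"]))
```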
  {
    "path": "utils/general.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"General utils.\"\"\"\n\nfrom __future__ import annotations\n\nimport contextlib\nimport glob\nimport inspect\nimport logging\nimport logging.config\nimport math\nimport os\nimport platform\nimport random\nimport re\nimport signal\nimport subprocess\nimport sys\nimport time\nimport urllib\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom tarfile import is_tarfile\nfrom zipfile import ZipFile, is_zipfile\n\nimport cv2\nimport numpy as np\nimport packaging\nimport pandas as pd\nimport torch\nimport torchvision\nimport yaml\n\n# Import 'ultralytics' package or install if missing\ntry:\n    import ultralytics\n\n    assert hasattr(ultralytics, \"__version__\")  # verify package is not directory\nexcept (ImportError, AssertionError):\n    os.system(\"pip install -U ultralytics\")\n    import ultralytics\n\nfrom ultralytics.utils.checks import check_requirements\nfrom ultralytics.utils.patches import torch_load\n\nfrom utils import TryExcept, emojis\nfrom utils.downloads import curl_download, gsutil_getsize\nfrom utils.metrics import box_iou, fitness\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nRANK = int(os.getenv(\"RANK\", -1))\n\n# Settings\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv(\"YOLOv5_DATASETS_DIR\", ROOT.parent / \"datasets\"))  # global datasets directory\nAUTOINSTALL = str(os.getenv(\"YOLOv5_AUTOINSTALL\", True)).lower() == \"true\"  # global auto-install mode\nVERBOSE = str(os.getenv(\"YOLOv5_VERBOSE\", True)).lower() == \"true\"  # global verbose mode\nTQDM_BAR_FORMAT = \"{l_bar}{bar:10}{r_bar}\"  # tqdm bar format\nFONT = \"Arial.ttf\"  # https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf\n\ntorch.set_printoptions(linewidth=320, precision=5, profile=\"long\")\nnp.set_printoptions(linewidth=320, formatter={\"float_kind\": \"{:11.5g}\".format})  # format short g, %precision=5\npd.options.display.max_columns = 10\ncv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)\nos.environ[\"NUMEXPR_MAX_THREADS\"] = str(NUM_THREADS)  # NumExpr max threads\nos.environ[\"OMP_NUM_THREADS\"] = \"1\" if platform.system() == \"darwin\" else str(NUM_THREADS)  # OpenMP (PyTorch and SciPy)\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"  # suppress verbose TF compiler warnings in Colab\nos.environ[\"TORCH_CPP_LOG_LEVEL\"] = \"ERROR\"  # suppress \"NNPACK.cpp could not initialize NNPACK\" warnings\nos.environ[\"KINETO_LOG_LEVEL\"] = \"5\"  # suppress verbose PyTorch profiler output when computing FLOPs\n\n\ndef is_ascii(s=\"\"):\n    \"\"\"Checks if input string `s` contains only ASCII characters; returns `True` if so, otherwise `False`.\"\"\"\n    s = str(s)  # convert list, tuple, None, etc. 
to str\n    return len(s.encode().decode(\"ascii\", \"ignore\")) == len(s)\n\n\ndef is_chinese(s=\"人工智能\"):\n    \"\"\"Determines if a string `s` contains any Chinese characters; returns `True` if so, otherwise `False`.\"\"\"\n    return bool(re.search(\"[\\u4e00-\\u9fff]\", str(s)))\n\n\ndef is_colab():\n    \"\"\"Checks if the current environment is a Google Colab instance; returns `True` for Colab, otherwise `False`.\"\"\"\n    return \"google.colab\" in sys.modules\n\n\ndef is_jupyter():\n    \"\"\"Check if the current script is running inside a Jupyter Notebook. Verified on Colab, Jupyterlab, Kaggle,\n    Paperspace.\n\n    Returns:\n        bool: True if running inside a Jupyter Notebook, False otherwise.\n    \"\"\"\n    with contextlib.suppress(Exception):\n        from IPython import get_ipython\n\n        return get_ipython() is not None\n    return False\n\n\ndef is_kaggle():\n    \"\"\"Checks if the current environment is a Kaggle Notebook by validating environment variables.\"\"\"\n    return os.environ.get(\"PWD\") == \"/kaggle/working\" and os.environ.get(\"KAGGLE_URL_BASE\") == \"https://www.kaggle.com\"\n\n\ndef is_docker() -> bool:\n    \"\"\"Check if the process runs inside a docker container.\"\"\"\n    if Path(\"/.dockerenv\").exists():\n        return True\n    try:  # check if docker is in control groups\n        with open(\"/proc/self/cgroup\") as file:\n            return any(\"docker\" in line for line in file)\n    except OSError:\n        return False\n\n\ndef is_writeable(dir, test=False):\n    \"\"\"Checks if a directory is writable, optionally testing by creating a temporary file if `test=True`.\"\"\"\n    if not test:\n        return os.access(dir, os.W_OK)  # possible issues on Windows\n    file = Path(dir) / \"tmp.txt\"\n    try:\n        with open(file, \"w\"):  # open file with write permissions\n            pass\n        file.unlink()  # remove file\n        return True\n    except OSError:\n        return False\n\n\nLOGGING_NAME = \"yolov5\"\n\n\ndef set_logging(name=LOGGING_NAME, verbose=True):\n    \"\"\"Configures logging with specified verbosity; `name` sets the logger's name, `verbose` controls logging level.\"\"\"\n    rank = int(os.getenv(\"RANK\", -1))  # rank in world for Multi-GPU trainings\n    level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR\n    logging.config.dictConfig(\n        {\n            \"version\": 1,\n            \"disable_existing_loggers\": False,\n            \"formatters\": {name: {\"format\": \"%(message)s\"}},\n            \"handlers\": {\n                name: {\n                    \"class\": \"logging.StreamHandler\",\n                    \"formatter\": name,\n                    \"level\": level,\n                }\n            },\n            \"loggers\": {\n                name: {\n                    \"level\": level,\n                    \"handlers\": [name],\n                    \"propagate\": False,\n                }\n            },\n        }\n    )\n\n\nset_logging(LOGGING_NAME)  # run before defining LOGGER\nLOGGER = logging.getLogger(LOGGING_NAME)  # define globally (used in train.py, val.py, detect.py, etc.)\nif platform.system() == \"Windows\":\n    for fn in LOGGER.info, LOGGER.warning:\n        setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x)))  # emoji safe logging\n\n\ndef user_config_dir(dir=\"Ultralytics\", env_var=\"YOLOV5_CONFIG_DIR\"):\n    \"\"\"Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS-\n    specific.\n 
   \"\"\"\n    if env := os.getenv(env_var):\n        path = Path(env)  # use environment variable\n    else:\n        cfg = {\"Windows\": \"AppData/Roaming\", \"Linux\": \".config\", \"Darwin\": \"Library/Application Support\"}  # 3 OS dirs\n        path = Path.home() / cfg.get(platform.system(), \"\")  # OS-specific config dir\n        path = (path if is_writeable(path) else Path(\"/tmp\")) / dir  # GCP and AWS lambda fix, only /tmp is writeable\n    path.mkdir(exist_ok=True)  # make if required\n    return path\n\n\nCONFIG_DIR = user_config_dir()  # Ultralytics settings dir\n\n\nclass Profile(contextlib.ContextDecorator):\n    \"\"\"Context manager and decorator for profiling code execution time, with optional CUDA synchronization.\"\"\"\n\n    def __init__(self, t=0.0, device: torch.device = None):\n        \"\"\"Initializes a profiling context for YOLOv5 with optional timing threshold and device specification.\"\"\"\n        self.t = t\n        self.device = device\n        self.cuda = bool(device and str(device).startswith(\"cuda\"))\n\n    def __enter__(self):\n        \"\"\"Initializes timing at the start of a profiling context block for performance measurement.\"\"\"\n        self.start = self.time()\n        return self\n\n    def __exit__(self, type, value, traceback):\n        \"\"\"Concludes timing, updating duration for profiling upon exiting a context block.\"\"\"\n        self.dt = self.time() - self.start  # delta-time\n        self.t += self.dt  # accumulate dt\n\n    def time(self):\n        \"\"\"Measures and returns the current time, synchronizing CUDA operations if `cuda` is True.\"\"\"\n        if self.cuda:\n            torch.cuda.synchronize(self.device)\n        return time.time()\n\n\nclass Timeout(contextlib.ContextDecorator):\n    \"\"\"Enforces a timeout on code execution, raising TimeoutError if the specified duration is exceeded.\"\"\"\n\n    def __init__(self, seconds, *, timeout_msg=\"\", suppress_timeout_errors=True):\n        \"\"\"Initializes a timeout context/decorator with defined seconds, optional message, and error suppression.\"\"\"\n        self.seconds = int(seconds)\n        self.timeout_message = timeout_msg\n        self.suppress = bool(suppress_timeout_errors)\n\n    def _timeout_handler(self, signum, frame):\n        \"\"\"Raises a TimeoutError with a custom message when a timeout event occurs.\"\"\"\n        raise TimeoutError(self.timeout_message)\n\n    def __enter__(self):\n        \"\"\"Initializes timeout mechanism on non-Windows platforms, starting a countdown to raise TimeoutError.\"\"\"\n        if platform.system() != \"Windows\":  # not supported on Windows\n            signal.signal(signal.SIGALRM, self._timeout_handler)  # Set handler for SIGALRM\n            signal.alarm(self.seconds)  # start countdown for SIGALRM to be raised\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"Disables active alarm on non-Windows systems and optionally suppresses TimeoutError if set.\"\"\"\n        if platform.system() != \"Windows\":\n            signal.alarm(0)  # Cancel SIGALRM if it's scheduled\n            if self.suppress and exc_type is TimeoutError:  # Suppress TimeoutError\n                return True\n\n\nclass WorkingDirectory(contextlib.ContextDecorator):\n    \"\"\"Context manager/decorator to temporarily change the working directory within a 'with' statement or decorator.\"\"\"\n\n    def __init__(self, new_dir):\n        \"\"\"Initializes a context manager/decorator to temporarily change the working 
directory.\"\"\"\n        self.dir = new_dir  # new dir\n        self.cwd = Path.cwd().resolve()  # current dir\n\n    def __enter__(self):\n        \"\"\"Temporarily changes the working directory within a 'with' statement context.\"\"\"\n        os.chdir(self.dir)\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"Restores the original working directory upon exiting a 'with' statement context.\"\"\"\n        os.chdir(self.cwd)\n\n\ndef methods(instance):\n    \"\"\"Returns list of method names for a class/instance excluding dunder methods.\"\"\"\n    return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith(\"__\")]\n\n\ndef print_args(args: dict | None = None, show_file=True, show_func=False):\n    \"\"\"Logs the arguments of the calling function, with options to include the filename and function name.\"\"\"\n    x = inspect.currentframe().f_back  # previous frame\n    file, _, func, _, _ = inspect.getframeinfo(x)\n    if args is None:  # get args automatically\n        args, _, _, frm = inspect.getargvalues(x)\n        args = {k: v for k, v in frm.items() if k in args}\n    try:\n        file = Path(file).resolve().relative_to(ROOT).with_suffix(\"\")\n    except ValueError:\n        file = Path(file).stem\n    s = (f\"{file}: \" if show_file else \"\") + (f\"{func}: \" if show_func else \"\")\n    LOGGER.info(colorstr(s) + \", \".join(f\"{k}={v}\" for k, v in args.items()))\n\n\ndef init_seeds(seed=0, deterministic=False):\n    \"\"\"Initializes RNG seeds and sets deterministic options if specified.\n\n    See https://pytorch.org/docs/stable/notes/randomness.html\n    \"\"\"\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)  # for Multi-GPU, exception safe\n    # torch.backends.cudnn.benchmark = True  # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287\n    if deterministic and check_version(torch.__version__, \"1.12.0\"):  # https://github.com/ultralytics/yolov5/pull/8213\n        torch.use_deterministic_algorithms(True)\n        torch.backends.cudnn.deterministic = True\n        os.environ[\"CUBLAS_WORKSPACE_CONFIG\"] = \":4096:8\"\n        os.environ[\"PYTHONHASHSEED\"] = str(seed)\n\n\ndef intersect_dicts(da, db, exclude=()):\n    \"\"\"Returns intersection of `da` and `db` dicts with matching keys and shapes, excluding `exclude` keys; uses `da`\n    values.\n    \"\"\"\n    return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}\n\n\ndef get_default_args(func):\n    \"\"\"Returns a dict of `func` default arguments by inspecting its signature.\"\"\"\n    signature = inspect.signature(func)\n    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}\n\n\ndef get_latest_run(search_dir=\".\"):\n    \"\"\"Returns the path to the most recent 'last.pt' file in /runs to resume from, searches in `search_dir`.\"\"\"\n    last_list = glob.glob(f\"{search_dir}/**/last*.pt\", recursive=True)\n    return max(last_list, key=os.path.getctime) if last_list else \"\"\n\n\ndef file_age(path=__file__):\n    \"\"\"Calculates and returns the age of a file in days based on its last modification time.\"\"\"\n    dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)  # delta\n    return dt.days  # + dt.seconds / 86400  # fractional days\n\n\ndef file_date(path=__file__):\n    \"\"\"Returns a human-readable file 
modification date in 'YYYY-M-D' format, given a file path.\"\"\"\n    t = datetime.fromtimestamp(Path(path).stat().st_mtime)\n    return f\"{t.year}-{t.month}-{t.day}\"\n\n\ndef file_size(path):\n    \"\"\"Returns file or directory size in megabytes (MB) for a given path, where directories are recursively summed.\"\"\"\n    mb = 1 << 20  # bytes to MiB (1024 ** 2)\n    path = Path(path)\n    if path.is_file():\n        return path.stat().st_size / mb\n    elif path.is_dir():\n        return sum(f.stat().st_size for f in path.glob(\"**/*\") if f.is_file()) / mb\n    else:\n        return 0.0\n\n\ndef check_online():\n    \"\"\"Checks internet connectivity by attempting to create a connection to \"1.1.1.1\" on port 443, retries once if the\n    first attempt fails.\n    \"\"\"\n    import socket\n\n    def run_once():\n        \"\"\"Checks internet connectivity by attempting to create a connection to \"1.1.1.1\" on port 443.\"\"\"\n        try:\n            socket.create_connection((\"1.1.1.1\", 443), 5)  # check host accessibility\n            return True\n        except OSError:\n            return False\n\n    return run_once() or run_once()  # check twice to increase robustness to intermittent connectivity issues\n\n\ndef git_describe(path=ROOT):\n    \"\"\"Returns a human-readable git description of the repository at `path`, or an empty string on failure.\n\n    Example output is 'fv5.0-5-g3e25f1e'. See https://git-scm.com/docs/git-describe.\n    \"\"\"\n    try:\n        assert (Path(path) / \".git\").is_dir()\n        return check_output(f\"git -C {path} describe --tags --long --always\", shell=True).decode()[:-1]\n    except Exception:\n        return \"\"\n\n\n@TryExcept()\n@WorkingDirectory(ROOT)\ndef check_git_status(repo=\"ultralytics/yolov5\", branch=\"master\"):\n    \"\"\"Checks if YOLOv5 code is up-to-date with the repository, advising 'git pull' if behind; errors return informative\n    messages.\n    \"\"\"\n    url = f\"https://github.com/{repo}\"\n    msg = f\", for updates see {url}\"\n    s = colorstr(\"github: \")  # string\n    assert Path(\".git\").exists(), s + \"skipping check (not a git repository)\" + msg\n    assert check_online(), s + \"skipping check (offline)\" + msg\n\n    splits = re.split(pattern=r\"\\s\", string=check_output(\"git remote -v\", shell=True).decode())\n    matches = [repo in s for s in splits]\n    if any(matches):\n        remote = splits[matches.index(True) - 1]\n    else:\n        remote = \"ultralytics\"\n        check_output(f\"git remote add {remote} {url}\", shell=True)\n    check_output(f\"git fetch {remote}\", shell=True, timeout=5)  # git fetch\n    local_branch = check_output(\"git rev-parse --abbrev-ref HEAD\", shell=True).decode().strip()  # checked out\n    n = int(check_output(f\"git rev-list {local_branch}..{remote}/{branch} --count\", shell=True))  # commits behind\n    if n > 0:\n        pull = \"git pull\" if remote == \"origin\" else f\"git pull {remote} {branch}\"\n        s += f\"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update.\"\n    else:\n        s += f\"up to date with {url} ✅\"\n    LOGGER.info(s)\n\n\n@WorkingDirectory(ROOT)\ndef check_git_info(path=\".\"):\n    \"\"\"Checks YOLOv5 git info, returning a dict with remote URL, branch name, and commit hash.\"\"\"\n    check_requirements(\"gitpython\")\n    import git\n\n    try:\n        repo = git.Repo(path)\n        remote = repo.remotes.origin.url.replace(\".git\", \"\")  # i.e. 
'https://github.com/ultralytics/yolov5'\n        commit = repo.head.commit.hexsha  # i.e. '3134699c73af83aac2a481435550b968d5792c0d'\n        try:\n            branch = repo.active_branch.name  # i.e. 'main'\n        except TypeError:  # not on any branch\n            branch = None  # i.e. 'detached HEAD' state\n        return {\"remote\": remote, \"branch\": branch, \"commit\": commit}\n    except git.exc.InvalidGitRepositoryError:  # path is not a git dir\n        return {\"remote\": None, \"branch\": None, \"commit\": None}\n\n\ndef check_python(minimum=\"3.8.0\"):\n    \"\"\"Checks if current Python version meets the minimum required version, exits if not.\"\"\"\n    check_version(platform.python_version(), minimum, name=\"Python \", hard=True)\n\n\ndef check_version(current=\"0.0.0\", minimum=\"0.0.0\", name=\"version \", pinned=False, hard=False, verbose=False):\n    \"\"\"Checks if the current version meets the minimum required version, exits or warns based on parameters.\"\"\"\n    current, minimum = (packaging.version.parse(x) for x in (current, minimum))\n    result = (current == minimum) if pinned else (current >= minimum)  # bool\n    s = f\"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed\"  # string\n    if hard:\n        assert result, emojis(s)  # assert min requirements met\n    if verbose and not result:\n        LOGGER.warning(s)\n    return result\n\n\ndef check_img_size(imgsz, s=32, floor=0):\n    \"\"\"Adjusts image size to be divisible by stride `s`, supports int or list/tuple input, returns adjusted size.\"\"\"\n    if isinstance(imgsz, int):  # integer i.e. img_size=640\n        new_size = max(make_divisible(imgsz, int(s)), floor)\n    else:  # list i.e. img_size=[640, 480]\n        imgsz = list(imgsz)  # convert to list if tuple\n        new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n    if new_size != imgsz:\n        LOGGER.warning(f\"WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}\")\n    return new_size\n\n\ndef check_imshow(warn=False):\n    \"\"\"Checks environment support for image display; warns on failure if `warn=True`.\"\"\"\n    try:\n        assert not is_jupyter()\n        assert not is_docker()\n        cv2.imshow(\"test\", np.zeros((1, 1, 3)))\n        cv2.waitKey(1)\n        cv2.destroyAllWindows()\n        cv2.waitKey(1)\n        return True\n    except Exception as e:\n        if warn:\n            LOGGER.warning(f\"WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\\n{e}\")\n        return False\n\n\ndef check_suffix(file=\"yolov5s.pt\", suffix=(\".pt\",), msg=\"\"):\n    \"\"\"Validates if a file or files have an acceptable suffix, raising an error if not.\"\"\"\n    if file and suffix:\n        if isinstance(suffix, str):\n            suffix = [suffix]\n        for f in file if isinstance(file, (list, tuple)) else [file]:\n            s = Path(f).suffix.lower()  # file suffix\n            if len(s):\n                assert s in suffix, f\"{msg}{f} acceptable suffix is {suffix}\"\n\n\ndef check_yaml(file, suffix=(\".yaml\", \".yml\")):\n    \"\"\"Searches/downloads a YAML file, verifies its suffix (.yaml or .yml), and returns the file path.\"\"\"\n    return check_file(file, suffix)\n\n\ndef check_file(file, suffix=\"\"):\n    \"\"\"Searches/downloads a file, checks its suffix (if provided), and returns the file path.\"\"\"\n    check_suffix(file, suffix)  # optional\n    file = str(file)  # convert to str()\n    if 
os.path.isfile(file) or not file:  # exists\n        return file\n    elif file.startswith((\"http:/\", \"https:/\")):  # download\n        url = file  # warning: Pathlib turns :// -> :/\n        file = Path(urllib.parse.unquote(file).split(\"?\")[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth\n        if os.path.isfile(file):\n            LOGGER.info(f\"Found {url} locally at {file}\")  # file already exists\n        else:\n            LOGGER.info(f\"Downloading {url} to {file}...\")\n            torch.hub.download_url_to_file(url, file)\n            assert Path(file).exists() and Path(file).stat().st_size > 0, f\"File download failed: {url}\"  # check\n        return file\n    elif file.startswith(\"clearml://\"):  # ClearML Dataset ID\n        assert \"clearml\" in sys.modules, (\n            \"ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'.\"\n        )\n        return file\n    else:  # search\n        files = []\n        for d in \"data\", \"models\", \"utils\":  # search directories\n            files.extend(glob.glob(str(ROOT / d / \"**\" / file), recursive=True))  # find file\n        assert len(files), f\"File not found: {file}\"  # assert file was found\n        assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\"  # assert unique\n        return files[0]  # return file\n\n\ndef check_font(font=FONT, progress=False):\n    \"\"\"Ensures specified font exists or downloads it from Ultralytics assets, optionally displaying progress.\"\"\"\n    font = Path(font)\n    file = CONFIG_DIR / font.name\n    if not font.exists() and not file.exists():\n        url = f\"https://github.com/ultralytics/assets/releases/download/v0.0.0/{font.name}\"\n        LOGGER.info(f\"Downloading {url} to {file}...\")\n        torch.hub.download_url_to_file(url, str(file), progress=progress)\n\n\ndef check_dataset(data, autodownload=True):\n    \"\"\"Validates and/or auto-downloads a dataset, returning its configuration as a dictionary.\"\"\"\n    # Download (optional)\n    extract_dir = \"\"\n    if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):\n        download(data, dir=f\"{DATASETS_DIR}/{Path(data).stem}\", unzip=True, delete=False, curl=False, threads=1)\n        data = next((DATASETS_DIR / Path(data).stem).rglob(\"*.yaml\"))\n        extract_dir, autodownload = data.parent, False\n\n    # Read yaml (optional)\n    if isinstance(data, (str, Path)):\n        data = yaml_load(data)  # dictionary\n\n    # Checks\n    for k in \"train\", \"val\", \"names\":\n        assert k in data, emojis(f\"data.yaml '{k}:' field missing ❌\")\n    if isinstance(data[\"names\"], (list, tuple)):  # old array format\n        data[\"names\"] = dict(enumerate(data[\"names\"]))  # convert to dict\n    assert all(isinstance(k, int) for k in data[\"names\"].keys()), \"data.yaml names keys must be integers, i.e. 
2: car\"\n    data[\"nc\"] = len(data[\"names\"])\n\n    # Resolve paths\n    path = Path(extract_dir or data.get(\"path\") or \"\")  # optional 'path' default to '.'\n    if not path.is_absolute():\n        path = (ROOT / path).resolve()\n        data[\"path\"] = path  # download scripts\n    for k in \"train\", \"val\", \"test\":\n        if data.get(k):  # prepend path\n            if isinstance(data[k], str):\n                x = (path / data[k]).resolve()\n                if not x.exists() and data[k].startswith(\"../\"):\n                    x = (path / data[k][3:]).resolve()\n                data[k] = str(x)\n            else:\n                data[k] = [str((path / x).resolve()) for x in data[k]]\n\n    # Parse yaml\n    _train, val, _test, s = (data.get(x) for x in (\"train\", \"val\", \"test\", \"download\"))\n    if val:\n        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path\n        if not all(x.exists() for x in val):\n            LOGGER.info(\"\\nDataset not found ⚠️, missing paths %s\" % [str(x) for x in val if not x.exists()])\n            if not s or not autodownload:\n                raise Exception(\"Dataset not found ❌\")\n            t = time.time()\n            if s.startswith(\"http\") and s.endswith(\".zip\"):  # URL\n                f = Path(s).name  # filename\n                LOGGER.info(f\"Downloading {s} to {f}...\")\n                torch.hub.download_url_to_file(s, f)\n                Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True)  # create root\n                unzip_file(f, path=DATASETS_DIR)  # unzip\n                Path(f).unlink()  # remove zip\n                r = None  # success\n            elif s.startswith(\"bash \"):  # bash script\n                LOGGER.info(f\"Running {s} ...\")\n                r = subprocess.run(s, shell=True)\n            else:  # python script\n                r = exec(s, {\"yaml\": data})  # return None\n            dt = f\"({round(time.time() - t, 1)}s)\"\n            s = f\"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}\" if r in (0, None) else f\"failure {dt} ❌\"\n            LOGGER.info(f\"Dataset download {s}\")\n    check_font(\"Arial.ttf\" if is_ascii(data[\"names\"]) else \"Arial.Unicode.ttf\", progress=True)  # download fonts\n    return data  # dictionary\n\n\ndef check_amp(model):\n    \"\"\"Checks PyTorch AMP functionality for a model, returns True if AMP operates correctly, otherwise False.\"\"\"\n    from models.common import AutoShape, DetectMultiBackend\n\n    def amp_allclose(model, im):\n        \"\"\"Compares FP32 and AMP model inference outputs, ensuring they are close within a 10% absolute tolerance.\"\"\"\n        m = AutoShape(model, verbose=False)  # model\n        a = m(im).xywhn[0]  # FP32 inference\n        m.amp = True\n        b = m(im).xywhn[0]  # AMP inference\n        return a.shape == b.shape and torch.allclose(a, b, atol=0.1)  # close to 10% absolute tolerance\n\n    prefix = colorstr(\"AMP: \")\n    device = next(model.parameters()).device  # get model device\n    if device.type in (\"cpu\", \"mps\"):\n        return False  # AMP only used on CUDA devices\n    f = ROOT / \"data\" / \"images\" / \"bus.jpg\"  # image to check\n    im = f if f.exists() else \"https://ultralytics.com/images/bus.jpg\" if check_online() else np.ones((640, 640, 3))\n    try:\n        assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend(\"yolov5n.pt\", device), im)\n        LOGGER.info(f\"{prefix}checks passed ✅\")\n        return 
True\n    except Exception:\n        help_url = \"https://github.com/ultralytics/yolov5/issues/7908\"\n        LOGGER.warning(f\"{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}\")\n        return False\n\n\ndef yaml_load(file=\"data.yaml\"):\n    \"\"\"Safely loads and returns the contents of a YAML file specified by `file` argument.\"\"\"\n    with open(file, errors=\"ignore\") as f:\n        return yaml.safe_load(f)\n\n\ndef yaml_save(file=\"data.yaml\", data=None):\n    \"\"\"Safely saves `data` to a YAML file specified by `file`, converting `Path` objects to strings; `data` is a\n    dictionary.\n    \"\"\"\n    if data is None:\n        data = {}\n    with open(file, \"w\") as f:\n        yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False)\n\n\ndef unzip_file(file, path=None, exclude=(\".DS_Store\", \"__MACOSX\")):\n    \"\"\"Unzips `file` to `path` (default: file's parent), excluding filenames containing any in `exclude` (`.DS_Store`,\n    `__MACOSX`).\n    \"\"\"\n    if path is None:\n        path = Path(file).parent  # default path\n    with ZipFile(file) as zipObj:\n        for f in zipObj.namelist():  # list all archived filenames in the zip\n            if all(x not in f for x in exclude):\n                zipObj.extract(f, path=path)\n\n\ndef url2file(url):\n    \"\"\"Converts a URL string to a valid filename by stripping protocol, domain, and any query parameters.\n\n    Example https://url.com/file.txt?auth -> file.txt\n    \"\"\"\n    url = str(Path(url)).replace(\":/\", \"://\")  # Pathlib turns :// -> :/\n    return Path(urllib.parse.unquote(url)).name.split(\"?\")[0]  # '%2F' to '/', split https://url.com/file.txt?auth\n\n\ndef download(url, dir=\".\", unzip=True, delete=True, curl=False, threads=1, retry=3):\n    \"\"\"Downloads and optionally unzips files concurrently, supporting retries and curl fallback.\"\"\"\n\n    def download_one(url, dir):\n        \"\"\"Downloads a single file from `url` to `dir`, with retry support and optional curl fallback.\"\"\"\n        success = True\n        if os.path.isfile(url):\n            f = Path(url)  # filename\n        else:  # does not exist\n            f = dir / Path(url).name\n            LOGGER.info(f\"Downloading {url} to {f}...\")\n            for i in range(retry + 1):\n                if curl:\n                    success = curl_download(url, f, silent=(threads > 1))\n                else:\n                    torch.hub.download_url_to_file(url, f, progress=threads == 1)  # torch download\n                    success = f.is_file()\n                if success:\n                    break\n                elif i < retry:\n                    LOGGER.warning(f\"⚠️ Download failure, retrying {i + 1}/{retry} {url}...\")\n                else:\n                    LOGGER.warning(f\"❌ Failed to download {url}...\")\n\n        if unzip and success and (f.suffix == \".gz\" or is_zipfile(f) or is_tarfile(f)):\n            LOGGER.info(f\"Unzipping {f}...\")\n            if is_zipfile(f):\n                unzip_file(f, dir)  # unzip\n            elif is_tarfile(f):\n                subprocess.run([\"tar\", \"xf\", f, \"--directory\", f.parent], check=True)  # unzip\n            elif f.suffix == \".gz\":\n                subprocess.run([\"tar\", \"xfz\", f, \"--directory\", f.parent], check=True)  # unzip\n            if delete:\n                f.unlink()  # remove zip\n\n    dir = Path(dir)\n    dir.mkdir(parents=True, exist_ok=True)  # make directory\n 
   if threads > 1:\n        pool = ThreadPool(threads)\n        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multithreaded\n        pool.close()\n        pool.join()\n    else:\n        for u in [url] if isinstance(url, (str, Path)) else url:\n            download_one(u, dir)\n\n\ndef make_divisible(x, divisor):\n    \"\"\"Adjusts `x` to be divisible by `divisor`, returning the nearest greater or equal value.\"\"\"\n    if isinstance(divisor, torch.Tensor):\n        divisor = int(divisor.max())  # to int\n    return math.ceil(x / divisor) * divisor\n\n\ndef clean_str(s):\n    \"\"\"Cleans a string by replacing special characters with underscore, e.g., `clean_str('#example!')` returns\n    '_example_'.\n    \"\"\"\n    return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)\n\n\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\n    \"\"\"Generates a lambda for a sinusoidal ramp from y1 to y2 over 'steps'.\n\n    See https://arxiv.org/pdf/1812.01187.pdf for details.\n    \"\"\"\n    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1\n\n\ndef colorstr(*input):\n    \"\"\"Colors a string using ANSI escape codes, e.g., colorstr('blue', 'hello world').\n\n    See https://en.wikipedia.org/wiki/ANSI_escape_code.\n    \"\"\"\n    *args, string = input if len(input) > 1 else (\"blue\", \"bold\", input[0])  # color arguments, string\n    colors = {\n        \"black\": \"\\033[30m\",  # basic colors\n        \"red\": \"\\033[31m\",\n        \"green\": \"\\033[32m\",\n        \"yellow\": \"\\033[33m\",\n        \"blue\": \"\\033[34m\",\n        \"magenta\": \"\\033[35m\",\n        \"cyan\": \"\\033[36m\",\n        \"white\": \"\\033[37m\",\n        \"bright_black\": \"\\033[90m\",  # bright colors\n        \"bright_red\": \"\\033[91m\",\n        \"bright_green\": \"\\033[92m\",\n        \"bright_yellow\": \"\\033[93m\",\n        \"bright_blue\": \"\\033[94m\",\n        \"bright_magenta\": \"\\033[95m\",\n        \"bright_cyan\": \"\\033[96m\",\n        \"bright_white\": \"\\033[97m\",\n        \"end\": \"\\033[0m\",  # misc\n        \"bold\": \"\\033[1m\",\n        \"underline\": \"\\033[4m\",\n    }\n    return \"\".join(colors[x] for x in args) + f\"{string}\" + colors[\"end\"]\n\n\ndef labels_to_class_weights(labels, nc=80):\n    \"\"\"Calculates class weights from labels to handle class imbalance in training; input shape: (n, 5).\"\"\"\n    if labels[0] is None:  # no labels loaded\n        return torch.Tensor()\n\n    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO\n    classes = labels[:, 0].astype(int)  # labels = [class xywh]\n    weights = np.bincount(classes, minlength=nc)  # occurrences per class\n\n    # Prepend gridpoint count (for uCE training)\n    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image\n    # weights = np.hstack([gpi * len(labels)  - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start\n\n    weights[weights == 0] = 1  # replace empty bins with 1\n    weights = 1 / weights  # number of targets per class\n    weights /= weights.sum()  # normalize\n    return torch.from_numpy(weights).float()\n\n\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\n    \"\"\"Calculates image weights from labels using class weights for weighted sampling.\"\"\"\n    # Usage: index = random.choices(range(n), weights=image_weights, k=1)  # weighted image sample\n    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in 
labels])\n    return (class_weights.reshape(1, nc) * class_counts).sum(1)\n\n\ndef coco80_to_coco91_class():\n    \"\"\"Converts COCO 80-class index to COCO 91-class index used in the paper.\n\n    Reference: https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n    \"\"\"\n    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco\n    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet\n    return [\n        1,\n        2,\n        3,\n        4,\n        5,\n        6,\n        7,\n        8,\n        9,\n        10,\n        11,\n        13,\n        14,\n        15,\n        16,\n        17,\n        18,\n        19,\n        20,\n        21,\n        22,\n        23,\n        24,\n        25,\n        27,\n        28,\n        31,\n        32,\n        33,\n        34,\n        35,\n        36,\n        37,\n        38,\n        39,\n        40,\n        41,\n        42,\n        43,\n        44,\n        46,\n        47,\n        48,\n        49,\n        50,\n        51,\n        52,\n        53,\n        54,\n        55,\n        56,\n        57,\n        58,\n        59,\n        60,\n        61,\n        62,\n        63,\n        64,\n        65,\n        67,\n        70,\n        72,\n        73,\n        74,\n        75,\n        76,\n        77,\n        78,\n        79,\n        80,\n        81,\n        82,\n        84,\n        85,\n        86,\n        87,\n        88,\n        89,\n        90,\n    ]\n\n\ndef xyxy2xywh(x):\n    \"\"\"Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right.\"\"\"\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center\n    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center\n    y[..., 2] = x[..., 2] - x[..., 0]  # width\n    y[..., 3] = x[..., 3] - x[..., 1]  # height\n    return y\n\n\ndef xywh2xyxy(x):\n    \"\"\"Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.\"\"\"\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x\n    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y\n    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom right x\n    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom right y\n    return y\n\n\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\n    \"\"\"Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.\"\"\"\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x\n    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y\n    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom right x\n    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom right y\n    return y\n\n\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\n    \"\"\"Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right.\"\"\"\n    if clip:\n        clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center\n    y[..., 1] = ((x[..., 1] + x[..., 3]) / 
2) / h  # y center\n    y[..., 2] = (x[..., 2] - x[..., 0]) / w  # width\n    y[..., 3] = (x[..., 3] - x[..., 1]) / h  # height\n    return y\n\n\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\n    \"\"\"Convert normalized segments into pixel segments, shape (n,2).\"\"\"\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[..., 0] = w * x[..., 0] + padw  # top left x\n    y[..., 1] = h * x[..., 1] + padh  # top left y\n    return y\n\n\ndef segment2box(segment, width=640, height=640):\n    \"\"\"Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy).\"\"\"\n    x, y = segment.T  # segment xy\n    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)\n    (\n        x,\n        y,\n    ) = x[inside], y[inside]\n    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy\n\n\ndef segments2boxes(segments):\n    \"\"\"Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh).\"\"\"\n    boxes = []\n    for s in segments:\n        x, y = s.T  # segment xy\n        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy\n    return xyxy2xywh(np.array(boxes))  # cls, xywh\n\n\ndef resample_segments(segments, n=1000):\n    \"\"\"Resamples an (n,2) segment to a fixed number of points for consistent representation.\"\"\"\n    for i, s in enumerate(segments):\n        s = np.concatenate((s, s[0:1, :]), axis=0)\n        x = np.linspace(0, len(s) - 1, n)\n        xp = np.arange(len(s))\n        segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy\n    return segments\n\n\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\n    \"\"\"Rescales (xyxy) bounding boxes from img1_shape to img0_shape, optionally using provided `ratio_pad`.\"\"\"\n    if ratio_pad is None:  # calculate from img0_shape\n        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new\n        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding\n    else:\n        gain = ratio_pad[0][0]\n        pad = ratio_pad[1]\n\n    boxes[..., [0, 2]] -= pad[0]  # x padding\n    boxes[..., [1, 3]] -= pad[1]  # y padding\n    boxes[..., :4] /= gain\n    clip_boxes(boxes, img0_shape)\n    return boxes\n\n\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\n    \"\"\"Rescales segment coordinates from img1_shape to img0_shape, optionally normalizing them with custom padding.\"\"\"\n    if ratio_pad is None:  # calculate from img0_shape\n        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new\n        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding\n    else:\n        gain = ratio_pad[0][0]\n        pad = ratio_pad[1]\n\n    segments[:, 0] -= pad[0]  # x padding\n    segments[:, 1] -= pad[1]  # y padding\n    segments /= gain\n    clip_segments(segments, img0_shape)\n    if normalize:\n        segments[:, 0] /= img0_shape[1]  # width\n        segments[:, 1] /= img0_shape[0]  # height\n    return segments\n\n\ndef clip_boxes(boxes, shape):\n    \"\"\"Clips bounding box coordinates (xyxy) to fit within the specified image shape (height, width).\"\"\"\n    if isinstance(boxes, torch.Tensor):  # faster individually\n        boxes[..., 0].clamp_(0, shape[1])  # x1\n        boxes[..., 1].clamp_(0, shape[0])  # y1\n    
    boxes[..., 2].clamp_(0, shape[1])  # x2\n        boxes[..., 3].clamp_(0, shape[0])  # y2\n    else:  # np.array (faster grouped)\n        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2\n        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2\n\n\ndef clip_segments(segments, shape):\n    \"\"\"Clips segment coordinates (xy1, xy2, ...) to an image's boundaries given its shape (height, width).\"\"\"\n    if isinstance(segments, torch.Tensor):  # faster individually\n        segments[:, 0].clamp_(0, shape[1])  # x\n        segments[:, 1].clamp_(0, shape[0])  # y\n    else:  # np.array (faster grouped)\n        segments[:, 0] = segments[:, 0].clip(0, shape[1])  # x\n        segments[:, 1] = segments[:, 1].clip(0, shape[0])  # y\n\n\ndef non_max_suppression(\n    prediction,\n    conf_thres=0.25,\n    iou_thres=0.45,\n    classes=None,\n    agnostic=False,\n    multi_label=False,\n    labels=(),\n    max_det=300,\n    nm=0,  # number of masks\n):\n    \"\"\"Non-Maximum Suppression (NMS) on inference results to reject overlapping detections.\n\n    Returns:\n        list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n    \"\"\"\n    # Checks\n    assert 0 <= conf_thres <= 1, f\"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0\"\n    assert 0 <= iou_thres <= 1, f\"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0\"\n    if isinstance(prediction, (list, tuple)):  # YOLOv5 model in validation model, output = (inference_out, loss_out)\n        prediction = prediction[0]  # select only inference output\n\n    device = prediction.device\n    mps = \"mps\" in device.type  # Apple MPS\n    if mps:  # MPS not fully supported yet, convert tensors to CPU before NMS\n        prediction = prediction.cpu()\n    bs = prediction.shape[0]  # batch size\n    nc = prediction.shape[2] - nm - 5  # number of classes\n    xc = prediction[..., 4] > conf_thres  # candidates\n\n    # Settings\n    # min_wh = 2  # (pixels) minimum box width and height\n    max_wh = 7680  # (pixels) maximum box width and height\n    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()\n    time_limit = 0.5 + 0.05 * bs  # seconds to quit after\n    redundant = True  # require redundant detections\n    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)\n    merge = False  # use merge-NMS\n\n    t = time.time()\n    mi = 5 + nc  # mask start index\n    output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs\n    for xi, x in enumerate(prediction):  # image index, image inference\n        # Apply constraints\n        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height\n        x = x[xc[xi]]  # confidence\n\n        # Cat apriori labels if autolabelling\n        if labels and len(labels[xi]):\n            lb = labels[xi]\n            v = torch.zeros((len(lb), nc + nm + 5), device=x.device)\n            v[:, :4] = lb[:, 1:5]  # box\n            v[:, 4] = 1.0  # conf\n            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls\n            x = torch.cat((x, v), 0)\n\n        # If none remain process next image\n        if not x.shape[0]:\n            continue\n\n        # Compute conf\n        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf\n\n        # Box/Mask\n        box = xywh2xyxy(x[:, :4])  # center_x, center_y, width, height) to (x1, y1, x2, y2)\n        mask = x[:, mi:]  # zero columns if no masks\n\n        # Detections matrix nx6 (xyxy, conf, cls)\n       
 if multi_label:\n            i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T\n            x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)\n        else:  # best class only\n            conf, j = x[:, 5:mi].max(1, keepdim=True)\n            x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]\n\n        # Filter by class\n        if classes is not None:\n            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n        # Apply finite constraint\n        # if not torch.isfinite(x).all():\n        #     x = x[torch.isfinite(x).all(1)]\n\n        # Check shape\n        n = x.shape[0]  # number of boxes\n        if not n:  # no boxes\n            continue\n        x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence and remove excess boxes\n\n        # Batched NMS\n        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes\n        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores\n        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS\n        i = i[:max_det]  # limit detections\n        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)\n            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix\n            weights = iou * scores[None]  # box weights\n            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes\n            if redundant:\n                i = i[iou.sum(1) > 1]  # require redundancy\n\n        output[xi] = x[i]\n        if mps:\n            output[xi] = output[xi].to(device)\n        if (time.time() - t) > time_limit:\n            LOGGER.warning(f\"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded\")\n            break  # time limit exceeded\n\n    return output\n\n\ndef strip_optimizer(f=\"best.pt\", s=\"\"):\n    \"\"\"Strips optimizer and optionally saves checkpoint to finalize training; arguments are file path 'f' and save path\n    's'.\n\n    Example: from utils.general import *; strip_optimizer()\n    \"\"\"\n    x = torch_load(f, map_location=torch.device(\"cpu\"))\n    if x.get(\"ema\"):\n        x[\"model\"] = x[\"ema\"]  # replace model with ema\n    for k in \"optimizer\", \"best_fitness\", \"ema\", \"updates\":  # keys\n        x[k] = None\n    x[\"epoch\"] = -1\n    x[\"model\"].half()  # to FP16\n    for p in x[\"model\"].parameters():\n        p.requires_grad = False\n    torch.save(x, s or f)\n    mb = os.path.getsize(s or f) / 1e6  # filesize\n    LOGGER.info(f\"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB\")\n\n\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr(\"evolve: \")):\n    \"\"\"Logs evolution results and saves to CSV and YAML in `save_dir`, optionally syncs with `bucket`.\"\"\"\n    evolve_csv = save_dir / \"evolve.csv\"\n    evolve_yaml = save_dir / \"hyp_evolve.yaml\"\n    keys = tuple(keys) + tuple(hyp.keys())  # [results + hyps]\n    keys = tuple(x.strip() for x in keys)\n    vals = results + tuple(hyp.values())\n    n = len(keys)\n\n    # Download (optional)\n    if bucket:\n        url = f\"gs://{bucket}/evolve.csv\"\n        if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):\n            subprocess.run([\"gsutil\", \"cp\", f\"{url}\", f\"{save_dir}\"])  # download evolve.csv if larger than local\n\n    # Log to evolve.csv\n    s = \"\" if evolve_csv.exists() else 
((\"%20s,\" * n % keys).rstrip(\",\") + \"\\n\")  # add header\n    with open(evolve_csv, \"a\") as f:\n        f.write(s + (\"%20.5g,\" * n % vals).rstrip(\",\") + \"\\n\")\n\n    # Save yaml\n    with open(evolve_yaml, \"w\") as f:\n        data = pd.read_csv(evolve_csv, skipinitialspace=True)\n        data = data.rename(columns=lambda x: x.strip())  # strip keys\n        i = np.argmax(fitness(data.values[:, :4]))  #\n        generations = len(data)\n        f.write(\n            \"# YOLOv5 Hyperparameter Evolution Results\\n\"\n            + f\"# Best generation: {i}\\n\"\n            + f\"# Last generation: {generations - 1}\\n\"\n            + \"# \"\n            + \", \".join(f\"{x.strip():>20s}\" for x in keys[:7])\n            + \"\\n\"\n            + \"# \"\n            + \", \".join(f\"{x:>20.5g}\" for x in data.values[i, :7])\n            + \"\\n\\n\"\n        )\n        yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)\n\n    # Print to screen\n    LOGGER.info(\n        prefix\n        + f\"{generations} generations finished, current result:\\n\"\n        + prefix\n        + \", \".join(f\"{x.strip():>20s}\" for x in keys)\n        + \"\\n\"\n        + prefix\n        + \", \".join(f\"{x:20.5g}\" for x in vals)\n        + \"\\n\\n\"\n    )\n\n    if bucket:\n        subprocess.run([\"gsutil\", \"cp\", f\"{evolve_csv}\", f\"{evolve_yaml}\", f\"gs://{bucket}\"])  # upload\n\n\ndef apply_classifier(x, model, img, im0):\n    \"\"\"Applies second-stage classifier to YOLO outputs, filtering detections by class match.\"\"\"\n    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()\n    im0 = [im0] if isinstance(im0, np.ndarray) else im0\n    for i, d in enumerate(x):  # per image\n        if d is not None and len(d):\n            d = d.clone()\n\n            # Reshape and pad cutouts\n            b = xyxy2xywh(d[:, :4])  # boxes\n            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square\n            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad\n            d[:, :4] = xywh2xyxy(b).long()\n\n            # Rescale boxes from img_size to im0 size\n            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)\n\n            # Classes\n            pred_cls1 = d[:, 5].long()\n            ims = []\n            for a in d:\n                cutout = im0[i][int(a[1]) : int(a[3]), int(a[0]) : int(a[2])]\n                im = cv2.resize(cutout, (224, 224))  # BGR\n\n                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416\n                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32\n                im /= 255  # 0 - 255 to 0.0 - 1.0\n                ims.append(im)\n\n            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction\n            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections\n\n    return x\n\n\ndef increment_path(path, exist_ok=False, sep=\"\", mkdir=False):\n    \"\"\"Generates an incremented file or directory path if it exists, with optional mkdir; args: path, exist_ok=False,\n    sep=\"\", mkdir=False.\n\n    Example: runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... 
etc\n    \"\"\"\n    path = Path(path)  # os-agnostic\n    if path.exists() and not exist_ok:\n        path, suffix = (path.with_suffix(\"\"), path.suffix) if path.is_file() else (path, \"\")\n\n        # Method 1\n        for n in range(2, 9999):\n            p = f\"{path}{sep}{n}{suffix}\"  # increment path\n            if not os.path.exists(p):  #\n                break\n        path = Path(p)\n\n        # Method 2 (deprecated)\n        # dirs = glob.glob(f\"{path}{sep}*\")  # similar paths\n        # matches = [re.search(rf\"{path.stem}{sep}(\\d+)\", d) for d in dirs]\n        # i = [int(m.groups()[0]) for m in matches if m]  # indices\n        # n = max(i) + 1 if i else 2  # increment number\n        # path = Path(f\"{path}{sep}{n}{suffix}\")  # increment path\n\n    if mkdir:\n        path.mkdir(parents=True, exist_ok=True)  # make directory\n\n    return path\n\n\n# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------\nimshow_ = cv2.imshow  # copy to avoid recursion errors\n\n\ndef imread(filename, flags=cv2.IMREAD_COLOR):\n    \"\"\"Reads an image from a file and returns it as a numpy array, using OpenCV's imdecode to support multilanguage\n    paths.\n    \"\"\"\n    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)\n\n\ndef imwrite(filename, img):\n    \"\"\"Writes an image to a file, returns True on success and False on failure, supports multilanguage paths.\"\"\"\n    try:\n        cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)\n        return True\n    except Exception:\n        return False\n\n\ndef imshow(path, im):\n    \"\"\"Displays an image using Unicode path, requires encoded path and image matrix as input.\"\"\"\n    imshow_(path.encode(\"unicode_escape\").decode(), im)\n\n\nif Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:\n    cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine\n\n# Variables ------------------------------------------------------------------------------------------------------------\n"
  },
  {
    "path": "utils/google_app_engine/Dockerfile",
    "content": "FROM gcr.io/google-appengine/python\n\n# Create a virtualenv for dependencies. This isolates these packages from\n# system-level packages.\n# Use -p python3 or -p python3.7 to select python version. Default is version 2.\nRUN virtualenv /env -p python3\n\n# Setting these environment variables are the same as running\n# source /env/bin/activate.\nENV VIRTUAL_ENV /env\nENV PATH /env/bin:$PATH\n\nRUN apt-get update && apt-get install -y python-opencv\n\n# Copy the application's requirements.txt and run pip to install all\n# dependencies into the virtualenv.\nADD requirements.txt /app/requirements.txt\nRUN pip install -r /app/requirements.txt\n\n# Add the application source code.\nADD . /app\n\n# Run a WSGI server to serve the application. gunicorn must be declared as\n# a dependency in requirements.txt.\nCMD gunicorn -b :$PORT main:app\n"
  },
  {
    "path": "utils/google_app_engine/additional_requirements.txt",
    "content": "# add these requirements in your app on top of the existing ones\npip==26.0\nFlask==2.3.2\ngunicorn==23.0.0\nwerkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability\nzipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability\n"
  },
  {
    "path": "utils/google_app_engine/app.yaml",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nruntime: custom\nenv: flex\n\nservice: yolov5app\n\nliveness_check:\n  initial_delay_sec: 600\n\nmanual_scaling:\n  instances: 1\nresources:\n  cpu: 1\n  memory_gb: 4\n  disk_size_gb: 20\n"
  },
  {
    "path": "utils/loggers/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Logging utils.\"\"\"\n\nimport json\nimport os\nimport warnings\nfrom pathlib import Path\n\nimport torch\nfrom packaging.version import parse\n\nfrom utils.general import LOGGER, colorstr, cv2\nfrom utils.loggers.clearml.clearml_utils import ClearmlLogger\nfrom utils.loggers.wandb.wandb_utils import WandbLogger\nfrom utils.plots import plot_images, plot_labels, plot_results\nfrom utils.torch_utils import de_parallel\n\nLOGGERS = (\"csv\", \"tb\", \"wandb\", \"clearml\", \"comet\")  # *.csv, TensorBoard, Weights & Biases, ClearML\nRANK = int(os.getenv(\"RANK\", -1))\n\ntry:\n    from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n\n    def SummaryWriter(*args):\n        \"\"\"Fall back to SummaryWriter returning None if TensorBoard is not installed.\"\"\"\n        return None  # None = SummaryWriter(str)\n\n\ntry:\n    import wandb\n\n    assert hasattr(wandb, \"__version__\")  # verify package import not local dir\n    if parse(wandb.__version__) >= parse(\"0.12.2\") and RANK in {0, -1}:\n        try:\n            wandb_login_success = wandb.login(timeout=30)\n        except wandb.errors.UsageError:  # known non-TTY terminal issue\n            wandb_login_success = False\n        if not wandb_login_success:\n            wandb = None\nexcept (ImportError, AssertionError):\n    wandb = None\n\ntry:\n    import clearml\n\n    assert hasattr(clearml, \"__version__\")  # verify package import not local dir\nexcept (ImportError, AssertionError):\n    clearml = None\n\ntry:\n    if RANK in {0, -1}:\n        import comet_ml\n\n        assert hasattr(comet_ml, \"__version__\")  # verify package import not local dir\n        from utils.loggers.comet import CometLogger\n\n    else:\n        comet_ml = None\nexcept (ImportError, AssertionError):\n    comet_ml = None\n\n\ndef _json_default(value):\n    \"\"\"Format `value` for JSON serialization (e.g. 
unwrap tensors).\n\n    Fall back to strings.\n    \"\"\"\n    if isinstance(value, torch.Tensor):\n        try:\n            value = value.item()\n        except ValueError:  # \"only one element tensors can be converted to Python scalars\"\n            pass\n    return value if isinstance(value, float) else str(value)\n\n\nclass Loggers:\n    \"\"\"Initializes and manages various logging utilities for tracking YOLOv5 training and validation metrics.\"\"\"\n\n    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):\n        \"\"\"Initializes loggers for YOLOv5 training and validation metrics, paths, and options.\"\"\"\n        self.save_dir = save_dir\n        self.weights = weights\n        self.opt = opt\n        self.hyp = hyp\n        self.plots = not opt.noplots  # plot results\n        self.logger = logger  # for printing results to console\n        self.include = include\n        self.keys = [\n            \"train/box_loss\",\n            \"train/obj_loss\",\n            \"train/cls_loss\",  # train loss\n            \"metrics/precision\",\n            \"metrics/recall\",\n            \"metrics/mAP_0.5\",\n            \"metrics/mAP_0.5:0.95\",  # metrics\n            \"val/box_loss\",\n            \"val/obj_loss\",\n            \"val/cls_loss\",  # val loss\n            \"x/lr0\",\n            \"x/lr1\",\n            \"x/lr2\",\n        ]  # params\n        self.best_keys = [\"best/epoch\", \"best/precision\", \"best/recall\", \"best/mAP_0.5\", \"best/mAP_0.5:0.95\"]\n        for k in LOGGERS:\n            setattr(self, k, None)  # init empty logger dictionary\n        self.csv = True  # always log to csv\n        self.ndjson_console = \"ndjson_console\" in self.include  # log ndjson to console\n        self.ndjson_file = \"ndjson_file\" in self.include  # log ndjson to file\n\n        # Messages\n        if not comet_ml:\n            prefix = colorstr(\"Comet: \")\n            s = f\"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\"\n            self.logger.info(s)\n        # TensorBoard\n        s = self.save_dir\n        if \"tb\" in self.include and not self.opt.evolve:\n            prefix = colorstr(\"TensorBoard: \")\n            self.logger.info(f\"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/\")\n            self.tb = SummaryWriter(str(s))\n\n        # W&B\n        if wandb and \"wandb\" in self.include:\n            self.opt.hyp = self.hyp  # add hyperparameters\n            self.wandb = WandbLogger(self.opt)\n        else:\n            self.wandb = None\n\n        # ClearML\n        if clearml and \"clearml\" in self.include:\n            try:\n                self.clearml = ClearmlLogger(self.opt, self.hyp)\n            except Exception:\n                self.clearml = None\n                prefix = colorstr(\"ClearML: \")\n                LOGGER.warning(\n                    f\"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.\"\n                    f\" See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme\"\n                )\n\n        else:\n            self.clearml = None\n\n        # Comet\n        if comet_ml and \"comet\" in self.include:\n            if isinstance(self.opt.resume, str) and self.opt.resume.startswith(\"comet://\"):\n                run_id = self.opt.resume.split(\"/\")[-1]\n                self.comet_logger = CometLogger(self.opt, self.hyp, 
run_id=run_id)\n\n            else:\n                self.comet_logger = CometLogger(self.opt, self.hyp)\n\n        else:\n            self.comet_logger = None\n\n    @property\n    def remote_dataset(self):\n        \"\"\"Fetches dataset dictionary from remote logging services like ClearML, Weights & Biases, or Comet ML.\"\"\"\n        data_dict = None\n        if self.clearml:\n            data_dict = self.clearml.data_dict\n        if self.wandb:\n            data_dict = self.wandb.data_dict\n        if self.comet_logger:\n            data_dict = self.comet_logger.data_dict\n\n        return data_dict\n\n    def on_train_start(self):\n        \"\"\"Initializes the training process for Comet ML logger if it's configured.\"\"\"\n        if self.comet_logger:\n            self.comet_logger.on_train_start()\n\n    def on_pretrain_routine_start(self):\n        \"\"\"Invokes pre-training routine start hook for Comet ML logger if available.\"\"\"\n        if self.comet_logger:\n            self.comet_logger.on_pretrain_routine_start()\n\n    def on_pretrain_routine_end(self, labels, names):\n        \"\"\"Callback that runs at the end of pre-training routine, logging label plots if enabled.\"\"\"\n        if self.plots:\n            plot_labels(labels, names, self.save_dir)\n            paths = self.save_dir.glob(\"*labels*.jpg\")  # training labels\n            if self.wandb:\n                self.wandb.log({\"Labels\": [wandb.Image(str(x), caption=x.name) for x in paths]})\n            if self.comet_logger:\n                self.comet_logger.on_pretrain_routine_end(paths)\n            if self.clearml:\n                for path in paths:\n                    self.clearml.log_plot(title=path.stem, plot_path=path)\n\n    def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):\n        \"\"\"Logs training batch end events, plots images, and updates external loggers with batch-end data.\"\"\"\n        log_dict = dict(zip(self.keys[:3], vals))\n        # Callback runs on train batch end\n        # ni: number integrated batches (since train start)\n        if self.plots:\n            if ni < 3:\n                f = self.save_dir / f\"train_batch{ni}.jpg\"  # filename\n                plot_images(imgs, targets, paths, f)\n                if ni == 0 and self.tb and not self.opt.sync_bn:\n                    log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz))\n            if ni == 10 and (self.wandb or self.clearml):\n                files = sorted(self.save_dir.glob(\"train*.jpg\"))\n                if self.wandb:\n                    self.wandb.log({\"Mosaics\": [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})\n                if self.clearml:\n                    self.clearml.log_debug_samples(files, title=\"Mosaics\")\n\n        if self.comet_logger:\n            self.comet_logger.on_train_batch_end(log_dict, step=ni)\n\n    def on_train_epoch_end(self, epoch):\n        \"\"\"Callback that updates the current epoch in Weights & Biases at the end of a training epoch.\"\"\"\n        if self.wandb:\n            self.wandb.current_epoch = epoch + 1\n\n        if self.comet_logger:\n            self.comet_logger.on_train_epoch_end(epoch)\n\n    def on_val_start(self):\n        \"\"\"Callback that signals the start of a validation phase to the Comet logger.\"\"\"\n        if self.comet_logger:\n            self.comet_logger.on_val_start()\n\n    def on_val_image_end(self, pred, predn, path, names, im):\n        \"\"\"Callback that logs a 
validation image and its predictions to WandB or ClearML.\"\"\"\n        if self.wandb:\n            self.wandb.val_one_image(pred, predn, path, names, im)\n        if self.clearml:\n            self.clearml.log_image_with_boxes(path, pred, names, im)\n\n    def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):\n        \"\"\"Logs validation batch results to Comet ML during training at the end of each validation batch.\"\"\"\n        if self.comet_logger:\n            self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out)\n\n    def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):\n        \"\"\"Logs validation results to WandB or ClearML at the end of the validation process.\"\"\"\n        if self.wandb or self.clearml:\n            files = sorted(self.save_dir.glob(\"val*.jpg\"))\n        if self.wandb:\n            self.wandb.log({\"Validation\": [wandb.Image(str(f), caption=f.name) for f in files]})\n        if self.clearml:\n            self.clearml.log_debug_samples(files, title=\"Validation\")\n\n        if self.comet_logger:\n            self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)\n\n    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):\n        \"\"\"Callback that logs metrics and saves them to CSV or NDJSON at the end of each fit (train+val) epoch.\"\"\"\n        x = dict(zip(self.keys, vals))\n        if self.csv:\n            file = self.save_dir / \"results.csv\"\n            n = len(x) + 1  # number of cols\n            s = \"\" if file.exists() else ((\"%20s,\" * n % tuple([\"epoch\", *self.keys])).rstrip(\",\") + \"\\n\")  # add header\n            with open(file, \"a\") as f:\n                f.write(s + (\"%20.5g,\" * n % tuple([epoch, *vals])).rstrip(\",\") + \"\\n\")\n        if self.ndjson_console or self.ndjson_file:\n            json_data = json.dumps(dict(epoch=epoch, **x), default=_json_default)\n        if self.ndjson_console:\n            print(json_data)\n        if self.ndjson_file:\n            file = self.save_dir / \"results.ndjson\"\n            with open(file, \"a\") as f:\n                print(json_data, file=f)\n\n        if self.tb:\n            for k, v in x.items():\n                self.tb.add_scalar(k, v, epoch)\n        elif self.clearml:  # log to ClearML if TensorBoard not used\n            self.clearml.log_scalars(x, epoch)\n\n        if self.wandb:\n            if best_fitness == fi:\n                best_results = [epoch, *vals[3:7]]\n                for i, name in enumerate(self.best_keys):\n                    self.wandb.wandb_run.summary[name] = best_results[i]  # log best results in the summary\n            self.wandb.log(x)\n            self.wandb.end_epoch()\n\n        if self.clearml:\n            self.clearml.current_epoch_logged_images = set()  # reset epoch image limit\n            self.clearml.current_epoch += 1\n\n        if self.comet_logger:\n            self.comet_logger.on_fit_epoch_end(x, epoch=epoch)\n\n    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):\n        \"\"\"Callback that handles model saving events, logging to Weights & Biases or ClearML if enabled.\"\"\"\n        if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1:\n            if self.wandb:\n                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)\n            if self.clearml:\n                self.clearml.task.update_output_model(\n   
                 model_path=str(last), model_name=\"Latest Model\", auto_delete_file=False\n                )\n\n        if self.comet_logger:\n            self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi)\n\n    def on_train_end(self, last, best, epoch, results):\n        \"\"\"Callback that runs at the end of training to save plots and log results.\"\"\"\n        if self.plots:\n            plot_results(file=self.save_dir / \"results.csv\")  # save results.png\n        files = [\"results.png\", \"confusion_matrix.png\", *(f\"{x}_curve.png\" for x in (\"F1\", \"PR\", \"P\", \"R\"))]\n        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter\n        self.logger.info(f\"Results saved to {colorstr('bold', self.save_dir)}\")\n\n        if self.tb and not self.clearml:  # These images are already captured by ClearML by now, we don't want doubles\n            for f in files:\n                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats=\"HWC\")\n\n        if self.wandb:\n            self.wandb.log(dict(zip(self.keys[3:10], results)))\n            self.wandb.log({\"Results\": [wandb.Image(str(f), caption=f.name) for f in files]})\n            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model\n            if not self.opt.evolve:\n                wandb.log_artifact(\n                    str(best if best.exists() else last),\n                    type=\"model\",\n                    name=f\"run_{self.wandb.wandb_run.id}_model\",\n                    aliases=[\"latest\", \"best\", \"stripped\"],\n                )\n            self.wandb.finish_run()\n\n        if self.clearml and not self.opt.evolve:\n            self.clearml.log_summary(dict(zip(self.keys[3:10], results)))\n            [self.clearml.log_plot(title=f.stem, plot_path=f) for f in files]\n            self.clearml.log_model(\n                str(best if best.exists() else last), \"Best Model\" if best.exists() else \"Last Model\", epoch\n            )\n\n        if self.comet_logger:\n            final_results = dict(zip(self.keys[3:10], results))\n            self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results)\n\n    def on_params_update(self, params: dict):\n        \"\"\"Updates experiment hyperparameters or configurations in WandB, Comet, or ClearML.\"\"\"\n        if self.wandb:\n            self.wandb.wandb_run.config.update(params, allow_val_change=True)\n        if self.comet_logger:\n            self.comet_logger.on_params_update(params)\n        if self.clearml:\n            self.clearml.task.connect(params)\n\n\nclass GenericLogger:\n    \"\"\"YOLOv5 General purpose logger for non-task specific logging Usage: from utils.loggers import GenericLogger;\n    logger = GenericLogger(...).\n\n    Args:\n        opt: Run arguments\n        console_logger: Console logger\n        include: loggers to include\n    \"\"\"\n\n    def __init__(self, opt, console_logger, include=(\"tb\", \"wandb\", \"clearml\")):\n        \"\"\"Initializes a generic logger with optional TensorBoard, W&B, and ClearML support.\"\"\"\n        self.save_dir = Path(opt.save_dir)\n        self.include = include\n        self.console_logger = console_logger\n        self.csv = self.save_dir / \"results.csv\"  # CSV logger\n        if \"tb\" in self.include:\n            prefix = colorstr(\"TensorBoard: \")\n            self.console_logger.info(\n                f\"{prefix}Start with 'tensorboard --logdir 
{self.save_dir.parent}', view at http://localhost:6006/\"\n            )\n            self.tb = SummaryWriter(str(self.save_dir))\n\n        if wandb and \"wandb\" in self.include:\n            self.wandb = wandb.init(\n                project=web_project_name(str(opt.project)), name=None if opt.name == \"exp\" else opt.name, config=opt\n            )\n        else:\n            self.wandb = None\n\n        if clearml and \"clearml\" in self.include:\n            try:\n                # Hyp is not available in classification mode\n                hyp = {} if \"hyp\" not in opt else opt.hyp\n                self.clearml = ClearmlLogger(opt, hyp)\n            except Exception:\n                self.clearml = None\n                prefix = colorstr(\"ClearML: \")\n                LOGGER.warning(\n                    f\"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.\"\n                    f\" See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration\"\n                )\n        else:\n            self.clearml = None\n\n    def log_metrics(self, metrics, epoch):\n        \"\"\"Logs metrics to CSV, TensorBoard, W&B, and ClearML; `metrics` is a dict, `epoch` is an int.\"\"\"\n        if self.csv:\n            keys, vals = list(metrics.keys()), list(metrics.values())\n            n = len(metrics) + 1  # number of cols\n            s = \"\" if self.csv.exists() else ((\"%23s,\" * n % tuple([\"epoch\", *keys])).rstrip(\",\") + \"\\n\")  # header\n            with open(self.csv, \"a\") as f:\n                f.write(s + (\"%23.5g,\" * n % tuple([epoch, *vals])).rstrip(\",\") + \"\\n\")\n\n        if self.tb:\n            for k, v in metrics.items():\n                self.tb.add_scalar(k, v, epoch)\n\n        if self.wandb:\n            self.wandb.log(metrics, step=epoch)\n\n        if self.clearml:\n            self.clearml.log_scalars(metrics, epoch)\n\n    def log_images(self, files, name=\"Images\", epoch=0):\n        \"\"\"Logs images to all loggers with optional naming and epoch specification.\"\"\"\n        files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])]  # to Path\n        files = [f for f in files if f.exists()]  # filter by exists\n\n        if self.tb:\n            for f in files:\n                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats=\"HWC\")\n\n        if self.wandb:\n            self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch)\n\n        if self.clearml:\n            if name == \"Results\":\n                [self.clearml.log_plot(f.stem, f) for f in files]\n            else:\n                self.clearml.log_debug_samples(files, title=name)\n\n    def log_graph(self, model, imgsz=(640, 640)):\n        \"\"\"Logs model graph to all configured loggers with specified input image size.\"\"\"\n        if self.tb:\n            log_tensorboard_graph(self.tb, model, imgsz)\n\n    def log_model(self, model_path, epoch=0, metadata=None):\n        \"\"\"Logs the model to all configured loggers with optional epoch and metadata.\"\"\"\n        if metadata is None:\n            metadata = {}\n        # Log model to all loggers\n        if self.wandb:\n            art = wandb.Artifact(name=f\"run_{wandb.run.id}_model\", type=\"model\", metadata=metadata)\n            art.add_file(str(model_path))\n            wandb.log_artifact(art)\n        if self.clearml:\n            self.clearml.log_model(model_path=model_path, 
model_name=model_path.stem)\n\n    def update_params(self, params):\n        \"\"\"Updates logged parameters in WandB and/or ClearML if enabled.\"\"\"\n        if self.wandb:\n            wandb.run.config.update(params, allow_val_change=True)\n        if self.clearml:\n            self.clearml.task.connect(params)\n\n\ndef log_tensorboard_graph(tb, model, imgsz=(640, 640)):\n    \"\"\"Logs the model graph to TensorBoard with specified image size and model.\"\"\"\n    try:\n        p = next(model.parameters())  # for device, type\n        imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz  # expand\n        im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p)  # input image (WARNING: must be zeros, not empty)\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")  # suppress jit trace warning\n            tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), [])\n    except Exception as e:\n        LOGGER.warning(f\"WARNING ⚠️ TensorBoard graph visualization failure {e}\")\n\n\ndef web_project_name(project):\n    \"\"\"Converts a local project name to a standardized web project name with optional suffixes.\"\"\"\n    if not project.startswith(\"runs/train\"):\n        return project\n    suffix = \"-Classify\" if project.endswith(\"-cls\") else \"-Segment\" if project.endswith(\"-seg\") else \"\"\n    return f\"YOLOv5{suffix}\"\n"
  },
  {
    "path": "utils/loggers/clearml/README.md",
    "content": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg\" width=\"320\" alt=\"Ultralytics logo\"></a>\n\n# ClearML Integration with Ultralytics YOLO\n\n<img align=\"center\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/logos_dark.png#gh-light-mode-only\" alt=\"ClearML\"><img align=\"center\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/logos_light.png#gh-dark-mode-only\" alt=\"ClearML\">\n\n## ℹ️ About ClearML\n\n[ClearML](https://clear.ml/) is an [open-source MLOps platform](https://github.com/clearml/clearml) designed to streamline your machine learning workflow and maximize productivity. Integrating ClearML with [Ultralytics YOLO](https://docs.ultralytics.com/models/yolov5/) unlocks a robust suite of tools for experiment tracking, data management, and scalable deployment:\n\n- **Experiment Management:** Effortlessly track every [YOLO training run](https://docs.ultralytics.com/modes/train/), including parameters, metrics, and outputs. Explore the [Ultralytics ClearML integration guide](https://docs.ultralytics.com/integrations/clearml/) for step-by-step instructions.\n- **Data Versioning:** Manage and access your custom training data with ClearML's Data Versioning Tool, similar to [DVC integration](https://docs.ultralytics.com/integrations/dvc/).\n- **Remote Execution:** [Remotely train and monitor models](https://docs.ultralytics.com/hub/cloud-training/) using ClearML Agent for seamless scaling.\n- **Hyperparameter Optimization:** Boost your [mean average precision (mAP)](https://docs.ultralytics.com/guides/yolo-performance-metrics/) with ClearML's [hyperparameter tuning](https://docs.ultralytics.com/guides/hyperparameter-tuning/) capabilities.\n- **Model Deployment:** Deploy your trained YOLO model as an API with ClearML Serving, complementing [Ultralytics model deployment options](https://docs.ultralytics.com/guides/model-deployment-options/).\n\nYou can use ClearML's experiment manager alone or combine these features into a comprehensive [MLOps pipeline](https://www.ultralytics.com/glossary/machine-learning-operations-mlops).\n\n![ClearML scalars dashboard](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/experiment_manager_with_compare.gif)\n\n## 🦾 Setting Up ClearML\n\nClearML requires a server to track experiments and data. You have two main options:\n\n1. **ClearML Hosted Service:** Sign up for a free account at [app.clear.ml](https://app.clear.ml/).\n2. **Self-Hosted Server:** Deploy your own ClearML server using the [official setup guide](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). The server is open-source, ensuring data privacy and control.\n\nTo get started:\n\n1. **Install the ClearML Python package:**\n\n   ```bash\n   pip install clearml\n   ```\n\n   _Note: The `clearml` package is included in the YOLO requirements._\n\n2. 
**Connect the ClearML SDK to your server:**  \n   [Create credentials](https://app.clear.ml/settings/workspace-configuration) (Settings → Workspace → Create new credentials), then run:\n\n   ```bash\n   clearml-init\n   ```\n\n   Follow the prompts to complete setup.\n\nFor a general Ultralytics setup, see the [Quickstart Guide](https://docs.ultralytics.com/quickstart/).\n\n## 🚀 Training YOLO with ClearML\n\nWhen the `clearml` package is installed, experiment tracking is automatically enabled for every [YOLO training run](https://docs.ultralytics.com/modes/train/). All experiment details are captured and stored in the ClearML experiment manager.\n\nTo customize your project or task name in ClearML, use the `--project` and `--name` arguments. By default, the project is `YOLO` and the task is `Training`. ClearML uses `/` as a delimiter for subprojects.\n\n**Example Training Command:**\n\n```bash\n# Train YOLO on COCO128 dataset for 3 epochs\npython train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache\n```\n\n**Example with Custom Project and Task Names:**\n\n```bash\n# Train with custom project and experiment names\npython train.py --project my_yolo_project --name experiment_001 --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache\n```\n\nClearML automatically logs:\n\n- Source code and uncommitted changes\n- Installed Python packages\n- Hyperparameters and configuration settings\n- Model checkpoints (use `--save-period n` to save every `n` epochs)\n- Console output logs\n- Performance metrics ([precision, recall](https://docs.ultralytics.com/guides/yolo-performance-metrics/), [losses](https://docs.ultralytics.com/reference/utils/loss/), [learning rates](https://www.ultralytics.com/glossary/learning-rate), mAP<sub>0.5</sub>, mAP<sub>0.5:0.95</sub>)\n- System details (hardware specs, runtime, creation date)\n- Generated plots (label correlogram, [confusion matrix](https://www.ultralytics.com/glossary/confusion-matrix))\n- Images with bounding boxes per epoch\n- Mosaic augmentation previews per epoch\n- Validation images per epoch\n\nAll this information can be visualized in the ClearML UI. You can customize table views, sort experiments by metrics, and compare multiple runs. This enables advanced features like hyperparameter optimization and remote execution.\n\n## 🔗 Dataset Version Management\n\nVersioning your [datasets](https://docs.ultralytics.com/datasets/) independently from code is essential for reproducibility and collaboration. ClearML's Data Versioning Tool streamlines this process. YOLO supports ClearML dataset version IDs, automatically downloading data as needed. The dataset ID is saved as a task parameter, ensuring traceability for every experiment.\n\n![ClearML Dataset Interface](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/clearml_data.gif)\n\n### Prepare Your Dataset\n\nYOLO uses [YAML files](https://www.ultralytics.com/glossary/yaml) to define dataset configurations. By default, datasets are expected in the `../datasets` directory relative to the repository root. 
For example, the [COCO128 dataset](https://docs.ultralytics.com/datasets/detect/coco128/) structure:\n\n```\n../\n├── yolov5/          # Your YOLO repository clone\n└── datasets/\n    └── coco128/\n        ├── images/\n        ├── labels/\n        ├── LICENSE\n        └── README.txt\n```\n\nEnsure your custom dataset follows a similar structure.\n\nNext, ⚠️ **copy the corresponding dataset `.yaml` file into the root of your dataset folder**. This file contains essential information (`path`, `train`, `test`, `val`, `nc`, `names`) required by ClearML.\n\n```\n../\n└── datasets/\n    └── coco128/\n        ├── images/\n        ├── labels/\n        ├── coco128.yaml  # <---- Place the YAML file here!\n        ├── LICENSE\n        └── README.txt\n```\n\n### Upload Your Dataset\n\nNavigate to your dataset's root directory and use the `clearml-data` CLI tool:\n\n```bash\ncd ../datasets/coco128\nclearml-data sync --project YOLO_Datasets --name coco128 --folder .\n```\n\nAlternatively, use the following commands:\n\n```bash\n# Create a new dataset entry in ClearML\nclearml-data create --project YOLO_Datasets --name coco128\n\n# Add the dataset files (use '.' for the current directory)\nclearml-data add --files .\n\n# Finalize and upload the dataset version\nclearml-data close\n```\n\n_Tip: Use `--parent <parent_dataset_id>` with `clearml-data create` to link versions and avoid re-uploading unchanged files._\n\n### Run Training Using a ClearML Dataset\n\nOnce your dataset is versioned in ClearML, you can use it for training by providing the dataset ID via the `--data` argument with the `clearml://` prefix:\n\n```bash\n# Replace YOUR_DATASET_ID with the actual ID from ClearML\npython train.py --img 640 --batch 16 --epochs 3 --data clearml://YOUR_DATASET_ID --weights yolov5s.pt --cache\n```\n\n## 👀 Hyperparameter Optimization\n\nWith experiments and data versioned, you can leverage ClearML for [hyperparameter optimization](https://docs.ultralytics.com/guides/hyperparameter-tuning/). ClearML captures all necessary information (code, packages, environment), making experiments fully reproducible. Its HPO tools clone an existing experiment, modify hyperparameters, and rerun it automatically.\n\nTo run HPO locally, use the provided script `utils/loggers/clearml/hpo.py`. You'll need the ID of a previously run training task (the \"template task\") to clone. Update the script with this ID and run:\n\n```bash\n# Install Optuna for advanced optimization strategies (optional)\n# pip install optuna\n\n# Run the HPO script\npython utils/loggers/clearml/hpo.py\n```\n\nThe script uses [Optuna](https://optuna.org/) by default if installed, or falls back to `RandomSearch`. You can modify `task.execute_locally()` to `task.execute()` in the script to enqueue HPO tasks for a remote ClearML agent.\n\n![HPO in ClearML UI](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/hpo.png)\n\n## 🤯 Remote Execution (Advanced)\n\nClearML Agent enables you to execute experiments on remote machines, including on-premise servers or cloud GPUs such as [AWS](https://aws.amazon.com/), [Google Cloud](https://cloud.google.com/), or [Azure](https://azure.microsoft.com/). 
The agent listens to task queues, reproduces the experiment environment, runs the task, and reports results back to the ClearML server.\n\nLearn more about ClearML Agent:\n\n- [YouTube Introduction to ClearML Agent](https://www.youtube.com/watch?v=MX3BrXnaULs)\n- [Official ClearML Agent Documentation](https://clear.ml/docs/latest/docs/clearml_agent)\n\nTurn any machine into a ClearML agent by running:\n\n```bash\n# Replace QUEUES_TO_LISTEN_TO with your queue name(s)\nclearml-agent daemon --queue QUEUES_TO_LISTEN_TO [--docker] # Use --docker to run in a Docker container\n```\n\n### Cloning, Editing, and Enqueuing Tasks\n\nYou can manage remote execution directly from the ClearML web UI:\n\n1. **Clone:** Right-click an existing experiment to clone it.\n2. **Edit:** Modify hyperparameters or other settings in the cloned task.\n3. **Enqueue:** Right-click the modified task and select \"Enqueue\" to assign it to a specific queue for an agent to pick up.\n\n![Enqueue a task from the ClearML UI](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/enqueue.gif)\n\n### Executing a Task Remotely via Code\n\nYou can also modify your training script to automatically enqueue tasks for remote execution. Add `task.execute_remotely()` after the ClearML logger is initialized in `train.py`:\n\n```python\n# Inside train.py, after logger initialization...\nif RANK in {-1, 0}:\n    # Initialize loggers\n    loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)\n\n    # Check if ClearML logger is active and enqueue the task\n    if loggers.clearml:\n        # Specify the queue name for the remote agent\n        loggers.clearml.task.execute_remotely(queue_name=\"my_remote_queue\")  # <------ ADD THIS LINE\n        # data_dict might be populated by ClearML if using a ClearML dataset\n        data_dict = loggers.clearml.data_dict\n```\n\nRunning the script with this modification will package the code and its environment and send it to the specified queue, rather than executing locally.\n\n### Autoscaling Workers\n\nClearML provides Autoscalers that automatically manage cloud resources (AWS, GCP, Azure). They spin up new virtual machines as ClearML agents when tasks appear in a queue, and shut them down when the queue is empty, optimizing cost.\n\nWatch the Autoscalers getting started video:\n\n[![Watch the ClearML Autoscalers video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E)\n\n## 🤝 Contributing\n\nContributions to enhance the ClearML integration are welcome! Please see the [Ultralytics Contributing Guide](https://docs.ultralytics.com/help/contributing/) for details on how to get involved.\n\n---\n\n[![Ultralytics open-source contributors](https://raw.githubusercontent.com/ultralytics/assets/main/im/image-contributors.png)](https://github.com/ultralytics/ultralytics/graphs/contributors)\n"
  },
  {
    "path": "utils/loggers/clearml/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
  },
  {
    "path": "utils/loggers/clearml/clearml_utils.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Main Logger class for ClearML experiment tracking.\"\"\"\n\nimport glob\nimport re\nfrom pathlib import Path\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport yaml\nfrom ultralytics.utils.plotting import Annotator, colors\n\ntry:\n    import clearml\n    from clearml import Dataset, Task\n\n    assert hasattr(clearml, \"__version__\")  # verify package import not local dir\nexcept (ImportError, AssertionError):\n    clearml = None\n\n\ndef construct_dataset(clearml_info_string):\n    \"\"\"Load in a clearml dataset and fill the internal data_dict with its contents.\"\"\"\n    dataset_id = clearml_info_string.replace(\"clearml://\", \"\")\n    dataset = Dataset.get(dataset_id=dataset_id)\n    dataset_root_path = Path(dataset.get_local_copy())\n\n    # We'll search for the yaml file definition in the dataset\n    yaml_filenames = list(glob.glob(str(dataset_root_path / \"*.yaml\")) + glob.glob(str(dataset_root_path / \"*.yml\")))\n    if len(yaml_filenames) > 1:\n        raise ValueError(\n            \"More than one yaml file was found in the dataset root, cannot determine which one contains \"\n            \"the dataset definition this way.\"\n        )\n    elif not yaml_filenames:\n        raise ValueError(\n            \"No yaml definition found in dataset root path, check that there is a correct yaml file \"\n            \"inside the dataset root path.\"\n        )\n    with open(yaml_filenames[0]) as f:\n        dataset_definition = yaml.safe_load(f)\n\n    assert set(dataset_definition.keys()).issuperset({\"train\", \"test\", \"val\", \"nc\", \"names\"}), (\n        \"The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')\"\n    )\n\n    data_dict = {\n        \"train\": (\n            str((dataset_root_path / dataset_definition[\"train\"]).resolve()) if dataset_definition[\"train\"] else None\n        )\n    }\n    data_dict[\"test\"] = (\n        str((dataset_root_path / dataset_definition[\"test\"]).resolve()) if dataset_definition[\"test\"] else None\n    )\n    data_dict[\"val\"] = (\n        str((dataset_root_path / dataset_definition[\"val\"]).resolve()) if dataset_definition[\"val\"] else None\n    )\n    data_dict[\"nc\"] = dataset_definition[\"nc\"]\n    data_dict[\"names\"] = dataset_definition[\"names\"]\n\n    return data_dict\n\n\nclass ClearmlLogger:\n    \"\"\"Log training runs, datasets, models, and predictions to ClearML.\n\n    This logger sends information to ClearML at app.clear.ml or to your own hosted server. 
By default, this information\n    includes hyperparameters, system configuration and metrics, model metrics, code information and basic data metrics\n    and analyses.\n\n    By providing additional command line arguments to train.py, datasets, models and predictions can also be logged.\n    \"\"\"\n\n    def __init__(self, opt, hyp):\n        \"\"\"- Initialize ClearML Task, this object will capture the experiment - Upload dataset version to ClearML Data\n        if opt.upload_dataset is True.\n\n        Args:\n            opt (namespace): Commandline arguments for this run\n            hyp (dict): Hyperparameters for this run\n        \"\"\"\n        self.current_epoch = 0\n        # Keep tracked of amount of logged images to enforce a limit\n        self.current_epoch_logged_images = set()\n        # Maximum number of images to log to clearML per epoch\n        self.max_imgs_to_log_per_epoch = 16\n        # Get the interval of epochs when bounding box images should be logged\n        # Only for detection task though!\n        if \"bbox_interval\" in opt:\n            self.bbox_interval = opt.bbox_interval\n        self.clearml = clearml\n        self.task = None\n        self.data_dict = None\n        if self.clearml:\n            self.task = Task.init(\n                project_name=\"YOLOv5\" if str(opt.project).startswith(\"runs/\") else opt.project,\n                task_name=opt.name if opt.name != \"exp\" else \"Training\",\n                tags=[\"YOLOv5\"],\n                output_uri=True,\n                reuse_last_task_id=opt.exist_ok,\n                auto_connect_frameworks={\"pytorch\": False, \"matplotlib\": False},\n                # We disconnect pytorch auto-detection, because we added manual model save points in the code\n            )\n            # ClearML's hooks will already grab all general parameters\n            # Only the hyperparameters coming from the yaml config file\n            # will have to be added manually!\n            self.task.connect(hyp, name=\"Hyperparameters\")\n            self.task.connect(opt, name=\"Args\")\n\n            # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent\n            self.task.set_base_docker(\n                \"ultralytics/yolov5:latest\",\n                docker_arguments=':ipc=host -e=\"CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1\"',\n                docker_setup_bash_script=\"pip install clearml\",\n            )\n\n            # Get ClearML Dataset Version if requested\n            if opt.data.startswith(\"clearml://\"):\n                # data_dict should have the following keys:\n                # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)\n                self.data_dict = construct_dataset(opt.data)\n                # Set data to data_dict because wandb will crash without this information and opt is the best way\n                # to give it to them\n                opt.data = self.data_dict\n\n    def log_scalars(self, metrics, epoch):\n        \"\"\"Log scalars/metrics to ClearML.\n\n        Args:\n            metrics (dict): Metrics in dict format: {\"metrics/mAP\": 0.8, ...}\n            epoch (int): iteration number for the current set of metrics\n        \"\"\"\n        for k, v in metrics.items():\n            title, series = k.split(\"/\")\n            self.task.get_logger().report_scalar(title, series, v, epoch)\n\n    def log_model(self, model_path, model_name, epoch=0):\n        \"\"\"Log model weights to 
ClearML.\n\n        Args:\n            model_path (PosixPath or str): Path to the model weights\n            model_name (str): Name of the model visible in ClearML\n            epoch (int): Iteration / epoch of the model weights\n        \"\"\"\n        self.task.update_output_model(\n            model_path=str(model_path), name=model_name, iteration=epoch, auto_delete_file=False\n        )\n\n    def log_summary(self, metrics):\n        \"\"\"Log final metrics to a summary table.\n\n        Args:\n            metrics (dict): Metrics in dict format: {\"metrics/mAP\": 0.8, ...}\n        \"\"\"\n        for k, v in metrics.items():\n            self.task.get_logger().report_single_value(k, v)\n\n    def log_plot(self, title, plot_path):\n        \"\"\"Log image as plot in the plot section of ClearML.\n\n        Args:\n            title (str): Title of the plot\n            plot_path (PosixPath or str): Path to the saved image file\n        \"\"\"\n        img = mpimg.imread(plot_path)\n        fig = plt.figure()\n        ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=\"auto\", xticks=[], yticks=[])  # no ticks\n        ax.imshow(img)\n\n        self.task.get_logger().report_matplotlib_figure(title, \"\", figure=fig, report_interactive=False)\n\n    def log_debug_samples(self, files, title=\"Debug Samples\"):\n        \"\"\"Log files (images) as debug samples in the ClearML task.\n\n        Args:\n            files (List(PosixPath)): a list of file paths in PosixPath format\n            title (str): A title that groups together images with the same values\n        \"\"\"\n        for f in files:\n            if f.exists():\n                it = re.search(r\"_batch(\\d+)\", f.name)\n                iteration = int(it.groups()[0]) if it else 0\n                self.task.get_logger().report_image(\n                    title=title, series=f.name.replace(f\"_batch{iteration}\", \"\"), local_path=str(f), iteration=iteration\n                )\n\n    def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):\n        \"\"\"Draw the bounding boxes on a single image and report the result as a ClearML debug sample.\n\n        Args:\n            image_path (PosixPath) the path the original image file\n            boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]\n            class_names (dict): dict containing mapping of class int to class name\n            image (Tensor): A torch tensor containing the actual image data\n        \"\"\"\n        if (\n            len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch\n            and self.current_epoch >= 0\n            and (self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images)\n        ):\n            im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))\n            annotator = Annotator(im=im, pil=True)\n            for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):\n                color = colors(i)\n\n                class_name = class_names[int(class_nr)]\n                confidence_percentage = round(float(conf) * 100, 2)\n                label = f\"{class_name}: {confidence_percentage}%\"\n\n                if conf > conf_threshold:\n                    annotator.rectangle(box.cpu().numpy(), outline=color)\n                    annotator.box_label(box.cpu().numpy(), label=label, color=color)\n\n            
annotated_image = annotator.result()\n            self.task.get_logger().report_image(\n                title=\"Bounding Boxes\", series=image_path.name, iteration=self.current_epoch, image=annotated_image\n            )\n            self.current_epoch_logged_images.add(image_path)\n"
  },
  {
    "path": "utils/loggers/clearml/hpo.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nfrom clearml import Task\n\n# Connecting ClearML with the current process,\n# from here on everything is logged automatically\nfrom clearml.automation import HyperParameterOptimizer, UniformParameterRange\nfrom clearml.automation.optuna import OptimizerOptuna\n\ntask = Task.init(\n    project_name=\"Hyper-Parameter Optimization\",\n    task_name=\"YOLOv5\",\n    task_type=Task.TaskTypes.optimizer,\n    reuse_last_task_id=False,\n)\n\n# Example use case:\noptimizer = HyperParameterOptimizer(\n    # This is the experiment we want to optimize\n    base_task_id=\"<your_template_task_id>\",\n    # here we define the hyper-parameters to optimize\n    # Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter>\n    # For Example, here we see in the base experiment a section Named: \"General\"\n    # under it a parameter named \"batch_size\", this becomes \"General/batch_size\"\n    # If you have `argparse` for example, then arguments will appear under the \"Args\" section,\n    # and you should instead pass \"Args/batch_size\"\n    hyper_parameters=[\n        UniformParameterRange(\"Hyperparameters/lr0\", min_value=1e-5, max_value=1e-1),\n        UniformParameterRange(\"Hyperparameters/lrf\", min_value=0.01, max_value=1.0),\n        UniformParameterRange(\"Hyperparameters/momentum\", min_value=0.6, max_value=0.98),\n        UniformParameterRange(\"Hyperparameters/weight_decay\", min_value=0.0, max_value=0.001),\n        UniformParameterRange(\"Hyperparameters/warmup_epochs\", min_value=0.0, max_value=5.0),\n        UniformParameterRange(\"Hyperparameters/warmup_momentum\", min_value=0.0, max_value=0.95),\n        UniformParameterRange(\"Hyperparameters/warmup_bias_lr\", min_value=0.0, max_value=0.2),\n        UniformParameterRange(\"Hyperparameters/box\", min_value=0.02, max_value=0.2),\n        UniformParameterRange(\"Hyperparameters/cls\", min_value=0.2, max_value=4.0),\n        UniformParameterRange(\"Hyperparameters/cls_pw\", min_value=0.5, max_value=2.0),\n        UniformParameterRange(\"Hyperparameters/obj\", min_value=0.2, max_value=4.0),\n        UniformParameterRange(\"Hyperparameters/obj_pw\", min_value=0.5, max_value=2.0),\n        UniformParameterRange(\"Hyperparameters/iou_t\", min_value=0.1, max_value=0.7),\n        UniformParameterRange(\"Hyperparameters/anchor_t\", min_value=2.0, max_value=8.0),\n        UniformParameterRange(\"Hyperparameters/fl_gamma\", min_value=0.0, max_value=4.0),\n        UniformParameterRange(\"Hyperparameters/hsv_h\", min_value=0.0, max_value=0.1),\n        UniformParameterRange(\"Hyperparameters/hsv_s\", min_value=0.0, max_value=0.9),\n        UniformParameterRange(\"Hyperparameters/hsv_v\", min_value=0.0, max_value=0.9),\n        UniformParameterRange(\"Hyperparameters/degrees\", min_value=0.0, max_value=45.0),\n        UniformParameterRange(\"Hyperparameters/translate\", min_value=0.0, max_value=0.9),\n        UniformParameterRange(\"Hyperparameters/scale\", min_value=0.0, max_value=0.9),\n        UniformParameterRange(\"Hyperparameters/shear\", min_value=0.0, max_value=10.0),\n        UniformParameterRange(\"Hyperparameters/perspective\", min_value=0.0, max_value=0.001),\n        UniformParameterRange(\"Hyperparameters/flipud\", min_value=0.0, max_value=1.0),\n        UniformParameterRange(\"Hyperparameters/fliplr\", min_value=0.0, max_value=1.0),\n        UniformParameterRange(\"Hyperparameters/mosaic\", min_value=0.0, 
max_value=1.0),\n        UniformParameterRange(\"Hyperparameters/mixup\", min_value=0.0, max_value=1.0),\n        UniformParameterRange(\"Hyperparameters/copy_paste\", min_value=0.0, max_value=1.0),\n    ],\n    # this is the objective metric we want to maximize/minimize\n    objective_metric_title=\"metrics\",\n    objective_metric_series=\"mAP_0.5\",\n    # now we decide if we want to maximize it or minimize it (accuracy we maximize)\n    objective_metric_sign=\"max\",\n    # let us limit the number of concurrent experiments,\n    # this in turn will make sure we don't bombard the scheduler with experiments.\n    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines\n    max_number_of_concurrent_tasks=1,\n    # this is the optimizer class (actually doing the optimization)\n    # Currently, we can choose from GridSearch, RandomSearch, OptimizerBOHB (Bayesian optimization Hyper-Band) or OptimizerOptuna\n    optimizer_class=OptimizerOptuna,\n    # If specified only the top K performing Tasks will be kept, the others will be automatically archived\n    save_top_k_tasks_only=5,\n    compute_time_limit=None,\n    total_max_jobs=20,\n    min_iteration_per_job=None,\n    max_iteration_per_job=None,\n)\n\n# report every 10 seconds, this is way too often, but we are testing here\noptimizer.set_report_period(10 / 60)\n# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent\n# an_optimizer.start_locally(job_complete_callback=job_complete_callback)\n# set the time limit for the optimization process (2 hours)\noptimizer.set_time_limit(in_minutes=120.0)\n# Start the optimization process in the local environment\noptimizer.start_locally()\n# wait until process is done (notice we are controlling the optimization process in the background)\noptimizer.wait()\n# make sure background optimization stopped\noptimizer.stop()\n\nprint(\"We are done, good bye\")\n"
  },
  {
    "path": "utils/loggers/comet/README.md",
    "content": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg\" width=\"320\" alt=\"Ultralytics logo\"></a>\n\n<img src=\"https://cdn.comet.ml/img/notebook_logo.png\">\n\n# Using Ultralytics YOLO With Comet\n\nWelcome to the guide for integrating [Ultralytics YOLO](https://github.com/ultralytics/yolov5) with [Comet](https://www.comet.com/site/)! Comet offers robust experiment tracking, model management, and visualization tools to enhance your [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) workflow. This guide explains how to leverage Comet for monitoring training, logging results, managing datasets, and optimizing hyperparameters for your YOLO models.\n\n[![Ultralytics Actions](https://github.com/ultralytics/velocity/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/velocity/actions/workflows/format.yml)\n[![Ultralytics Discord](https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue)](https://discord.com/invite/ultralytics)\n[![Ultralytics Forums](https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue)](https://community.ultralytics.com/)\n[![Ultralytics Reddit](https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue)](https://reddit.com/r/ultralytics)\n\n## 🧪 About Comet\n\n[Comet](https://www.comet.com/site/) provides tools for data scientists, engineers, and teams to accelerate and optimize [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) and machine learning models.\n\nWith Comet, you can track and visualize model metrics in real time, save [hyperparameters](https://docs.ultralytics.com/guides/hyperparameter-tuning/), datasets, and model checkpoints, and visualize predictions using Custom Panels. Comet ensures you never lose track of your work and makes sharing results and collaborating across teams seamless. For more details, see the [Comet Documentation](https://www.comet.com/docs/v2/).\n\n## 🚀 Getting Started\n\nFollow these steps to set up Comet for your YOLO projects.\n\n### Install Comet\n\nInstall the [comet_ml Python package](https://pypi.org/project/comet-ml/) using pip:\n\n```shell\npip install comet_ml\n```\n\n### Configure Comet Credentials\n\nYou can configure Comet in two ways:\n\n1. **Environment Variables:**  \n   Set your credentials directly in your environment.\n\n   ```shell\n   export COMET_API_KEY=YOUR_COMET_API_KEY\n   export COMET_PROJECT_NAME=YOUR_COMET_PROJECT_NAME # Defaults to 'yolov5' if not set\n   ```\n\n   Find your API key in your [Comet Account Settings](https://www.comet.com/site/).\n\n2. **Configuration File:**  \n   Create a `.comet.config` file in your working directory:\n\n   ```ini\n   [comet]\n   api_key=YOUR_COMET_API_KEY\n   project_name=YOUR_COMET_PROJECT_NAME # Defaults to 'yolov5' if not set\n   ```\n\n### Run the Training Script\n\nRun the YOLO [training script](https://docs.ultralytics.com/modes/train/). Comet will automatically log your run.\n\n```shell\n# Train YOLO on COCO128 for 5 epochs\npython train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt\n```\n\nComet automatically logs hyperparameters, command-line arguments, and training/validation metrics. Visualize and analyze your runs in the Comet UI. 
For more details, see the [Ultralytics training documentation](https://docs.ultralytics.com/modes/train/).\n\n<img width=\"1920\" alt=\"Comet UI showing YOLO training metrics\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\">\n\n## ✨ Try an Example!\n\nExplore a completed YOLO training run tracked with Comet:\n\n- **[View Example Run on Comet](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github_readme)**\n\nRun the example yourself using this [Google Colab](https://colab.research.google.com/) notebook:\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb)\n\n## 📊 Automatic Logging\n\nComet automatically logs the following information by default:\n\n### Metrics\n\n- **Losses:** Box Loss, Object Loss, Classification Loss (Training and Validation)\n- **Performance:** [mAP@0.5](https://www.ultralytics.com/glossary/mean-average-precision-map), mAP@0.5:0.95 (Validation). Learn more in the [YOLO Performance Metrics guide](https://docs.ultralytics.com/guides/yolo-performance-metrics/).\n- **[Precision](https://www.ultralytics.com/glossary/precision) and [Recall](https://www.ultralytics.com/glossary/recall):** Validation data metrics\n\n### Parameters\n\n- **Model Hyperparameters:** Configuration used for the model\n- **Command Line Arguments:** All arguments passed via the [CLI](https://docs.ultralytics.com/usage/cli/)\n\n### Visualizations\n\n- **[Confusion Matrix](https://www.ultralytics.com/glossary/confusion-matrix):** Model predictions on validation data ([Wikipedia](https://en.wikipedia.org/wiki/Confusion_matrix))\n- **Curves:** PR and F1 curves across all classes\n- **Label Correlogram:** Correlation visualization of class labels\n\n## ⚙️ Advanced Configuration\n\nCustomize Comet's logging behavior using command-line flags or environment variables.\n\n```shell\n# Environment Variables for Comet Configuration\nexport COMET_MODE=online                                    # 'online' or 'offline'. Default: online\nexport COMET_MODEL_NAME=YOUR_MODEL_NAME                     # Name for the saved model. Default: yolov5\nexport COMET_LOG_CONFUSION_MATRIX=false                     # Disable confusion matrix logging. Default: true\nexport COMET_MAX_IMAGE_UPLOADS=NUMBER                       # Max prediction images to log. Default: 100\nexport COMET_LOG_PER_CLASS_METRICS=true                     # Log metrics per class. Default: false\nexport COMET_DEFAULT_CHECKPOINT_FILENAME=checkpoint_file.pt # Checkpoint for resuming. Default: 'last.pt'\nexport COMET_LOG_BATCH_LEVEL_METRICS=true                   # Log training metrics per batch. Default: false\nexport COMET_LOG_PREDICTIONS=true                           # Disable prediction logging if set to false. Default: true\n```\n\nFor more configuration options, see the [Comet documentation](https://www.comet.com/docs/v2/).\n\n### Logging Checkpoints With Comet\n\nModel checkpoint logging to Comet is disabled by default. 
Enable it using the `--save-period` argument during training to save checkpoints at the specified epoch interval.\n\n```shell\npython train.py \\\n  --img 640 \\\n  --batch 16 \\\n  --epochs 5 \\\n  --data coco128.yaml \\\n  --weights yolov5s.pt \\\n  --save-period 1 # Save checkpoint every epoch\n```\n\nCheckpoints will appear in the \"Assets & Artifacts\" tab of your Comet experiment. Learn more about model management in the [Comet Model Registry documentation](https://www.comet.com/docs/v2/guides/model-registry/using-model-registry/).\n\n### Logging Model Predictions\n\nBy default, model predictions (images, ground truth labels, [bounding boxes](https://www.ultralytics.com/glossary/bounding-box)) for the validation set are logged. Control the logging frequency using the `--bbox_interval` argument, which specifies logging every Nth batch per epoch.\n\n**Note:** The YOLO validation dataloader defaults to a batch size of 32. Adjust `--bbox_interval` as needed.\n\nVisualize predictions using Comet's Object Detection Custom Panel. See an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github_readme).\n\n```shell\npython train.py \\\n  --img 640 \\\n  --batch 16 \\\n  --epochs 5 \\\n  --data coco128.yaml \\\n  --weights yolov5s.pt \\\n  --bbox_interval 2 # Log predictions every 2nd validation batch per epoch\n```\n\n#### Controlling the Number of Prediction Images\n\nAdjust the maximum number of validation images logged using the `COMET_MAX_IMAGE_UPLOADS` environment variable.\n\n```shell\nenv COMET_MAX_IMAGE_UPLOADS=200 python train.py \\\n  --img 640 \\\n  --batch 16 \\\n  --epochs 5 \\\n  --data coco128.yaml \\\n  --weights yolov5s.pt \\\n  --bbox_interval 1 # Log every batch\n```\n\n### Logging Class Level Metrics\n\nEnable logging of mAP, precision, recall, and F1-score for each class using the `COMET_LOG_PER_CLASS_METRICS` environment variable.\n\n```shell\nenv COMET_LOG_PER_CLASS_METRICS=true python train.py \\\n  --img 640 \\\n  --batch 16 \\\n  --epochs 5 \\\n  --data coco128.yaml \\\n  --weights yolov5s.pt\n```\n\n## 💾 Dataset Management With Comet Artifacts\n\nUse [Comet Artifacts](https://www.comet.com/docs/v2/guides/artifacts/using-artifacts/) to version, store, and manage your datasets.\n\n### Uploading a Dataset\n\nUpload your dataset using the `--upload_dataset` flag. Ensure your dataset follows the structure described in the [Ultralytics Datasets documentation](https://docs.ultralytics.com/datasets/) and that your dataset config [YAML](https://www.ultralytics.com/glossary/yaml) file matches the format of `coco128.yaml` (see the [COCO128 dataset docs](https://docs.ultralytics.com/datasets/detect/coco128/)).\n\n```shell\npython train.py \\\n  --img 640 \\\n  --batch 16 \\\n  --epochs 5 \\\n  --data coco128.yaml \\\n  --weights yolov5s.pt \\\n  --upload_dataset # Upload the dataset specified in coco128.yaml\n```\n\nView the uploaded dataset in the Artifacts tab of your Comet Workspace.\n<img width=\"1073\" alt=\"Comet Artifacts tab showing uploaded dataset\" src=\"https://user-images.githubusercontent.com/7529846/186929193-162718bf-ec7b-4eb9-8c3b-86b3763ef8ea.png\">\n\nPreview data directly in the Comet UI.\n<img width=\"1082\" alt=\"Comet UI previewing dataset images\" src=\"https://user-images.githubusercontent.com/7529846/186929215-432c36a9-c109-4eb0-944b-84c2786590d6.png\">\n\nArtifacts are versioned and support metadata. 
Comet automatically logs metadata from your dataset YAML file.\n<img width=\"963\" alt=\"Comet Artifact metadata view\" src=\"https://user-images.githubusercontent.com/7529846/186929256-9d44d6eb-1a19-42de-889a-bcbca3018f2e.png\">\n\n### Using a Saved Artifact\n\nTo use a dataset stored in Comet Artifacts, update the `path` in your dataset YAML file to the Artifact resource URL:\n\n```yaml\n# contents of artifact.yaml\npath: \"comet://WORKSPACE_NAME/ARTIFACT_NAME:ARTIFACT_VERSION_OR_ALIAS\"\ntrain: images/train # Adjust subdirectory if needed\nval: images/val # Adjust subdirectory if needed\n\n# Other dataset configurations...\n```\n\nThen, pass this configuration file to your training script:\n\n```shell\npython train.py \\\n  --img 640 \\\n  --batch 16 \\\n  --epochs 5 \\\n  --data artifact.yaml \\\n  --weights yolov5s.pt\n```\n\nArtifacts track data lineage, showing which experiments used specific dataset versions.\n<img width=\"1391\" alt=\"Comet Artifact lineage graph\" src=\"https://user-images.githubusercontent.com/7529846/186929264-4c4014fa-fe51-4f3c-a5c5-f6d24649b1b4.png\">\n\n## 🔄 Resuming Training Runs\n\nIf a training run is interrupted (for example, due to connection issues), you can resume it using the `--resume` flag with the Comet Run Path (`comet://YOUR_WORKSPACE/YOUR_PROJECT/EXPERIMENT_ID`).\n\nThis restores the model state, hyperparameters, arguments, and downloads necessary Artifacts, continuing logging to the existing Comet Experiment. Learn more about [resuming runs in the Comet documentation](https://www.comet.com/docs/v2/guides/experiment-management/resume-experiment/).\n\n```shell\npython train.py \\\n  --resume \"comet://YOUR_WORKSPACE/YOUR_PROJECT/EXPERIMENT_ID\"\n```\n\n## 🔍 Hyperparameter Optimization (HPO)\n\nYOLO integrates with the [Comet Optimizer](https://www.comet.com/docs/v2/guides/optimizer/configure-optimizer/) for easy hyperparameter sweeps and visualization. This helps you find the best set of parameters for your model, a process often referred to as [Hyperparameter Tuning](https://docs.ultralytics.com/guides/hyperparameter-tuning/).\n\n### Configuring an Optimizer Sweep\n\nCreate a [JSON](https://www.ultralytics.com/glossary/json) configuration file defining the sweep parameters, search strategy, and objective metric. An example is provided at `utils/loggers/comet/optimizer_config.json`.\n\nRun the sweep using the `hpo.py` script:\n\n```shell\npython utils/loggers/comet/hpo.py \\\n  --comet_optimizer_config \"utils/loggers/comet/optimizer_config.json\"\n```\n\nThe `hpo.py` script accepts the same arguments as `train.py`. Pass additional fixed arguments for the sweep:\n\n```shell\npython utils/loggers/comet/hpo.py \\\n  --comet_optimizer_config \"utils/loggers/comet/optimizer_config.json\" \\\n  --save-period 1 \\\n  --bbox_interval 1\n```\n\n### Running a Sweep in Parallel\n\nExecute multiple sweep trials concurrently using the `comet optimizer` command:\n\n```shell\ncomet optimizer -j \\\n  utils/loggers/comet/hpo.py NUM_WORKERS utils/loggers/comet/optimizer_config.json\n```\n\nReplace `NUM_WORKERS` with the desired number of parallel processes.\n\n### Visualizing HPO Results\n\nComet offers various visualizations for analyzing sweep results, such as parallel coordinate plots and parameter importance plots. 
Explore a [project with a completed sweep](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github_readme).\n\n<img width=\"1626\" alt=\"Comet HPO visualization\" src=\"https://user-images.githubusercontent.com/7529846/186914869-7dc1de14-583f-4323-967b-c9a66a29e495.png\">\n\n## 🤝 Contributing\n\nContributions to enhance the YOLO-Comet integration are welcome! Please see the [Ultralytics Contributing Guide](https://docs.ultralytics.com/help/contributing/) for more information on how to get involved. Thank you for helping improve this integration!\n"
  },
  {
    "path": "utils/loggers/comet/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport glob\nimport json\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nlogger = logging.getLogger(__name__)\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[3]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n\ntry:\n    import comet_ml\n\n    # Project Configuration\n    config = comet_ml.config.get_config()\n    COMET_PROJECT_NAME = config.get_string(os.getenv(\"COMET_PROJECT_NAME\"), \"comet.project_name\", default=\"yolov5\")\nexcept ImportError:\n    comet_ml = None\n    COMET_PROJECT_NAME = None\n\nimport PIL\nimport torch\nimport torchvision.transforms as T\nimport yaml\n\nfrom utils.dataloaders import img2label_paths\nfrom utils.general import check_dataset, scale_boxes, xywh2xyxy\nfrom utils.metrics import box_iou\n\nCOMET_PREFIX = \"comet://\"\n\nCOMET_MODE = os.getenv(\"COMET_MODE\", \"online\")\n\n# Model Saving Settings\nCOMET_MODEL_NAME = os.getenv(\"COMET_MODEL_NAME\", \"yolov5\")\n\n# Dataset Artifact Settings\nCOMET_UPLOAD_DATASET = os.getenv(\"COMET_UPLOAD_DATASET\", \"false\").lower() == \"true\"\n\n# Evaluation Settings\nCOMET_LOG_CONFUSION_MATRIX = os.getenv(\"COMET_LOG_CONFUSION_MATRIX\", \"true\").lower() == \"true\"\nCOMET_LOG_PREDICTIONS = os.getenv(\"COMET_LOG_PREDICTIONS\", \"true\").lower() == \"true\"\nCOMET_MAX_IMAGE_UPLOADS = int(os.getenv(\"COMET_MAX_IMAGE_UPLOADS\", 100))\n\n# Confusion Matrix Settings\nCONF_THRES = float(os.getenv(\"CONF_THRES\", 0.001))\nIOU_THRES = float(os.getenv(\"IOU_THRES\", 0.6))\n\n# Batch Logging Settings\nCOMET_LOG_BATCH_METRICS = os.getenv(\"COMET_LOG_BATCH_METRICS\", \"false\").lower() == \"true\"\nCOMET_BATCH_LOGGING_INTERVAL = os.getenv(\"COMET_BATCH_LOGGING_INTERVAL\", 1)\nCOMET_PREDICTION_LOGGING_INTERVAL = os.getenv(\"COMET_PREDICTION_LOGGING_INTERVAL\", 1)\nCOMET_LOG_PER_CLASS_METRICS = os.getenv(\"COMET_LOG_PER_CLASS_METRICS\", \"false\").lower() == \"true\"\n\nRANK = int(os.getenv(\"RANK\", -1))\n\nto_pil = T.ToPILImage()\n\n\nclass CometLogger:\n    \"\"\"Log metrics, parameters, source code, models and much more with Comet.\"\"\"\n\n    def __init__(self, opt, hyp, run_id=None, job_type=\"Training\", **experiment_kwargs) -> None:\n        \"\"\"Initializes CometLogger with given options, hyperparameters, run ID, job type, and additional experiment\n        arguments.\n        \"\"\"\n        self.job_type = job_type\n        self.opt = opt\n        self.hyp = hyp\n\n        # Comet Flags\n        self.comet_mode = COMET_MODE\n\n        self.save_model = opt.save_period > -1\n        self.model_name = COMET_MODEL_NAME\n\n        # Batch Logging Settings\n        self.log_batch_metrics = COMET_LOG_BATCH_METRICS\n        self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL\n\n        # Dataset Artifact Settings\n        self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET\n        self.resume = self.opt.resume\n\n        self.default_experiment_kwargs = {\n            \"log_code\": False,\n            \"log_env_gpu\": True,\n            \"log_env_cpu\": True,\n            \"project_name\": COMET_PROJECT_NAME,\n        } | experiment_kwargs\n        self.experiment = self._get_experiment(self.comet_mode, run_id)\n        self.experiment.set_name(self.opt.name)\n\n        self.data_dict = self.check_dataset(self.opt.data)\n        self.class_names = self.data_dict[\"names\"]\n        self.num_classes = 
self.data_dict[\"nc\"]\n\n        self.logged_images_count = 0\n        self.max_images = COMET_MAX_IMAGE_UPLOADS\n\n        if run_id is None:\n            self.experiment.log_other(\"Created from\", \"YOLOv5\")\n            if not isinstance(self.experiment, comet_ml.OfflineExperiment):\n                workspace, project_name, experiment_id = self.experiment.url.split(\"/\")[-3:]\n                self.experiment.log_other(\n                    \"Run Path\",\n                    f\"{workspace}/{project_name}/{experiment_id}\",\n                )\n            self.log_parameters(vars(opt))\n            self.log_parameters(self.opt.hyp)\n            self.log_asset_data(\n                self.opt.hyp,\n                name=\"hyperparameters.json\",\n                metadata={\"type\": \"hyp-config-file\"},\n            )\n            self.log_asset(\n                f\"{self.opt.save_dir}/opt.yaml\",\n                metadata={\"type\": \"opt-config-file\"},\n            )\n\n        self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX\n\n        if hasattr(self.opt, \"conf_thres\"):\n            self.conf_thres = self.opt.conf_thres\n        else:\n            self.conf_thres = CONF_THRES\n        if hasattr(self.opt, \"iou_thres\"):\n            self.iou_thres = self.opt.iou_thres\n        else:\n            self.iou_thres = IOU_THRES\n\n        self.log_parameters({\"val_iou_threshold\": self.iou_thres, \"val_conf_threshold\": self.conf_thres})\n\n        self.comet_log_predictions = COMET_LOG_PREDICTIONS\n        if self.opt.bbox_interval == -1:\n            self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10\n        else:\n            self.comet_log_prediction_interval = self.opt.bbox_interval\n\n        if self.comet_log_predictions:\n            self.metadata_dict = {}\n            self.logged_image_names = []\n\n        self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS\n\n        self.experiment.log_others(\n            {\n                \"comet_mode\": COMET_MODE,\n                \"comet_max_image_uploads\": COMET_MAX_IMAGE_UPLOADS,\n                \"comet_log_per_class_metrics\": COMET_LOG_PER_CLASS_METRICS,\n                \"comet_log_batch_metrics\": COMET_LOG_BATCH_METRICS,\n                \"comet_log_confusion_matrix\": COMET_LOG_CONFUSION_MATRIX,\n                \"comet_model_name\": COMET_MODEL_NAME,\n            }\n        )\n\n        # Check if running the Experiment with the Comet Optimizer\n        if hasattr(self.opt, \"comet_optimizer_id\"):\n            self.experiment.log_other(\"optimizer_id\", self.opt.comet_optimizer_id)\n            self.experiment.log_other(\"optimizer_objective\", self.opt.comet_optimizer_objective)\n            self.experiment.log_other(\"optimizer_metric\", self.opt.comet_optimizer_metric)\n            self.experiment.log_other(\"optimizer_parameters\", json.dumps(self.hyp))\n\n    def _get_experiment(self, mode, experiment_id=None):\n        \"\"\"Returns a new or existing Comet.ml experiment based on mode and optional experiment_id.\"\"\"\n        if mode == \"offline\":\n            return (\n                comet_ml.ExistingOfflineExperiment(\n                    previous_experiment=experiment_id,\n                    **self.default_experiment_kwargs,\n                )\n                if experiment_id is not None\n                else comet_ml.OfflineExperiment(\n                    **self.default_experiment_kwargs,\n                )\n            )\n        try:\n       
     if experiment_id is not None:\n                return comet_ml.ExistingExperiment(\n                    previous_experiment=experiment_id,\n                    **self.default_experiment_kwargs,\n                )\n\n            return comet_ml.Experiment(**self.default_experiment_kwargs)\n\n        except ValueError:\n            logger.warning(\n                \"COMET WARNING: \"\n                \"Comet credentials have not been set. \"\n                \"Comet will default to offline logging. \"\n                \"Please set your credentials to enable online logging.\"\n            )\n            return self._get_experiment(\"offline\", experiment_id)\n\n        return\n\n    def log_metrics(self, log_dict, **kwargs):\n        \"\"\"Logs metrics to the current experiment, accepting a dictionary of metric names and values.\"\"\"\n        self.experiment.log_metrics(log_dict, **kwargs)\n\n    def log_parameters(self, log_dict, **kwargs):\n        \"\"\"Logs parameters to the current experiment, accepting a dictionary of parameter names and values.\"\"\"\n        self.experiment.log_parameters(log_dict, **kwargs)\n\n    def log_asset(self, asset_path, **kwargs):\n        \"\"\"Logs a file or directory as an asset to the current experiment.\"\"\"\n        self.experiment.log_asset(asset_path, **kwargs)\n\n    def log_asset_data(self, asset, **kwargs):\n        \"\"\"Logs in-memory data as an asset to the current experiment, with optional kwargs.\"\"\"\n        self.experiment.log_asset_data(asset, **kwargs)\n\n    def log_image(self, img, **kwargs):\n        \"\"\"Logs an image to the current experiment with optional kwargs.\"\"\"\n        self.experiment.log_image(img, **kwargs)\n\n    def log_model(self, path, opt, epoch, fitness_score, best_model=False):\n        \"\"\"Logs model checkpoint to experiment with path, options, epoch, fitness, and best model flag.\"\"\"\n        if not self.save_model:\n            return\n\n        model_metadata = {\n            \"fitness_score\": fitness_score[-1],\n            \"epochs_trained\": epoch + 1,\n            \"save_period\": opt.save_period,\n            \"total_epochs\": opt.epochs,\n        }\n\n        model_files = glob.glob(f\"{path}/*.pt\")\n        for model_path in model_files:\n            name = Path(model_path).name\n\n            self.experiment.log_model(\n                self.model_name,\n                file_or_folder=model_path,\n                file_name=name,\n                metadata=model_metadata,\n                overwrite=True,\n            )\n\n    def check_dataset(self, data_file):\n        \"\"\"Validates the dataset configuration by loading the YAML file specified in `data_file`.\"\"\"\n        with open(data_file) as f:\n            data_config = yaml.safe_load(f)\n\n        path = data_config.get(\"path\")\n        if path and path.startswith(COMET_PREFIX):\n            path = data_config[\"path\"].replace(COMET_PREFIX, \"\")\n            return self.download_dataset_artifact(path)\n        self.log_asset(self.opt.data, metadata={\"type\": \"data-config-file\"})\n\n        return check_dataset(data_file)\n\n    def log_predictions(self, image, labelsn, path, shape, predn):\n        \"\"\"Logs predictions with IOU filtering, given image, labels, path, shape, and predictions.\"\"\"\n        if self.logged_images_count >= self.max_images:\n            return\n        detections = predn[predn[:, 4] > self.conf_thres]\n        iou = box_iou(labelsn[:, 1:], detections[:, :4])\n        mask, _ = torch.where(iou > 
self.iou_thres)\n        if len(mask) == 0:\n            return\n\n        filtered_detections = detections[mask]\n        filtered_labels = labelsn[mask]\n\n        image_id = path.split(\"/\")[-1].split(\".\")[0]\n        image_name = f\"{image_id}_curr_epoch_{self.experiment.curr_epoch}\"\n        if image_name not in self.logged_image_names:\n            native_scale_image = PIL.Image.open(path)\n            self.log_image(native_scale_image, name=image_name)\n            self.logged_image_names.append(image_name)\n\n        metadata = [\n            {\n                \"label\": f\"{self.class_names[int(cls)]}-gt\",\n                \"score\": 100,\n                \"box\": {\"x\": xyxy[0], \"y\": xyxy[1], \"x2\": xyxy[2], \"y2\": xyxy[3]},\n            }\n            for cls, *xyxy in filtered_labels.tolist()\n        ]\n        metadata.extend(\n            {\n                \"label\": f\"{self.class_names[int(cls)]}\",\n                \"score\": conf * 100,\n                \"box\": {\"x\": xyxy[0], \"y\": xyxy[1], \"x2\": xyxy[2], \"y2\": xyxy[3]},\n            }\n            for *xyxy, conf, cls in filtered_detections.tolist()\n        )\n        self.metadata_dict[image_name] = metadata\n        self.logged_images_count += 1\n\n        return\n\n    def preprocess_prediction(self, image, labels, shape, pred):\n        \"\"\"Scales predictions and target labels back to the native image space before logging.\"\"\"\n        nl, _ = labels.shape[0], pred.shape[0]\n\n        # Predictions\n        if self.opt.single_cls:\n            pred[:, 5] = 0\n\n        predn = pred.clone()\n        scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])\n\n        labelsn = None\n        if nl:\n            tbox = xywh2xyxy(labels[:, 1:5])  # target boxes\n            scale_boxes(image.shape[1:], tbox, shape[0], shape[1])  # native-space labels\n            labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels\n            scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])  # native-space pred\n\n        return predn, labelsn\n\n    def add_assets_to_artifact(self, artifact, path, asset_path, split):\n        \"\"\"Adds image and label assets to a Comet Artifact for the given dataset split and paths.\"\"\"\n        img_paths = sorted(glob.glob(f\"{asset_path}/*\"))\n        label_paths = img2label_paths(img_paths)\n\n        for image_file, label_file in zip(img_paths, label_paths):\n            image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])\n\n            try:\n                artifact.add(\n                    image_file,\n                    logical_path=image_logical_path,\n                    metadata={\"split\": split},\n                )\n                artifact.add(\n                    label_file,\n                    logical_path=label_logical_path,\n                    metadata={\"split\": split},\n                )\n            except ValueError as e:\n                logger.error(\"COMET ERROR: Error adding file to Artifact. 
Skipping file.\")\n                logger.error(f\"COMET ERROR: {e}\")\n                continue\n\n        return artifact\n\n    def upload_dataset_artifact(self):\n        \"\"\"Uploads a YOLOv5 dataset as an artifact to the Comet.ml platform.\"\"\"\n        dataset_name = self.data_dict.get(\"dataset_name\", \"yolov5-dataset\")\n        path = str((ROOT / Path(self.data_dict[\"path\"])).resolve())\n\n        metadata = self.data_dict.copy()\n        for key in [\"train\", \"val\", \"test\"]:\n            split_path = metadata.get(key)\n            if split_path is not None:\n                metadata[key] = split_path.replace(path, \"\")\n\n        artifact = comet_ml.Artifact(name=dataset_name, artifact_type=\"dataset\", metadata=metadata)\n        for key in metadata.keys():\n            if key in [\"train\", \"val\", \"test\"]:\n                if isinstance(self.upload_dataset, str) and (key != self.upload_dataset):\n                    continue\n\n                asset_path = self.data_dict.get(key)\n                if asset_path is not None:\n                    artifact = self.add_assets_to_artifact(artifact, path, asset_path, key)\n\n        self.experiment.log_artifact(artifact)\n\n        return\n\n    def download_dataset_artifact(self, artifact_path):\n        \"\"\"Downloads a dataset artifact to a specified directory using the experiment's logged artifact.\"\"\"\n        logged_artifact = self.experiment.get_artifact(artifact_path)\n        artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)\n        logged_artifact.download(artifact_save_dir)\n\n        metadata = logged_artifact.metadata\n        data_dict = metadata.copy()\n        data_dict[\"path\"] = artifact_save_dir\n\n        metadata_names = metadata.get(\"names\")\n        if isinstance(metadata_names, dict):\n            data_dict[\"names\"] = {int(k): v for k, v in metadata.get(\"names\").items()}\n        elif isinstance(metadata_names, list):\n            data_dict[\"names\"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}\n        else:\n            raise ValueError(\"Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary\")\n\n        return self.update_data_paths(data_dict)\n\n    def update_data_paths(self, data_dict):\n        \"\"\"Updates data paths in the dataset dictionary, defaulting 'path' to an empty string if not present.\"\"\"\n        path = data_dict.get(\"path\", \"\")\n\n        for split in [\"train\", \"val\", \"test\"]:\n            if data_dict.get(split):\n                split_path = data_dict.get(split)\n                data_dict[split] = (\n                    f\"{path}/{split_path}\" if isinstance(split_path, str) else [f\"{path}/{x}\" for x in split_path]\n                )\n\n        return data_dict\n\n    def on_pretrain_routine_end(self, paths):\n        \"\"\"Called at the end of pretraining routine to handle paths if training is not being resumed.\"\"\"\n        if self.opt.resume:\n            return\n\n        for path in paths:\n            self.log_asset(str(path))\n\n        if self.upload_dataset and not self.resume:\n            self.upload_dataset_artifact()\n\n        return\n\n    def on_train_start(self):\n        \"\"\"Logs hyperparameters at the start of training.\"\"\"\n        self.log_parameters(self.hyp)\n\n    def on_train_epoch_start(self):\n        \"\"\"Called at the start of each training epoch.\"\"\"\n        return\n\n    def on_train_epoch_end(self, epoch):\n        \"\"\"Updates the current epoch in the experiment tracking at the end of each epoch.\"\"\"\n        self.experiment.curr_epoch = epoch\n\n        return\n\n    def on_train_batch_start(self):\n        \"\"\"Called at the start of each training batch.\"\"\"\n        return\n\n    def on_train_batch_end(self, log_dict, step):\n        \"\"\"Callback function that updates and logs metrics at the end of each training batch if conditions are met.\"\"\"\n        self.experiment.curr_step = step\n        if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):\n            self.log_metrics(log_dict, step=step)\n\n        return\n\n    def on_train_end(self, files, save_dir, last, best, epoch, results):\n        \"\"\"Logs metadata and optionally saves model files at the end of training.\"\"\"\n        if self.comet_log_predictions:\n            curr_epoch = self.experiment.curr_epoch\n            self.experiment.log_asset_data(self.metadata_dict, \"image-metadata.json\", epoch=curr_epoch)\n\n        for f in files:\n            self.log_asset(f, metadata={\"epoch\": epoch})\n        self.log_asset(f\"{save_dir}/results.csv\", metadata={\"epoch\": epoch})\n\n        if not self.opt.evolve:\n            model_path = str(best if best.exists() else last)\n            name = Path(model_path).name\n            if self.save_model:\n                self.experiment.log_model(\n                    self.model_name,\n                    file_or_folder=model_path,\n                    file_name=name,\n                    overwrite=True,\n                )\n\n        # Check if running Experiment with Comet Optimizer\n        if hasattr(self.opt, \"comet_optimizer_id\"):\n            metric = results.get(self.opt.comet_optimizer_metric)\n            self.experiment.log_other(\"optimizer_metric_value\", metric)\n\n        self.finish_run()\n\n    def on_val_start(self):\n        \"\"\"Called at the start of validation, currently a placeholder with no functionality.\"\"\"\n        return\n\n    def on_val_batch_start(self):\n        \"\"\"Placeholder called at the start of a validation batch with no current functionality.\"\"\"\n        return\n\n    def 
on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):\n        \"\"\"Callback executed at the end of a validation batch, conditionally logs predictions to Comet ML.\"\"\"\n        if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):\n            return\n\n        for si, pred in enumerate(outputs):\n            if len(pred) == 0:\n                continue\n\n            image = images[si]\n            labels = targets[targets[:, 0] == si, 1:]\n            shape = shapes[si]\n            path = paths[si]\n            predn, labelsn = self.preprocess_prediction(image, labels, shape, pred)\n            if labelsn is not None:\n                self.log_predictions(image, labelsn, path, shape, predn)\n\n        return\n\n    def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):\n        \"\"\"Logs per-class metrics to Comet.ml after validation if enabled and more than one class exists.\"\"\"\n        if self.comet_log_per_class_metrics and self.num_classes > 1:\n            for i, c in enumerate(ap_class):\n                class_name = self.class_names[c]\n                self.experiment.log_metrics(\n                    {\n                        \"mAP@.5\": ap50[i],\n                        \"mAP@.5:.95\": ap[i],\n                        \"precision\": p[i],\n                        \"recall\": r[i],\n                        \"f1\": f1[i],\n                        \"true_positives\": tp[i],\n                        \"false_positives\": fp[i],\n                        \"support\": nt[c],\n                    },\n                    prefix=class_name,\n                )\n\n        if self.comet_log_confusion_matrix:\n            epoch = self.experiment.curr_epoch\n            class_names = list(self.class_names.values())\n            class_names.append(\"background\")\n            num_classes = len(class_names)\n\n            self.experiment.log_confusion_matrix(\n                matrix=confusion_matrix.matrix,\n                max_categories=num_classes,\n                labels=class_names,\n                epoch=epoch,\n                column_label=\"Actual Category\",\n                row_label=\"Predicted Category\",\n                file_name=f\"confusion-matrix-epoch-{epoch}.json\",\n            )\n\n    def on_fit_epoch_end(self, result, epoch):\n        \"\"\"Logs metrics at the end of each training epoch.\"\"\"\n        self.log_metrics(result, epoch=epoch)\n\n    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):\n        \"\"\"Callback to save model checkpoints periodically if conditions are met.\"\"\"\n        if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:\n            self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)\n\n    def on_params_update(self, params):\n        \"\"\"Logs updated parameters during training.\"\"\"\n        self.log_parameters(params)\n\n    def finish_run(self):\n        \"\"\"Ends the current experiment and logs its completion.\"\"\"\n        self.experiment.end()\n"
  },
  {
    "path": "utils/loggers/comet/comet_utils.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\ntry:\n    import comet_ml\nexcept ImportError:\n    comet_ml = None\n\nimport yaml\n\nlogger = logging.getLogger(__name__)\n\nCOMET_PREFIX = \"comet://\"\nCOMET_MODEL_NAME = os.getenv(\"COMET_MODEL_NAME\", \"yolov5\")\nCOMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv(\"COMET_DEFAULT_CHECKPOINT_FILENAME\", \"last.pt\")\n\n\ndef download_model_checkpoint(opt, experiment):\n    \"\"\"Downloads YOLOv5 model checkpoint from Comet ML experiment, updating `opt.weights` with download path.\"\"\"\n    model_dir = f\"{opt.project}/{experiment.name}\"\n    os.makedirs(model_dir, exist_ok=True)\n\n    model_name = COMET_MODEL_NAME\n    model_asset_list = experiment.get_model_asset_list(model_name)\n\n    if len(model_asset_list) == 0:\n        logger.error(f\"COMET ERROR: No checkpoints found for model name : {model_name}\")\n        return\n\n    model_asset_list = sorted(\n        model_asset_list,\n        key=lambda x: x[\"step\"],\n        reverse=True,\n    )\n    logged_checkpoint_map = {asset[\"fileName\"]: asset[\"assetId\"] for asset in model_asset_list}\n\n    resource_url = urlparse(opt.weights)\n    checkpoint_filename = resource_url.query\n\n    if checkpoint_filename:\n        asset_id = logged_checkpoint_map.get(checkpoint_filename)\n    else:\n        asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)\n        checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME\n\n    if asset_id is None:\n        logger.error(f\"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment\")\n        return\n\n    try:\n        logger.info(f\"COMET INFO: Downloading checkpoint {checkpoint_filename}\")\n        asset_filename = checkpoint_filename\n\n        model_binary = experiment.get_asset(asset_id, return_type=\"binary\", stream=False)\n        model_download_path = f\"{model_dir}/{asset_filename}\"\n        with open(model_download_path, \"wb\") as f:\n            f.write(model_binary)\n\n        opt.weights = model_download_path\n\n    except Exception as e:\n        logger.warning(\"COMET WARNING: Unable to download checkpoint from Comet\")\n        logger.exception(e)\n\n\ndef set_opt_parameters(opt, experiment):\n    \"\"\"Update the opts Namespace with parameters from Comet's ExistingExperiment when resuming a run.\n\n    Args:\n        opt (argparse.Namespace): Namespace of command line options\n        experiment (comet_ml.APIExperiment): Comet API Experiment object\n    \"\"\"\n    asset_list = experiment.get_asset_list()\n    resume_string = opt.resume\n\n    for asset in asset_list:\n        if asset[\"fileName\"] == \"opt.yaml\":\n            asset_id = asset[\"assetId\"]\n            asset_binary = experiment.get_asset(asset_id, return_type=\"binary\", stream=False)\n            opt_dict = yaml.safe_load(asset_binary)\n            for key, value in opt_dict.items():\n                setattr(opt, key, value)\n            opt.resume = resume_string\n\n    # Save hyperparameters to YAML file\n    # Necessary to pass checks in training script\n    save_dir = f\"{opt.project}/{experiment.name}\"\n    os.makedirs(save_dir, exist_ok=True)\n\n    hyp_yaml_path = f\"{save_dir}/hyp.yaml\"\n    with open(hyp_yaml_path, \"w\") as f:\n        yaml.dump(opt.hyp, f)\n    opt.hyp = hyp_yaml_path\n\n\ndef check_comet_weights(opt):\n    \"\"\"Downloads model weights from Comet and updates the weights path to point 
to saved weights location.\n\n    Args:\n        opt (argparse.Namespace): Command Line arguments passed to YOLOv5 training script\n\n    Returns:\n        None/bool: Return True if weights are successfully downloaded else return None\n    \"\"\"\n    if comet_ml is None:\n        return\n\n    if isinstance(opt.weights, str) and opt.weights.startswith(COMET_PREFIX):\n        api = comet_ml.API()\n        resource = urlparse(opt.weights)\n        experiment_path = f\"{resource.netloc}{resource.path}\"\n        experiment = api.get(experiment_path)\n        download_model_checkpoint(opt, experiment)\n        return True\n\n    return None\n\n\ndef check_comet_resume(opt):\n    \"\"\"Restores run parameters to its original state based on the model checkpoint and logged Experiment parameters.\n\n    Args:\n        opt (argparse.Namespace): Command Line arguments passed to YOLOv5 training script\n\n    Returns:\n        None/bool: Return True if the run is restored successfully else return None\n    \"\"\"\n    if comet_ml is None:\n        return\n\n    if isinstance(opt.resume, str) and opt.resume.startswith(COMET_PREFIX):\n        api = comet_ml.API()\n        resource = urlparse(opt.resume)\n        experiment_path = f\"{resource.netloc}{resource.path}\"\n        experiment = api.get(experiment_path)\n        set_opt_parameters(opt, experiment)\n        download_model_checkpoint(opt, experiment)\n\n        return True\n\n    return None\n"
  },
  {
    "path": "utils/loggers/comet/hpo.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport argparse\nimport json\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nimport comet_ml\n\nlogger = logging.getLogger(__name__)\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[3]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n\nfrom train import train\nfrom utils.callbacks import Callbacks\nfrom utils.general import increment_path\nfrom utils.torch_utils import select_device\n\n# Project Configuration\nconfig = comet_ml.config.get_config()\nCOMET_PROJECT_NAME = config.get_string(os.getenv(\"COMET_PROJECT_NAME\"), \"comet.project_name\", default=\"yolov5\")\n\n\ndef get_args(known=False):\n    \"\"\"Parses command-line arguments for YOLOv5 training, supporting configuration of weights, data paths,\n    hyperparameters, and more.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--weights\", type=str, default=ROOT / \"yolov5s.pt\", help=\"initial weights path\")\n    parser.add_argument(\"--cfg\", type=str, default=\"\", help=\"model.yaml path\")\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--hyp\", type=str, default=ROOT / \"data/hyps/hyp.scratch-low.yaml\", help=\"hyperparameters path\")\n    parser.add_argument(\"--epochs\", type=int, default=300, help=\"total training epochs\")\n    parser.add_argument(\"--batch-size\", type=int, default=16, help=\"total batch size for all GPUs, -1 for autobatch\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=640, help=\"train, val image size (pixels)\")\n    parser.add_argument(\"--rect\", action=\"store_true\", help=\"rectangular training\")\n    parser.add_argument(\"--resume\", nargs=\"?\", const=True, default=False, help=\"resume most recent training\")\n    parser.add_argument(\"--nosave\", action=\"store_true\", help=\"only save final checkpoint\")\n    parser.add_argument(\"--noval\", action=\"store_true\", help=\"only validate final epoch\")\n    parser.add_argument(\"--noautoanchor\", action=\"store_true\", help=\"disable AutoAnchor\")\n    parser.add_argument(\"--noplots\", action=\"store_true\", help=\"save no plot files\")\n    parser.add_argument(\"--evolve\", type=int, nargs=\"?\", const=300, help=\"evolve hyperparameters for x generations\")\n    parser.add_argument(\"--bucket\", type=str, default=\"\", help=\"gsutil bucket\")\n    parser.add_argument(\"--cache\", type=str, nargs=\"?\", const=\"ram\", help='--cache images in \"ram\" (default) or \"disk\"')\n    parser.add_argument(\"--image-weights\", action=\"store_true\", help=\"use weighted image selection for training\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 
0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--multi-scale\", action=\"store_true\", help=\"vary img-size +/- 50%%\")\n    parser.add_argument(\"--single-cls\", action=\"store_true\", help=\"train multi-class data as single-class\")\n    parser.add_argument(\"--optimizer\", type=str, choices=[\"SGD\", \"Adam\", \"AdamW\"], default=\"SGD\", help=\"optimizer\")\n    parser.add_argument(\"--sync-bn\", action=\"store_true\", help=\"use SyncBatchNorm, only available in DDP mode\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/train\", help=\"save to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--quad\", action=\"store_true\", help=\"quad dataloader\")\n    parser.add_argument(\"--cos-lr\", action=\"store_true\", help=\"cosine LR scheduler\")\n    parser.add_argument(\"--label-smoothing\", type=float, default=0.0, help=\"Label smoothing epsilon\")\n    parser.add_argument(\"--patience\", type=int, default=100, help=\"EarlyStopping patience (epochs without improvement)\")\n    parser.add_argument(\"--freeze\", nargs=\"+\", type=int, default=[0], help=\"Freeze layers: backbone=10, first3=0 1 2\")\n    parser.add_argument(\"--save-period\", type=int, default=-1, help=\"Save checkpoint every x epochs (disabled if < 1)\")\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"Global training seed\")\n    parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"Automatic DDP Multi-GPU argument, do not modify\")\n\n    # Weights & Biases arguments\n    parser.add_argument(\"--entity\", default=None, help=\"W&B: Entity\")\n    parser.add_argument(\"--upload_dataset\", nargs=\"?\", const=True, default=False, help='W&B: Upload data, \"val\" option')\n    parser.add_argument(\"--bbox_interval\", type=int, default=-1, help=\"W&B: Set bounding-box image logging interval\")\n    parser.add_argument(\"--artifact_alias\", type=str, default=\"latest\", help=\"W&B: Version of dataset artifact to use\")\n\n    # Comet Arguments\n    parser.add_argument(\"--comet_optimizer_config\", type=str, help=\"Comet: Path to a Comet Optimizer Config File.\")\n    parser.add_argument(\"--comet_optimizer_id\", type=str, help=\"Comet: ID of the Comet Optimizer sweep.\")\n    parser.add_argument(\"--comet_optimizer_objective\", type=str, help=\"Comet: Set to 'minimize' or 'maximize'.\")\n    parser.add_argument(\"--comet_optimizer_metric\", type=str, help=\"Comet: Metric to Optimize.\")\n    parser.add_argument(\n        \"--comet_optimizer_workers\",\n        type=int,\n        default=1,\n        help=\"Comet: Number of Parallel Workers to use with the Comet Optimizer.\",\n    )\n\n    return parser.parse_known_args()[0] if known else parser.parse_args()\n\n\ndef run(parameters, opt):\n    \"\"\"Executes YOLOv5 training with given hyperparameters and options, setting up device and training directories.\"\"\"\n    hyp_dict = {k: v for k, v in parameters.items() if k not in [\"epochs\", \"batch_size\"]}\n\n    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))\n    opt.batch_size = parameters.get(\"batch_size\")\n    opt.epochs = parameters.get(\"epochs\")\n\n    device = select_device(opt.device, batch_size=opt.batch_size)\n    
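# Train a single YOLOv5 run with the sampled hyperparameters; each call corresponds to one trial in the Comet sweep.\n    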
train(hyp_dict, opt, device, callbacks=Callbacks())\n\n\nif __name__ == \"__main__\":\n    opt = get_args(known=True)\n\n    opt.weights = str(opt.weights)\n    opt.cfg = str(opt.cfg)\n    opt.data = str(opt.data)\n    opt.project = str(opt.project)\n\n    optimizer_id = os.getenv(\"COMET_OPTIMIZER_ID\")\n    if optimizer_id is None:\n        with open(opt.comet_optimizer_config) as f:\n            optimizer_config = json.load(f)\n        optimizer = comet_ml.Optimizer(optimizer_config)\n    else:\n        optimizer = comet_ml.Optimizer(optimizer_id)\n\n    opt.comet_optimizer_id = optimizer.id\n    status = optimizer.status()\n\n    opt.comet_optimizer_objective = status[\"spec\"][\"objective\"]\n    opt.comet_optimizer_metric = status[\"spec\"][\"metric\"]\n\n    logger.info(\"COMET INFO: Starting Hyperparameter Sweep\")\n    for parameter in optimizer.get_parameters():\n        run(parameter[\"parameters\"], opt)\n"
  },
  {
    "path": "utils/loggers/wandb/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
  },
  {
    "path": "utils/loggers/wandb/wandb_utils.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# WARNING ⚠️ wandb is deprecated and will be removed in future release.\n# See supported integrations at https://github.com/ultralytics/yolov5#integrations\n\nimport logging\nimport os\nimport sys\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\nfrom utils.general import LOGGER, colorstr\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[3]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nRANK = int(os.getenv(\"RANK\", -1))\nDEPRECATION_WARNING = (\n    f\"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. \"\n    f\"See supported integrations at https://github.com/ultralytics/yolov5#integrations.\"\n)\n\ntry:\n    import wandb\n\n    assert hasattr(wandb, \"__version__\")  # verify package import not local dir\n    LOGGER.warning(DEPRECATION_WARNING)\nexcept (ImportError, AssertionError):\n    wandb = None\n\n\nclass WandbLogger:\n    \"\"\"Log training runs, datasets, models, and predictions to Weights & Biases.\n\n    This logger sends information to W&B at wandb.ai. By default, this information includes hyperparameters, system\n    configuration and metrics, model metrics, and basic data metrics and analyses.\n\n    By providing additional command line arguments to train.py, datasets, models and predictions can also be logged.\n\n    For more on how this logger is used, see the Weights & Biases documentation:\n    https://docs.wandb.com/guides/integrations/yolov5\n    \"\"\"\n\n    def __init__(self, opt, run_id=None, job_type=\"Training\"):\n        \"\"\"- Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup training processes\n        if job_type is 'Training'.\n\n        Args:\n            opt (namespace): Commandline arguments for this run:\n            run_id (str): Run ID of W&B run to be resumed\n            job_type (str): To set the job_type for this run\n        \"\"\"\n        # Pre-training routine\n        self.job_type = job_type\n        self.wandb, self.wandb_run = wandb, wandb.run if wandb else None\n        self.val_artifact, self.train_artifact = None, None\n        self.train_artifact_path, self.val_artifact_path = None, None\n        self.result_artifact = None\n        self.val_table, self.result_table = None, None\n        self.max_imgs_to_log = 16\n        self.data_dict = None\n        if self.wandb:\n            self.wandb_run = wandb.run or wandb.init(\n                config=opt,\n                resume=\"allow\",\n                project=\"YOLOv5\" if opt.project == \"runs/train\" else Path(opt.project).stem,\n                entity=opt.entity,\n                name=opt.name if opt.name != \"exp\" else None,\n                job_type=job_type,\n                id=run_id,\n                allow_val_change=True,\n            )\n\n        if self.wandb_run and self.job_type == \"Training\":\n            if isinstance(opt.data, dict):\n                # This means another dataset manager has already processed the dataset info (e.g. 
ClearML)\n                # and they will have stored the already processed dict in opt.data\n                self.data_dict = opt.data\n            self.setup_training(opt)\n\n    def setup_training(self, opt):\n        \"\"\"Set up the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset\n        artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX - Update data_dict to contain info of previous\n        run if resumed and the paths of dataset artifact if downloaded - Set up log_dict,\n        initialize bbox_interval.\n\n        Args:\n            opt (namespace): commandline arguments for this run\n        \"\"\"\n        self.log_dict, self.current_epoch = {}, 0\n        self.bbox_interval = opt.bbox_interval\n        if isinstance(opt.resume, str):\n            model_dir, _ = self.download_model_artifact(opt)\n            if model_dir:\n                self.weights = Path(model_dir) / \"last.pt\"\n                config = self.wandb_run.config\n                opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = (\n                    str(self.weights),\n                    config.save_period,\n                    config.batch_size,\n                    config.bbox_interval,\n                    config.epochs,\n                    config.hyp,\n                    config.imgsz,\n                )\n\n        if opt.bbox_interval == -1:\n            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1\n            if opt.evolve or opt.noplots:\n                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval\n\n    def log_model(self, path, opt, epoch, fitness_score, best_model=False):\n        \"\"\"Log the model checkpoint as W&B artifact.\n\n        Args:\n            path (Path): Path of directory containing the checkpoints\n            opt (namespace): Command line arguments for this run\n            epoch (int): Current epoch number\n            fitness_score (float): fitness score for current epoch\n            best_model (boolean): Boolean representing if the current checkpoint is the best yet.\n        \"\"\"\n        model_artifact = wandb.Artifact(\n            f\"run_{wandb.run.id}_model\",\n            type=\"model\",\n            metadata={\n                \"original_url\": str(path),\n                \"epochs_trained\": epoch + 1,\n                \"save period\": opt.save_period,\n                \"project\": opt.project,\n                \"total_epochs\": opt.epochs,\n                \"fitness_score\": fitness_score,\n            },\n        )\n        model_artifact.add_file(str(path / \"last.pt\"), name=\"last.pt\")\n        wandb.log_artifact(\n            model_artifact,\n            aliases=[\n                \"latest\",\n                \"last\",\n                f\"epoch {self.current_epoch!s}\",\n                \"best\" if best_model else \"\",\n            ],\n        )\n        LOGGER.info(f\"Saving model artifact on epoch {epoch + 1}\")\n\n    def val_one_image(self, pred, predn, path, names, im):\n        \"\"\"Evaluates model prediction for a single image, returning metrics and visualizations.\"\"\"\n        pass\n\n    def log(self, log_dict):\n        \"\"\"Save the metrics to the logging dictionary.\n\n        Args:\n            log_dict (Dict): metrics/media to be logged in current step\n        \"\"\"\n        if self.wandb_run:\n            for key, value in log_dict.items():
\n                self.log_dict[key] = value\n\n    def end_epoch(self):\n        \"\"\"Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.\"\"\"\n        if self.wandb_run:\n            with all_logging_disabled():\n                try:\n                    wandb.log(self.log_dict)\n                except BaseException as e:\n                    LOGGER.info(\n                        f\"An error occurred in wandb logger. The training will proceed without interruption. More info\\n{e}\"\n                    )\n                    self.wandb_run.finish()\n                    self.wandb_run = None\n                self.log_dict = {}\n\n    def finish_run(self):\n        \"\"\"Log metrics if any and finish the current W&B run.\"\"\"\n        if self.wandb_run:\n            if self.log_dict:\n                with all_logging_disabled():\n                    wandb.log(self.log_dict)\n            wandb.run.finish()\n            LOGGER.warning(DEPRECATION_WARNING)\n\n\n@contextmanager\ndef all_logging_disabled(highest_level=logging.CRITICAL):\n    \"\"\"Source - https://gist.github.com/simon-weber/7853144\n    A context manager that will prevent any logging messages triggered during the body from being processed.\n    :param highest_level: the maximum logging level in use.\n      This would only need to be changed if a custom level greater than CRITICAL is defined.\n    \"\"\"\n    previous_level = logging.root.manager.disable\n    logging.disable(highest_level)\n    try:\n        yield\n    finally:\n        logging.disable(previous_level)\n"
  },
  {
    "path": "utils/loss.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Loss functions.\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.metrics import bbox_iou\nfrom utils.torch_utils import de_parallel\n\n\ndef smooth_BCE(eps=0.1):\n    \"\"\"Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details\n    see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441.\n    \"\"\"\n    return 1.0 - 0.5 * eps, 0.5 * eps\n\n\nclass BCEBlurWithLogitsLoss(nn.Module):\n    \"\"\"Modified BCEWithLogitsLoss to reduce missing label effects in YOLOv5 training with optional alpha smoothing.\"\"\"\n\n    def __init__(self, alpha=0.05):\n        \"\"\"Initializes a modified BCEWithLogitsLoss with reduced missing label effects, taking optional alpha smoothing\n        parameter.\n        \"\"\"\n        super().__init__()\n        self.loss_fcn = nn.BCEWithLogitsLoss(reduction=\"none\")  # must be nn.BCEWithLogitsLoss()\n        self.alpha = alpha\n\n    def forward(self, pred, true):\n        \"\"\"Computes modified BCE loss for YOLOv5 with reduced missing label effects, taking pred and true tensors,\n        returns mean loss.\n        \"\"\"\n        loss = self.loss_fcn(pred, true)\n        pred = torch.sigmoid(pred)  # prob from logits\n        dx = pred - true  # reduce only missing label effects\n        # dx = (pred - true).abs()  # reduce missing label and false label effects\n        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))\n        loss *= alpha_factor\n        return loss.mean()\n\n\nclass FocalLoss(nn.Module):\n    \"\"\"Applies focal loss to address class imbalance by modifying BCEWithLogitsLoss with gamma and alpha parameters.\"\"\"\n\n    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n        \"\"\"Initializes FocalLoss with specified loss function, gamma, and alpha values; modifies loss reduction to\n        'none'.\n        \"\"\"\n        super().__init__()\n        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = loss_fcn.reduction\n        self.loss_fcn.reduction = \"none\"  # required to apply FL to each element\n\n    def forward(self, pred, true):\n        \"\"\"Calculates the focal loss between predicted and true labels using a modified BCEWithLogitsLoss.\"\"\"\n        loss = self.loss_fcn(pred, true)\n        # p_t = torch.exp(-loss)\n        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability\n\n        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py\n        pred_prob = torch.sigmoid(pred)  # prob from logits\n        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)\n        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n        modulating_factor = (1.0 - p_t) ** self.gamma\n        loss *= alpha_factor * modulating_factor\n\n        if self.reduction == \"mean\":\n            return loss.mean()\n        elif self.reduction == \"sum\":\n            return loss.sum()\n        else:  # 'none'\n            return loss\n\n\nclass QFocalLoss(nn.Module):\n    \"\"\"Implements Quality Focal Loss to address class imbalance by modulating loss based on prediction confidence.\"\"\"\n\n    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n        \"\"\"Initializes Quality Focal Loss with given loss function, gamma, alpha; modifies reduction to 
'none'.\"\"\"\n        super().__init__()\n        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = loss_fcn.reduction\n        self.loss_fcn.reduction = \"none\"  # required to apply FL to each element\n\n    def forward(self, pred, true):\n        \"\"\"Computes the focal loss between `pred` and `true` using BCEWithLogitsLoss, adjusting for imbalance with\n        `gamma` and `alpha`.\n        \"\"\"\n        loss = self.loss_fcn(pred, true)\n\n        pred_prob = torch.sigmoid(pred)  # prob from logits\n        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n        modulating_factor = torch.abs(true - pred_prob) ** self.gamma\n        loss *= alpha_factor * modulating_factor\n\n        if self.reduction == \"mean\":\n            return loss.mean()\n        elif self.reduction == \"sum\":\n            return loss.sum()\n        else:  # 'none'\n            return loss\n\n\nclass ComputeLoss:\n    \"\"\"Computes the total loss for YOLOv5 model predictions, including classification, box, and objectness losses.\"\"\"\n\n    sort_obj_iou = False\n\n    # Compute losses\n    def __init__(self, model, autobalance=False):\n        \"\"\"Initializes ComputeLoss with model and autobalance option, autobalances losses if True.\"\"\"\n        device = next(model.parameters()).device  # get model device\n        h = model.hyp  # hyperparameters\n\n        # Define criteria\n        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h[\"cls_pw\"]], device=device))\n        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h[\"obj_pw\"]], device=device))\n\n        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n        self.cp, self.cn = smooth_BCE(eps=h.get(\"label_smoothing\", 0.0))  # positive, negative BCE targets\n\n        # Focal loss\n        g = h[\"fl_gamma\"]  # focal loss gamma\n        if g > 0:\n            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n        m = de_parallel(model).model[-1]  # Detect() module\n        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7\n        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index\n        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance\n        self.na = m.na  # number of anchors\n        self.nc = m.nc  # number of classes\n        self.nl = m.nl  # number of layers\n        self.anchors = m.anchors\n        self.device = device\n\n    def __call__(self, p, targets):  # predictions, targets\n        \"\"\"Performs forward pass, calculating class, box, and object loss for given predictions and targets.\"\"\"\n        lcls = torch.zeros(1, device=self.device)  # class loss\n        lbox = torch.zeros(1, device=self.device)  # box loss\n        lobj = torch.zeros(1, device=self.device)  # object loss\n        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets\n\n        # Losses\n        for i, pi in enumerate(p):  # layer index, layer predictions\n            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj\n\n            if n := b.shape[0]:\n                # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1)  # faster, requires torch 1.8.0\n                pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1)  # 
target-subset of predictions\n\n                # Regression\n                pxy = pxy.sigmoid() * 2 - 0.5\n                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]\n                pbox = torch.cat((pxy, pwh), 1)  # predicted box\n                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)\n                lbox += (1.0 - iou).mean()  # iou loss\n\n                # Objectness\n                iou = iou.detach().clamp(0).type(tobj.dtype)\n                if self.sort_obj_iou:\n                    j = iou.argsort()\n                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]\n                if self.gr < 1:\n                    iou = (1.0 - self.gr) + self.gr * iou\n                tobj[b, a, gj, gi] = iou  # iou ratio\n\n                # Classification\n                if self.nc > 1:  # cls loss (only if multiple classes)\n                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets\n                    t[range(n), tcls[i]] = self.cp\n                    lcls += self.BCEcls(pcls, t)  # BCE\n\n            obji = self.BCEobj(pi[..., 4], tobj)\n            lobj += obji * self.balance[i]  # obj loss\n            if self.autobalance:\n                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n        if self.autobalance:\n            self.balance = [x / self.balance[self.ssi] for x in self.balance]\n        lbox *= self.hyp[\"box\"]\n        lobj *= self.hyp[\"obj\"]\n        lcls *= self.hyp[\"cls\"]\n        bs = tobj.shape[0]  # batch size\n\n        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()\n\n    def build_targets(self, p, targets):\n        \"\"\"Prepares model targets from input targets (image,class,x,y,w,h) for loss computation, returning class, box,\n        indices, and anchors.\n        \"\"\"\n        na, nt = self.na, targets.shape[0]  # number of anchors, targets\n        tcls, tbox, indices, anch = [], [], [], []\n        gain = torch.ones(7, device=self.device)  # normalized to gridspace gain\n        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)\n        targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2)  # append anchor indices\n\n        g = 0.5  # bias\n        off = (\n            torch.tensor(\n                [\n                    [0, 0],\n                    [1, 0],\n                    [0, 1],\n                    [-1, 0],\n                    [0, -1],  # j,k,l,m\n                    # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm\n                ],\n                device=self.device,\n            ).float()\n            * g\n        )  # offsets\n\n        for i in range(self.nl):\n            anchors, shape = self.anchors[i], p[i].shape\n            gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain\n\n            # Match targets to anchors\n            t = targets * gain  # shape(3,n,7)\n            if nt:\n                # Matches\n                r = t[..., 4:6] / anchors[:, None]  # wh ratio\n                j = torch.max(r, 1 / r).max(2)[0] < self.hyp[\"anchor_t\"]  # compare\n                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n                t = t[j]  # filter\n\n                # Offsets\n                gxy = t[:, 2:4]  # grid xy\n                gxi = gain[[2, 3]] - gxy  # inverse\n                j, k = ((gxy % 1 < g) & (gxy > 1)).T\n                l, m = ((gxi 
% 1 < g) & (gxi > 1)).T\n                j = torch.stack((torch.ones_like(j), j, k, l, m))\n                t = t.repeat((5, 1, 1))[j]\n                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n            else:\n                t = targets[0]\n                offsets = 0\n\n            # Define\n            bc, gxy, gwh, a = t.chunk(4, 1)  # (image, class), grid xy, grid wh, anchors\n            a, (b, c) = a.long().view(-1), bc.long().T  # anchors, image, class\n            gij = (gxy - offsets).long()\n            gi, gj = gij.T  # grid indices\n\n            # Append\n            indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid\n            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box\n            anch.append(anchors[a])  # anchors\n            tcls.append(c)  # class\n\n        return tcls, tbox, indices, anch\n"
  },
  {
    "path": "utils/metrics.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Model validation metrics.\"\"\"\n\nimport math\nimport warnings\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\n# NumPy 2.0 compatibility: trapezoid was renamed from trapz\ntrapezoid = np.trapezoid if hasattr(np, \"trapezoid\") else np.trapz\n\nfrom utils import TryExcept, threaded\n\n\ndef fitness(x):\n    \"\"\"Calculates fitness of a model using weighted sum of metrics P, R, mAP@0.5, mAP@0.5:0.95.\"\"\"\n    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]\n    return (x[:, :4] * w).sum(1)\n\n\ndef smooth(y, f=0.05):\n    \"\"\"Applies box filter smoothing to array `y` with fraction `f`, yielding a smoothed array.\"\"\"\n    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)\n    p = np.ones(nf // 2)  # ones padding\n    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded\n    return np.convolve(yp, np.ones(nf) / nf, mode=\"valid\")  # y-smoothed\n\n\ndef ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=\".\", names=(), eps=1e-16, prefix=\"\"):\n    \"\"\"Compute the average precision, given the recall and precision curves.\n\n    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n\n    Args:\n        tp: True positives (nparray, nx1 or nx10).\n        conf: Objectness value from 0-1 (nparray).\n        pred_cls: Predicted object classes (nparray).\n        target_cls: True object classes (nparray).\n        plot: Plot precision-recall curve at mAP@0.5\n        save_dir: Plot save directory\n\n    Returns:\n        The average precision as computed in py-faster-rcnn.\n    \"\"\"\n    # Sort by objectness\n    i = np.argsort(-conf)\n    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n    # Find unique classes\n    unique_classes, nt = np.unique(target_cls, return_counts=True)\n    nc = unique_classes.shape[0]  # number of classes, number of detections\n\n    # Create Precision-Recall curve and compute AP for each class\n    px, py = np.linspace(0, 1, 1000), []  # for plotting\n    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\n    for ci, c in enumerate(unique_classes):\n        i = pred_cls == c\n        n_l = nt[ci]  # number of labels\n        n_p = i.sum()  # number of predictions\n        if n_p == 0 or n_l == 0:\n            continue\n\n        # Accumulate FPs and TPs\n        fpc = (1 - tp[i]).cumsum(0)\n        tpc = tp[i].cumsum(0)\n\n        # Recall\n        recall = tpc / (n_l + eps)  # recall curve\n        r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases\n\n        # Precision\n        precision = tpc / (tpc + fpc)  # precision curve\n        p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score\n\n        # AP from recall-precision curve\n        for j in range(tp.shape[1]):\n            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\n            if plot and j == 0:\n                py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5\n\n    # Compute F1 (harmonic mean of precision and recall)\n    f1 = 2 * p * r / (p + r + eps)\n    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data\n    names = dict(enumerate(names))  # to dict\n    if plot:\n        plot_pr_curve(px, py, ap, Path(save_dir) / f\"{prefix}PR_curve.png\", names)\n        plot_mc_curve(px, f1, 
Path(save_dir) / f\"{prefix}F1_curve.png\", names, ylabel=\"F1\")\n        plot_mc_curve(px, p, Path(save_dir) / f\"{prefix}P_curve.png\", names, ylabel=\"Precision\")\n        plot_mc_curve(px, r, Path(save_dir) / f\"{prefix}R_curve.png\", names, ylabel=\"Recall\")\n\n    i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index\n    p, r, f1 = p[:, i], r[:, i], f1[:, i]\n    tp = (r * nt).round()  # true positives\n    fp = (tp / (p + eps) - tp).round()  # false positives\n    return tp, fp, p, r, f1, ap, unique_classes.astype(int)\n\n\ndef compute_ap(recall, precision):\n    \"\"\"Compute the average precision, given the recall and precision curves.\n\n    Args:\n        recall: The recall curve (list)\n        precision: The precision curve (list)\n\n    Returns:\n        Average precision\n        precision curve\n        recall curve\n    \"\"\"\n    # Append sentinel values to beginning and end\n    mrec = np.concatenate(([0.0], recall, [1.0]))\n    mpre = np.concatenate(([1.0], precision, [0.0]))\n\n    # Compute the precision envelope\n    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))\n\n    # Integrate area under curve\n    method = \"interp\"  # methods: 'continuous', 'interp'\n    if method == \"interp\":\n        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)\n        ap = trapezoid(np.interp(x, mrec, mpre), x)  # integrate\n    else:  # 'continuous'\n        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes\n        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve\n\n    return ap, mpre, mrec\n\n\nclass ConfusionMatrix:\n    \"\"\"Generates and visualizes a confusion matrix for evaluating object detection classification performance.\"\"\"\n\n    def __init__(self, nc, conf=0.25, iou_thres=0.45):\n        \"\"\"Initializes ConfusionMatrix with given number of classes, confidence, and IoU threshold.\"\"\"\n        self.matrix = np.zeros((nc + 1, nc + 1))\n        self.nc = nc  # number of classes\n        self.conf = conf\n        self.iou_thres = iou_thres\n\n    def process_batch(self, detections, labels):\n        \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n\n        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n\n        Args:\n            detections (Array[N, 6]): x1, y1, x2, y2, conf, class\n            labels (Array[M, 5]): class, x1, y1, x2, y2\n\n        Returns:\n            None, updates confusion matrix accordingly\n        \"\"\"\n        if detections is None:\n            gt_classes = labels.int()\n            for gc in gt_classes:\n                self.matrix[self.nc, gc] += 1  # background FN\n            return\n\n        detections = detections[detections[:, 4] > self.conf]\n        gt_classes = labels[:, 0].int()\n        detection_classes = detections[:, 5].int()\n        iou = box_iou(labels[:, 1:], detections[:, :4])\n\n        x = torch.where(iou > self.iou_thres)\n        if x[0].shape[0]:\n            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n            if x[0].shape[0] > 1:\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n        else:\n            matches = np.zeros((0, 3))\n\n        n = matches.shape[0] > 0\n        m0, m1, _ = matches.transpose().astype(int)\n 
       for i, gc in enumerate(gt_classes):\n            j = m0 == i\n            if n and sum(j) == 1:\n                self.matrix[detection_classes[m1[j]], gc] += 1  # correct\n            else:\n                self.matrix[self.nc, gc] += 1  # true background\n\n        if n:\n            for i, dc in enumerate(detection_classes):\n                if not any(m1 == i):\n                    self.matrix[dc, self.nc] += 1  # predicted background\n\n    def tp_fp(self):\n        \"\"\"Calculates true positives (tp) and false positives (fp) excluding the background class from the confusion\n        matrix.\n        \"\"\"\n        tp = self.matrix.diagonal()  # true positives\n        fp = self.matrix.sum(1) - tp  # false positives\n        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)\n        return tp[:-1], fp[:-1]  # remove background class\n\n    @TryExcept(\"WARNING ⚠️ ConfusionMatrix plot failure\")\n    def plot(self, normalize=True, save_dir=\"\", names=()):\n        \"\"\"Plots confusion matrix using seaborn, optional normalization; can save plot to specified directory.\"\"\"\n        import seaborn as sn\n\n        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1)  # normalize columns\n        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)\n\n        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n        nc, nn = self.nc, len(names)  # number of classes, names\n        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size\n        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels\n        ticklabels = ([*names, \"background\"]) if labels else \"auto\"\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")  # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n            sn.heatmap(\n                array,\n                ax=ax,\n                annot=nc < 30,\n                annot_kws={\"size\": 8},\n                cmap=\"Blues\",\n                fmt=\".2f\",\n                square=True,\n                vmin=0.0,\n                xticklabels=ticklabels,\n                yticklabels=ticklabels,\n            ).set_facecolor((1, 1, 1))\n        ax.set_xlabel(\"True\")\n        ax.set_ylabel(\"Predicted\")\n        ax.set_title(\"Confusion Matrix\")\n        fig.savefig(Path(save_dir) / \"confusion_matrix.png\", dpi=250)\n        plt.close(fig)\n\n    def print(self):\n        \"\"\"Prints the confusion matrix row-wise, with each class and its predictions separated by spaces.\"\"\"\n        for i in range(self.nc + 1):\n            print(\" \".join(map(str, self.matrix[i])))\n\n\ndef bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):\n    \"\"\"Calculates IoU, GIoU, DIoU, or CIoU between two boxes, supporting xywh/xyxy formats.\n\n    Input shapes are box1(1,4) to box2(n,4).\n    \"\"\"\n    # Get the coordinates of bounding boxes\n    if xywh:  # transform from xywh to xyxy\n        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)\n        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2\n        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_\n        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_\n    else:  # x1, y1, x2, y2 = box1\n        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)\n        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)\n        w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps)\n        
w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps)\n\n    # Intersection area\n    inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * (\n        b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)\n    ).clamp(0)\n\n    # Union Area\n    union = w1 * h1 + w2 * h2 - inter + eps\n\n    # IoU\n    iou = inter / union\n    if CIoU or DIoU or GIoU:\n        cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1)  # convex (smallest enclosing box) width\n        ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1)  # convex height\n        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n            c2 = cw**2 + ch**2 + eps  # convex diagonal squared\n            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2\n            if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n                v = (4 / math.pi**2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)\n                with torch.no_grad():\n                    alpha = v / (v - iou + (1 + eps))\n                return iou - (rho2 / c2 + v * alpha)  # CIoU\n            return iou - rho2 / c2  # DIoU\n        c_area = cw * ch + eps  # convex area\n        return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf\n    return iou  # IoU\n\n\ndef box_iou(box1, box2, eps=1e-7):\n    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n    \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n\n    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n\n    Args:\n        box1: (Tensor[N, 4])\n        box2: (Tensor[M, 4])\n\n    Returns:\n        iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2\n    \"\"\"\n    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n    # IoU = inter / (area1 + area2 - inter)\n    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)\n\n\ndef bbox_ioa(box1, box2, eps=1e-7):\n    \"\"\"Returns the intersection over box2 area given box1, box2.\n\n    Args:\n        box1: np.array of shape(4)\n        box2: np.array of shape(nx4)\n\n    Returns:\n        np.array of shape(n)\n\n    Notes:\n        - Boxes are x1y1x2y2\n    \"\"\"\n    # Get the coordinates of bounding boxes\n    b1_x1, b1_y1, b1_x2, b1_y2 = box1\n    b2_x1, b2_y1, b2_x2, b2_y2 = box2.T\n\n    # Intersection area\n    inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * (\n        np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)\n    ).clip(0)\n\n    # box2 area\n    box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps\n\n    # Intersection over box2 area\n    return inter_area / box2_area\n\n\ndef wh_iou(wh1, wh2, eps=1e-7):\n    \"\"\"Calculates the Intersection over Union (IoU) for two sets of widths and heights; `wh1` and `wh2` should be nx2\n    and mx2 tensors.\n    \"\"\"\n    wh1 = wh1[:, None]  # [N,1,2]\n    wh2 = wh2[None]  # [1,M,2]\n    inter = torch.min(wh1, wh2).prod(2)  # [N,M]\n    return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps)  # iou = inter / (area1 + area2 - inter)\n\n\n# Plots ----------------------------------------------------------------------------------------------------------------\n\n\n@threaded\ndef plot_pr_curve(px, py, ap, 
save_dir=Path(\"pr_curve.png\"), names=()):\n    \"\"\"Plots precision-recall curve, optionally per class, saving to `save_dir`; `px`, `py` are lists, `ap` is Nx2\n    array, `names` optional.\n    \"\"\"\n    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)\n    py = np.stack(py, axis=1)\n\n    if 0 < len(names) < 21:  # display per-class legend if < 21 classes\n        for i, y in enumerate(py.T):\n            ax.plot(px, y, linewidth=1, label=f\"{names[i]} {ap[i, 0]:.3f}\")  # plot(recall, precision)\n    else:\n        ax.plot(px, py, linewidth=1, color=\"grey\")  # plot(recall, precision)\n\n    ax.plot(px, py.mean(1), linewidth=3, color=\"blue\", label=f\"all classes {ap[:, 0].mean():.3f} mAP@0.5\")\n    ax.set_xlabel(\"Recall\")\n    ax.set_ylabel(\"Precision\")\n    ax.set_xlim(0, 1)\n    ax.set_ylim(0, 1)\n    ax.legend(bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n    ax.set_title(\"Precision-Recall Curve\")\n    fig.savefig(save_dir, dpi=250)\n    plt.close(fig)\n\n\n@threaded\ndef plot_mc_curve(px, py, save_dir=Path(\"mc_curve.png\"), names=(), xlabel=\"Confidence\", ylabel=\"Metric\"):\n    \"\"\"Plots a metric-confidence curve for model predictions, supporting per-class visualization and smoothing.\"\"\"\n    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)\n\n    if 0 < len(names) < 21:  # display per-class legend if < 21 classes\n        for i, y in enumerate(py):\n            ax.plot(px, y, linewidth=1, label=f\"{names[i]}\")  # plot(confidence, metric)\n    else:\n        ax.plot(px, py.T, linewidth=1, color=\"grey\")  # plot(confidence, metric)\n\n    y = smooth(py.mean(0), 0.05)\n    ax.plot(px, y, linewidth=3, color=\"blue\", label=f\"all classes {y.max():.2f} at {px[y.argmax()]:.3f}\")\n    ax.set_xlabel(xlabel)\n    ax.set_ylabel(ylabel)\n    ax.set_xlim(0, 1)\n    ax.set_ylim(0, 1)\n    ax.legend(bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n    ax.set_title(f\"{ylabel}-Confidence Curve\")\n    fig.savefig(save_dir, dpi=250)\n    plt.close(fig)\n"
  },
  {
    "path": "utils/plots.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Plotting utils.\"\"\"\n\nimport contextlib\nimport math\nimport os\nfrom copy import copy\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport torch\nfrom PIL import Image, ImageDraw\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom ultralytics.utils.plotting import Annotator\n\nfrom utils import TryExcept, threaded\nfrom utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh\nfrom utils.metrics import fitness\n\n# Settings\nRANK = int(os.getenv(\"RANK\", -1))\nmatplotlib.rc(\"font\", **{\"size\": 11})\nmatplotlib.use(\"Agg\")  # for writing to files only\n\n\nclass Colors:\n    \"\"\"Provides an RGB color palette derived from Ultralytics color scheme for visualization tasks.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initializes the Colors class with a palette derived from Ultralytics color scheme, converting hex codes to\n        RGB.\n\n        Colors derived from `hex = matplotlib.colors.TABLEAU_COLORS.values()`.\n        \"\"\"\n        hexs = (\n            \"FF3838\",\n            \"FF9D97\",\n            \"FF701F\",\n            \"FFB21D\",\n            \"CFD231\",\n            \"48F90A\",\n            \"92CC17\",\n            \"3DDB86\",\n            \"1A9334\",\n            \"00D4BB\",\n            \"2C99A8\",\n            \"00C2FF\",\n            \"344593\",\n            \"6473FF\",\n            \"0018EC\",\n            \"8438FF\",\n            \"520085\",\n            \"CB38FF\",\n            \"FF95C8\",\n            \"FF37C7\",\n        )\n        self.palette = [self.hex2rgb(f\"#{c}\") for c in hexs]\n        self.n = len(self.palette)\n\n    def __call__(self, i, bgr=False):\n        \"\"\"Returns color from palette by index `i`, in BGR format if `bgr=True`, else RGB; `i` is an integer index.\"\"\"\n        c = self.palette[int(i) % self.n]\n        return (c[2], c[1], c[0]) if bgr else c\n\n    @staticmethod\n    def hex2rgb(h):\n        \"\"\"Converts hexadecimal color `h` to an RGB tuple (PIL-compatible) with order (R, G, B).\"\"\"\n        return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))\n\n\ncolors = Colors()  # create instance for 'from utils.plots import colors'\n\n\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path(\"runs/detect/exp\")):\n    \"\"\"\n    Args:\n        x: Features to be visualized\n        module_type: Module type\n        stage: Module stage within model\n        n: Maximum number of feature maps to plot\n        save_dir: Directory to save results.\n    \"\"\"\n    if (\"Detect\" not in module_type) and (\n        \"Segment\" not in module_type\n    ):  # 'Detect' for Object Detect task,'Segment' for Segment task\n        _batch, channels, height, width = x.shape  # batch, channels, height, width\n        if height > 1 and width > 1:\n            f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\"  # filename\n\n            blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels\n            n = min(n, channels)  # number of plots\n            _fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)\n            ax = ax.ravel()\n            plt.subplots_adjust(wspace=0.05, hspace=0.05)\n            for i in range(n):\n                ax[i].imshow(blocks[i].squeeze())  # cmap='gray'\n                
ax[i].axis(\"off\")\n\n            LOGGER.info(f\"Saving {f}... ({n}/{channels})\")\n            plt.savefig(f, dpi=300, bbox_inches=\"tight\")\n            plt.close()\n            np.save(str(f.with_suffix(\".npy\")), x[0].cpu().numpy())  # npy save\n\n\ndef hist2d(x, y, n=100):\n    \"\"\"Generates a logarithmic 2D histogram, useful for visualizing label or evolution distributions.\n\n    Used in labels.png and evolve.png.\n    \"\"\"\n    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)\n    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))\n    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)\n    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)\n    return np.log(hist[xidx, yidx])\n\n\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n    \"\"\"Applies a low-pass Butterworth filter to `data` with specified `cutoff`, `fs`, and `order`.\"\"\"\n    from scipy.signal import butter, filtfilt\n\n    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy\n    def butter_lowpass(cutoff, fs, order):\n        \"\"\"Returns Butterworth low-pass filter coefficients for the specified cutoff frequency, sample rate, and filter\n        order.\n        \"\"\"\n        nyq = 0.5 * fs\n        normal_cutoff = cutoff / nyq\n        return butter(order, normal_cutoff, btype=\"low\", analog=False)\n\n    b, a = butter_lowpass(cutoff, fs, order=order)\n    return filtfilt(b, a, data)  # forward-backward filter\n\n\ndef output_to_target(output, max_det=300):\n    \"\"\"Converts YOLOv5 model output to [batch_id, class_id, x, y, w, h, conf] format for plotting, limiting detections\n    to `max_det`.\n    \"\"\"\n    targets = []\n    for i, o in enumerate(output):\n        box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n        j = torch.full((conf.shape[0], 1), i)\n        targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n    return torch.cat(targets, 0).numpy()\n\n\n@threaded\ndef plot_images(images, targets, paths=None, fname=\"images.jpg\", names=None):\n    \"\"\"Plots an image grid with labels from YOLOv5 predictions or targets, saving to `fname`.\"\"\"\n    if isinstance(images, torch.Tensor):\n        images = images.cpu().float().numpy()\n    if isinstance(targets, torch.Tensor):\n        targets = targets.cpu().numpy()\n\n    max_size = 1920  # max image size\n    max_subplots = 16  # max image subplots, i.e. 
4x4\n    bs, _, h, w = images.shape  # batch size, _, height, width\n    bs = min(bs, max_subplots)  # limit plot images\n    ns = np.ceil(bs**0.5)  # number of subplots (square)\n    if np.max(images[0]) <= 1:\n        images *= 255  # de-normalise (optional)\n\n    # Build Image\n    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init\n    for i, im in enumerate(images):\n        if i == max_subplots:  # if last batch has fewer images than we expect\n            break\n        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin\n        im = im.transpose(1, 2, 0)\n        mosaic[y : y + h, x : x + w, :] = im\n\n    # Resize (optional)\n    scale = max_size / ns / max(h, w)\n    if scale < 1:\n        h = math.ceil(scale * h)\n        w = math.ceil(scale * w)\n        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n    # Annotate\n    fs = int((h + w) * ns * 0.01)  # font size\n    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n    for i in range(bs):\n        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin\n        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders\n        if paths:\n            annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames\n        if len(targets) > 0:\n            ti = targets[targets[:, 0] == i]  # image targets\n            boxes = xywh2xyxy(ti[:, 2:6]).T\n            classes = ti[:, 1].astype(\"int\")\n            labels = ti.shape[1] == 6  # labels if no conf column\n            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)\n\n            if boxes.shape[1]:\n                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01\n                    boxes[[0, 2]] *= w  # scale to pixels\n                    boxes[[1, 3]] *= h\n                elif scale < 1:  # absolute coords need scale if image scales\n                    boxes *= scale\n            boxes[[0, 2]] += x\n            boxes[[1, 3]] += y\n            for j, box in enumerate(boxes.T.tolist()):\n                cls = classes[j]\n                color = colors(cls)\n                cls = names[cls] if names else cls\n                if labels or conf[j] > 0.25:  # 0.25 conf thresh\n                    label = f\"{cls}\" if labels else f\"{cls} {conf[j]:.1f}\"\n                    annotator.box_label(box, label, color=color)\n    annotator.im.save(fname)  # save\n\n\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=\"\"):\n    \"\"\"Plots learning rate schedule for given optimizer and scheduler, saving plot to `save_dir`.\"\"\"\n    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals\n    y = []\n    for _ in range(epochs):\n        scheduler.step()\n        y.append(optimizer.param_groups[0][\"lr\"])\n    plt.plot(y, \".-\", label=\"LR\")\n    plt.xlabel(\"epoch\")\n    plt.ylabel(\"LR\")\n    plt.grid()\n    plt.xlim(0, epochs)\n    plt.ylim(0)\n    plt.savefig(Path(save_dir) / \"LR.png\", dpi=200)\n    plt.close()\n\n\ndef plot_val_txt():\n    \"\"\"Plots 2D and 1D histograms of bounding box centers from 'val.txt' using matplotlib, saving as 'hist2d.png' and\n    'hist1d.png'.\n\n    Example: from utils.plots import *; plot_val()\n    \"\"\"\n    x = np.loadtxt(\"val.txt\", dtype=np.float32)\n    box = xyxy2xywh(x[:, :4])\n    cx, cy = box[:, 0], box[:, 1]\n\n    fig, ax = plt.subplots(1, 1, figsize=(6, 6), 
tight_layout=True)\n    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)\n    ax.set_aspect(\"equal\")\n    plt.savefig(\"hist2d.png\", dpi=300)\n\n    _fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)\n    ax[0].hist(cx, bins=600)\n    ax[1].hist(cy, bins=600)\n    plt.savefig(\"hist1d.png\", dpi=200)\n\n\ndef plot_targets_txt():\n    \"\"\"Plots histograms of object detection targets from 'targets.txt', saving the figure as 'targets.jpg'.\n\n    Example: from utils.plots import *; plot_targets_txt()\n    \"\"\"\n    x = np.loadtxt(\"targets.txt\", dtype=np.float32).T\n    s = [\"x targets\", \"y targets\", \"width targets\", \"height targets\"]\n    _fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)\n    ax = ax.ravel()\n    for i in range(4):\n        ax[i].hist(x[i], bins=100, label=f\"{x[i].mean():.3g} +/- {x[i].std():.3g}\")\n        ax[i].legend()\n        ax[i].set_title(s[i])\n    plt.savefig(\"targets.jpg\", dpi=200)\n\n\ndef plot_val_study(file=\"\", dir=\"\", x=None):\n    \"\"\"Plots validation study results from 'study*.txt' files in a directory or a specific file, comparing model\n    performance and speed.\n\n    Example: from utils.plots import *; plot_val_study()\n    \"\"\"\n    save_dir = Path(file).parent if file else Path(dir)\n    plot2 = False  # plot additional results\n    if plot2:\n        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n    _fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n    for f in sorted(save_dir.glob(\"study*.txt\")):\n        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n        x = np.arange(y.shape[1]) if x is None else np.array(x)\n        if plot2:\n            s = [\"P\", \"R\", \"mAP@.5\", \"mAP@.5:.95\", \"t_preprocess (ms/img)\", \"t_inference (ms/img)\", \"t_NMS (ms/img)\"]\n            for i in range(7):\n                ax[i].plot(x, y[i], \".-\", linewidth=2, markersize=8)\n                ax[i].set_title(s[i])\n\n        j = y[3].argmax() + 1\n        ax2.plot(\n            y[5, 1:j],\n            y[3, 1:j] * 1e2,\n            \".-\",\n            linewidth=2,\n            markersize=8,\n            label=f.stem.replace(\"study_coco_\", \"\").replace(\"yolo\", \"YOLO\"),\n        )\n\n    ax2.plot(\n        1e3 / np.array([209, 140, 97, 58, 35, 18]),\n        [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n        \"k.-\",\n        linewidth=2,\n        markersize=8,\n        alpha=0.25,\n        label=\"EfficientDet\",\n    )\n\n    ax2.grid(alpha=0.2)\n    ax2.set_yticks(np.arange(20, 60, 5))\n    ax2.set_xlim(0, 57)\n    ax2.set_ylim(25, 55)\n    ax2.set_xlabel(\"GPU Speed (ms/img)\")\n    ax2.set_ylabel(\"COCO AP val\")\n    ax2.legend(loc=\"lower right\")\n    f = save_dir / \"study.png\"\n    print(f\"Saving {f}...\")\n    plt.savefig(f, dpi=300)\n\n\n@TryExcept()  # known issue https://github.com/ultralytics/yolov5/issues/5395\ndef plot_labels(labels, names=(), save_dir=Path(\"\")):\n    \"\"\"Plots dataset labels, saving correlogram and label images, handles classes, and visualizes bounding boxes.\"\"\"\n    LOGGER.info(f\"Plotting labels to {save_dir / 'labels.jpg'}... 
\")\n    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes\n    nc = int(c.max() + 1)  # number of classes\n    x = pd.DataFrame(b.transpose(), columns=[\"x\", \"y\", \"width\", \"height\"])\n\n    # seaborn correlogram\n    sn.pairplot(x, corner=True, diag_kind=\"auto\", kind=\"hist\", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))\n    plt.savefig(save_dir / \"labels_correlogram.jpg\", dpi=200)\n    plt.close()\n\n    # matplotlib labels\n    matplotlib.use(\"svg\")  # faster\n    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()\n    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)\n    with contextlib.suppress(Exception):  # color histogram bars by class\n        [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # known issue #3195\n    ax[0].set_ylabel(\"instances\")\n    if 0 < len(names) < 30:\n        ax[0].set_xticks(range(len(names)))\n        ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)\n    else:\n        ax[0].set_xlabel(\"classes\")\n    sn.histplot(x, x=\"x\", y=\"y\", ax=ax[2], bins=50, pmax=0.9)\n    sn.histplot(x, x=\"width\", y=\"height\", ax=ax[3], bins=50, pmax=0.9)\n\n    # rectangles\n    labels[:, 1:3] = 0.5  # center\n    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000\n    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)\n    for cls, *box in labels[:1000]:\n        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot\n    ax[1].imshow(img)\n    ax[1].axis(\"off\")\n\n    for a in [0, 1, 2, 3]:\n        for s in [\"top\", \"right\", \"left\", \"bottom\"]:\n            ax[a].spines[s].set_visible(False)\n\n    plt.savefig(save_dir / \"labels.jpg\", dpi=200)\n    matplotlib.use(\"Agg\")\n    plt.close()\n\n\ndef imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path(\"images.jpg\")):\n    \"\"\"Displays a grid of images with optional labels and predictions, saving to a file.\"\"\"\n    from utils.augmentations import denormalize\n\n    names = names or [f\"class{i}\" for i in range(1000)]\n    blocks = torch.chunk(\n        denormalize(im.clone()).cpu().float(), len(im), dim=0\n    )  # select batch index 0, block by channels\n    n = min(len(blocks), nmax)  # number of plots\n    m = min(8, round(n**0.5))  # 8 x 8 default\n    _fig, ax = plt.subplots(math.ceil(n / m), m)  # 8 rows x n/8 cols\n    ax = ax.ravel() if m > 1 else [ax]\n    # plt.subplots_adjust(wspace=0.05, hspace=0.05)\n    for i in range(n):\n        ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0))\n        ax[i].axis(\"off\")\n        if labels is not None:\n            s = names[labels[i]] + (f\"—{names[pred[i]]}\" if pred is not None else \"\")\n            ax[i].set_title(s, fontsize=8, verticalalignment=\"top\")\n    plt.savefig(f, dpi=300, bbox_inches=\"tight\")\n    plt.close()\n    if verbose:\n        LOGGER.info(f\"Saving {f}\")\n        if labels is not None:\n            LOGGER.info(\"True:     \" + \" \".join(f\"{names[i]:3s}\" for i in labels[:nmax]))\n        if pred is not None:\n            LOGGER.info(\"Predicted:\" + \" \".join(f\"{names[i]:3s}\" for i in pred[:nmax]))\n    return f\n\n\ndef plot_evolve(evolve_csv=\"path/to/evolve.csv\"):\n    \"\"\"Plots hyperparameter evolution results from a given CSV, saving the plot and displaying best results.\n\n    Example: from utils.plots import *; plot_evolve()\n    \"\"\"\n    evolve_csv = Path(evolve_csv)\n    data = 
pd.read_csv(evolve_csv)\n    keys = [x.strip() for x in data.columns]\n    x = data.values\n    f = fitness(x)\n    j = np.argmax(f)  # max fitness index\n    plt.figure(figsize=(10, 12), tight_layout=True)\n    matplotlib.rc(\"font\", **{\"size\": 8})\n    print(f\"Best results from row {j} of {evolve_csv}:\")\n    for i, k in enumerate(keys[7:]):\n        v = x[:, 7 + i]\n        mu = v[j]  # best single result\n        plt.subplot(6, 5, i + 1)\n        plt.scatter(v, f, c=hist2d(v, f, 20), cmap=\"viridis\", alpha=0.8, edgecolors=\"none\")\n        plt.plot(mu, f.max(), \"k+\", markersize=15)\n        plt.title(f\"{k} = {mu:.3g}\", fontdict={\"size\": 9})  # limit to 40 characters\n        if i % 5 != 0:\n            plt.yticks([])\n        print(f\"{k:>15}: {mu:.3g}\")\n    f = evolve_csv.with_suffix(\".png\")  # filename\n    plt.savefig(f, dpi=200)\n    plt.close()\n    print(f\"Saved {f}\")\n\n\ndef plot_results(file=\"path/to/results.csv\", dir=\"\"):\n    \"\"\"Plots training results from a 'results.csv' file; accepts file path and directory as arguments.\n\n    Example: from utils.plots import *; plot_results('path/to/results.csv')\n    \"\"\"\n    save_dir = Path(file).parent if file else Path(dir)\n    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n    ax = ax.ravel()\n    files = list(save_dir.glob(\"results*.csv\"))\n    assert len(files), f\"No results.csv files found in {save_dir.resolve()}, nothing to plot.\"\n    for f in files:\n        try:\n            data = pd.read_csv(f)\n            s = [x.strip() for x in data.columns]\n            x = data.values[:, 0]\n            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):\n                y = data.values[:, j].astype(\"float\")\n                # y[y == 0] = np.nan  # don't show zero values\n                ax[i].plot(x, y, marker=\".\", label=f.stem, linewidth=2, markersize=8)  # actual results\n                ax[i].plot(x, gaussian_filter1d(y, sigma=3), \":\", label=\"smooth\", linewidth=2)  # smoothing line\n                ax[i].set_title(s[j], fontsize=12)\n                # if j in [8, 9, 10]:  # share train and val loss y axes\n                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n        except Exception as e:\n            LOGGER.info(f\"Warning: Plotting error for {f}: {e}\")\n    ax[1].legend()\n    fig.savefig(save_dir / \"results.png\", dpi=200)\n    plt.close()\n\n\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=\"\"):\n    \"\"\"Plots per-image iDetection logs, comparing metrics like storage and performance over time.\n\n    Example: from utils.plots import *; profile_idetection()\n    \"\"\"\n    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()\n    s = [\"Images\", \"Free Storage (GB)\", \"RAM Usage (GB)\", \"Battery\", \"dt_raw (ms)\", \"dt_smooth (ms)\", \"real-world FPS\"]\n    files = list(Path(save_dir).glob(\"frames*.txt\"))\n    for fi, f in enumerate(files):\n        try:\n            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows\n            n = results.shape[1]  # number of rows\n            x = np.arange(start, min(stop, n) if stop else n)\n            results = results[:, x]\n            t = results[0] - results[0].min()  # set t0=0s\n            results[0] = x\n            for i, a in enumerate(ax):\n                if i < len(results):\n                    label = labels[fi] if len(labels) else f.stem.replace(\"frames_\", \"\")\n                    a.plot(t, results[i], marker=\".\", 
label=label, linewidth=1, markersize=5)\n                    a.set_title(s[i])\n                    a.set_xlabel(\"time (s)\")\n                    # if fi == len(files) - 1:\n                    #     a.set_ylim(bottom=0)\n                    for side in [\"top\", \"right\"]:\n                        a.spines[side].set_visible(False)\n                else:\n                    a.remove()\n        except Exception as e:\n            print(f\"Warning: Plotting error for {f}; {e}\")\n    ax[1].legend()\n    plt.savefig(Path(save_dir) / \"idetection_profile.png\", dpi=200)\n\n\ndef save_one_box(xyxy, im, file=Path(\"im.jpg\"), gain=1.02, pad=10, square=False, BGR=False, save=True):\n    \"\"\"Crops and saves an image from bounding box `xyxy`, applied with `gain` and `pad`, optionally squares and adjusts\n    for BGR.\n    \"\"\"\n    xyxy = torch.tensor(xyxy).view(-1, 4)\n    b = xyxy2xywh(xyxy)  # boxes\n    if square:\n        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square\n    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad\n    xyxy = xywh2xyxy(b).long()\n    clip_boxes(xyxy, im.shape)\n    crop = im[int(xyxy[0, 1]) : int(xyxy[0, 3]), int(xyxy[0, 0]) : int(xyxy[0, 2]), :: (1 if BGR else -1)]\n    if save:\n        file.parent.mkdir(parents=True, exist_ok=True)  # make directory\n        f = str(increment_path(file).with_suffix(\".jpg\"))\n        # cv2.imwrite(f, crop)  # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue\n        Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0)  # save RGB\n    return crop\n"
  },
  {
    "path": "utils/segment/__init__.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
  },
  {
    "path": "utils/segment/augmentations.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Image augmentation functions.\"\"\"\n\nimport math\nimport random\n\nimport cv2\nimport numpy as np\n\nfrom ..augmentations import box_candidates\nfrom ..general import resample_segments, segment2box\n\n\ndef mixup(im, labels, segments, im2, labels2, segments2):\n    \"\"\"Applies MixUp augmentation blending two images, labels, and segments with a random ratio.\n\n    See https://arxiv.org/pdf/1710.09412.pdf\n    \"\"\"\n    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0\n    im = (im * r + im2 * (1 - r)).astype(np.uint8)\n    labels = np.concatenate((labels, labels2), 0)\n    segments = np.concatenate((segments, segments2), 0)\n    return im, labels, segments\n\n\ndef random_perspective(\n    im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0)\n):\n    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n    # targets = [cls, xyxy]\n    \"\"\"Applies random perspective, rotation, scale, shear, and translation augmentations to an image and targets.\"\"\"\n    height = im.shape[0] + border[0] * 2  # shape(h,w,c)\n    width = im.shape[1] + border[1] * 2\n\n    # Center\n    C = np.eye(3)\n    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)\n    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)\n\n    # Perspective\n    P = np.eye(3)\n    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)\n    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)\n\n    # Rotation and Scale\n    R = np.eye(3)\n    a = random.uniform(-degrees, degrees)\n    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations\n    s = random.uniform(1 - scale, 1 + scale)\n    # s = 2 ** random.uniform(-scale, scale)\n    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n    # Shear\n    S = np.eye(3)\n    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)\n    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)\n\n    # Translation\n    T = np.eye(3)\n    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)\n    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)\n\n    # Combined rotation matrix\n    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT\n    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed\n        if perspective:\n            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n        else:  # affine\n            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n    new_segments = []\n    if n := len(targets):\n        new = np.zeros((n, 4))\n        segments = resample_segments(segments)  # upsample\n        for i, segment in enumerate(segments):\n            xy = np.ones((len(segment), 3))\n            xy[:, :2] = segment\n            xy = xy @ M.T  # transform\n            xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine\n\n            # clip\n            new[i] = segment2box(xy, width, height)\n            new_segments.append(xy)\n\n        # filter candidates\n        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)\n        targets = targets[i]\n    
    targets[:, 1:5] = new[i]\n        new_segments = np.array(new_segments)[i]\n\n    return im, targets, new_segments\n"
  },
  {
    "path": "utils/segment/dataloaders.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Dataloaders.\"\"\"\n\nimport os\nimport random\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom ..augmentations import augment_hsv, copy_paste, letterbox\nfrom ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, SmartDistributedSampler, seed_worker\nfrom ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn\nfrom ..torch_utils import torch_distributed_zero_first\nfrom .augmentations import mixup, random_perspective\n\nRANK = int(os.getenv(\"RANK\", -1))\n\n\ndef create_dataloader(\n    path,\n    imgsz,\n    batch_size,\n    stride,\n    single_cls=False,\n    hyp=None,\n    augment=False,\n    cache=False,\n    pad=0.0,\n    rect=False,\n    rank=-1,\n    workers=8,\n    image_weights=False,\n    quad=False,\n    prefix=\"\",\n    shuffle=False,\n    mask_downsample_ratio=1,\n    overlap_mask=False,\n    seed=0,\n):\n    \"\"\"Creates a dataloader for training, validating, or testing YOLO models with various dataset options.\"\"\"\n    if rect and shuffle:\n        LOGGER.warning(\"WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False\")\n        shuffle = False\n    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP\n        dataset = LoadImagesAndLabelsAndMasks(\n            path,\n            imgsz,\n            batch_size,\n            augment=augment,  # augmentation\n            hyp=hyp,  # hyperparameters\n            rect=rect,  # rectangular batches\n            cache_images=cache,\n            single_cls=single_cls,\n            stride=int(stride),\n            pad=pad,\n            image_weights=image_weights,\n            prefix=prefix,\n            downsample_ratio=mask_downsample_ratio,\n            overlap=overlap_mask,\n            rank=rank,\n        )\n\n    batch_size = min(batch_size, len(dataset))\n    nd = torch.cuda.device_count()  # number of CUDA devices\n    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers\n    sampler = None if rank == -1 else SmartDistributedSampler(dataset, shuffle=shuffle)\n    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates\n    generator = torch.Generator()\n    generator.manual_seed(6148914691236517205 + seed + RANK)\n    return loader(\n        dataset,\n        batch_size=batch_size,\n        shuffle=shuffle and sampler is None,\n        num_workers=nw,\n        sampler=sampler,\n        drop_last=quad,\n        pin_memory=True,\n        collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n        worker_init_fn=seed_worker,\n        generator=generator,\n    ), dataset\n\n\nclass LoadImagesAndLabelsAndMasks(LoadImagesAndLabels):  # for training/testing\n    \"\"\"Loads images, labels, and segmentation masks for training and testing YOLO models with augmentation support.\"\"\"\n\n    def __init__(\n        self,\n        path,\n        img_size=640,\n        batch_size=16,\n        augment=False,\n        hyp=None,\n        rect=False,\n        image_weights=False,\n        cache_images=False,\n        single_cls=False,\n        stride=32,\n        pad=0,\n        min_items=0,\n        prefix=\"\",\n        downsample_ratio=1,\n        overlap=False,\n        rank=-1,\n        seed=0,\n    ):\n        \"\"\"Initializes the dataset with image, label, and mask 
loading capabilities for training/testing.\"\"\"\n        super().__init__(\n            path,\n            img_size,\n            batch_size,\n            augment,\n            hyp,\n            rect,\n            image_weights,\n            cache_images,\n            single_cls,\n            stride,\n            pad,\n            min_items,\n            prefix,\n            rank,\n            seed,\n        )\n        self.downsample_ratio = downsample_ratio\n        self.overlap = overlap\n\n    def __getitem__(self, index):\n        \"\"\"Returns a transformed item from the dataset at the specified index, handling indexing and image weighting.\"\"\"\n        index = self.indices[index]  # linear, shuffled, or image_weights\n\n        hyp = self.hyp\n        if mosaic := self.mosaic and random.random() < hyp[\"mosaic\"]:\n            # Load mosaic\n            img, labels, segments = self.load_mosaic(index)\n            shapes = None\n\n            # MixUp augmentation\n            if random.random() < hyp[\"mixup\"]:\n                img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1)))\n\n        else:\n            # Load image\n            img, (h0, w0), (h, w) = self.load_image(index)\n\n            # Letterbox\n            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape\n            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling\n\n            labels = self.labels[index].copy()\n            # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy\n            segments = self.segments[index].copy()\n            if len(segments):\n                for i_s in range(len(segments)):\n                    segments[i_s] = xyn2xy(\n                        segments[i_s],\n                        ratio[0] * w,\n                        ratio[1] * h,\n                        padw=pad[0],\n                        padh=pad[1],\n                    )\n            if labels.size:  # normalized xywh to pixel xyxy format\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n            if self.augment:\n                img, labels, segments = random_perspective(\n                    img,\n                    labels,\n                    segments=segments,\n                    degrees=hyp[\"degrees\"],\n                    translate=hyp[\"translate\"],\n                    scale=hyp[\"scale\"],\n                    shear=hyp[\"shear\"],\n                    perspective=hyp[\"perspective\"],\n                )\n\n        nl = len(labels)  # number of labels\n        masks = []\n        if nl:\n            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3)\n            if self.overlap:\n                masks, sorted_idx = polygons2masks_overlap(\n                    img.shape[:2], segments, downsample_ratio=self.downsample_ratio\n                )\n                masks = masks[None]  # (640, 640) -> (1, 640, 640)\n                labels = labels[sorted_idx]\n            else:\n                masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio)\n\n        masks = (\n            torch.from_numpy(masks)\n            if len(masks)\n            else torch.zeros(\n                1 if self.overlap else nl, img.shape[0] // self.downsample_ratio, 
img.shape[1] // self.downsample_ratio\n            )\n        )\n        # TODO: albumentations support\n        if self.augment:\n            # Albumentations\n            # there are some augmentation that won't change boxes and masks,\n            # so just be it for now.\n            img, labels = self.albumentations(img, labels)\n            nl = len(labels)  # update after albumentations\n\n            # HSV color-space\n            augment_hsv(img, hgain=hyp[\"hsv_h\"], sgain=hyp[\"hsv_s\"], vgain=hyp[\"hsv_v\"])\n\n            # Flip up-down\n            if random.random() < hyp[\"flipud\"]:\n                img = np.flipud(img)\n                if nl:\n                    labels[:, 2] = 1 - labels[:, 2]\n                    masks = torch.flip(masks, dims=[1])\n\n            # Flip left-right\n            if random.random() < hyp[\"fliplr\"]:\n                img = np.fliplr(img)\n                if nl:\n                    labels[:, 1] = 1 - labels[:, 1]\n                    masks = torch.flip(masks, dims=[2])\n\n            # Cutouts  # labels = cutout(img, labels, p=0.5)\n\n        labels_out = torch.zeros((nl, 6))\n        if nl:\n            labels_out[:, 1:] = torch.from_numpy(labels)\n\n        # Convert\n        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n        img = np.ascontiguousarray(img)\n\n        return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks)\n\n    def load_mosaic(self, index):\n        \"\"\"Loads 1 image + 3 random images into a 4-image YOLOv5 mosaic, adjusting labels and segments accordingly.\"\"\"\n        labels4, segments4 = [], []\n        s = self.img_size\n        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y\n\n        # 3 additional image indices\n        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices\n        for i, index in enumerate(indices):\n            # Load image\n            img, _, (h, w) = self.load_image(index)\n\n            # place img in img4\n            if i == 0:  # top left\n                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles\n                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)\n                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)\n            elif i == 1:  # top right\n                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n            elif i == 2:  # bottom left\n                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n            elif i == 3:  # bottom right\n                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]\n            padw = x1a - x1b\n            padh = y1a - y1b\n\n            labels, segments = self.labels[index].copy(), self.segments[index].copy()\n\n            if labels.size:\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format\n                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n            
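# collect this tile's labels and segments, now expressed in mosaic pixel coordinates\n            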
labels4.append(labels)\n            segments4.extend(segments)\n\n        # Concat/clip labels\n        labels4 = np.concatenate(labels4, 0)\n        for x in (labels4[:, 1:], *segments4):\n            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()\n        # img4, labels4 = replicate(img4, labels4)  # replicate\n\n        # Augment\n        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp[\"copy_paste\"])\n        img4, labels4, segments4 = random_perspective(\n            img4,\n            labels4,\n            segments4,\n            degrees=self.hyp[\"degrees\"],\n            translate=self.hyp[\"translate\"],\n            scale=self.hyp[\"scale\"],\n            shear=self.hyp[\"shear\"],\n            perspective=self.hyp[\"perspective\"],\n            border=self.mosaic_border,\n        )  # border to remove\n        return img4, labels4, segments4\n\n    @staticmethod\n    def collate_fn(batch):\n        \"\"\"Custom collation function for DataLoader, batches images, labels, paths, shapes, and segmentation masks.\"\"\"\n        img, label, path, shapes, masks = zip(*batch)  # transposed\n        batched_masks = torch.cat(masks, 0)\n        for i, l in enumerate(label):\n            l[:, 0] = i  # add target image index for build_targets()\n        return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks\n\n\ndef polygon2mask(img_size, polygons, color=1, downsample_ratio=1):\n    \"\"\"\n    Args:\n        img_size (tuple): The image size.\n        polygons (np.ndarray): [N, M], N is the number of polygons, M is the number of points(Be divided by 2).\n    \"\"\"\n    mask = np.zeros(img_size, dtype=np.uint8)\n    polygons = np.asarray(polygons)\n    polygons = polygons.astype(np.int32)\n    shape = polygons.shape\n    polygons = polygons.reshape(shape[0], -1, 2)\n    cv2.fillPoly(mask, polygons, color=color)\n    nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)\n    # NOTE: fillPoly firstly then resize is trying the keep the same way\n    # of loss calculation when mask-ratio=1.\n    mask = cv2.resize(mask, (nw, nh))\n    return mask\n\n\ndef polygons2masks(img_size, polygons, color, downsample_ratio=1):\n    \"\"\"\n    Args:\n        img_size (tuple): The image size.\n        polygons (list[np.ndarray]): each polygon is [N, M], N is the number of polygons, M is the number of points(Be\n            divided by 2).\n    \"\"\"\n    masks = []\n    for si in range(len(polygons)):\n        mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)\n        masks.append(mask)\n    return np.array(masks)\n\n\ndef polygons2masks_overlap(img_size, segments, downsample_ratio=1):\n    \"\"\"Return a (640, 640) overlap mask.\"\"\"\n    masks = np.zeros(\n        (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio),\n        dtype=np.int32 if len(segments) > 255 else np.uint8,\n    )\n    areas = []\n    ms = []\n    for si in range(len(segments)):\n        mask = polygon2mask(\n            img_size,\n            [segments[si].reshape(-1)],\n            downsample_ratio=downsample_ratio,\n            color=1,\n        )\n        ms.append(mask)\n        areas.append(mask.sum())\n    areas = np.asarray(areas)\n    index = np.argsort(-areas)\n    ms = np.array(ms)[index]\n    for i in range(len(segments)):\n        mask = ms[i] * (i + 1)\n        masks = masks + mask\n        masks = np.clip(masks, a_min=0, a_max=i + 1)\n    return masks, index\n"
  },
  {
    "path": "utils/segment/general.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef crop_mask(masks, boxes):\n    \"\"\"Crop predicted masks by zeroing out everything not in the predicted bbox.\n\n    Args:\n        - masks should be a size [n, h, w] tensor of masks\n        - boxes should be a size [n, 4] tensor of bbox coords in relative point form.\n    \"\"\"\n    _n, h, w = masks.shape\n    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(1,1,n)\n    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,w,1)\n    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(h,1,1)\n\n    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))\n\n\ndef process_mask_upsample(protos, masks_in, bboxes, shape):\n    \"\"\"Crop after upsample.\n\n    Args:\n        protos: [mask_dim, mask_h, mask_w]\n        masks_in: [n, mask_dim], n is number of masks after nms\n        bboxes: [n, 4], n is number of masks after nms\n        shape: input_image_size, (h, w).\n\n    Returns:\n        h, w, n\n    \"\"\"\n    c, mh, mw = protos.shape  # CHW\n    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n    masks = F.interpolate(masks[None], shape, mode=\"bilinear\", align_corners=False)[0]  # CHW\n    masks = crop_mask(masks, bboxes)  # CHW\n    return masks.gt_(0.5)\n\n\ndef process_mask(protos, masks_in, bboxes, shape, upsample=False):\n    \"\"\"Crop before upsample.\n\n    Args:\n        proto_out: [mask_dim, mask_h, mask_w]\n        out_masks: [n, mask_dim], n is number of masks after nms\n        bboxes: [n, 4], n is number of masks after nms\n        shape: input_image_size, (h, w).\n\n    Returns:\n        h, w, n\n    \"\"\"\n    c, mh, mw = protos.shape  # CHW\n    ih, iw = shape\n    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW\n\n    downsampled_bboxes = bboxes.clone()\n    downsampled_bboxes[:, 0] *= mw / iw\n    downsampled_bboxes[:, 2] *= mw / iw\n    downsampled_bboxes[:, 3] *= mh / ih\n    downsampled_bboxes[:, 1] *= mh / ih\n\n    masks = crop_mask(masks, downsampled_bboxes)  # CHW\n    if upsample:\n        masks = F.interpolate(masks[None], shape, mode=\"bilinear\", align_corners=False)[0]  # CHW\n    return masks.gt_(0.5)\n\n\ndef process_mask_native(protos, masks_in, bboxes, shape):\n    \"\"\"Crop after upsample.\n\n    Args:\n        protos: [mask_dim, mask_h, mask_w]\n        masks_in: [n, mask_dim], n is number of masks after nms\n        bboxes: [n, 4], n is number of masks after nms\n        shape: input_image_size, (h, w).\n\n    Returns:\n        h, w, n\n    \"\"\"\n    c, mh, mw = protos.shape  # CHW\n    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n    gain = min(mh / shape[0], mw / shape[1])  # gain  = old / new\n    pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2  # wh padding\n    top, left = int(pad[1]), int(pad[0])  # y, x\n    bottom, right = int(mh - pad[1]), int(mw - pad[0])\n    masks = masks[:, top:bottom, left:right]\n\n    masks = F.interpolate(masks[None], shape, mode=\"bilinear\", align_corners=False)[0]  # CHW\n    masks = crop_mask(masks, bboxes)  # CHW\n    return masks.gt_(0.5)\n\n\ndef scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n    \"\"\"Img1_shape: model input shape, [h, w] img0_shape: origin pic shape, [h, w, 3] masks: [h, w, num].\"\"\"\n    # Rescale 
coordinates (xyxy) from im1_shape to im0_shape\n    if ratio_pad is None:  # calculate from im0_shape\n        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain  = old / new\n        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding\n    else:\n        pad = ratio_pad[1]\n    top, left = int(pad[1]), int(pad[0])  # y, x\n    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n    if len(masks.shape) < 2:\n        raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n    masks = masks[top:bottom, left:right]\n    # masks = masks.permute(2, 0, 1).contiguous()\n    # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n    # masks = masks.permute(1, 2, 0).contiguous()\n    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n    if len(masks.shape) == 2:\n        masks = masks[:, :, None]\n    return masks\n\n\ndef mask_iou(mask1, mask2, eps=1e-7):\n    \"\"\"\n    Args:\n        mask1: [N, n] m1 means number of predicted objects\n        mask2: [M, n] m2 means number of gt objects.\n\n    Returns:\n        masks iou, [N, M]\n\n    Notes:\n        - n means image_w, x image_h.\n    \"\"\"\n    intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection\n    return intersection / (union + eps)\n\n\ndef masks_iou(mask1, mask2, eps=1e-7):\n    \"\"\"\n    Args:\n        mask1: [N, n] m1 means number of predicted objects\n        mask2: [N, n] m2 means number of gt objects.\n\n    Returns:\n        masks iou, (N, )\n\n    Notes:\n        - n means image_w, x image_h.\n    \"\"\"\n    intersection = (mask1 * mask2).sum(1).clamp(0)  # (N, )\n    union = (mask1.sum(1) + mask2.sum(1))[None] - intersection  # (area1 + area2) - intersection\n    return intersection / (union + eps)\n\n\ndef masks2segments(masks, strategy=\"largest\"):\n    \"\"\"Converts binary (n,160,160) masks to polygon segments with options for concatenation or selecting the largest\n    segment.\n    \"\"\"\n    segments = []\n    for x in masks.int().cpu().numpy().astype(\"uint8\"):\n        c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n        if c:\n            if strategy == \"concat\":  # concatenate all segments\n                c = np.concatenate([x.reshape(-1, 2) for x in c])\n            elif strategy == \"largest\":  # select largest segment\n                c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)\n        else:\n            c = np.zeros((0, 2))  # no segments found\n        segments.append(c.astype(\"float32\"))\n    return segments\n"
  },
  {
    "path": "utils/segment/loss.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..general import xywh2xyxy\nfrom ..loss import FocalLoss, smooth_BCE\nfrom ..metrics import bbox_iou\nfrom ..torch_utils import de_parallel\nfrom .general import crop_mask\n\n\nclass ComputeLoss:\n    \"\"\"Computes the YOLOv5 model's loss components including classification, objectness, box, and mask losses.\"\"\"\n\n    def __init__(self, model, autobalance=False, overlap=False):\n        \"\"\"Initialize compute loss function for YOLOv5 models with options for autobalancing and overlap handling.\"\"\"\n        self.sort_obj_iou = False\n        self.overlap = overlap\n        device = next(model.parameters()).device  # get model device\n        h = model.hyp  # hyperparameters\n\n        # Define criteria\n        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h[\"cls_pw\"]], device=device))\n        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h[\"obj_pw\"]], device=device))\n\n        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n        self.cp, self.cn = smooth_BCE(eps=h.get(\"label_smoothing\", 0.0))  # positive, negative BCE targets\n\n        # Focal loss\n        g = h[\"fl_gamma\"]  # focal loss gamma\n        if g > 0:\n            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n        m = de_parallel(model).model[-1]  # Detect() module\n        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7\n        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index\n        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance\n        self.na = m.na  # number of anchors\n        self.nc = m.nc  # number of classes\n        self.nl = m.nl  # number of layers\n        self.nm = m.nm  # number of masks\n        self.anchors = m.anchors\n        self.device = device\n\n    def __call__(self, preds, targets, masks):  # predictions, targets, model\n        \"\"\"Evaluates YOLOv5 model's loss for given predictions, targets, and masks; returns total loss components.\"\"\"\n        p, proto = preds\n        bs, nm, mask_h, mask_w = proto.shape  # batch size, number of masks, mask height, mask width\n        lcls = torch.zeros(1, device=self.device)\n        lbox = torch.zeros(1, device=self.device)\n        lobj = torch.zeros(1, device=self.device)\n        lseg = torch.zeros(1, device=self.device)\n        tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets)  # targets\n\n        # Losses\n        for i, pi in enumerate(p):  # layer index, layer predictions\n            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj\n\n            if n := b.shape[0]:\n                pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1)  # subset of predictions\n\n                # Box regression\n                pxy = pxy.sigmoid() * 2 - 0.5\n                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]\n                pbox = torch.cat((pxy, pwh), 1)  # predicted box\n                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)\n                lbox += (1.0 - iou).mean()  # iou loss\n\n                # Objectness\n                iou = iou.detach().clamp(0).type(tobj.dtype)\n                if 
self.sort_obj_iou:\n                    j = iou.argsort()\n                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]\n                if self.gr < 1:\n                    iou = (1.0 - self.gr) + self.gr * iou\n                tobj[b, a, gj, gi] = iou  # iou ratio\n\n                # Classification\n                if self.nc > 1:  # cls loss (only if multiple classes)\n                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets\n                    t[range(n), tcls[i]] = self.cp\n                    lcls += self.BCEcls(pcls, t)  # BCE\n\n                # Mask regression\n                if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample\n                    masks = F.interpolate(masks[None], (mask_h, mask_w), mode=\"nearest\")[0]\n                marea = xywhn[i][:, 2:].prod(1)  # mask width, height normalized\n                mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device))\n                for bi in b.unique():\n                    j = b == bi  # matching index\n                    if self.overlap:\n                        mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0)\n                    else:\n                        mask_gti = masks[tidxs[i]][j]\n                    lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j])\n\n            obji = self.BCEobj(pi[..., 4], tobj)\n            lobj += obji * self.balance[i]  # obj loss\n            if self.autobalance:\n                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n        if self.autobalance:\n            self.balance = [x / self.balance[self.ssi] for x in self.balance]\n        lbox *= self.hyp[\"box\"]\n        lobj *= self.hyp[\"obj\"]\n        lcls *= self.hyp[\"cls\"]\n        lseg *= self.hyp[\"box\"] / bs\n\n        loss = lbox + lobj + lcls + lseg\n        return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach()\n\n    def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):\n        \"\"\"Calculates and normalizes single mask loss for YOLOv5 between predicted and ground truth masks.\"\"\"\n        pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:])  # (n,32) @ (32,80,80) -> (n,80,80)\n        loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction=\"none\")\n        return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()\n\n    def build_targets(self, p, targets):\n        \"\"\"Prepares YOLOv5 targets for loss computation; inputs targets (image, class, x, y, w, h), output target\n        classes/boxes.\n        \"\"\"\n        na, nt = self.na, targets.shape[0]  # number of anchors, targets\n        tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], []\n        gain = torch.ones(8, device=self.device)  # normalized to gridspace gain\n        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)\n        if self.overlap:\n            batch = p[0].shape[0]\n            ti = []\n            for i in range(batch):\n                num = (targets[:, 0] == i).sum()  # find number of targets of each image\n                ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1)  # (na, num)\n            ti = torch.cat(ti, 1)  # (na, nt)\n        else:\n            ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1)\n        targets = torch.cat((targets.repeat(na, 
1, 1), ai[..., None], ti[..., None]), 2)  # append anchor indices\n\n        g = 0.5  # bias\n        off = (\n            torch.tensor(\n                [\n                    [0, 0],\n                    [1, 0],\n                    [0, 1],\n                    [-1, 0],\n                    [0, -1],  # j,k,l,m\n                    # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm\n                ],\n                device=self.device,\n            ).float()\n            * g\n        )  # offsets\n\n        for i in range(self.nl):\n            anchors, shape = self.anchors[i], p[i].shape\n            gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain\n\n            # Match targets to anchors\n            t = targets * gain  # shape(3,n,7)\n            if nt:\n                # Matches\n                r = t[..., 4:6] / anchors[:, None]  # wh ratio\n                j = torch.max(r, 1 / r).max(2)[0] < self.hyp[\"anchor_t\"]  # compare\n                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n                t = t[j]  # filter\n\n                # Offsets\n                gxy = t[:, 2:4]  # grid xy\n                gxi = gain[[2, 3]] - gxy  # inverse\n                j, k = ((gxy % 1 < g) & (gxy > 1)).T\n                l, m = ((gxi % 1 < g) & (gxi > 1)).T\n                j = torch.stack((torch.ones_like(j), j, k, l, m))\n                t = t.repeat((5, 1, 1))[j]\n                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n            else:\n                t = targets[0]\n                offsets = 0\n\n            # Define\n            bc, gxy, gwh, at = t.chunk(4, 1)  # (image, class), grid xy, grid wh, anchors\n            (a, tidx), (b, c) = at.long().T, bc.long().T  # anchors, image, class\n            gij = (gxy - offsets).long()\n            gi, gj = gij.T  # grid indices\n\n            # Append\n            indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid\n            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box\n            anch.append(anchors[a])  # anchors\n            tcls.append(c)  # class\n            tidxs.append(tidx)\n            xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6])  # xywh normalized\n\n        return tcls, tbox, indices, anch, tidxs, xywhn\n"
  },
  {
    "path": "utils/segment/metrics.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Model validation metrics.\"\"\"\n\nimport numpy as np\n\nfrom ..metrics import ap_per_class\n\n\ndef fitness(x):\n    \"\"\"Evaluates model fitness by a weighted sum of 8 metrics, `x`: [N,8] array, weights: [0.1, 0.9] for mAP and F1.\"\"\"\n    w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]\n    return (x[:, :8] * w).sum(1)\n\n\ndef ap_per_class_box_and_mask(\n    tp_m,\n    tp_b,\n    conf,\n    pred_cls,\n    target_cls,\n    plot=False,\n    save_dir=\".\",\n    names=(),\n):\n    \"\"\"\n    Args:\n        tp_b: tp of boxes.\n        tp_m: tp of masks.\n        other arguments see `func: ap_per_class`.\n    \"\"\"\n    results_boxes = ap_per_class(\n        tp_b, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix=\"Box\"\n    )[2:]\n    results_masks = ap_per_class(\n        tp_m, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix=\"Mask\"\n    )[2:]\n\n    return {\n        \"boxes\": {\n            \"p\": results_boxes[0],\n            \"r\": results_boxes[1],\n            \"ap\": results_boxes[3],\n            \"f1\": results_boxes[2],\n            \"ap_class\": results_boxes[4],\n        },\n        \"masks\": {\n            \"p\": results_masks[0],\n            \"r\": results_masks[1],\n            \"ap\": results_masks[3],\n            \"f1\": results_masks[2],\n            \"ap_class\": results_masks[4],\n        },\n    }\n\n\nclass Metric:\n    \"\"\"Computes performance metrics like precision, recall, F1 score, and average precision for model evaluation.\"\"\"\n\n    def __init__(self) -> None:\n        \"\"\"Initializes performance metric attributes for precision, recall, F1 score, average precision, and class\n        indices.\n        \"\"\"\n        self.p = []  # (nc, )\n        self.r = []  # (nc, )\n        self.f1 = []  # (nc, )\n        self.all_ap = []  # (nc, 10)\n        self.ap_class_index = []  # (nc, )\n\n    @property\n    def ap50(self):\n        \"\"\"AP@0.5 of all classes.\n\n        Returns:\n            (nc, ) or [].\n        \"\"\"\n        return self.all_ap[:, 0] if len(self.all_ap) else []\n\n    @property\n    def ap(self):\n        \"\"\"AP@0.5:0.95.\n\n        Returns:\n            (nc, ) or []\n        \"\"\"\n        return self.all_ap.mean(1) if len(self.all_ap) else []\n\n    @property\n    def mp(self):\n        \"\"\"Mean precision of all classes.\n\n        Returns:\n            float.\n        \"\"\"\n        return self.p.mean() if len(self.p) else 0.0\n\n    @property\n    def mr(self):\n        \"\"\"Mean recall of all classes.\n\n        Returns:\n            float.\n        \"\"\"\n        return self.r.mean() if len(self.r) else 0.0\n\n    @property\n    def map50(self):\n        \"\"\"Mean AP@0.5 of all classes.\n\n        Returns:\n            float.\n        \"\"\"\n        return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0\n\n    @property\n    def map(self):\n        \"\"\"Mean AP@0.5:0.95 of all classes.\n\n        Returns:\n            float.\n        \"\"\"\n        return self.all_ap.mean() if len(self.all_ap) else 0.0\n\n    def mean_results(self):\n        \"\"\"Mean of results, return mp, mr, map50, map.\"\"\"\n        return (self.mp, self.mr, self.map50, self.map)\n\n    def class_result(self, i):\n        \"\"\"Class-aware result, return p[i], r[i], ap50[i], ap[i].\"\"\"\n        return (self.p[i], self.r[i], self.ap50[i], self.ap[i])\n\n    def get_maps(self, nc):\n     
   \"\"\"Calculates and returns mean Average Precision (mAP) for each class given number of classes `nc`.\"\"\"\n        maps = np.zeros(nc) + self.map\n        for i, c in enumerate(self.ap_class_index):\n            maps[c] = self.ap[i]\n        return maps\n\n    def update(self, results):\n        \"\"\"\n        Args:\n            results: tuple(p, r, ap, f1, ap_class).\n        \"\"\"\n        p, r, all_ap, f1, ap_class_index = results\n        self.p = p\n        self.r = r\n        self.all_ap = all_ap\n        self.f1 = f1\n        self.ap_class_index = ap_class_index\n\n\nclass Metrics:\n    \"\"\"Metric for boxes and masks.\"\"\"\n\n    def __init__(self) -> None:\n        \"\"\"Initialize Metric objects for bounding boxes and masks to compute performance metrics.\"\"\"\n        self.metric_box = Metric()\n        self.metric_mask = Metric()\n\n    def update(self, results):\n        \"\"\"\n        Args:\n            results: Dict{'boxes': Dict{}, 'masks': Dict{}}.\n        \"\"\"\n        self.metric_box.update(list(results[\"boxes\"].values()))\n        self.metric_mask.update(list(results[\"masks\"].values()))\n\n    def mean_results(self):\n        \"\"\"Computes and returns the mean results for both box and mask metrics by summing their individual means.\"\"\"\n        return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n    def class_result(self, i):\n        \"\"\"Returns the sum of box and mask metric results for a specified class index `i`.\"\"\"\n        return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n    def get_maps(self, nc):\n        \"\"\"Calculates and returns the sum of mean average precisions (mAPs) for both box and mask metrics for `nc`\n        classes.\n        \"\"\"\n        return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n    @property\n    def ap_class_index(self):\n        \"\"\"Returns the class index for average precision, shared by both box and mask metrics.\"\"\"\n        return self.metric_box.ap_class_index\n\n\nKEYS = [\n    \"train/box_loss\",\n    \"train/seg_loss\",  # train loss\n    \"train/obj_loss\",\n    \"train/cls_loss\",\n    \"metrics/precision(B)\",\n    \"metrics/recall(B)\",\n    \"metrics/mAP_0.5(B)\",\n    \"metrics/mAP_0.5:0.95(B)\",  # metrics\n    \"metrics/precision(M)\",\n    \"metrics/recall(M)\",\n    \"metrics/mAP_0.5(M)\",\n    \"metrics/mAP_0.5:0.95(M)\",  # metrics\n    \"val/box_loss\",\n    \"val/seg_loss\",  # val loss\n    \"val/obj_loss\",\n    \"val/cls_loss\",\n    \"x/lr0\",\n    \"x/lr1\",\n    \"x/lr2\",\n]\n\nBEST_KEYS = [\n    \"best/epoch\",\n    \"best/precision(B)\",\n    \"best/recall(B)\",\n    \"best/mAP_0.5(B)\",\n    \"best/mAP_0.5:0.95(B)\",\n    \"best/precision(M)\",\n    \"best/recall(M)\",\n    \"best/mAP_0.5(M)\",\n    \"best/mAP_0.5:0.95(M)\",\n]\n"
  },
  {
    "path": "utils/segment/plots.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport contextlib\nimport math\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom .. import threaded\nfrom ..general import xywh2xyxy\nfrom ..plots import Annotator, colors\n\n\n@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname=\"images.jpg\", names=None):\n    \"\"\"Plots a grid of images, their labels, and masks with optional resizing and annotations, saving to fname.\"\"\"\n    if isinstance(images, torch.Tensor):\n        images = images.cpu().float().numpy()\n    if isinstance(targets, torch.Tensor):\n        targets = targets.cpu().numpy()\n    if isinstance(masks, torch.Tensor):\n        masks = masks.cpu().numpy().astype(int)\n\n    max_size = 1920  # max image size\n    max_subplots = 16  # max image subplots, i.e. 4x4\n    bs, _, h, w = images.shape  # batch size, _, height, width\n    bs = min(bs, max_subplots)  # limit plot images\n    ns = np.ceil(bs**0.5)  # number of subplots (square)\n    if np.max(images[0]) <= 1:\n        images *= 255  # de-normalise (optional)\n\n    # Build Image\n    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init\n    for i, im in enumerate(images):\n        if i == max_subplots:  # if last batch has fewer images than we expect\n            break\n        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin\n        im = im.transpose(1, 2, 0)\n        mosaic[y : y + h, x : x + w, :] = im\n\n    # Resize (optional)\n    scale = max_size / ns / max(h, w)\n    if scale < 1:\n        h = math.ceil(scale * h)\n        w = math.ceil(scale * w)\n        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n    # Annotate\n    fs = int((h + w) * ns * 0.01)  # font size\n    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n    for i in range(i + 1):\n        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin\n        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders\n        if paths:\n            annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames\n        if len(targets) > 0:\n            idx = targets[:, 0] == i\n            ti = targets[idx]  # image targets\n\n            boxes = xywh2xyxy(ti[:, 2:6]).T\n            classes = ti[:, 1].astype(\"int\")\n            labels = ti.shape[1] == 6  # labels if no conf column\n            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)\n\n            if boxes.shape[1]:\n                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01\n                    boxes[[0, 2]] *= w  # scale to pixels\n                    boxes[[1, 3]] *= h\n                elif scale < 1:  # absolute coords need scale if image scales\n                    boxes *= scale\n            boxes[[0, 2]] += x\n            boxes[[1, 3]] += y\n            for j, box in enumerate(boxes.T.tolist()):\n                cls = classes[j]\n                color = colors(cls)\n                cls = names[cls] if names else cls\n                if labels or conf[j] > 0.25:  # 0.25 conf thresh\n                    label = f\"{cls}\" if labels else f\"{cls} {conf[j]:.1f}\"\n                    annotator.box_label(box, label, color=color)\n\n            # Plot masks\n            if len(masks):\n                if masks.max() > 
1.0:  # mean that masks are overlap\n                    image_masks = masks[[i]]  # (1, 640, 640)\n                    nl = len(ti)\n                    index = np.arange(nl).reshape(nl, 1, 1) + 1\n                    image_masks = np.repeat(image_masks, nl, axis=0)\n                    image_masks = np.where(image_masks == index, 1.0, 0.0)\n                else:\n                    image_masks = masks[idx]\n\n                im = np.asarray(annotator.im).copy()\n                for j, box in enumerate(boxes.T.tolist()):\n                    if labels or conf[j] > 0.25:  # 0.25 conf thresh\n                        color = colors(classes[j])\n                        mh, mw = image_masks[j].shape\n                        if mh != h or mw != w:\n                            mask = image_masks[j].astype(np.uint8)\n                            mask = cv2.resize(mask, (w, h))\n                            mask = mask.astype(bool)\n                        else:\n                            mask = image_masks[j].astype(bool)\n                        with contextlib.suppress(Exception):\n                            im[y : y + h, x : x + w, :][mask] = (\n                                im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6\n                            )\n                annotator.fromarray(im)\n    annotator.im.save(fname)  # save\n\n\ndef plot_results_with_masks(file=\"path/to/results.csv\", dir=\"\", best=True):\n    \"\"\"Plots training results from CSV files, plotting best or last result highlights based on `best` parameter.\n\n    Example: from utils.plots import *; plot_results('path/to/results.csv')\n    \"\"\"\n    save_dir = Path(file).parent if file else Path(dir)\n    fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)\n    ax = ax.ravel()\n    files = list(save_dir.glob(\"results*.csv\"))\n    assert len(files), f\"No results.csv files found in {save_dir.resolve()}, nothing to plot.\"\n    for f in files:\n        try:\n            data = pd.read_csv(f)\n            index = np.argmax(\n                0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + 0.1 * data.values[:, 11]\n            )\n            s = [x.strip() for x in data.columns]\n            x = data.values[:, 0]\n            for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]):\n                y = data.values[:, j]\n                # y[y == 0] = np.nan  # don't show zero values\n                ax[i].plot(x, y, marker=\".\", label=f.stem, linewidth=2, markersize=2)\n                if best:\n                    # best\n                    ax[i].scatter(index, y[index], color=\"r\", label=f\"best:{index}\", marker=\"*\", linewidth=3)\n                    ax[i].set_title(s[j] + f\"\\n{round(y[index], 5)}\")\n                else:\n                    # last\n                    ax[i].scatter(x[-1], y[-1], color=\"r\", label=\"last\", marker=\"*\", linewidth=3)\n                    ax[i].set_title(s[j] + f\"\\n{round(y[-1], 5)}\")\n                # if j in [8, 9, 10]:  # share train and val loss y axes\n                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n        except Exception as e:\n            print(f\"Warning: Plotting error for {f}: {e}\")\n    ax[1].legend()\n    fig.savefig(save_dir / \"results.png\", dpi=200)\n    plt.close()\n"
  },
  {
    "path": "utils/torch_utils.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"PyTorch utils.\"\"\"\n\nimport math\nimport os\nimport platform\nimport subprocess\nimport time\nimport warnings\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom utils.general import LOGGER, check_version, colorstr, file_date, git_describe\n\nLOCAL_RANK = int(os.getenv(\"LOCAL_RANK\", -1))  # https://pytorch.org/docs/stable/elastic/run.html\nRANK = int(os.getenv(\"RANK\", -1))\nWORLD_SIZE = int(os.getenv(\"WORLD_SIZE\", 1))\n\ntry:\n    import thop  # for FLOPs computation\nexcept ImportError:\n    thop = None\n\n# Suppress PyTorch warnings\nwarnings.filterwarnings(\"ignore\", message=\"User provided device_type of 'cuda', but CUDA is not available. Disabling\")\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\ndef smart_inference_mode(torch_1_9=check_version(torch.__version__, \"1.9.0\")):\n    \"\"\"Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions.\"\"\"\n\n    def decorate(fn):\n        \"\"\"Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() to the decorated function.\"\"\"\n        return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n    return decorate\n\n\ndef smartCrossEntropyLoss(label_smoothing=0.0):\n    \"\"\"Return CrossEntropyLoss with optional label smoothing for torch>=1.10.0; warns if smoothing on lower versions.\"\"\"\n    if check_version(torch.__version__, \"1.10.0\"):\n        return nn.CrossEntropyLoss(label_smoothing=label_smoothing)\n    if label_smoothing > 0:\n        LOGGER.warning(f\"WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0\")\n    return nn.CrossEntropyLoss()\n\n\ndef smart_DDP(model):\n    \"\"\"Initializes DistributedDataParallel (DDP) for model training, respecting torch version constraints.\"\"\"\n    assert not check_version(torch.__version__, \"1.12.0\", pinned=True), (\n        \"torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. \"\n        \"Please upgrade or downgrade torch to use DDP. 
See https://github.com/ultralytics/yolov5/issues/8395\"\n    )\n    if check_version(torch.__version__, \"1.11.0\"):\n        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)\n    else:\n        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)\n\n\ndef reshape_classifier_output(model, n=1000):\n    \"\"\"Reshapes last layer of model to match class count 'n', supporting Classify, Linear, Sequential types.\"\"\"\n    from models.common import Classify\n\n    name, m = list((model.model if hasattr(model, \"model\") else model).named_children())[-1]  # last module\n    if isinstance(m, Classify):  # YOLOv5 Classify() head\n        if m.linear.out_features != n:\n            m.linear = nn.Linear(m.linear.in_features, n)\n    elif isinstance(m, nn.Linear):  # ResNet, EfficientNet\n        if m.out_features != n:\n            setattr(model, name, nn.Linear(m.in_features, n))\n    elif isinstance(m, nn.Sequential):\n        types = [type(x) for x in m]\n        if nn.Linear in types:\n            i = len(types) - 1 - types[::-1].index(nn.Linear)  # last nn.Linear index\n            if m[i].out_features != n:\n                m[i] = nn.Linear(m[i].in_features, n)\n        elif nn.Conv2d in types:\n            i = len(types) - 1 - types[::-1].index(nn.Conv2d)  # last nn.Conv2d index\n            if m[i].out_channels != n:\n                m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None)\n\n\n@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n    \"\"\"Context manager ensuring ordered operations in distributed training by making all processes wait for the leading\n    process.\n    \"\"\"\n    if local_rank not in [-1, 0]:\n        dist.barrier(device_ids=[local_rank])\n    yield\n    if local_rank == 0:\n        dist.barrier(device_ids=[0])\n\n\ndef device_count():\n    \"\"\"Returns the number of available CUDA devices; works on Linux and Windows by invoking `nvidia-smi`.\"\"\"\n    assert platform.system() in (\"Linux\", \"Windows\"), \"device_count() only supported on Linux or Windows\"\n    try:\n        cmd = \"nvidia-smi -L | wc -l\" if platform.system() == \"Linux\" else 'nvidia-smi -L | find /c /v \"\"'  # Windows\n        return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1])\n    except Exception:\n        return 0\n\n\ndef select_device(device=\"\", batch_size=0, newline=True):\n    \"\"\"Selects computing device (CPU, CUDA GPU, MPS) for YOLOv5 model deployment, logging device info.\"\"\"\n    s = f\"YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} \"\n    device = str(device).strip().lower().replace(\"cuda:\", \"\").replace(\"none\", \"\")  # to string, 'cuda:0' to '0'\n    cpu = device == \"cpu\"\n    mps = device == \"mps\"  # Apple Metal Performance Shaders (MPS)\n    if cpu or mps:\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"  # force torch.cuda.is_available() = False\n    elif device:  # non-cpu device requested\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = device  # set environment variable - must be before assert is_available()\n        assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(\",\", \"\")), (\n            f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n        )\n\n    if not cpu and not mps and torch.cuda.is_available():  # prefer GPU if available\n   
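     # one log line per requested GPU; device name and memory are appended to s below\n   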
     devices = device.split(\",\") if device else \"0\"  # range(torch.cuda.device_count())  # i.e. 0,1,6,7\n        n = len(devices)  # device count\n        if n > 1 and batch_size > 0:  # check batch_size is divisible by device_count\n            assert batch_size % n == 0, f\"batch-size {batch_size} not multiple of GPU count {n}\"\n        space = \" \" * (len(s) + 1)\n        for i, d in enumerate(devices):\n            p = torch.cuda.get_device_properties(i)\n            s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\"  # bytes to MB\n        arg = \"cuda:0\"\n    elif mps and getattr(torch, \"has_mps\", False) and torch.backends.mps.is_available():  # prefer MPS if available\n        s += \"MPS\\n\"\n        arg = \"mps\"\n    else:  # revert to CPU\n        s += \"CPU\\n\"\n        arg = \"cpu\"\n\n    if not newline:\n        s = s.rstrip()\n    LOGGER.info(s)\n    return torch.device(arg)\n\n\ndef time_sync():\n    \"\"\"Synchronizes PyTorch for accurate timing, leveraging CUDA if available, and returns the current time.\"\"\"\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    return time.time()\n\n\ndef profile(input, ops, n=10, device=None):\n    \"\"\"YOLOv5 speed/memory/FLOPs profiler.\n\n    Examples:\n        >>> input = torch.randn(16, 3, 640, 640)\n        >>> m1 = lambda x: x * torch.sigmoid(x)\n        >>> m2 = nn.SiLU()\n        >>> profile(input, [m1, m2], n=100)  # profile over 100 iterations.\n    \"\"\"\n    results = []\n    if not isinstance(device, torch.device):\n        device = select_device(device)\n    print(\n        f\"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}\"\n        f\"{'input':>24s}{'output':>24s}\"\n    )\n\n    for x in input if isinstance(input, list) else [input]:\n        x = x.to(device)\n        x.requires_grad = True\n        for m in ops if isinstance(ops, list) else [ops]:\n            m = m.to(device) if hasattr(m, \"to\") else m  # device\n            m = m.half() if hasattr(m, \"half\") and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m\n            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward\n            try:\n                flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1e9 * 2  # GFLOPs\n            except Exception:\n                flops = 0\n\n            try:\n                for _ in range(n):\n                    t[0] = time_sync()\n                    y = m(x)\n                    t[1] = time_sync()\n                    try:\n                        _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()\n                        t[2] = time_sync()\n                    except Exception:  # no backward method\n                        # print(e)  # for debug\n                        t[2] = float(\"nan\")\n                    tf += (t[1] - t[0]) * 1000 / n  # ms per op forward\n                    tb += (t[2] - t[1]) * 1000 / n  # ms per op backward\n                mem = torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0  # (GB)\n                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else \"list\" for x in (x, y))  # shapes\n                p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters\n                print(f\"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{s_in!s:>24s}{s_out!s:>24s}\")\n                results.append([p, flops, mem, tf, tb, s_in, s_out])\n     
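       # each results entry: [params, GFLOPs, GPU memory (GB), forward ms, backward ms, input shape, output shape]\n     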
       except Exception as e:\n                print(e)\n                results.append(None)\n            torch.cuda.empty_cache()\n    return results\n\n\ndef is_parallel(model):\n    \"\"\"Checks if the model is using Data Parallelism (DP) or Distributed Data Parallelism (DDP).\"\"\"\n    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef de_parallel(model):\n    \"\"\"Returns a single-GPU model by removing Data Parallelism (DP) or Distributed Data Parallelism (DDP) if applied.\"\"\"\n    return model.module if is_parallel(model) else model\n\n\ndef initialize_weights(model):\n    \"\"\"Initializes weights of Conv2d, BatchNorm2d, and activations (Hardswish, LeakyReLU, ReLU, ReLU6, SiLU) in the\n    model.\n    \"\"\"\n    for m in model.modules():\n        t = type(m)\n        if t is nn.Conv2d:\n            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n        elif t is nn.BatchNorm2d:\n            m.eps = 1e-3\n            m.momentum = 0.03\n        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n            m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n    \"\"\"Finds and returns list of layer indices in `model.module_list` matching the specified `mclass`.\"\"\"\n    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef sparsity(model):\n    \"\"\"Calculate global sparsity of a model as the ratio of zero-valued parameters to total parameters.\"\"\"\n    a, b = 0, 0\n    for p in model.parameters():\n        a += p.numel()\n        b += (p == 0).sum()\n    return b / a\n\n\ndef prune(model, amount=0.3):\n    \"\"\"Prunes Conv2d layers in a model to a specified sparsity using L1 unstructured pruning.\"\"\"\n    import torch.nn.utils.prune as prune\n\n    for name, m in model.named_modules():\n        if isinstance(m, nn.Conv2d):\n            prune.l1_unstructured(m, name=\"weight\", amount=amount)  # prune\n            prune.remove(m, \"weight\")  # make permanent\n    LOGGER.info(f\"Model pruned to {sparsity(model):.3g} global sparsity\")\n\n\ndef fuse_conv_and_bn(conv, bn):\n    \"\"\"Fuses Conv2d and BatchNorm2d layers into a single Conv2d layer.\n\n    See https://tehnokv.com/posts/fusing-batchnorm-and-conv/.\n    \"\"\"\n    fusedconv = (\n        nn.Conv2d(\n            conv.in_channels,\n            conv.out_channels,\n            kernel_size=conv.kernel_size,\n            stride=conv.stride,\n            padding=conv.padding,\n            dilation=conv.dilation,\n            groups=conv.groups,\n            bias=True,\n        )\n        .requires_grad_(False)\n        .to(conv.weight.device)\n    )\n\n    # Prepare filters\n    w_conv = conv.weight.clone().view(conv.out_channels, -1)\n    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n    # Prepare spatial bias\n    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n    return fusedconv\n\n\ndef model_info(model, verbose=False, imgsz=640):\n    \"\"\"Prints model summary including layers, parameters, gradients, and FLOPs; imgsz may be int or list.\n\n    Example: img_size=640 or img_size=[640, 320]\n    \"\"\"\n    n_p = sum(x.numel() for x in 
model.parameters())  # number parameters\n    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients\n    if verbose:\n        print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n        for i, (name, p) in enumerate(model.named_parameters()):\n            name = name.replace(\"module_list.\", \"\")\n            print(\n                \"%5g %40s %9s %12g %20s %10.3g %10.3g\"\n                % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())\n            )\n\n    try:  # FLOPs\n        p = next(model.parameters())\n        stride = max(int(model.stride.max()), 32) if hasattr(model, \"stride\") else 32  # max stride\n        im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format\n        flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1e9 * 2  # stride GFLOPs\n        imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float\n        fs = f\", {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs\"  # 640x640 GFLOPs\n    except Exception:\n        fs = \"\"\n\n    name = Path(model.yaml_file).stem.replace(\"yolov5\", \"YOLOv5\") if hasattr(model, \"yaml_file\") else \"Model\"\n    LOGGER.info(f\"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")\n\n\ndef scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)\n    \"\"\"Scales an image tensor `img` of shape (bs,3,y,x) by `ratio`, optionally maintaining the original shape, padded to\n    multiples of `gs`.\n    \"\"\"\n    if ratio == 1.0:\n        return img\n    h, w = img.shape[2:]\n    s = (int(h * ratio), int(w * ratio))  # new size\n    img = F.interpolate(img, size=s, mode=\"bilinear\", align_corners=False)  # resize\n    if not same_shape:  # pad/crop img\n        h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n    return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean\n\n\ndef copy_attr(a, b, include=(), exclude=()):\n    \"\"\"Copies attributes from object b to a, optionally filtering with include and exclude lists.\"\"\"\n    for k, v in b.__dict__.items():\n        if (len(include) and k not in include) or k.startswith(\"_\") or k in exclude:\n            continue\n        else:\n            setattr(a, k, v)\n\n\ndef smart_optimizer(model, name=\"Adam\", lr=0.001, momentum=0.9, decay=1e-5):\n    \"\"\"Initializes YOLOv5 smart optimizer with 3 parameter groups for different decay configurations.\n\n    Groups are 0) weights with decay, 1) weights no decay, 2) biases no decay.\n    \"\"\"\n    g = [], [], []  # optimizer parameter groups\n    bn = tuple(v for k, v in nn.__dict__.items() if \"Norm\" in k)  # normalization layers, i.e. 
BatchNorm2d()\n    for v in model.modules():\n        for p_name, p in v.named_parameters(recurse=0):\n            if p_name == \"bias\":  # bias (no decay)\n                g[2].append(p)\n            elif p_name == \"weight\" and isinstance(v, bn):  # weight (no decay)\n                g[1].append(p)\n            else:\n                g[0].append(p)  # weight (with decay)\n\n    if name == \"Adam\":\n        optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999))  # adjust beta1 to momentum\n    elif name == \"AdamW\":\n        optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)\n    elif name == \"RMSProp\":\n        optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum)\n    elif name == \"SGD\":\n        optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)\n    else:\n        raise NotImplementedError(f\"Optimizer {name} not implemented.\")\n\n    optimizer.add_param_group({\"params\": g[0], \"weight_decay\": decay})  # add g0 with weight_decay\n    optimizer.add_param_group({\"params\": g[1], \"weight_decay\": 0.0})  # add g1 (BatchNorm2d weights)\n    LOGGER.info(\n        f\"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups \"\n        f\"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias\"\n    )\n    return optimizer\n\n\ndef smart_hub_load(repo=\"ultralytics/yolov5\", model=\"yolov5s\", **kwargs):\n    \"\"\"YOLOv5 torch.hub.load() wrapper with smart error handling, adjusting torch arguments for compatibility.\"\"\"\n    if check_version(torch.__version__, \"1.9.1\"):\n        kwargs[\"skip_validation\"] = True  # validation causes GitHub API rate limit errors\n    if check_version(torch.__version__, \"1.12.0\"):\n        kwargs[\"trust_repo\"] = True  # argument required starting in torch 0.12\n    try:\n        return torch.hub.load(repo, model, **kwargs)\n    except Exception:\n        return torch.hub.load(repo, model, force_reload=True, **kwargs)\n\n\ndef smart_resume(ckpt, optimizer, ema=None, weights=\"yolov5s.pt\", epochs=300, resume=True):\n    \"\"\"Resumes training from a checkpoint, updating optimizer, ema, and epochs, with optional resume verification.\"\"\"\n    best_fitness = 0.0\n    start_epoch = ckpt[\"epoch\"] + 1\n    if ckpt[\"optimizer\"] is not None:\n        optimizer.load_state_dict(ckpt[\"optimizer\"])  # optimizer\n        best_fitness = ckpt[\"best_fitness\"]\n    if ema and ckpt.get(\"ema\"):\n        ema.ema.load_state_dict(ckpt[\"ema\"].float().state_dict())  # EMA\n        ema.updates = ckpt[\"updates\"]\n    if resume:\n        assert start_epoch > 0, (\n            f\"{weights} training to {epochs} epochs is finished, nothing to resume.\\n\"\n            f\"Start a new training without --resume, i.e. 'python train.py --weights {weights}'\"\n        )\n        LOGGER.info(f\"Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs\")\n    if epochs < start_epoch:\n        LOGGER.info(f\"{weights} has been trained for {ckpt['epoch']} epochs. 
Fine-tuning for {epochs} more epochs.\")\n        epochs += ckpt[\"epoch\"]  # finetune additional epochs\n    return best_fitness, start_epoch, epochs\n\n\nclass EarlyStopping:\n    \"\"\"Implements early stopping to halt training when no improvement is observed for a specified number of epochs.\"\"\"\n\n    def __init__(self, patience=30):\n        \"\"\"Initializes simple early stopping mechanism for YOLOv5, with adjustable patience for non-improving epochs.\"\"\"\n        self.best_fitness = 0.0  # i.e. mAP\n        self.best_epoch = 0\n        self.patience = patience or float(\"inf\")  # epochs to wait after fitness stops improving to stop\n        self.possible_stop = False  # possible stop may occur next epoch\n\n    def __call__(self, epoch, fitness):\n        \"\"\"Evaluates if training should stop based on fitness improvement and patience, returning a boolean.\"\"\"\n        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training\n            self.best_epoch = epoch\n            self.best_fitness = fitness\n        delta = epoch - self.best_epoch  # epochs without improvement\n        self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch\n        stop = delta >= self.patience  # stop training if patience exceeded\n        if stop:\n            LOGGER.info(\n                f\"Stopping training early as no improvement observed in last {self.patience} epochs. \"\n                f\"Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\\n\"\n                f\"To update EarlyStopping(patience={self.patience}) pass a new patience value, \"\n                f\"i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.\"\n            )\n        return stop\n\n\nclass ModelEMA:\n    \"\"\"Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models Keeps a moving\n    average of everything in the model state_dict (parameters and buffers) For EMA details\n    see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.\n    \"\"\"\n\n    def __init__(self, model, decay=0.9999, tau=2000, updates=0):\n        \"\"\"Initializes EMA with model parameters, decay rate, tau for decay adjustment, and update count; sets model to\n        evaluation mode.\n        \"\"\"\n        self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA\n        self.updates = updates  # number of EMA updates\n        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)\n        for p in self.ema.parameters():\n            p.requires_grad_(False)\n\n    def update(self, model):\n        \"\"\"Updates the Exponential Moving Average (EMA) parameters based on the current model's parameters.\"\"\"\n        self.updates += 1\n        d = self.decay(self.updates)\n\n        msd = de_parallel(model).state_dict()  # model state_dict\n        for k, v in self.ema.state_dict().items():\n            if v.dtype.is_floating_point:  # true for FP16 and FP32\n                v *= d\n                v += (1 - d) * msd[k].detach()\n        # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32'\n\n    def update_attr(self, model, include=(), exclude=(\"process_group\", \"reducer\")):\n        \"\"\"Updates EMA attributes by copying specified attributes from model to EMA, excluding certain attributes by\n        default.\n        \"\"\"\n        
copy_attr(self.ema, model, include, exclude)\n"
  },
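The Conv+BN fusion in `utils/torch_utils.py` above folds the batch-norm scale and shift into the convolution's weight and bias, so the fused layer reproduces the eval-mode output of the original pair in a single op. Below is a minimal sketch (not part of the repository) that checks this numerically with `fuse_conv_and_bn`; the layer sizes, random statistics, and tolerance are arbitrary assumptions, and the script assumes it is run from the repository root so `utils.torch_utils` is importable.

```python
# Minimal sketch: verify that fuse_conv_and_bn(conv, bn) matches Conv2d -> BatchNorm2d
# in eval mode. All layer sizes and random values below are illustrative assumptions.
import torch
import torch.nn as nn

from utils.torch_utils import fuse_conv_and_bn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(8)
bn.eval()  # fusion uses running statistics, so BN must be in eval mode

# Give the BN layer non-trivial statistics and affine parameters before fusing
with torch.no_grad():
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 1.5)
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-0.5, 0.5)

fused = fuse_conv_and_bn(conv, bn)  # single Conv2d with folded BN parameters

x = torch.randn(2, 3, 32, 32)
with torch.no_grad():
    y_ref = bn(conv(x))  # original two-layer path
    y_fused = fused(x)   # fused single-convolution path
print(torch.allclose(y_ref, y_fused, atol=1e-5))  # expected: True
```

Note that the folding is only valid against eval-mode (running) statistics; a BatchNorm layer still normalizing with per-batch statistics would not match the fused convolution.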
  {
    "path": "utils/triton.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Utils to interact with the Triton Inference Server.\"\"\"\n\nfrom __future__ import annotations\n\nfrom urllib.parse import urlparse\n\nimport torch\n\n\nclass TritonRemoteModel:\n    \"\"\"A wrapper over a model served by the Triton Inference Server.\n\n    It can be configured to communicate over GRPC or HTTP. It accepts Torch Tensors as input and returns them as\n    outputs.\n    \"\"\"\n\n    def __init__(self, url: str):\n        \"\"\"Keyword Arguments: url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000.\"\"\"\n        parsed_url = urlparse(url)\n        if parsed_url.scheme == \"grpc\":\n            from tritonclient.grpc import InferenceServerClient, InferInput\n\n            self.client = InferenceServerClient(parsed_url.netloc)  # Triton GRPC client\n            model_repository = self.client.get_model_repository_index()\n            self.model_name = model_repository.models[0].name\n            self.metadata = self.client.get_model_metadata(self.model_name, as_json=True)\n\n            def create_input_placeholders() -> list[InferInput]:\n                return [\n                    InferInput(i[\"name\"], [int(s) for s in i[\"shape\"]], i[\"datatype\"]) for i in self.metadata[\"inputs\"]\n                ]\n\n        else:\n            from tritonclient.http import InferenceServerClient, InferInput\n\n            self.client = InferenceServerClient(parsed_url.netloc)  # Triton HTTP client\n            model_repository = self.client.get_model_repository_index()\n            self.model_name = model_repository[0][\"name\"]\n            self.metadata = self.client.get_model_metadata(self.model_name)\n\n            def create_input_placeholders() -> list[InferInput]:\n                return [\n                    InferInput(i[\"name\"], [int(s) for s in i[\"shape\"]], i[\"datatype\"]) for i in self.metadata[\"inputs\"]\n                ]\n\n        self._create_input_placeholders_fn = create_input_placeholders\n\n    @property\n    def runtime(self):\n        \"\"\"Returns the model runtime.\"\"\"\n        return self.metadata.get(\"backend\", self.metadata.get(\"platform\"))\n\n    def __call__(self, *args, **kwargs) -> torch.Tensor | tuple[torch.Tensor, ...]:\n        \"\"\"Invokes the model.\n\n        Parameters can be provided via args or kwargs. args, if provided, are assumed to match the order of inputs of\n        the model. 
kwargs are matched with the model input names.\n        \"\"\"\n        inputs = self._create_inputs(*args, **kwargs)\n        response = self.client.infer(model_name=self.model_name, inputs=inputs)\n        result = []\n        for output in self.metadata[\"outputs\"]:\n            tensor = torch.as_tensor(response.as_numpy(output[\"name\"]))\n            result.append(tensor)\n        return result[0] if len(result) == 1 else result\n\n    def _create_inputs(self, *args, **kwargs):\n        \"\"\"Creates input tensors from args or kwargs, not both; raises error if none or both are provided.\"\"\"\n        args_len, kwargs_len = len(args), len(kwargs)\n        if not args_len and not kwargs_len:\n            raise RuntimeError(\"No inputs provided.\")\n        if args_len and kwargs_len:\n            raise RuntimeError(\"Cannot specify args and kwargs at the same time\")\n\n        placeholders = self._create_input_placeholders_fn()\n        if args_len:\n            if args_len != len(placeholders):\n                raise RuntimeError(f\"Expected {len(placeholders)} inputs, got {args_len}.\")\n            for input, value in zip(placeholders, args):\n                input.set_data_from_numpy(value.cpu().numpy())\n        else:\n            for input in placeholders:\n                value = kwargs[input.name]\n                input.set_data_from_numpy(value.cpu().numpy())\n        return placeholders\n"
  },
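`TritonRemoteModel` resolves the first model in the server's repository index, builds `InferInput` placeholders from the model metadata, and converts responses back to Torch tensors. A minimal usage sketch (not part of the repository) follows; the server URL, input shape, and dtype are assumptions that must match whatever model is actually deployed, and `tritonclient` must be installed.

```python
# Minimal sketch: query a model served by Triton through the TritonRemoteModel wrapper.
# The URL, input shape and dtype are assumptions about the deployed model.
import torch

from utils.triton import TritonRemoteModel  # run from the YOLOv5 repository root

# Triton's default HTTP port is 8000; for gRPC use e.g. "grpc://localhost:8001"
model = TritonRemoteModel("http://localhost:8000")
print(model.runtime)  # backend/platform reported by the model metadata

im = torch.zeros(1, 3, 640, 640)  # dummy BCHW float32 batch
out = model(im)  # positional args are bound to the model's inputs in declaration order
print(out.shape if isinstance(out, torch.Tensor) else [o.shape for o in out])
```

Positional arguments are bound to the model's declared inputs in order, keyword arguments are bound by input name, and supplying both (or neither) raises a `RuntimeError`, as implemented in `_create_inputs`.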
  {
    "path": "val.py",
    "content": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nValidate a trained YOLOv5 detection model on a detection dataset.\n\nUsage:\n    $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640\n\nUsage - formats:\n    $ python val.py --weights yolov5s.pt                 # PyTorch\n                              yolov5s.torchscript        # TorchScript\n                              yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn\n                              yolov5s_openvino_model     # OpenVINO\n                              yolov5s.engine             # TensorRT\n                              yolov5s.mlpackage          # CoreML (macOS-only)\n                              yolov5s_saved_model        # TensorFlow SavedModel\n                              yolov5s.pb                 # TensorFlow GraphDef\n                              yolov5s.tflite             # TensorFlow Lite\n                              yolov5s_edgetpu.tflite     # TensorFlow Edge TPU\n                              yolov5s_paddle_model       # PaddlePaddle\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom models.common import DetectMultiBackend\nfrom utils.callbacks import Callbacks\nfrom utils.dataloaders import create_dataloader\nfrom utils.general import (\n    LOGGER,\n    TQDM_BAR_FORMAT,\n    Profile,\n    check_dataset,\n    check_img_size,\n    check_requirements,\n    check_yaml,\n    coco80_to_coco91_class,\n    colorstr,\n    increment_path,\n    non_max_suppression,\n    print_args,\n    scale_boxes,\n    xywh2xyxy,\n    xyxy2xywh,\n)\nfrom utils.metrics import ConfusionMatrix, ap_per_class, box_iou\nfrom utils.plots import output_to_target, plot_images, plot_val_study\nfrom utils.torch_utils import select_device, smart_inference_mode\n\n\ndef save_one_txt(predn, save_conf, shape, file):\n    \"\"\"Saves one detection result to a txt file in normalized xywh format, optionally including confidence.\n\n    Args:\n        predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes in xyxy format,\n            tensor of shape (N, 6) where N is the number of detections.\n        save_conf (bool): If True, saves the confidence scores along with the bounding box coordinates.\n        shape (tuple): Shape of the original image as (height, width).\n        file (str | Path): File path where the result will be saved.\n\n    Returns:\n        None\n\n    Examples:\n        ```python\n        predn = torch.tensor([[10, 20, 30, 40, 0.9, 1]])  # example prediction\n        save_one_txt(predn, save_conf=True, shape=(640, 480), file=\"output.txt\")\n        ```\n\n    Notes:\n        The xyxy bounding box format represents the coordinates (xmin, ymin, xmax, ymax).\n        The xywh format represents the coordinates (center_x, center_y, width, height) and is normalized by the width and\n        height of the image.\n    \"\"\"\n    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh\n    for *xyxy, conf, cls in predn.tolist():\n        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh\n        line = (cls, *xywh, conf) if 
save_conf else (cls, *xywh)  # label format\n        with open(file, \"a\") as f:\n            f.write((\"%g \" * len(line)).rstrip() % line + \"\\n\")\n\n\ndef save_one_json(predn, jdict, path, class_map):\n    \"\"\"Saves a single JSON detection result, including image ID, category ID, bounding box, and confidence score.\n\n    Args:\n        predn (torch.Tensor): Predicted detections in xyxy format with shape (n, 6) where n is the number of detections.\n            The tensor should contain [x_min, y_min, x_max, y_max, confidence, class_id] for each detection.\n        jdict (list[dict]): List to collect JSON formatted detection results.\n        path (pathlib.Path): Path object of the image file, used to extract image_id.\n        class_map (dict[int, int]): Mapping from model class indices to dataset-specific category IDs.\n\n    Returns:\n        None: Appends detection results as dictionaries to `jdict` list in-place.\n\n    Examples:\n        ```python\n        predn = torch.tensor([[100, 50, 200, 150, 0.9, 0], [50, 30, 100, 80, 0.8, 1]])\n        jdict = []\n        path = Path(\"42.jpg\")\n        class_map = {0: 18, 1: 19}\n        save_one_json(predn, jdict, path, class_map)\n        ```\n        This will append to `jdict`:\n        ```\n        [\n            {'image_id': 42, 'category_id': 18, 'bbox': [125.0, 75.0, 100.0, 100.0], 'score': 0.9},\n            {'image_id': 42, 'category_id': 19, 'bbox': [75.0, 55.0, 50.0, 50.0], 'score': 0.8}\n        ]\n        ```\n\n    Notes:\n        The `bbox` values are formatted as [x, y, width, height], where x and y represent the top-left corner of the box.\n    \"\"\"\n    image_id = int(path.stem) if path.stem.isnumeric() else path.stem\n    box = xyxy2xywh(predn[:, :4])  # xywh\n    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner\n    for p, b in zip(predn.tolist(), box.tolist()):\n        jdict.append(\n            {\n                \"image_id\": image_id,\n                \"category_id\": class_map[int(p[5])],\n                \"bbox\": [round(x, 3) for x in b],\n                \"score\": round(p[4], 5),\n            }\n        )\n\n\ndef process_batch(detections, labels, iouv):\n    \"\"\"Return a correct prediction matrix given detections and labels at various IoU thresholds.\n\n    Args:\n        detections (np.ndarray): Array of shape (N, 6) where each row corresponds to a detection with format [x1, y1,\n            x2, y2, conf, class].\n        labels (np.ndarray): Array of shape (M, 5) where each row corresponds to a ground truth label with format\n            [class, x1, y1, x2, y2].\n        iouv (np.ndarray): Array of IoU thresholds to evaluate at.\n\n    Returns:\n        correct (np.ndarray): A binary array of shape (N, len(iouv)) indicating whether each detection is a true\n            positive for each IoU threshold. 
There are 10 IoU levels used in the evaluation.\n\n    Examples:\n        ```python\n        detections = np.array([[50, 50, 200, 200, 0.9, 1], [30, 30, 150, 150, 0.7, 0]])\n        labels = np.array([[1, 50, 50, 200, 200]])\n        iouv = np.linspace(0.5, 0.95, 10)\n        correct = process_batch(detections, labels, iouv)\n        ```\n\n    Notes:\n        - This function is used as part of the evaluation pipeline for object detection models.\n        - IoU (Intersection over Union) is a common evaluation metric for object detection performance.\n    \"\"\"\n    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)\n    iou = box_iou(labels[:, 1:], detections[:, :4])\n    correct_class = labels[:, 0:1] == detections[:, 5]\n    for i in range(len(iouv)):\n        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match\n        if x[0].shape[0]:\n            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]\n            if x[0].shape[0] > 1:\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n                # matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n            correct[matches[:, 1].astype(int), i] = True\n    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)\n\n\n@smart_inference_mode()\ndef run(\n    data,\n    weights=None,  # model.pt path(s)\n    batch_size=32,  # batch size\n    imgsz=640,  # inference size (pixels)\n    conf_thres=0.001,  # confidence threshold\n    iou_thres=0.6,  # NMS IoU threshold\n    max_det=300,  # maximum detections per image\n    task=\"val\",  # train, val, test, speed or study\n    device=\"\",  # cuda device, i.e. 0 or 0,1,2,3 or cpu\n    workers=8,  # max dataloader workers (per RANK in DDP mode)\n    single_cls=False,  # treat as single-class dataset\n    augment=False,  # augmented inference\n    verbose=False,  # verbose output\n    save_txt=False,  # save results to *.txt\n    save_hybrid=False,  # save label+prediction hybrid results to *.txt\n    save_conf=False,  # save confidences in --save-txt labels\n    save_json=False,  # save a COCO-JSON results file\n    project=ROOT / \"runs/val\",  # save to project/name\n    name=\"exp\",  # save to project/name\n    exist_ok=False,  # existing project/name ok, do not increment\n    half=True,  # use FP16 half-precision inference\n    dnn=False,  # use OpenCV DNN for ONNX inference\n    model=None,\n    dataloader=None,\n    save_dir=Path(\"\"),\n    plots=True,\n    callbacks=Callbacks(),\n    compute_loss=None,\n):\n    \"\"\"Evaluates a YOLOv5 model on a dataset and logs performance metrics.\n\n    Args:\n        data (str | dict): Path to a dataset YAML file or a dataset dictionary.\n        weights (str | list[str], optional): Path to the model weights file(s). Supports various formats including\n            PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow SavedModel, TensorFlow GraphDef,\n            TensorFlow Lite, TensorFlow Edge TPU, and PaddlePaddle.\n        batch_size (int, optional): Batch size for inference. Default is 32.\n        imgsz (int, optional): Input image size (pixels). Default is 640.\n        conf_thres (float, optional): Confidence threshold for object detection. 
Default is 0.001.\n        iou_thres (float, optional): IoU threshold for Non-Maximum Suppression (NMS). Default is 0.6.\n        max_det (int, optional): Maximum number of detections per image. Default is 300.\n        task (str, optional): Task type - 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.\n        device (str, optional): Device to use for computation, e.g., '0' or '0,1,2,3' for CUDA or 'cpu' for CPU. Default\n            is ''.\n        workers (int, optional): Number of dataloader workers. Default is 8.\n        single_cls (bool, optional): Treat dataset as a single class. Default is False.\n        augment (bool, optional): Enable augmented inference. Default is False.\n        verbose (bool, optional): Enable verbose output. Default is False.\n        save_txt (bool, optional): Save results to *.txt files. Default is False.\n        save_hybrid (bool, optional): Save label and prediction hybrid results to *.txt files. Default is False.\n        save_conf (bool, optional): Save confidences in --save-txt labels. Default is False.\n        save_json (bool, optional): Save a COCO-JSON results file. Default is False.\n        project (str | Path, optional): Directory to save results. Default is ROOT/'runs/val'.\n        name (str, optional): Name of the run. Default is 'exp'.\n        exist_ok (bool, optional): Overwrite existing project/name without incrementing. Default is False.\n        half (bool, optional): Use FP16 half-precision inference. Default is True.\n        dnn (bool, optional): Use OpenCV DNN for ONNX inference. Default is False.\n        model (torch.nn.Module, optional): Model object for training. Default is None.\n        dataloader (torch.utils.data.DataLoader, optional): Dataloader object. Default is None.\n        save_dir (Path, optional): Directory to save results. Default is Path('').\n        plots (bool, optional): Plot validation images and metrics. Default is True.\n        callbacks (utils.callbacks.Callbacks, optional): Callbacks for logging and monitoring. Default is Callbacks().\n        compute_loss (function, optional): Loss function for training. 
Default is None.\n\n    Returns:\n        dict: Contains performance metrics including precision, recall, mAP50, and mAP50-95.\n    \"\"\"\n    # Initialize/load model and set device\n    training = model is not None\n    if training:  # called by train.py\n        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model\n        half &= device.type != \"cpu\"  # half precision only supported on CUDA\n        model.half() if half else model.float()\n    else:  # called directly\n        device = select_device(device, batch_size=batch_size)\n\n        # Directories\n        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run\n        (save_dir / \"labels\" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir\n\n        # Load model\n        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine\n        imgsz = check_img_size(imgsz, s=stride)  # check image size\n        half = model.fp16  # FP16 supported on limited backends with CUDA\n        if engine:\n            batch_size = model.batch_size\n        else:\n            device = model.device\n            if not (pt or jit):\n                batch_size = 1  # export.py models default to batch-size 1\n                LOGGER.info(f\"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models\")\n\n        # Data\n        data = check_dataset(data)  # check\n\n    # Configure\n    model.eval()\n    cuda = device.type != \"cpu\"\n    is_coco = isinstance(data.get(\"val\"), str) and data[\"val\"].endswith(f\"coco{os.sep}val2017.txt\")  # COCO dataset\n    nc = 1 if single_cls else int(data[\"nc\"])  # number of classes\n    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95\n    niou = iouv.numel()\n\n    # Dataloader\n    if not training:\n        if pt and not single_cls:  # check --weights are trained on --data\n            ncm = model.model.nc\n            assert ncm == nc, (\n                f\"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} \"\n                f\"classes). 
Pass correct combination of --weights and --data that are trained together.\"\n            )\n        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup\n        pad, rect = (0.0, False) if task == \"speed\" else (0.5, pt)  # square inference for benchmarks\n        task = task if task in (\"train\", \"val\", \"test\") else \"val\"  # path to train/val/test images\n        dataloader = create_dataloader(\n            data[task],\n            imgsz,\n            batch_size,\n            stride,\n            single_cls,\n            pad=pad,\n            rect=rect,\n            workers=workers,\n            prefix=colorstr(f\"{task}: \"),\n        )[0]\n\n    seen = 0\n    confusion_matrix = ConfusionMatrix(nc=nc)\n    names = model.names if hasattr(model, \"names\") else model.module.names  # get class names\n    if isinstance(names, (list, tuple)):  # old format\n        names = dict(enumerate(names))\n    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))\n    s = (\"%22s\" + \"%11s\" * 6) % (\"Class\", \"Images\", \"Instances\", \"P\", \"R\", \"mAP50\", \"mAP50-95\")\n    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n    dt = Profile(device=device), Profile(device=device), Profile(device=device)  # profiling times\n    loss = torch.zeros(3, device=device)\n    jdict, stats, ap, ap_class = [], [], [], []\n    callbacks.run(\"on_val_start\")\n    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar\n    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):\n        callbacks.run(\"on_val_batch_start\")\n        with dt[0]:\n            if cuda:\n                im = im.to(device, non_blocking=True)\n                targets = targets.to(device)\n            im = im.half() if half else im.float()  # uint8 to fp16/32\n            im /= 255  # 0 - 255 to 0.0 - 1.0\n            nb, _, height, width = im.shape  # batch size, channels, height, width\n\n        # Inference\n        with dt[1]:\n            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)\n\n        # Loss\n        if compute_loss:\n            loss += compute_loss(train_out, targets)[1]  # box, obj, cls\n\n        # NMS\n        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels\n        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling\n        with dt[2]:\n            preds = non_max_suppression(\n                preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det\n            )\n\n        # Metrics\n        for si, pred in enumerate(preds):\n            labels = targets[targets[:, 0] == si, 1:]\n            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions\n            path, shape = Path(paths[si]), shapes[si][0]\n            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init\n            seen += 1\n\n            if npr == 0:\n                if nl:\n                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))\n                    if plots:\n                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])\n                continue\n\n            # Predictions\n            if single_cls:\n                pred[:, 5] = 0\n            predn = pred.clone()\n            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # 
native-space pred\n\n            # Evaluate\n            if nl:\n                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes\n                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels\n                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels\n                correct = process_batch(predn, labelsn, iouv)\n                if plots:\n                    confusion_matrix.process_batch(predn, labelsn)\n            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)\n\n            # Save/log\n            if save_txt:\n                (save_dir / \"labels\").mkdir(parents=True, exist_ok=True)\n                save_one_txt(predn, save_conf, shape, file=save_dir / \"labels\" / f\"{path.stem}.txt\")\n            if save_json:\n                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary\n            callbacks.run(\"on_val_image_end\", pred, predn, path, names, im[si])\n\n        # Plot images\n        if plots and batch_i < 3:\n            plot_images(im, targets, paths, save_dir / f\"val_batch{batch_i}_labels.jpg\", names)  # labels\n            plot_images(im, output_to_target(preds), paths, save_dir / f\"val_batch{batch_i}_pred.jpg\", names)  # pred\n\n        callbacks.run(\"on_val_batch_end\", batch_i, im, targets, paths, shapes, preds)\n\n    # Compute metrics\n    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy\n    if len(stats) and stats[0].any():\n        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)\n        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95\n        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()\n    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class\n\n    # Print results\n    pf = \"%22s\" + \"%11i\" * 2 + \"%11.3g\" * 4  # print format\n    LOGGER.info(pf % (\"all\", seen, nt.sum(), mp, mr, map50, map))\n    if nt.sum() == 0:\n        LOGGER.warning(f\"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels\")\n\n    # Print results per class\n    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):\n        for i, c in enumerate(ap_class):\n            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))\n\n    # Print speeds\n    t = tuple(x.t / seen * 1e3 for x in dt)  # speeds per image\n    if not training:\n        shape = (batch_size, 3, imgsz, imgsz)\n        LOGGER.info(f\"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}\" % t)\n\n    # Plots\n    if plots:\n        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))\n        callbacks.run(\"on_val_end\", nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)\n\n    # Save JSON\n    if save_json and len(jdict):\n        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else \"\"  # weights\n        anno_json = str(Path(\"../datasets/coco/annotations/instances_val2017.json\"))  # annotations\n        if not os.path.exists(anno_json):\n            anno_json = os.path.join(data[\"path\"], \"annotations\", \"instances_val2017.json\")\n        pred_json = str(save_dir / f\"{w}_predictions.json\")  # predictions\n        LOGGER.info(f\"\\nEvaluating pycocotools mAP... 
saving {pred_json}...\")\n        with open(pred_json, \"w\") as f:\n            json.dump(jdict, f)\n\n        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb\n            check_requirements(\"pycocotools>=2.0.6\")\n            from pycocotools.coco import COCO\n            from pycocotools.cocoeval import COCOeval\n\n            anno = COCO(anno_json)  # init annotations api\n            pred = anno.loadRes(pred_json)  # init predictions api\n            eval = COCOeval(anno, pred, \"bbox\")\n            if is_coco:\n                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate\n            eval.evaluate()\n            eval.accumulate()\n            eval.summarize()\n            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)\n        except Exception as e:\n            LOGGER.info(f\"pycocotools unable to run: {e}\")\n\n    # Return results\n    model.float()  # for training\n    if not training:\n        s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else \"\"\n        LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n    maps = np.zeros(nc) + map\n    for i, c in enumerate(ap_class):\n        maps[c] = ap[i]\n    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t\n\n\ndef parse_opt():\n    \"\"\"Parse command-line options for configuring YOLOv5 model inference.\n\n    Args:\n        data (str, optional): Path to the dataset YAML file. Default is 'data/coco128.yaml'.\n        weights (list[str], optional): List of paths to model weight files. Default is 'yolov5s.pt'.\n        batch_size (int, optional): Batch size for inference. Default is 32.\n        imgsz (int, optional): Inference image size in pixels. Default is 640.\n        conf_thres (float, optional): Confidence threshold for predictions. Default is 0.001.\n        iou_thres (float, optional): IoU threshold for Non-Max Suppression (NMS). Default is 0.6.\n        max_det (int, optional): Maximum number of detections per image. Default is 300.\n        task (str, optional): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.\n        device (str, optional): Device to run the model on. e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let the\n            system choose automatically.\n        workers (int, optional): Maximum number of dataloader workers per rank in DDP mode. Default is 8.\n        single_cls (bool, optional): If set, treats the dataset as a single-class dataset. Default is False.\n        augment (bool, optional): If set, performs augmented inference. Default is False.\n        verbose (bool, optional): If set, reports mAP by class. Default is False.\n        save_txt (bool, optional): If set, saves results to *.txt files. Default is False.\n        save_hybrid (bool, optional): If set, saves label+prediction hybrid results to *.txt files. Default is False.\n        save_conf (bool, optional): If set, saves confidences in --save-txt labels. Default is False.\n        save_json (bool, optional): If set, saves results to a COCO-JSON file. Default is False.\n        project (str, optional): Project directory to save results to. Default is 'runs/val'.\n        name (str, optional): Name of the directory to save results to. Default is 'exp'.\n        exist_ok (bool, optional): If set, existing directory will not be incremented. 
Default is False.\n        half (bool, optional): If set, uses FP16 half-precision inference. Default is False.\n        dnn (bool, optional): If set, uses OpenCV DNN for ONNX inference. Default is False.\n\n    Returns:\n        argparse.Namespace: Parsed command-line options.\n\n    Examples:\n        To validate a trained YOLOv5 model on a COCO dataset:\n        ```python\n        $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640\n        ```\n        Different model formats could be used instead of `yolov5s.pt`:\n        ```python\n        $ python val.py --weights yolov5s.pt yolov5s.torchscript yolov5s.onnx yolov5s_openvino_model yolov5s.engine\n        ```\n        Additional options include saving results in different formats, selecting devices, and more.\n\n    Notes:\n        - The '--data' parameter is checked to ensure it ends with 'coco.yaml' if '--save-json' is set.\n        - The '--save-txt' option is set to True if '--save-hybrid' is enabled.\n        - Args are printed using `print_args` to facilitate debugging.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--data\", type=str, default=ROOT / \"data/coco128.yaml\", help=\"dataset.yaml path\")\n    parser.add_argument(\"--weights\", nargs=\"+\", type=str, default=ROOT / \"yolov5s.pt\", help=\"model path(s)\")\n    parser.add_argument(\"--batch-size\", type=int, default=32, help=\"batch size\")\n    parser.add_argument(\"--imgsz\", \"--img\", \"--img-size\", type=int, default=640, help=\"inference size (pixels)\")\n    parser.add_argument(\"--conf-thres\", type=float, default=0.001, help=\"confidence threshold\")\n    parser.add_argument(\"--iou-thres\", type=float, default=0.6, help=\"NMS IoU threshold\")\n    parser.add_argument(\"--max-det\", type=int, default=300, help=\"maximum detections per image\")\n    parser.add_argument(\"--task\", default=\"val\", help=\"train, val, test, speed or study\")\n    parser.add_argument(\"--device\", default=\"\", help=\"cuda device, i.e. 
0 or 0,1,2,3 or cpu\")\n    parser.add_argument(\"--workers\", type=int, default=8, help=\"max dataloader workers (per RANK in DDP mode)\")\n    parser.add_argument(\"--single-cls\", action=\"store_true\", help=\"treat as single-class dataset\")\n    parser.add_argument(\"--augment\", action=\"store_true\", help=\"augmented inference\")\n    parser.add_argument(\"--verbose\", action=\"store_true\", help=\"report mAP by class\")\n    parser.add_argument(\"--save-txt\", action=\"store_true\", help=\"save results to *.txt\")\n    parser.add_argument(\"--save-hybrid\", action=\"store_true\", help=\"save label+prediction hybrid results to *.txt\")\n    parser.add_argument(\"--save-conf\", action=\"store_true\", help=\"save confidences in --save-txt labels\")\n    parser.add_argument(\"--save-json\", action=\"store_true\", help=\"save a COCO-JSON results file\")\n    parser.add_argument(\"--project\", default=ROOT / \"runs/val\", help=\"save to project/name\")\n    parser.add_argument(\"--name\", default=\"exp\", help=\"save to project/name\")\n    parser.add_argument(\"--exist-ok\", action=\"store_true\", help=\"existing project/name ok, do not increment\")\n    parser.add_argument(\"--half\", action=\"store_true\", help=\"use FP16 half-precision inference\")\n    parser.add_argument(\"--dnn\", action=\"store_true\", help=\"use OpenCV DNN for ONNX inference\")\n    opt = parser.parse_args()\n    opt.data = check_yaml(opt.data)  # check YAML\n    opt.save_json |= opt.data.endswith(\"coco.yaml\")\n    opt.save_txt |= opt.save_hybrid\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    \"\"\"Executes YOLOv5 tasks like training, validation, testing, speed, and study benchmarks based on provided options.\n\n    Args:\n        opt (argparse.Namespace): Parsed command-line options. 
This includes values for parameters like 'data',\n            'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres', 'max_det', 'task', 'device', 'workers',\n            'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid', 'save_conf', 'save_json', 'project', 'name',\n            'exist_ok', 'half', and 'dnn', essential for configuring the YOLOv5 tasks.\n\n    Returns:\n        None\n\n    Examples:\n        To validate a trained YOLOv5 model on the COCO dataset with a specific weights file, use:\n        ```python\n        $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640\n        ```\n    \"\"\"\n    check_requirements(ROOT / \"requirements.txt\", exclude=(\"tensorboard\", \"thop\"))\n\n    if opt.task in (\"train\", \"val\", \"test\"):  # run normally\n        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466\n            LOGGER.info(f\"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results\")\n        if opt.save_hybrid:\n            LOGGER.info(\"WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone\")\n        run(**vars(opt))\n\n    else:\n        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]\n        opt.half = torch.cuda.is_available() and opt.device != \"cpu\"  # FP16 for fastest results\n        if opt.task == \"speed\":  # speed benchmarks\n            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...\n            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False\n            for opt.weights in weights:\n                run(**vars(opt), plots=False)\n\n        elif opt.task == \"study\":  # speed vs mAP benchmarks\n            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...\n            for opt.weights in weights:\n                f = f\"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt\"  # filename to save to\n                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis\n                for opt.imgsz in x:  # img-size\n                    LOGGER.info(f\"\\nRunning {f} --imgsz {opt.imgsz}...\")\n                    r, _, t = run(**vars(opt), plots=False)\n                    y.append(r + t)  # results and times\n                np.savetxt(f, y, fmt=\"%10.4g\")  # save\n            subprocess.run([\"zip\", \"-r\", \"study.zip\", \"study_*.txt\"])\n            plot_val_study(x=x)  # plot\n        else:\n            raise NotImplementedError(f'--task {opt.task} not in (\"train\", \"val\", \"test\", \"speed\", \"study\")')\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  }
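Besides the CLI usage shown in the module docstring, `val.run()` can also be called directly from Python. A minimal sketch (not part of the repository) follows; the dataset YAML and weights path are illustrative and are resolved or downloaded by the existing YOLOv5 utilities when available.

```python
# Minimal sketch: programmatic validation via val.run(). Paths below are illustrative.
import val  # run from the YOLOv5 repository root

# results = (mp, mr, mAP50, mAP50-95, box-loss, obj-loss, cls-loss); maps = per-class mAP
results, maps, times = val.run(
    data="data/coco128.yaml",  # dataset YAML (auto-downloaded by check_dataset if missing)
    weights="yolov5s.pt",      # checkpoint to evaluate
    imgsz=640,
    conf_thres=0.001,
    iou_thres=0.6,
)
mp, mr, map50, map50_95 = results[:4]
print(f"P={mp:.3f} R={mr:.3f} mAP50={map50:.3f} mAP50-95={map50_95:.3f}")
```

The returned `times` tuple holds per-image pre-process, inference, and NMS milliseconds, matching the "Speed:" log line printed at the end of a standalone run.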
]