Repository: ultralytics/yolov5
Branch: master
Commit: 88af13e3e256
Files: 143
Total size: 1.3 MB
Directory structure:
gitextract_ha6213k8/
├── .dockerignore
├── .gitattributes
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug-report.yml
│ │ ├── config.yml
│ │ ├── feature-request.yml
│ │ └── question.yml
│ ├── dependabot.yml
│ └── workflows/
│ ├── ci-testing.yml
│ ├── cla.yml
│ ├── docker.yml
│ ├── format.yml
│ ├── links.yml
│ ├── merge-main-into-prs.yml
│ └── stale.yml
├── .gitignore
├── CITATION.cff
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── README.zh-CN.md
├── benchmarks.py
├── classify/
│ ├── predict.py
│ ├── train.py
│ ├── tutorial.ipynb
│ └── val.py
├── data/
│ ├── Argoverse.yaml
│ ├── GlobalWheat2020.yaml
│ ├── ImageNet.yaml
│ ├── ImageNet10.yaml
│ ├── ImageNet100.yaml
│ ├── ImageNet1000.yaml
│ ├── Objects365.yaml
│ ├── SKU-110K.yaml
│ ├── VOC.yaml
│ ├── VisDrone.yaml
│ ├── coco.yaml
│ ├── coco128-seg.yaml
│ ├── coco128.yaml
│ ├── hyps/
│ │ ├── hyp.Objects365.yaml
│ │ ├── hyp.VOC.yaml
│ │ ├── hyp.no-augmentation.yaml
│ │ ├── hyp.scratch-high.yaml
│ │ ├── hyp.scratch-low.yaml
│ │ └── hyp.scratch-med.yaml
│ ├── scripts/
│ │ ├── download_weights.sh
│ │ ├── get_coco.sh
│ │ ├── get_coco128.sh
│ │ ├── get_imagenet.sh
│ │ ├── get_imagenet10.sh
│ │ ├── get_imagenet100.sh
│ │ └── get_imagenet1000.sh
│ └── xView.yaml
├── detect.py
├── export.py
├── hubconf.py
├── models/
│ ├── __init__.py
│ ├── common.py
│ ├── experimental.py
│ ├── hub/
│ │ ├── anchors.yaml
│ │ ├── yolov3-spp.yaml
│ │ ├── yolov3-tiny.yaml
│ │ ├── yolov3.yaml
│ │ ├── yolov5-bifpn.yaml
│ │ ├── yolov5-fpn.yaml
│ │ ├── yolov5-p2.yaml
│ │ ├── yolov5-p34.yaml
│ │ ├── yolov5-p6.yaml
│ │ ├── yolov5-p7.yaml
│ │ ├── yolov5-panet.yaml
│ │ ├── yolov5l6.yaml
│ │ ├── yolov5m6.yaml
│ │ ├── yolov5n6.yaml
│ │ ├── yolov5s-LeakyReLU.yaml
│ │ ├── yolov5s-ghost.yaml
│ │ ├── yolov5s-transformer.yaml
│ │ ├── yolov5s6.yaml
│ │ └── yolov5x6.yaml
│ ├── segment/
│ │ ├── yolov5l-seg.yaml
│ │ ├── yolov5m-seg.yaml
│ │ ├── yolov5n-seg.yaml
│ │ ├── yolov5s-seg.yaml
│ │ └── yolov5x-seg.yaml
│ ├── tf.py
│ ├── yolo.py
│ ├── yolov5l.yaml
│ ├── yolov5m.yaml
│ ├── yolov5n.yaml
│ ├── yolov5s.yaml
│ └── yolov5x.yaml
├── pyproject.toml
├── requirements.txt
├── segment/
│ ├── predict.py
│ ├── train.py
│ ├── tutorial.ipynb
│ └── val.py
├── train.py
├── tutorial.ipynb
├── utils/
│ ├── __init__.py
│ ├── activations.py
│ ├── augmentations.py
│ ├── autoanchor.py
│ ├── autobatch.py
│ ├── aws/
│ │ ├── __init__.py
│ │ ├── mime.sh
│ │ ├── resume.py
│ │ └── userdata.sh
│ ├── callbacks.py
│ ├── dataloaders.py
│ ├── docker/
│ │ ├── Dockerfile
│ │ ├── Dockerfile-arm64
│ │ └── Dockerfile-cpu
│ ├── downloads.py
│ ├── flask_rest_api/
│ │ ├── README.md
│ │ ├── example_request.py
│ │ └── restapi.py
│ ├── general.py
│ ├── google_app_engine/
│ │ ├── Dockerfile
│ │ ├── additional_requirements.txt
│ │ └── app.yaml
│ ├── loggers/
│ │ ├── __init__.py
│ │ ├── clearml/
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── clearml_utils.py
│ │ │ └── hpo.py
│ │ ├── comet/
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── comet_utils.py
│ │ │ └── hpo.py
│ │ └── wandb/
│ │ ├── __init__.py
│ │ └── wandb_utils.py
│ ├── loss.py
│ ├── metrics.py
│ ├── plots.py
│ ├── segment/
│ │ ├── __init__.py
│ │ ├── augmentations.py
│ │ ├── dataloaders.py
│ │ ├── general.py
│ │ ├── loss.py
│ │ ├── metrics.py
│ │ └── plots.py
│ ├── torch_utils.py
│ └── triton.py
└── val.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
.git
.cache
.idea
runs
output
coco
storage.googleapis.com
data/samples/*
**/results*.csv
*.jpg
# Neural Network weights -----------------------------------------------------------------------------------------------
**/*.pt
**/*.pth
**/*.onnx
**/*.engine
**/*.mlmodel
**/*.torchscript
**/*.torchscript.pt
**/*.tflite
**/*.h5
**/*.pb
*_saved_model/
*_web_model/
*_openvino_model/
# Below Copied From .gitignore -----------------------------------------------------------------------------------------
# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
wandb/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv*
venv*/
ENV*/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
Icon?
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
# Bokeh Plots
*.html
# TensorFlow Frozen Graphs
*.pg
# videos
*.avi
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
cmake-build-release/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
================================================
FILE: .gitattributes
================================================
# this drops notebooks from GitHub language stats
*.ipynb linguist-vendored
================================================
FILE: .github/ISSUE_TEMPLATE/bug-report.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: 🐛 Bug Report
description: "Problems with Ultralytics YOLOv5"
labels: [bug, triage]
type: "bug"
body:
- type: markdown
attributes:
value: |
Thank you for submitting an Ultralytics YOLOv5 🐛 Bug Report!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please search the Ultralytics YOLOv5 [docs](https://docs.ultralytics.com/yolov5/) and [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists.
options:
- label: >
I have searched https://github.com/ultralytics/yolov5/issues and did not find a similar report.
required: true
- type: dropdown
attributes:
label: Project area
description: |
Help us route the report to the right maintainers.
multiple: true
options:
- "Training"
- "Validation/testing"
- "Export/deployment"
- "Models/checkpoints"
- "Documentation"
- "Other"
validations:
required: false
- type: textarea
attributes:
label: Bug
description: Please describe the issue in detail so we can reproduce it in Ultralytics YOLOv5. Include logs, screenshots, console output, and any context that helps explain the problem.
placeholder: |
💡 ProTip! Include as much information as possible (logs, tracebacks, screenshots, etc.) to receive the most helpful response.
validations:
required: true
- type: textarea
attributes:
label: Environment
description: Share the platform and version information relevant to your report.
placeholder: |
Please include:
- OS (e.g., Ubuntu 20.04, macOS 13.5, Windows 11)
- Language or framework version (Python, Swift, Flutter, etc.)
- Package or app version
- Hardware (e.g., CPU, GPU model, device model)
- Any other environment details
validations:
required: true
- type: textarea
attributes:
label: Minimal Reproducible Example
description: >
Provide the smallest possible snippet, command, or steps required to reproduce the issue. This helps us pinpoint problems faster.
placeholder: |
```python
# Code or commands to reproduce your issue here
```
validations:
required: false
- type: textarea
attributes:
label: Additional
description: Anything else you would like to share?
- type: checkboxes
attributes:
label: Are you willing to submit a PR?
description: >
(Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) to help improve Ultralytics YOLOv5, especially if you know how to fix the issue.
See the Ultralytics [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started.
options:
- label: Yes, I'd like to help by submitting a PR!
================================================
FILE: .github/ISSUE_TEMPLATE/config.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
blank_issues_enabled: true
contact_links:
- name: 📘 YOLOv5 Docs
url: https://docs.ultralytics.com/yolov5/
about: Complete Ultralytics YOLOv5 documentation
- name: 💬 Forum
url: https://community.ultralytics.com/
about: Ask the Ultralytics community for workflow help
- name: 🎧 Discord
url: https://ultralytics.com/discord
about: Chat with the Ultralytics team and other builders
- name: ⌨️ Reddit
url: https://reddit.com/r/ultralytics
about: Discuss Ultralytics projects on Reddit
================================================
FILE: .github/ISSUE_TEMPLATE/feature-request.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: 🚀 Feature Request
description: "Suggest an Ultralytics YOLOv5 improvement"
labels: [enhancement]
type: "feature"
body:
- type: markdown
attributes:
value: |
Thank you for submitting an Ultralytics YOLOv5 🚀 Feature Request!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please search the Ultralytics YOLOv5 [docs](https://docs.ultralytics.com/yolov5/) and [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists.
options:
- label: >
I have searched https://github.com/ultralytics/yolov5/issues and did not find a similar request.
required: true
- type: textarea
attributes:
label: Description
description: Briefly describe the feature you would like to see added to Ultralytics YOLOv5.
placeholder: |
What new capability or improvement are you proposing?
validations:
required: true
- type: textarea
attributes:
label: Use case
description: Explain how this feature would be used and who benefits from it. Screenshots or mockups are welcome.
placeholder: |
How would this feature improve your workflow?
- type: textarea
attributes:
label: Additional
description: Anything else you would like to share?
- type: checkboxes
attributes:
label: Are you willing to submit a PR?
description: >
(Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) to help improve Ultralytics YOLOv5.
See the Ultralytics [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started.
options:
- label: Yes, I'd like to help by submitting a PR!
================================================
FILE: .github/ISSUE_TEMPLATE/question.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: ❓ Question
description: "Ask an Ultralytics YOLOv5 question"
labels: [question]
body:
- type: markdown
attributes:
value: |
Thank you for asking an Ultralytics YOLOv5 ❓ Question!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please search the Ultralytics YOLOv5 [docs](https://docs.ultralytics.com/yolov5/), [issues](https://github.com/ultralytics/yolov5/issues), and [Ultralytics discussions](https://github.com/orgs/ultralytics/discussions) to see if a similar question already exists.
options:
- label: >
I checked the docs, issues, and discussions and could not find an answer.
required: true
- type: textarea
attributes:
label: Question
description: What is your question? Provide as much detail as possible so we can assist with Ultralytics YOLOv5. Include code snippets, screenshots, logs, or links to notebooks/demos.
placeholder: |
💡 ProTip! Include as much information as possible (logs, tracebacks, screenshots, etc.) to receive the most helpful response.
validations:
required: true
- type: textarea
attributes:
label: Additional
description: Anything else you would like to share?
================================================
FILE: .github/dependabot.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Dependabot for package version updates
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: pip
directory: "/"
schedule:
interval: weekly
time: "04:00"
open-pull-requests-limit: 10
labels:
- dependencies
- package-ecosystem: github-actions
directory: "/.github/workflows"
schedule:
interval: weekly
time: "04:00"
open-pull-requests-limit: 5
labels:
- dependencies
================================================
FILE: .github/workflows/ci-testing.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# YOLOv5 Continuous Integration (CI) GitHub Actions tests
name: YOLOv5 CI
permissions:
contents: read
on:
push:
branches: [master]
pull_request:
branches: [master]
schedule:
- cron: "0 0 * * *" # runs at 00:00 UTC every day
workflow_dispatch:
jobs:
Benchmarks:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.11"] # requires python<=3.11
model: [yolov5n]
steps:
- uses: actions/checkout@v6
- uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
- uses: astral-sh/setup-uv@v7
- name: Install requirements
run: |
uv pip install --system -r requirements.txt coremltools openvino-dev "tensorflow<=2.19.0" "keras>=3.5.0,<=3.12.0" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
yolo checks
uv pip list
- name: Benchmark DetectionModel
run: |
python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29
- name: Benchmark SegmentationModel
run: |
python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22
- name: Test predictions
run: |
python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224
python detect.py --weights ${{ matrix.model }}.onnx --img 320
python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320
python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224
Tests:
timeout-minutes: 60
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-14] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049
python-version: ["3.11"]
model: [yolov5n]
include:
- os: ubuntu-latest
python-version: "3.8" # torch 1.8.0 requires python >=3.6, <=3.8
model: yolov5n
torch: "1.8.0" # min torch version CI https://pypi.org/project/torchvision/
steps:
- uses: actions/checkout@v6
- uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
- uses: astral-sh/setup-uv@v7
- name: Install requirements
run: |
torch=""
if [ "${{ matrix.torch }}" == "1.8.0" ]; then
torch="torch==1.8.0 torchvision==0.9.0"
fi
uv pip install --system -r requirements.txt $torch --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
shell: bash # for Windows compatibility
- name: Check environment
run: |
yolo checks
pip list
- name: Test detection
shell: bash # for Windows compatibility
run: |
# export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories
m=${{ matrix.model }} # official weights
b=runs/train/exp/weights/best # best.pt checkpoint
python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train
for d in cpu; do # devices
for w in $m $b; do # weights
python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val
python detect.py --imgsz 64 --weights $w.pt --device $d # detect
done
done
python hubconf.py --model $m # hub
# python models/tf.py --weights $m.pt # build TF model
python models/yolo.py --cfg $m.yaml # build PyTorch model
python export.py --weights $m.pt --img 64 --include torchscript # export
python - <<EOF
import torch
im = torch.zeros([1, 3, 64, 64])
for path in '$m', '$b':
model = torch.hub.load('.', 'custom', path=path, source='local')
print(model('data/images/bus.jpg'))
model(im) # warmup, build grids for trace
torch.jit.trace(model, [im])
EOF
- name: Test segmentation
shell: bash # for Windows compatibility
run: |
m=${{ matrix.model }}-seg # official weights
b=runs/train-seg/exp/weights/best # best.pt checkpoint
python segment/train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train
python segment/train.py --imgsz 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device cpu # train
for d in cpu; do # devices
for w in $m $b; do # weights
python segment/val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val
python segment/predict.py --imgsz 64 --weights $w.pt --device $d # predict
python export.py --weights $w.pt --img 64 --include torchscript --device $d # export
done
done
- name: Test classification
shell: bash # for Windows compatibility
run: |
m=${{ matrix.model }}-cls.pt # official weights
b=runs/train-cls/exp/weights/best.pt # best.pt checkpoint
python classify/train.py --imgsz 32 --model $m --data mnist160 --epochs 1 # train
python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist160 # val
python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist160/test/7/60.png # predict
python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict
python export.py --weights $b --img 64 --include torchscript # export
python - <<EOF
import torch
for path in '$m', '$b':
model = torch.hub.load('.', 'custom', path=path, source='local')
EOF
Summary:
runs-on: ubuntu-latest
needs: [Benchmarks, Tests]
if: always()
steps:
- name: Check for failure and notify
if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') && github.run_attempt == '1'
uses: slackapi/slack-github-action@v3.0.1
with:
webhook-type: incoming-webhook
webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
payload: |
text: "<!channel> GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"
================================================
FILE: .github/workflows/cla.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA
# This workflow automatically requests that Pull Request (PR) authors sign the Ultralytics CLA before their PRs can be merged
name: CLA Assistant
on:
issue_comment:
types:
- created
pull_request_target:
types:
- reopened
- opened
- synchronize
permissions:
actions: write
contents: write
pull-requests: write
statuses: write
jobs:
CLA:
if: github.repository == 'ultralytics/yolov5'
runs-on: ubuntu-latest
steps:
- name: CLA Assistant
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'
uses: contributor-assistant/github-action@v2.6.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Must be repository secret PAT
PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}
with:
path-to-signatures: "signatures/version1/cla.json"
path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document
# Branch must not be protected
branch: "cla-signatures"
allowlist: dependabot[bot],github-actions[bot],pre-commit*,bot*
remote-organization-name: ultralytics
remote-repository-name: cla
custom-pr-sign-comment: "I have read the CLA Document and I sign the CLA"
custom-allsigned-prcomment: All Contributors have signed the CLA. ✅
================================================
FILE: .github/workflows/docker.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5
name: Publish Docker Images
permissions:
contents: read
on:
push:
branches: [master]
workflow_dispatch:
jobs:
docker:
if: github.repository == 'ultralytics/yolov5'
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v6
with:
fetch-depth: 0 # copy full .git directory to access full git history in Docker images
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v4
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push arm64 image
uses: docker/build-push-action@v6
continue-on-error: true
with:
context: .
platforms: linux/arm64
file: utils/docker/Dockerfile-arm64
push: true
tags: ultralytics/yolov5:latest-arm64
- name: Build and push CPU image
uses: docker/build-push-action@v6
continue-on-error: true
with:
context: .
file: utils/docker/Dockerfile-cpu
push: true
tags: ultralytics/yolov5:latest-cpu
- name: Build and push GPU image
uses: docker/build-push-action@v6
continue-on-error: true
with:
context: .
file: utils/docker/Dockerfile
push: true
tags: ultralytics/yolov5:latest
================================================
FILE: .github/workflows/format.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Ultralytics Actions https://github.com/ultralytics/actions
# This workflow formats code and documentation in PRs to Ultralytics standards
name: Ultralytics Actions
on:
issues:
types: [opened]
pull_request:
branches: [main, master]
types: [opened, closed, synchronize, review_requested]
permissions:
contents: write # Modify code in PRs
pull-requests: write # Add comments and labels to PRs
issues: write # Add comments and labels to issues
jobs:
actions:
runs-on: ubuntu-latest
steps:
- name: Run Ultralytics Actions
uses: ultralytics/actions@main
with:
token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }} # Auto-generated token
labels: true # Auto-label issues/PRs using AI
python: true # Format Python with Ruff and docformatter
prettier: true # Format YAML, JSON, Markdown, CSS
spelling: true # Check spelling with codespell
links: false # Check broken links with Lychee
summary: true # Generate AI-powered PR summaries
openai_api_key: ${{ secrets.OPENAI_API_KEY }} # Powers PR summaries, labels and comments
brave_api_key: ${{ secrets.BRAVE_API_KEY }} # Used for broken link resolution
first_issue_response: |
👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/).
If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it.
If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/).
## Requirements
[**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started:
```bash
git clone https://github.com/ultralytics/yolov5 # clone
cd yolov5
pip install -r requirements.txt # install
```
## Environments
YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda-zone)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
- **Notebooks** with free GPU: <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a> <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/models/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)
- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)
- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
## Status
<a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
================================================
FILE: .github/workflows/links.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Continuous Integration (CI) GitHub Actions broken link checker using https://github.com/lycheeverse/lychee
# Ignores the following status codes to reduce false positives:
# - 403(OpenVINO, 'forbidden')
# - 429(Instagram, 'too many requests')
# - 500(Zenodo, 'cached')
# - 502(Zenodo, 'bad gateway')
# - 999(LinkedIn, 'unknown status code')
name: Check Broken links
permissions:
contents: read
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *" # runs at 00:00 UTC every day
jobs:
Links:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- name: Install lychee
run: curl -sSfL "https://github.com/lycheeverse/lychee/releases/latest/download/lychee-x86_64-unknown-linux-gnu.tar.gz" | sudo tar xz -C /usr/local/bin
- name: Test Markdown and HTML links with retry
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 5
retry_delay_seconds: 60
retries: 2
run: |
lychee \
--scheme 'https' \
--timeout 60 \
--insecure \
--accept 100..=103,200..=299,401,403,429,500,502,999 \
--exclude-all-private \
--exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|x\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
--exclude-path './**/ci.yml' \
--github-token ${{ secrets.GITHUB_TOKEN }} \
--header "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \
'./**/*.md' \
'./**/*.html' | tee -a $GITHUB_STEP_SUMMARY
# Raise error if broken links found
if ! grep -q "0 Errors" $GITHUB_STEP_SUMMARY; then
exit 1
fi
- name: Test Markdown, HTML, YAML, Python and Notebook links with retry
if: github.event_name == 'workflow_dispatch'
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 5
retry_delay_seconds: 60
retries: 2
run: |
lychee \
--scheme 'https' \
--timeout 60 \
--insecure \
--accept 100..=103,200..=299,429,999 \
--exclude-all-private \
--exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|x\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
--exclude-path './**/ci.yml' \
--github-token ${{ secrets.GITHUB_TOKEN }} \
--header "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \
'./**/*.md' \
'./**/*.html' \
'./**/*.yml' \
'./**/*.yaml' \
'./**/*.py' \
'./**/*.ipynb' | tee -a $GITHUB_STEP_SUMMARY
# Raise error if broken links found
if ! grep -q "0 Errors" $GITHUB_STEP_SUMMARY; then
exit 1
fi
================================================
FILE: .github/workflows/merge-main-into-prs.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Automatically merges the repository 'main' branch into all open PRs to keep them up-to-date
# The action runs on updates to the main branch, so when one PR merges to main all other PRs update
name: Merge main into PRs
permissions:
contents: read
pull-requests: write
on:
workflow_dispatch:
# push:
# branches:
# - ${{ github.event.repository.default_branch }}
jobs:
Merge:
if: github.repository == 'ultralytics/yolov5'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v6
with:
fetch-depth: 0
- uses: actions/setup-python@v6
with:
python-version: "3.x"
cache: "pip"
- name: Install requirements
run: |
pip install pygithub
- name: Merge default branch into PRs
shell: python
run: |
from github import Github
import os
g = Github(os.getenv('GITHUB_TOKEN'))
repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))
# Fetch the default branch name
default_branch_name = repo.default_branch
default_branch = repo.get_branch(default_branch_name)
for pr in repo.get_pulls(state='open', sort='created'):
try:
# Get full names for repositories and branches
base_repo_name = repo.full_name
head_repo_name = pr.head.repo.full_name
base_branch_name = pr.base.ref
head_branch_name = pr.head.ref
# Check if PR is behind the default branch
comparison = repo.compare(default_branch.commit.sha, pr.head.sha)
if comparison.behind_by > 0:
print(f"⚠️ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is behind {default_branch_name} by {comparison.behind_by} commit(s).")
# Attempt to update the branch
try:
success = pr.update_branch()
assert success, "Branch update failed"
print(f"✅ Successfully merged '{default_branch_name}' into PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}).")
except Exception as update_error:
print(f"❌ Could not update PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}): {update_error}")
print(" This might be due to branch protection rules or insufficient permissions.")
else:
print(f"✅ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is up to date with {default_branch_name}.")
except Exception as e:
print(f"❌ Could not process PR #{pr.number}: {e}")
env:
GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }}
GITHUB_REPOSITORY: ${{ github.repository }}
================================================
FILE: .github/workflows/stale.yml
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: Close stale issues
on:
schedule:
- cron: "0 0 * * *" # Runs at 00:00 UTC every day
jobs:
stale:
permissions:
issues: write
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: |
👋 Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help.
For additional resources and information, please see the links below:
- **Docs**: https://docs.ultralytics.com
- **Platform**: https://platform.ultralytics.com
- **Community**: https://community.ultralytics.com
Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed!
Thank you for your contributions to YOLO 🚀 and Vision AI ⭐
stale-pr-message: |
👋 Hello there! We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap.
We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved.
For additional resources and information, please see the links below:
- **Docs**: https://docs.ultralytics.com
- **Platform**: https://platform.ultralytics.com
- **Community**: https://community.ultralytics.com
Thank you for your contributions to YOLO 🚀 and Vision AI ⭐
days-before-issue-stale: 30
days-before-issue-close: 10
days-before-pr-stale: 90
days-before-pr-close: 30
exempt-issue-labels: "documentation,tutorial,TODO"
operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting.
================================================
FILE: .gitignore
================================================
# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
*.jpg
*.jpeg
*.png
*.bmp
*.tif
*.tiff
*.heic
*.JPG
*.JPEG
*.PNG
*.BMP
*.TIF
*.TIFF
*.HEIC
*.mp4
*.mov
*.MOV
*.avi
*.data
*.json
*.cfg
!setup.cfg
!cfg/yolov3*.cfg
storage.googleapis.com
runs/*
data/*
data/images/*
!data/*.yaml
!data/hyps
!data/scripts
!data/images
!data/images/zidane.jpg
!data/images/bus.jpg
!data/*.sh
results*.csv
# Datasets -------------------------------------------------------------------------------------------------------------
coco/
coco128/
VOC/
# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
*.m~
*.mat
!targets*.mat
# Neural Network weights -----------------------------------------------------------------------------------------------
*.weights
*.pt
*.pb
*.onnx
*.engine
*.mlmodel
*.mlpackage
*.torchscript
*.tflite
*.h5
*_saved_model/
*_web_model/
*_openvino_model/
*_paddle_model/
darknet53.conv.74
yolov3-tiny.conv.15
# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
/wandb/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv*
venv*/
ENV*/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
Icon?
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
# Bokeh Plots
*.html
# TensorFlow Frozen Graphs
*.pg
# videos
*.avi
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
cmake-build-release/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
================================================
FILE: CITATION.cff
================================================
cff-version: 1.2.0
preferred-citation:
type: software
message: If you use YOLOv5, please cite it as below.
authors:
- family-names: Jocher
given-names: Glenn
orcid: "https://orcid.org/0000-0001-5950-6979"
title: "YOLOv5 by Ultralytics"
version: 7.0
doi: 10.5281/zenodo.3908559
date-released: 2020-05-29
license: AGPL-3.0
url: "https://github.com/ultralytics/yolov5"
================================================
FILE: CONTRIBUTING.md
================================================
<a href="https://www.ultralytics.com/"><img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320" alt="Ultralytics logo"></a>
# Contributing to YOLO 🚀
We value your input and are committed to making contributing to YOLO as easy and transparent as possible, whether you're:
- Reporting a bug
- Discussing the current state of the codebase
- Submitting a fix
- Proposing a new feature
- Interested in becoming a maintainer
Ultralytics YOLO thrives thanks to the collective efforts of our community. Every improvement you contribute helps push the boundaries of what's possible in AI! 😃
## 🛠️ Submitting a Pull Request (PR)
Submitting a PR is straightforward! Here’s an example showing how to update `requirements.txt` in four simple steps:
### 1. Select the File to Update
Click on `requirements.txt` in the GitHub repository.
<p align="center"><img width="800" alt="PR_step1" src="https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png"></p>
### 2. Click 'Edit this file'
Find the 'Edit this file' button in the top-right corner.
<p align="center"><img width="800" alt="PR_step2" src="https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png"></p>
### 3. Make Your Changes
For example, update the `matplotlib` version from `3.2.2` to `3.3`.
<p align="center"><img width="800" alt="PR_step3" src="https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png"></p>
### 4. Preview Changes and Submit Your PR
Click the **Preview changes** tab to review your updates. At the bottom, select 'Create a new branch for this commit', give your branch a descriptive name like `fix/matplotlib_version`, and click the green **Propose changes** button. Your PR is now submitted for review! 😃
<p align="center"><img width="800" alt="PR_step4" src="https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png"></p>
### PR Best Practices
To ensure your work is integrated smoothly, please:
- ✅ Make sure your PR is **up-to-date** with the `ultralytics/yolov5` `master` branch. If your branch is behind, update it using the 'Update branch' button or by running `git pull` and `git merge master` locally (see the example after this list).
<p align="center"><img width="751" alt="Screenshot 2022-08-29 at 22 47 15" src="https://user-images.githubusercontent.com/26833433/187295893-50ed9f44-b2c9-4138-a614-de69bd1753d7.png"></p>
- ✅ Ensure all YOLO Continuous Integration (CI) **checks are passing**.
<p align="center"><img width="751" alt="Screenshot 2022-08-29 at 22 47 03" src="https://user-images.githubusercontent.com/26833433/187296922-545c5498-f64a-4d8c-8300-5fa764360da6.png"></p>
- ✅ Limit your changes to the **minimum** required for your bug fix or feature.
_"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
## 🐛 Submitting a Bug Report
If you encounter an issue with YOLO, please submit a bug report!
To help us investigate, we need to be able to reproduce the problem. Follow these guidelines to provide what we need to get started:
When asking a question or reporting a bug, you'll get better help if you provide **code** that others can easily understand and use to **reproduce** the issue. This is known as a [minimum reproducible example](https://docs.ultralytics.com/help/minimum-reproducible-example/). Your code should be:
- ✅ **Minimal** – Use as little code as possible that still produces the issue
- ✅ **Complete** – Include all parts needed for someone else to reproduce the problem
- ✅ **Reproducible** – Test your code to ensure it actually reproduces the issue
Additionally, for [Ultralytics](https://www.ultralytics.com/) to assist you, your code should be:
- ✅ **Current** – Ensure your code is up-to-date with the latest [master branch](https://github.com/ultralytics/yolov5/tree/master). Use `git pull` or `git clone` to get the latest version and confirm your issue hasn't already been fixed.
- ✅ **Unmodified** – The problem must be reproducible without any custom modifications to the repository. [Ultralytics](https://www.ultralytics.com/) does not provide support for custom code ⚠️.
If your issue meets these criteria, please close your current issue and open a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose), including your [minimum reproducible example](https://docs.ultralytics.com/help/minimum-reproducible-example/) to help us diagnose your problem.
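For reference, a command-line minimum reproducible example can be as small as a fresh clone plus the single command that fails. This is a sketch; the weights, image path, and flags are illustrative placeholders for whatever triggers your issue:

```bash
# Minimal reproducible example: latest master, clean install, one failing command
git clone https://github.com/ultralytics/yolov5  # current master branch
cd yolov5
pip install -r requirements.txt                  # install dependencies
python detect.py --weights yolov5s.pt --img 640 --source data/images/bus.jpg  # command that reproduces the issue
```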
## 📄 License
By contributing, you agree that your contributions will be licensed under the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/).
---
For more details on contributing, check out the [Ultralytics open-source contributing guide](https://docs.ultralytics.com/help/contributing/), and explore our [Ultralytics blog](https://www.ultralytics.com/blog) for community highlights and best practices.
We welcome your contributions—thank you for helping make Ultralytics YOLO better! 🚀
[Ultralytics contributors](https://github.com/ultralytics/ultralytics/graphs/contributors)
================================================
FILE: LICENSE
================================================
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
================================================
FILE: README.md
================================================
<div align="center">
<p>
<a href="https://platform.ultralytics.com/?utm_source=github&utm_medium=referral&utm_campaign=platform_launch&utm_content=banner&utm_term=ultralytics_github" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="Ultralytics YOLO banner"></a>
</p>
[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)
<div>
<a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI Testing"></a>
<a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
<a href="https://discord.com/invite/ultralytics"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a> <a href="https://www.reddit.com/r/ultralytics/"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
<br>
<a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
<a href="https://www.kaggle.com/models/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
</div>
<br>
Ultralytics YOLOv5 🚀 is a cutting-edge, state-of-the-art (SOTA) computer vision model developed by [Ultralytics](https://www.ultralytics.com/). Based on the [PyTorch](https://pytorch.org/) framework, YOLOv5 is renowned for its ease of use, speed, and accuracy. It incorporates insights and best practices from extensive research and development, making it a popular choice for a wide range of vision AI tasks, including [object detection](https://docs.ultralytics.com/tasks/detect/), [image segmentation](https://docs.ultralytics.com/tasks/segment/), and [image classification](https://docs.ultralytics.com/tasks/classify/).
We hope the resources here help you get the most out of YOLOv5. Please browse the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for detailed information, raise an issue on [GitHub](https://github.com/ultralytics/yolov5/issues/new/choose) for support, and join our [Discord community](https://discord.com/invite/ultralytics) for questions and discussions!
To request an Enterprise License, please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="2%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>
<br>
## 🚀 YOLO11: The Next Evolution
We are excited to announce the launch of **Ultralytics YOLO11** 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Available now at the [Ultralytics YOLO GitHub repository](https://github.com/ultralytics/ultralytics), YOLO11 builds on our legacy of speed, precision, and ease of use. Whether you're tackling [object detection](https://docs.ultralytics.com/tasks/detect/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [pose estimation](https://docs.ultralytics.com/tasks/pose/), [image classification](https://docs.ultralytics.com/tasks/classify/), or [oriented object detection (OBB)](https://docs.ultralytics.com/tasks/obb/), YOLO11 delivers the performance and versatility needed to excel in diverse applications.
Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources:
[PyPI](https://badge.fury.io/py/ultralytics) [Downloads](https://clickpy.clickhouse.com/dashboard/ultralytics)
```bash
# Install the ultralytics package
pip install ultralytics
```
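After installing, a minimal usage sketch looks like the following (the model name `yolo11n.pt` and the example image URL are illustrative; see the [Ultralytics Docs](https://docs.ultralytics.com/) for the full API):
```python
from ultralytics import YOLO

# Load a pretrained YOLO11 nano model (weights download automatically on first use)
model = YOLO("yolo11n.pt")

# Run inference on an example image and display the annotated result
results = model("https://ultralytics.com/images/bus.jpg")
results[0].show()
```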
<div align="center">
<a href="https://platform.ultralytics.com/ultralytics/yolo26" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="Ultralytics YOLO Performance Comparison"></a>
</div>
## 📚 Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for full documentation on training, testing, and deployment. See below for quickstart examples.
<details open>
<summary>Install</summary>
Clone the repository and install dependencies in a [**Python>=3.8.0**](https://www.python.org/) environment. Ensure you have [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) installed.
```bash
# Clone the YOLOv5 repository
git clone https://github.com/ultralytics/yolov5
# Navigate to the cloned directory
cd yolov5
# Install required packages
pip install -r requirements.txt
```
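As an optional sanity check (not part of the official steps), you can confirm that PyTorch is installed and whether CUDA is available:
```bash
# Verify the PyTorch installation and report CUDA availability
python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
```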
</details>
<details open>
<summary>Inference with PyTorch Hub</summary>
Use YOLOv5 via [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) for inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) are automatically downloaded from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
```python
import torch
# Load a YOLOv5 model (options: yolov5n, yolov5s, yolov5m, yolov5l, yolov5x)
model = torch.hub.load("ultralytics/yolov5", "yolov5s") # Default: yolov5s
# Define the input image source (URL, local file, PIL image, OpenCV frame, numpy array, or list)
img = "https://ultralytics.com/images/zidane.jpg" # Example image
# Perform inference (handles batching, resizing, normalization automatically)
results = model(img)
# Process the results (options: .print(), .show(), .save(), .crop(), .pandas())
results.print() # Print results to console
results.show() # Display results in a window
results.save() # Save results to runs/detect/exp
```
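The returned object also exposes the raw detections for programmatic use, for example (a short sketch using the `.xyxy` and `.pandas()` accessors noted above):
```python
# Detections as a tensor: one row per box, [xmin, ymin, xmax, ymax, confidence, class]
boxes = results.xyxy[0]

# Detections as a pandas DataFrame with named columns, convenient for filtering
df = results.pandas().xyxy[0]
print(df[df["confidence"] > 0.5])
```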
</details>
<details>
<summary>Inference with detect.py</summary>
The `detect.py` script runs inference on various sources. It automatically downloads [models](https://github.com/ultralytics/yolov5/tree/master/models) from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saves the results to the `runs/detect` directory.
```bash
# Run inference using a webcam
python detect.py --weights yolov5s.pt --source 0
# Run inference on a local image file
python detect.py --weights yolov5s.pt --source img.jpg
# Run inference on a local video file
python detect.py --weights yolov5s.pt --source vid.mp4
# Run inference on a screen capture
python detect.py --weights yolov5s.pt --source screen
# Run inference on a directory of images
python detect.py --weights yolov5s.pt --source path/to/images/
# Run inference on a text file listing image paths
python detect.py --weights yolov5s.pt --source list.txt
# Run inference on a text file listing stream URLs
python detect.py --weights yolov5s.pt --source list.streams
# Run inference using a glob pattern for images
python detect.py --weights yolov5s.pt --source 'path/to/*.jpg'
# Run inference on a YouTube video URL
python detect.py --weights yolov5s.pt --source 'https://youtu.be/LNwODJXcvt4'
# Run inference on an RTSP, RTMP, or HTTP stream
python detect.py --weights yolov5s.pt --source 'rtsp://example.com/media.mp4'
```
</details>
<details>
<summary>Training</summary>
The commands below demonstrate how to reproduce YOLOv5 [COCO dataset](https://docs.ultralytics.com/datasets/detect/coco/) results. Both [models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) are downloaded automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are approximately 1/2/4/6/8 days on a single [NVIDIA V100 GPU](https://www.nvidia.com/en-us/data-center/v100/). Using [Multi-GPU training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) can significantly reduce training time. Use the largest `--batch-size` your hardware allows, or use `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). The batch sizes shown below are for V100-16GB GPUs.
```bash
# Train YOLOv5n on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
# Train YOLOv5s on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 64
# Train YOLOv5m on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5m.yaml --batch-size 40
# Train YOLOv5l on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5l.yaml --batch-size 24
# Train YOLOv5x on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5x.yaml --batch-size 16
```
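To reduce wall-clock time, the same runs can be launched with Multi-GPU DDP as described in the tutorial linked above (a sketch; the GPU count and total `--batch-size` below are illustrative and should match your hardware):
```bash
# Train YOLOv5s on COCO across 4 GPUs (--batch-size is the total split across all GPUs)
python -m torch.distributed.run --nproc_per_node 4 train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 256 --device 0,1,2,3
```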
<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png" alt="YOLOv5 Training Results">
</details>
<details open>
<summary>Tutorials</summary>
- **[Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/)** 🚀 **RECOMMENDED**: Learn how to train YOLOv5 on your own datasets.
- **[Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/)** ☘️: Improve your model's performance with expert tips.
- **[Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)**: Speed up training using multiple GPUs.
- **[PyTorch Hub Integration](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/)** 🌟 **NEW**: Easily load models using PyTorch Hub.
- **[Model Export (TFLite, ONNX, CoreML, TensorRT)](https://docs.ultralytics.com/yolov5/tutorials/model_export/)** 🚀: Convert your models to various deployment formats like [ONNX](https://onnx.ai/) or [TensorRT](https://developer.nvidia.com/tensorrt).
- **[NVIDIA Jetson Deployment](https://docs.ultralytics.com/guides/nvidia-jetson/)** 🌟 **NEW**: Deploy YOLOv5 on [NVIDIA Jetson](https://developer.nvidia.com/embedded-computing) devices.
- **[Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)**: Enhance prediction accuracy with TTA.
- **[Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/)**: Combine multiple models for better performance.
- **[Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/)**: Optimize models for size and speed.
- **[Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/)**: Automatically find the best training hyperparameters.
- **[Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/)**: Adapt pretrained models to new tasks efficiently using [transfer learning](https://www.ultralytics.com/glossary/transfer-learning).
- **[Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/)** 🌟 **NEW**: Understand the YOLOv5 model architecture.
- **[Ultralytics Platform Training](https://platform.ultralytics.com)** 🚀 **RECOMMENDED**: Train and deploy YOLO models using Ultralytics Platform.
- **[ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/)**: Integrate with [ClearML](https://clear.ml/) for experiment tracking.
- **[Neural Magic DeepSparse Integration](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/)**: Accelerate inference with DeepSparse.
- **[Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/)** 🌟 **NEW**: Log experiments using [Comet ML](https://www.comet.com/site/).
</details>
## 🧩 Integrations
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with partners like [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/), [Comet ML](https://docs.ultralytics.com/integrations/comet/), [Roboflow](https://docs.ultralytics.com/integrations/roboflow/), and [Intel OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow. Explore more at [Ultralytics Integrations](https://docs.ultralytics.com/integrations/).
<a href="https://docs.ultralytics.com/integrations/" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations">
</a>
<br>
<br>
<div align="center">
<a href="https://platform.ultralytics.com">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics Platform logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.ultralytics.com/integrations/weights-biases/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="Weights & Biases logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.ultralytics.com/integrations/comet/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.ultralytics.com/integrations/neural-magic/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="Neural Magic logo"></a>
</div>
| Ultralytics Platform 🌟 | Weights & Biases | Comet | Neural Magic |
| :--------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: |
| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics Platform](https://platform.ultralytics.com). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/). | Free forever, [Comet ML](https://docs.ultralytics.com/integrations/comet/) lets you save YOLO models, resume training, and interactively visualize predictions. | Run YOLO inference up to 6x faster with [Neural Magic DeepSparse](https://docs.ultralytics.com/integrations/neural-magic/). |
## ⭐ Ultralytics Platform
Experience seamless AI development with [Ultralytics Platform](https://platform.ultralytics.com) ⭐, the ultimate platform for building, training, and deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models. Visualize datasets, train [YOLOv5](https://docs.ultralytics.com/models/yolov5/) and [YOLOv8](https://docs.ultralytics.com/models/yolov8/) 🚀 models, and deploy them to real-world applications without writing any code. Transform images into actionable insights using our cutting-edge tools and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** today!
<a align="center" href="https://platform.ultralytics.com" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics Platform Platform Screenshot"></a>
## 🤔 Why YOLOv5?
YOLOv5 is designed for simplicity and ease of use. We prioritize real-world performance and accessibility.
<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png" alt="YOLOv5 Performance Chart"></p>
<details>
<summary>YOLOv5-P5 640 Figure</summary>
<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png" alt="YOLOv5 P5 640 Performance Chart"></p>
</details>
<details>
<summary>Figure Notes</summary>
- **COCO AP val** denotes the [mean Average Precision (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map) at [Intersection over Union (IoU)](https://www.ultralytics.com/glossary/intersection-over-union-iou) thresholds from 0.5 to 0.95, measured on the 5,000-image [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/) across various inference sizes (256 to 1536 pixels).
- **GPU Speed** measures the average inference time per image on the [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/) using an [AWS p3.2xlarge V100 instance](https://aws.amazon.com/ec2/instance-types/p3/) with a batch size of 32.
- **EfficientDet** data is sourced from the [google/automl repository](https://github.com/google/automl) at batch size 8.
- **Reproduce** these results using the command: `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
</details>
### Pretrained Checkpoints
This table shows the performance metrics for various YOLOv5 models trained on the COCO dataset.
| Model | Size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- |
| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
| | | | | | | | | |
| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [[TTA]](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
<details>
<summary>Table Notes</summary>
- All checkpoints were trained for 300 epochs using default settings. Nano (n) and Small (s) models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyperparameters, while Medium (m), Large (l), and Extra-Large (x) models use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAP<sup>val</sup>** values represent single-model, single-scale performance on the [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/).<br>Reproduce using: `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** metrics are averaged over COCO val images using an [AWS p3.2xlarge V100 instance](https://aws.amazon.com/ec2/instance-types/p3/). Non-Maximum Suppression (NMS) time (~1 ms/image) is not included.<br>Reproduce using: `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** ([Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)) includes reflection and scale augmentations for improved accuracy.<br>Reproduce using: `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
</details>
## 🖼️ Segmentation
The YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) introduced [instance segmentation](https://docs.ultralytics.com/tasks/segment/) models that achieve state-of-the-art performance. These models are designed for easy training, validation, and deployment. For full details, see the [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and explore the [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart examples.
<details>
<summary>Segmentation Checkpoints</summary>
<div align="center">
<a align="center" href="https://www.ultralytics.com/yolo" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png" alt="YOLOv5 Segmentation Performance Chart"></a>
</div>
YOLOv5 segmentation models were trained on the [COCO dataset](https://docs.ultralytics.com/datasets/segment/coco/) for 300 epochs at an image size of 640 pixels using A100 GPUs. Models were exported to [ONNX](https://onnx.ai/) FP32 for CPU speed tests and [TensorRT](https://developer.nvidia.com/tensorrt) FP16 for GPU speed tests. All speed tests were conducted on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for reproducibility.
| Model | Size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train Time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- |
| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |
- All checkpoints were trained for 300 epochs using the SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at an image size of 640 pixels, using default settings.<br>Training runs are logged at [https://wandb.ai/glenn-jocher/YOLOv5_v70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official).
- **Accuracy** values represent single-model, single-scale performance on the COCO dataset.<br>Reproduce using: `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
- **Speed** metrics are averaged over 100 inference images using a [Colab Pro A100 High-RAM instance](https://colab.research.google.com/signup). Values indicate inference speed only (NMS adds approximately 1ms per image).<br>Reproduce using: `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
- **Export** to ONNX (FP32) and TensorRT (FP16) was performed using `export.py`.<br>Reproduce using: `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
</details>
<details>
<summary>Segmentation Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>
### Train
YOLOv5 segmentation training supports automatic download of the [COCO128-seg dataset](https://docs.ultralytics.com/datasets/segment/coco8-seg/) via the `--data coco128-seg.yaml` argument. For the full [COCO-segments dataset](https://docs.ultralytics.com/datasets/segment/coco/), download it manually using `bash data/scripts/get_coco.sh --train --val --segments` and then train with `python train.py --data coco.yaml`.
```bash
# Train on a single GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640
# Train using Multi-GPU Distributed Data Parallel (DDP)
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```
### Val
Validate the mask [mean Average Precision (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map) of YOLOv5s-seg on the COCO dataset:
```bash
# Download COCO validation segments split (780MB, 5000 images)
bash data/scripts/get_coco.sh --val --segments
# Validate the model
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640
```
### Predict
Use the pretrained YOLOv5m-seg.pt model to perform segmentation on `bus.jpg`:
```bash
# Run prediction
python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
```
```python
# Load model from PyTorch Hub (Note: Inference support might vary)
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5m-seg.pt")
```
|  |  |
| :-----------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------: |
### Export
Export the YOLOv5s-seg model to ONNX and TensorRT formats:
```bash
# Export model
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```
</details>
## 🏷️ Classification
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases/v6.2) introduced support for [image classification](https://docs.ultralytics.com/tasks/classify/) model training, validation, and deployment. Check the [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) for details and the [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart guides.
<details>
<summary>Classification Checkpoints</summary>
<br>
YOLOv5-cls classification models were trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) for 90 epochs using a 4xA100 instance. [ResNet](https://arxiv.org/abs/1512.03385) and [EfficientNet](https://arxiv.org/abs/1905.11946) models were trained alongside under identical settings for comparison. Models were exported to [ONNX](https://onnx.ai/) FP32 (CPU speed tests) and [TensorRT](https://developer.nvidia.com/tensorrt) FP16 (GPU speed tests). All speed tests were run on Google [Colab Pro](https://colab.research.google.com/signup) for reproducibility.
| Model | Size<br><sup>(pixels) | Acc<br><sup>top1 | Acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TensorRT V100<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
| | | | | | | | | |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
| | | | | | | | | |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
<details>
<summary>Table Notes (click to expand)</summary>
- All checkpoints were trained for 90 epochs using the SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at an image size of 224 pixels, using default settings.<br>Training runs are logged at [https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2).
- **Accuracy** values (top-1 and top-5) represent single-model, single-scale performance on the [ImageNet-1k dataset](https://docs.ultralytics.com/datasets/classify/imagenet/).<br>Reproduce using: `python classify/val.py --data ../datasets/imagenet --img 224`
- **Speed** metrics are averaged over 100 inference images using a Google [Colab Pro V100 High-RAM instance](https://colab.research.google.com/signup).<br>Reproduce using: `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX (FP32) and TensorRT (FP16) was performed using `export.py`.<br>Reproduce using: `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
</details>
</details>
<details>
<summary>Classification Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>
### Train
YOLOv5 classification training supports automatic download for datasets like [MNIST](https://docs.ultralytics.com/datasets/classify/mnist/), [Fashion-MNIST](https://docs.ultralytics.com/datasets/classify/fashion-mnist/), [CIFAR10](https://docs.ultralytics.com/datasets/classify/cifar10/), [CIFAR100](https://docs.ultralytics.com/datasets/classify/cifar100/), [Imagenette](https://docs.ultralytics.com/datasets/classify/imagenette/), [Imagewoof](https://docs.ultralytics.com/datasets/classify/imagewoof/), and [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) using the `--data` argument. For example, start training on MNIST with `--data mnist`.
```bash
# Train on a single GPU using CIFAR-100 dataset
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128
# Train using Multi-GPU DDP on ImageNet dataset
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```
### Val
Validate the accuracy of the YOLOv5m-cls model on the ImageNet-1k validation dataset:
```bash
# Download ImageNet validation split (6.3GB, 50,000 images)
bash data/scripts/get_imagenet.sh --val
# Validate the model
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224
```
### Predict
Use the pretrained YOLOv5s-cls.pt model to classify the image `bus.jpg`:
```bash
# Run prediction
python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
```
```python
# Load model from PyTorch Hub
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")
```
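Note that the hub-loaded classifier expects a preprocessed tensor rather than a raw image path, so inference takes one extra step. A minimal sketch, assuming a 224x224 normalized RGB input (the random tensor below is a stand-in for a real image, and the 1,000-class output assumes ImageNet weights):
```python
import torch
import torch.nn.functional as F

# Load the classification model from PyTorch Hub
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")
im = torch.rand(1, 3, 224, 224)  # placeholder for a resized, normalized RGB image batch
with torch.no_grad():
    logits = model(im)  # raw class scores; shape (1, 1000) assuming ImageNet weights
probs = F.softmax(logits, dim=1)[0]  # convert scores to class probabilities
top5 = probs.topk(5)  # top-5 probabilities and class indices
print(top5.indices.tolist(), top5.values.tolist())
```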
### Export
Export trained YOLOv5s-cls, ResNet50, and EfficientNet_b0 models to ONNX and TensorRT formats:
```bash
# Export models
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```
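Once exported, the ONNX model can be run outside PyTorch. A minimal sketch using ONNX Runtime (assumes the export above produced `yolov5s-cls.onnx` in the working directory; the random array stands in for a preprocessed image):
```python
import numpy as np
import onnxruntime as ort

# Load the exported classifier and run one forward pass on CPU
session = ort.InferenceSession("yolov5s-cls.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
im = np.random.rand(1, 3, 224, 224).astype(np.float32)  # placeholder input batch
(logits,) = session.run(None, {input_name: im})
print(logits.shape)  # expected (1, 1000): ImageNet class scores
```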
</details>
## ☁️ Environments
Get started quickly with our pre-configured environments. Click the icons below for setup details.
<div align="center">
<a href="https://bit.ly/yolov5-paperspace-notebook" title="Run on Paperspace Gradient">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gradient.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb" title="Open in Google Colab">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-colab-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://www.kaggle.com/models/ultralytics/yolov5" title="Open in Kaggle">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-kaggle-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://hub.docker.com/r/ultralytics/yolov5" title="Pull Docker Image">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-docker-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/" title="AWS Quickstart Guide">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-aws-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/" title="GCP Quickstart Guide">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gcp-small.png" width="10%" /></a>
</div>
## 🤝 Contribute
We welcome your contributions! Making YOLOv5 accessible and effective is a community effort. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started. Share your feedback through the [YOLOv5 Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). Thank you to all our contributors for making YOLOv5 better!
[](https://github.com/ultralytics/yolov5/graphs/contributors)
## 📜 License
Ultralytics provides two licensing options to meet different needs:
- **AGPL-3.0 License**: An [OSI-approved](https://opensource.org/license/agpl-v3) open-source license ideal for academic research, personal projects, and testing. It promotes open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Tailored for commercial applications, this license allows seamless integration of Ultralytics software and AI models into commercial products and services, bypassing the open-source requirements of AGPL-3.0. For commercial use cases, please contact us via [Ultralytics Licensing](https://www.ultralytics.com/license).
## 📧 Contact
For bug reports and feature requests related to YOLOv5, please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For general questions, discussions, and community support, join our [Discord server](https://discord.com/invite/ultralytics)!
<br>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
================================================
FILE: README.zh-CN.md
================================================
<div align="center">
<p>
<a href="https://platform.ultralytics.com/?utm_source=github&utm_medium=referral&utm_campaign=platform_launch&utm_content=banner&utm_term=ultralytics_github" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="Ultralytics YOLO 横幅"></a>
</p>
[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)
<div>
<a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI 测试"></a>
<a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker 拉取次数"></a>
<a href="https://discord.com/invite/ultralytics"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com/"><img alt="Ultralytics 论坛" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a> <a href="https://www.reddit.com/r/ultralytics/"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
<br>
<a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="在 Gradient 上运行"></a>
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="在 Colab 中打开"></a>
<a href="https://www.kaggle.com/models/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="在 Kaggle 中打开"></a>
</div>
<br>
Ultralytics YOLOv5 🚀 is a cutting-edge, state-of-the-art (SOTA) computer vision model developed by [Ultralytics](https://www.ultralytics.com/). Based on the [PyTorch](https://pytorch.org/) framework, YOLOv5 is renowned for its ease of use, speed, and accuracy. It incorporates insights and best practices from extensive research and development, making it a popular choice for a wide range of vision AI tasks, including [object detection](https://docs.ultralytics.com/tasks/detect/), [image segmentation](https://docs.ultralytics.com/tasks/segment/), and [image classification](https://docs.ultralytics.com/tasks/classify/).
We hope the resources here help you get the most out of YOLOv5. Please browse the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for details, raise an issue on [GitHub](https://github.com/ultralytics/yolov5/issues/new/choose) for support, and join our [Discord community](https://discord.com/invite/ultralytics) for questions and discussions!
To request an Enterprise License, please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="2%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>
<br>
## 🚀 YOLO11: The Next Evolution
We are excited to announce **Ultralytics YOLO11** 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Now available in the [Ultralytics YOLO GitHub repository](https://github.com/ultralytics/ultralytics), YOLO11 continues our legacy of speed, precision, and ease of use. Whether you are tackling [object detection](https://docs.ultralytics.com/tasks/detect/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [pose estimation](https://docs.ultralytics.com/tasks/pose/), [image classification](https://docs.ultralytics.com/tasks/classify/), or [oriented object detection (OBB)](https://docs.ultralytics.com/tasks/obb/), YOLO11 delivers the performance and versatility needed to excel in diverse applications.
Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources:
[](https://badge.fury.io/py/ultralytics) [](https://clickpy.clickhouse.com/dashboard/ultralytics)
```bash
# Install the ultralytics package
pip install ultralytics
```
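As a quick taste of the new API, here is a minimal Python sketch of running a pretrained YOLO11 model through the `ultralytics` package (the model name and example image URL below are illustrative):
```python
from ultralytics import YOLO

# Load a pretrained YOLO11 nano model (weights download automatically on first use)
model = YOLO("yolo11n.pt")
# Run inference on an example image and inspect the detected boxes
results = model("https://ultralytics.com/images/bus.jpg")
print(results[0].boxes)
```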
<div align="center">
<a href="https://platform.ultralytics.com/ultralytics/yolo26" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="Ultralytics YOLO 性能比较"></a>
</div>
## 📚 Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for full documentation on training, testing, and deployment. See the quickstart examples below.
<details open>
<summary>Install</summary>
Clone the repository and install dependencies in a [**Python>=3.8.0**](https://www.python.org/) environment. Ensure you have [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) installed.
```bash
# Clone the YOLOv5 repository
git clone https://github.com/ultralytics/yolov5
# Navigate to the cloned directory
cd yolov5
# Install required packages
pip install -r requirements.txt
```
</details>
<details open>
<summary>Inference with PyTorch Hub</summary>
Use YOLOv5 via [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) for inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) are downloaded automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
```python
import torch
# Load a YOLOv5 model (options: yolov5n, yolov5s, yolov5m, yolov5l, yolov5x)
model = torch.hub.load("ultralytics/yolov5", "yolov5s") # default: yolov5s
# Define the input image source (URL, local file, PIL image, OpenCV frame, numpy array, or list)
img = "https://ultralytics.com/images/zidane.jpg" # example image
# Perform inference (handles batching, resizing, normalization automatically)
results = model(img)
# Process the results (options: .print(), .show(), .save(), .crop(), .pandas())
results.print() # print results to console
results.show() # display results in a window
results.save() # save results to runs/detect/exp
```
</details>
<details>
<summary>Inference with detect.py</summary>
The `detect.py` script runs inference on a variety of sources. It automatically downloads [models](https://github.com/ultralytics/yolov5/tree/master/models) from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saves results to the `runs/detect` directory.
```bash
# Run inference using a webcam
python detect.py --weights yolov5s.pt --source 0
# Run inference on a local image file
python detect.py --weights yolov5s.pt --source img.jpg
# Run inference on a local video file
python detect.py --weights yolov5s.pt --source vid.mp4
# Run inference on a screenshot
python detect.py --weights yolov5s.pt --source screen
# Run inference on a directory of images
python detect.py --weights yolov5s.pt --source path/to/images/
# Run inference on a text file listing image paths
python detect.py --weights yolov5s.pt --source list.txt
# Run inference on a text file listing stream URLs
python detect.py --weights yolov5s.pt --source list.streams
# Run inference on images matching a glob pattern
python detect.py --weights yolov5s.pt --source 'path/to/*.jpg'
# Run inference on a YouTube video URL
python detect.py --weights yolov5s.pt --source 'https://youtu.be/LNwODJXcvt4'
# Run inference on an RTSP, RTMP, or HTTP stream
python detect.py --weights yolov5s.pt --source 'rtsp://example.com/media.mp4'
```
</details>
<details>
<summary>Training</summary>
The commands below demonstrate how to reproduce YOLOv5 results on the [COCO dataset](https://docs.ultralytics.com/datasets/detect/coco/). Both [models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) are downloaded automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are approximately 1/2/4/6/8 days on a single [NVIDIA V100 GPU](https://www.nvidia.com/en-us/data-center/v100/). Using [Multi-GPU training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) can significantly reduce training time. Use the largest `--batch-size` your hardware allows, or use `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). The batch sizes shown below are for V100-16GB GPUs.
```bash
# Train YOLOv5n on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
# Train YOLOv5s on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 64
# Train YOLOv5m on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5m.yaml --batch-size 40
# Train YOLOv5l on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5l.yaml --batch-size 24
# Train YOLOv5x on COCO for 300 epochs
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5x.yaml --batch-size 16
```
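If you are unsure what fits in memory, the `--batch-size -1` AutoBatch option mentioned above estimates the largest usable batch size automatically, for example:
```bash
# Let AutoBatch pick the batch size for the available GPU memory
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size -1
```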
<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png" alt="YOLOv5 训练结果">
</details>
<details open>
<summary>Tutorials</summary>
- **[Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/)** 🚀 **RECOMMENDED**: Learn how to train YOLOv5 on your own datasets.
- **[Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/)** ☘️: Improve model performance with expert tips.
- **[Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/)**: Speed up training using multiple GPUs.
- **[PyTorch Hub Integration](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/)** 🌟 **NEW**: Easily load models using PyTorch Hub.
- **[Model Export (TFLite, ONNX, CoreML, TensorRT)](https://docs.ultralytics.com/yolov5/tutorials/model_export/)** 🚀: Convert your models to various deployment formats like [ONNX](https://onnx.ai/) or [TensorRT](https://developer.nvidia.com/tensorrt).
- **[NVIDIA Jetson Deployment](https://docs.ultralytics.com/guides/nvidia-jetson/)** 🌟 **NEW**: Deploy YOLOv5 on [NVIDIA Jetson](https://developer.nvidia.com/embedded-computing) devices.
- **[Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)**: Improve prediction accuracy with TTA.
- **[Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/)**: Combine multiple models for better performance.
- **[Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/)**: Optimize models for size and speed.
- **[Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/)**: Automatically find the best training hyperparameters.
- **[Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/)**: Apply pretrained models to new tasks efficiently using [transfer learning](https://www.ultralytics.com/glossary/transfer-learning).
- **[Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/)** 🌟 **NEW**: Understand the YOLOv5 model architecture.
- **[Ultralytics Platform Training](https://platform.ultralytics.com)** 🚀 **RECOMMENDED**: Train and deploy YOLO models using the Ultralytics Platform.
- **[ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/)**: Integrate with [ClearML](https://clear.ml/) for experiment tracking.
- **[Neural Magic DeepSparse Integration](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/)**: Accelerate inference with DeepSparse.
- **[Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/)** 🌟 **NEW**: Log experiments using [Comet ML](https://www.comet.com/site/).
</details>
## 🧩 Integrations
Our key integrations with leading AI platforms extend the functionality of Ultralytics offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics works with partners like [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/), [Comet ML](https://docs.ultralytics.com/integrations/comet/), [Roboflow](https://docs.ultralytics.com/integrations/roboflow/), and [Intel OpenVINO](https://docs.ultralytics.com/integrations/openvino/) to optimize your AI workflow. Explore more at [Ultralytics Integrations](https://docs.ultralytics.com/integrations/).
<a href="https://docs.ultralytics.com/integrations/" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics 主动学习集成">
</a>
<br>
<br>
<div align="center">
<a href="https://platform.ultralytics.com">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics Platform logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.ultralytics.com/integrations/weights-biases/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="Weights & Biases logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.ultralytics.com/integrations/comet/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.ultralytics.com/integrations/neural-magic/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="Neural Magic logo"></a>
</div>
| Ultralytics Platform 🌟 | Weights & Biases | Comet | Neural Magic |
| :----------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------: |
| Streamline YOLO workflows: label, train, and deploy effortlessly with [Ultralytics Platform](https://platform.ultralytics.com). Try it now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/). | Free forever, [Comet ML](https://docs.ultralytics.com/integrations/comet/) lets you save YOLO models, resume training, and interactively visualize predictions. | Run YOLO inference up to 6x faster with [Neural Magic DeepSparse](https://docs.ultralytics.com/integrations/neural-magic/). |
## ⭐ Ultralytics Platform
Experience seamless AI development with [Ultralytics Platform](https://platform.ultralytics.com) ⭐, the ultimate platform for building, training, and deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models. Visualize datasets, train [YOLOv5](https://docs.ultralytics.com/models/yolov5/) and [YOLOv8](https://docs.ultralytics.com/models/yolov8/) 🚀 models, and deploy them to real-world applications without writing any code. Turn images into actionable insights using our cutting-edge tools and the user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **free** today!
<a align="center" href="https://platform.ultralytics.com" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics Platform 平台截图"></a>
## 🤔 Why YOLOv5?
YOLOv5 is designed to be simple and easy to use. We prioritize real-world performance and accessibility.
<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png" alt="YOLOv5 performance chart"></p>
<details>
<summary>YOLOv5-P5 640 Figure</summary>
<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png" alt="YOLOv5 P5 640 performance chart"></p>
</details>
<details>
<summary>Figure Notes</summary>
- **COCO AP val** denotes the [mean Average Precision (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map) at [Intersection over Union (IoU)](https://www.ultralytics.com/glossary/intersection-over-union-iou) thresholds from 0.5 to 0.95, measured on the 5,000-image [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/) across various inference sizes (256 to 1536 pixels).
- **GPU Speed** measures the average inference time per image on the [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/) using an [AWS p3.2xlarge V100 instance](https://aws.amazon.com/ec2/instance-types/p4/) at a batch size of 32.
- **EfficientDet** data is sourced from the [google/automl repository](https://github.com/google/automl) at batch size 8.
- **Reproduce** these results using the command: `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
</details>
### Pretrained Checkpoints
This table shows the performance metrics for various YOLOv5 models trained on the COCO dataset.
| Model | Size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- |
| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
| | | | | | | | | |
| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [[TTA]](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
<details>
<summary>Table Notes</summary>
- All checkpoints were trained for 300 epochs using default settings. Nano (n) and Small (s) models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyperparameters, while Medium (m), Large (l), and Extra-Large (x) models use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAP<sup>val</sup>** values represent single-model, single-scale performance on the [COCO val2017 dataset](https://docs.ultralytics.com/datasets/detect/coco/).<br>Reproduce using: `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** metrics are averaged over COCO val images using an [AWS p3.2xlarge V100 instance](https://aws.amazon.com/ec2/instance-types/p4/). Non-Maximum Suppression (NMS) time (~1 ms/img) is not included.<br>Reproduce using: `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** ([Test-Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/)) includes reflection and scale augmentations to improve accuracy.<br>Reproduce using: `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
</details>
## 🖼️ Segmentation
The YOLOv5 [v7.0 release](https://github.com/ultralytics/yolov5/releases/v7.0) introduced [instance segmentation](https://docs.ultralytics.com/tasks/segment/) models that achieve state-of-the-art performance. These models are designed for easy training, validation, and deployment. For full details, see the [release notes](https://github.com/ultralytics/yolov5/releases/v7.0) and explore the [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart examples.
<details>
<summary>Segmentation Checkpoints</summary>
<div align="center">
<a align="center" href="https://www.ultralytics.com/yolo" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png" alt="YOLOv5 分割性能图表"></a>
</div>
YOLOv5 segmentation models were trained on the [COCO dataset](https://docs.ultralytics.com/datasets/segment/coco/) for 300 epochs at an image size of 640 pixels using A100 GPUs. Models were exported to [ONNX](https://onnx.ai/) FP32 for CPU speed tests and to [TensorRT](https://developer.nvidia.com/tensorrt) FP16 for GPU speed tests. All speed tests were run on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for reproducibility.
| Model | Size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train Time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- |
| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |
- All checkpoints were trained for 300 epochs using the SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at an image size of 640 pixels, using default settings.<br>Training runs are logged at [https://wandb.ai/glenn-jocher/YOLOv5_v70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official).
- **Accuracy** values represent single-model, single-scale performance on the COCO dataset.<br>Reproduce using: `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
- **Speed** metrics are averaged over 100 inference images using a [Colab Pro A100 High-RAM instance](https://colab.research.google.com/signup). Values represent inference speed only (NMS adds ~1 ms/img).<br>Reproduce using: `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
- **Export** to ONNX (FP32) and TensorRT (FP16) was performed using `export.py`.<br>Reproduce using: `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
</details>
<details>
<summary>Segmentation Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>
### Train
YOLOv5 segmentation training supports automatic download of the [COCO128-seg dataset](https://docs.ultralytics.com/datasets/segment/coco8-seg/) via the `--data coco128-seg.yaml` argument. For the full [COCO-segments dataset](https://docs.ultralytics.com/datasets/segment/coco/), download it manually with `bash data/scripts/get_coco.sh --train --val --segments` and then train with `python train.py --data coco.yaml`.
```bash
# Train on a single GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640
# Train with Multi-GPU Distributed Data Parallel (DDP)
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```
### Val
Validate the mask [mean Average Precision (mAP)](https://www.ultralytics.com/glossary/mean-average-precision-map) of YOLOv5s-seg on the COCO dataset:
```bash
# Download COCO val segments split (780MB, 5000 images)
bash data/scripts/get_coco.sh --val --segments
# Validate the model
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640
```
### Predict
Use the pretrained YOLOv5m-seg.pt model to perform segmentation on `bus.jpg`:
```bash
# Run prediction
python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
```
```python
# Load model from PyTorch Hub (note: inference support may vary)
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5m-seg.pt")
```
|  |  |
| :-----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: |
### Export
Export the YOLOv5s-seg model to ONNX and TensorRT formats:
```bash
# Export models
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```
</details>
## 🏷️ Classification
The YOLOv5 [v6.2 release](https://github.com/ultralytics/yolov5/releases/v6.2) introduced support for [image classification](https://docs.ultralytics.com/tasks/classify/) model training, validation, and deployment. Check the [release notes](https://github.com/ultralytics/yolov5/releases/v6.2) for details and the [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for a quickstart guide.
<details>
<summary>Classification Checkpoints</summary>
<br>
YOLOv5-cls classification models were trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) for 90 epochs using a 4xA100 instance. [ResNet](https://arxiv.org/abs/1512.03385) and [EfficientNet](https://arxiv.org/abs/1905.11946) models were trained alongside them under identical settings for comparison. Models were exported to [ONNX](https://onnx.ai/) FP32 (for CPU speed tests) and [TensorRT](https://developer.nvidia.com/tensorrt) FP16 (for GPU speed tests). All speed tests were run on Google [Colab Pro](https://colab.research.google.com/signup) for reproducibility.
| Model | Size<br><sup>(pixels) | Acc<br><sup>top1 | Acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TensorRT V100<br>(ms) | Params<br><sup>(M) | FLOPs<br><sup>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
| | | | | | | | | |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
| | | | | | | | | |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
<details>
<summary>Table Notes (click to expand)</summary>
- All checkpoints were trained for 90 epochs using the SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at an image size of 224 pixels, using default settings.<br>Training runs are logged at [https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2).
- **Accuracy** values (top-1 and top-5) represent single-model, single-scale performance on the [ImageNet-1k dataset](https://docs.ultralytics.com/datasets/classify/imagenet/).<br>Reproduce using: `python classify/val.py --data ../datasets/imagenet --img 224`
- **Speed** metrics are averaged over 100 inference images using a Google [Colab Pro V100 High-RAM instance](https://colab.research.google.com/signup).<br>Reproduce using: `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX (FP32) and TensorRT (FP16) was performed using `export.py`.<br>Reproduce using: `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
</details>
</details>
<details>
<summary>Classification Usage Examples <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>
### Train
YOLOv5 classification training supports automatic download for datasets like [MNIST](https://docs.ultralytics.com/datasets/classify/mnist/), [Fashion-MNIST](https://docs.ultralytics.com/datasets/classify/fashion-mnist/), [CIFAR10](https://docs.ultralytics.com/datasets/classify/cifar10/), [CIFAR100](https://docs.ultralytics.com/datasets/classify/cifar100/), [Imagenette](https://docs.ultralytics.com/datasets/classify/imagenette/), [Imagewoof](https://docs.ultralytics.com/datasets/classify/imagewoof/), and [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) using the `--data` argument. For example, start training on MNIST with `--data mnist`.
```bash
# Train on a single GPU using the CIFAR-100 dataset
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128
# Train using Multi-GPU DDP on the ImageNet dataset
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```
### Val
Validate the accuracy of the YOLOv5m-cls model on the ImageNet-1k validation dataset:
```bash
# Download ImageNet validation split (6.3GB, 50,000 images)
bash data/scripts/get_imagenet.sh --val
# Validate the model
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224
```
### Predict
Use the pretrained YOLOv5s-cls.pt model to classify the image `bus.jpg`:
```bash
# Run prediction
python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
```
```python
# Load model from PyTorch Hub
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")
```
### Export
Export trained YOLOv5s-cls, ResNet50, and EfficientNet_b0 models to ONNX and TensorRT formats:
```bash
# Export models
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```
</details>
## ☁️ Environments
Get started quickly with our pre-configured environments. Click the icons below for setup details.
<div align="center">
<a href="https://bit.ly/yolov5-paperspace-notebook" title="在 Paperspace Gradient 上运行">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gradient.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb" title="在 Google Colab 中打开">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-colab-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://www.kaggle.com/models/ultralytics/yolov5" title="在 Kaggle 中打开">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-kaggle-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://hub.docker.com/r/ultralytics/yolov5" title="拉取 Docker 镜像">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-docker-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/" title="AWS 快速入门指南">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-aws-small.png" width="10%" /></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
<a href="https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/" title="GCP 快速入门指南">
<img src="https://github.com/ultralytics/assets/releases/download/v0.0.0/logo-gcp-small.png" width="10%" /></a>
</div>
## 🤝 Contribute
We welcome your contributions! Making YOLOv5 accessible and effective is a community effort. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started. Share your feedback through the [YOLOv5 Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). Thank you to all our contributors for making YOLOv5 better!
[](https://github.com/ultralytics/yolov5/graphs/contributors)
## 📜 License
Ultralytics provides two licensing options to meet different needs:
- **AGPL-3.0 License**: An [OSI-approved](https://opensource.org/license/agpl-v3) open-source license ideal for academic research, personal projects, and testing. It promotes open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Tailored for commercial applications, this license allows seamless integration of Ultralytics software and AI models into commercial products and services, bypassing the open-source requirements of AGPL-3.0. For commercial use cases, please contact us via [Ultralytics Licensing](https://www.ultralytics.com/license).
## 📧 Contact
For bug reports and feature requests related to YOLOv5, please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For general questions, discussions, and community support, join our [Discord server](https://discord.com/invite/ultralytics)!
<br>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
================================================
FILE: benchmarks.py
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Run YOLOv5 benchmarks on all supported export formats.
Format | `export.py --include` | Model
--- | --- | ---
PyTorch | - | yolov5s.pt
TorchScript | `torchscript` | yolov5s.torchscript
ONNX | `onnx` | yolov5s.onnx
OpenVINO | `openvino` | yolov5s_openvino_model/
TensorRT | `engine` | yolov5s.engine
CoreML | `coreml` | yolov5s.mlpackage
TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
TensorFlow GraphDef | `pb` | yolov5s.pb
TensorFlow Lite | `tflite` | yolov5s.tflite
TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
TensorFlow.js | `tfjs` | yolov5s_web_model/
Requirements:
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
$ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
Usage:
$ python benchmarks.py --weights yolov5s.pt --img 640
"""
import argparse
import platform
import sys
import time
from pathlib import Path
import pandas as pd
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd()) # relative
import export
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from segment.val import run as val_seg
from utils import notebook_init
from utils.general import LOGGER, check_yaml, file_size, print_args
from utils.torch_utils import select_device
from val import run as val_det
def run(
weights=ROOT / "yolov5s.pt", # weights path
imgsz=640, # inference size (pixels)
batch_size=1, # batch size
data=ROOT / "data/coco128.yaml", # dataset.yaml path
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
half=False, # use FP16 half-precision inference
test=False, # test exports only
pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
"""Run YOLOv5 benchmarks on multiple export formats and log results for model performance evaluation.
Args:
weights (Path | str): Path to the model weights file (default: ROOT / "yolov5s.pt").
imgsz (int): Inference size in pixels (default: 640).
batch_size (int): Batch size for inference (default: 1).
data (Path | str): Path to the dataset.yaml file (default: ROOT / "data/coco128.yaml").
device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: "").
half (bool): Use FP16 half-precision inference (default: False).
test (bool): Test export formats only (default: False).
pt_only (bool): Test PyTorch format only (default: False).
hard_fail (bool): Throw an error on benchmark failure if True (default: False).
Returns:
None. Logs information about the benchmark results, including the format, size, mAP50-95, and inference time.
Examples:
```bash
$ python benchmarks.py --weights yolov5s.pt --img 640
```
Install required packages:
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support
$ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
Run benchmarks:
$ python benchmarks.py --weights yolov5s.pt --img 640
Notes:
Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML,
TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js
are unsupported.
"""
y, t = [], time.time()
device = select_device(device)
model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU)
try:
assert i not in (9, 10), "inference not supported" # Edge TPU and TF.js are unsupported
assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13" # CoreML
if "cpu" in device.type:
assert cpu, "inference not supported on CPU"
if "cuda" in device.type:
assert gpu, "inference not supported on GPU"
# Export
if f == "-":
w = weights # PyTorch format
else:
w = export.run(
weights=weights, imgsz=[imgsz], include=[f], batch_size=batch_size, device=device, half=half
)[-1] # all others
assert suffix in str(w), "export failed"
# Validate
if model_type == SegmentationModel:
result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task="speed", half=half)
metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
else: # DetectionModel:
result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task="speed", half=half)
metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls))
speed = result[2][1] # times (preprocess, inference, postprocess)
y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference
except Exception as e:
if hard_fail:
assert type(e) is AssertionError, f"Benchmark --hard-fail for {name}: {e}"
LOGGER.warning(f"WARNING ⚠️ Benchmark failure for {name}: {e}")
y.append([name, None, None, None]) # mAP, t_inference
if pt_only and i == 0:
break # break after PyTorch
# Print results
LOGGER.info("\n")
parse_opt()
notebook_init() # print system info
c = ["Format", "Size (MB)", "mAP50-95", "Inference time (ms)"] if map else ["Format", "Export", "", ""]
py = pd.DataFrame(y, columns=c)
LOGGER.info(f"\nBenchmarks complete ({time.time() - t:.2f}s)")
LOGGER.info(str(py if map else py.iloc[:, :2]))
if hard_fail and isinstance(hard_fail, str):
metrics = py["mAP50-95"].array # values to compare to floor
floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
assert all(x > floor for x in metrics if pd.notna(x)), f"HARD FAIL: mAP50-95 < floor {floor}"
return py
def test(
weights=ROOT / "yolov5s.pt", # weights path
imgsz=640, # inference size (pixels)
batch_size=1, # batch size
data=ROOT / "data/coco128.yaml", # dataset.yaml path
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
half=False, # use FP16 half-precision inference
test=False, # test exports only
pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
"""Run YOLOv5 export tests for all supported formats and log the results, including export statuses.
Args:
weights (Path | str): Path to the model weights file (.pt format). Default is 'ROOT / "yolov5s.pt"'.
imgsz (int): Inference image size (in pixels). Default is 640.
batch_size (int): Batch size for testing. Default is 1.
data (Path | str): Path to the dataset configuration file (.yaml format). Default is 'ROOT /
"data/coco128.yaml"'.
device (str): Device for running the tests, can be 'cpu' or a specific CUDA device ('0', '0,1,2,3', etc.).
Default is an empty string.
half (bool): Use FP16 half-precision for inference if True. Default is False.
test (bool): Test export formats only without running inference. Default is False.
pt_only (bool): Test only the PyTorch model if True. Default is False.
hard_fail (bool): Raise error on export or test failure if True. Default is False.
Returns:
pd.DataFrame: DataFrame containing the results of the export tests, including format names and export statuses.
Examples:
```bash
$ python benchmarks.py --weights yolov5s.pt --img 640
```
Install required packages:
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support
$ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
Run export tests:
$ python benchmarks.py --weights yolov5s.pt --img 640
Notes:
Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow
SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported.
"""
y, t = [], time.time()
device = select_device(device)
for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable)
try:
w = (
weights
if f == "-"
else export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]
) # weights
assert suffix in str(w), "export failed"
y.append([name, True])
except Exception:
y.append([name, False]) # mAP, t_inference
# Print results
LOGGER.info("\n")
parse_opt()
notebook_init() # print system info
py = pd.DataFrame(y, columns=["Format", "Export"])
LOGGER.info(f"\nExports complete ({time.time() - t:.2f}s)")
LOGGER.info(str(py))
return py
def parse_opt():
"""Parses command-line arguments for YOLOv5 model inference configuration.
Args:
weights (str): The path to the weights file. Defaults to 'ROOT / "yolov5s.pt"'.
imgsz (int): Inference size in pixels. Defaults to 640.
batch_size (int): Batch size. Defaults to 1.
data (str): Path to the dataset YAML file. Defaults to 'ROOT / "data/coco128.yaml"'.
device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'. Defaults to an empty string (auto-select).
half (bool): Use FP16 half-precision inference. This is a flag and defaults to False.
test (bool): Test exports only. This is a flag and defaults to False.
pt_only (bool): Test PyTorch only. This is a flag and defaults to False.
hard_fail (bool | str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum
metric floor, e.g., '0.29'. Defaults to False.
Returns:
argparse.Namespace: Parsed command-line arguments encapsulated in an argparse Namespace object.
Notes:
The function modifies the 'opt.data' by checking and validating the YAML path using 'check_yaml()'.
The parsed arguments are printed for reference using 'print_args()'.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--test", action="store_true", help="test exports only")
parser.add_argument("--pt-only", action="store_true", help="test PyTorch only")
parser.add_argument("--hard-fail", nargs="?", const=True, default=False, help="Exception on error or < min metric")
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
print_args(vars(opt))
return opt
def main(opt):
"""Executes YOLOv5 benchmark tests or main training/inference routines based on the provided command-line arguments.
Args:
opt (argparse.Namespace): Parsed command-line arguments including options for weights, image size, batch size,
data configuration, device, and other flags for inference settings.
Returns:
None: This function does not return any value. It leverages side-effects such as logging and running benchmarks.
Examples:
```python
if __name__ == "__main__":
opt = parse_opt()
main(opt)
```
Notes:
- For a complete list of supported export formats and their respective requirements, refer to the
[Ultralytics YOLOv5 Export Formats](https://github.com/ultralytics/yolov5#export-formats).
- Ensure that you have installed all necessary dependencies by following the installation instructions detailed in
the [main repository](https://github.com/ultralytics/yolov5#installation).
```shell
# Running benchmarks on default weights and image size
$ python benchmarks.py --weights yolov5s.pt --img 640
```
"""
test(**vars(opt)) if opt.test else run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
================================================
FILE: classify/predict.py
================================================
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
Usage - sources:
$ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam
img.jpg # image
vid.mp4 # video
screen # screenshot
path/ # directory
list.txt # list of images
list.streams # list of streams
'path/*.jpg' # glob
'https://youtu.be/LNwODJXcvt4' # YouTube
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
Usage - formats:
$ python classify/predict.py --weights yolov5s-cls.pt # PyTorch
yolov5s-cls.torchscript # TorchScript
yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
yolov5s-cls_openvino_model # OpenVINO
yolov5s-cls.engine # TensorRT
yolov5s-cls.mlmodel # CoreML (macOS-only)
yolov5s-cls_saved_model # TensorFlow SavedModel
yolov5s-cls.pb # TensorFlow GraphDef
yolov5s-cls.tflite # TensorFlow Lite
yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
yolov5s-cls_paddle_model # PaddlePaddle
"""
import argparse
import os
import platform
import sys
from pathlib import Path
import torch
import torch.nn.functional as F
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.plotting import Annotator
from models.common import DetectMultiBackend
from utils.augmentations import classify_transforms
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
LOGGER,
Profile,
check_file,
check_img_size,
check_imshow,
check_requirements,
colorstr,
cv2,
increment_path,
print_args,
strip_optimizer,
)
from utils.torch_utils import select_device, smart_inference_mode
@smart_inference_mode()
def run(
weights=ROOT / "yolov5s-cls.pt", # model.pt path(s)
source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
data=ROOT / "data/coco128.yaml", # dataset.yaml path
imgsz=(224, 224), # inference size (height, width)
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
nosave=False, # do not save images/videos
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / "runs/predict-cls", # save results to project/name
name="exp", # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
):
"""Conducts YOLOv5 classification inference on diverse input sources and saves results."""
source = str(source)
save_img = not nosave and not source.endswith(".txt") # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
screenshot = source.lower().startswith("screen")
if is_url and is_file:
source = check_file(source) # download
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
# Dataloader
bs = 1 # batch_size
if webcam:
view_img = check_imshow(warn=True)
dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
for path, im, im0s, vid_cap, s in dataset:
with dt[0]:
im = torch.Tensor(im).to(model.device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
if len(im.shape) == 3:
im = im[None] # expand for batch dim
# Inference
with dt[1]:
results = model(im)
# Post-process
with dt[2]:
pred = F.softmax(results, dim=1) # probabilities
# Process predictions
for i, prob in enumerate(pred): # per image
seen += 1
if webcam: # batch_size >= 1
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f"{i}: "
else:
p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
annotator = Annotator(im0, example=str(names), pil=True)
# Print results
top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices
s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "
# Write results
text = "\n".join(f"{prob[j]:.2f} {names[j]}" for j in top5i)
            if save_img or view_img:  # add classification results text to image
annotator.text([32, 32], text, txt_color=(255, 255, 255))
if save_txt: # Write to file
with open(f"{txt_path}.txt", "a") as f:
f.write(text + "\n")
# Stream results
im0 = annotator.result()
if view_img:
if platform.system() == "Linux" and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{dt[1].dt * 1e3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
def parse_opt():
"""Parses command line arguments for YOLOv5 inference settings including model, source, device, and image size."""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model path(s)")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[224], help="inference size h,w")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
    parser.add_argument("--augment", action="store_true", help="augmented inference")
    parser.add_argument("--visualize", action="store_true", help="visualize features")
    parser.add_argument("--update", action="store_true", help="update all models")
    parser.add_argument("--project", default=ROOT / "runs/predict-cls", help="save results to project/name")
    parser.add_argument("--name", default="exp", help="save results to project/name")
    parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
    parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
    parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
    parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt
def main(opt):
    """Executes YOLOv5 classification inference with the parsed command-line options."""
    check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
    run(**vars(opt))
if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
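As with benchmarks.py, classification inference can be called directly from Python. A minimal sketch (not part of classify/predict.py), assuming the repository root is on PYTHONPATH so that classify.predict is importable:

```python
# Minimal sketch: classification inference without the CLI.
from classify.predict import run

# Classify the bundled sample images on CPU at the default 224x224 input size.
run(weights="yolov5s-cls.pt", source="data/images", imgsz=(224, 224), device="cpu")
```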
SYMBOL INDEX (691 symbols across 43 files)
FILE: benchmarks.py
function run (line 52) | def run(
function test (line 150) | def test(
function parse_opt (line 219) | def parse_opt():
function main (line 257) | def main(opt):
FILE: classify/predict.py
function run (line 68) | def run(
function parse_opt (line 207) | def parse_opt():
function main (line 233) | def main(opt):
FILE: classify/train.py
function train (line 78) | def train(opt, device):
function parse_opt (line 313) | def parse_opt(known=False):
function main (line 343) | def main(opt):
function run (line 367) | def run(**kwargs):
FILE: classify/val.py
function run (line 53) | def run(
function parse_opt (line 150) | def parse_opt():
function main (line 170) | def main(opt):
FILE: detect.py
function run (line 70) | def run(
function parse_opt (line 323) | def parse_opt():
function main (line 408) | def main(opt):
FILE: export.py
class iOSModel (line 93) | class iOSModel(torch.nn.Module):
method __init__ (line 96) | def __init__(self, model, im):
method forward (line 122) | def forward(self, x):
function export_formats (line 143) | def export_formats():
function try_export (line 182) | def try_export(inner_func):
function export_torchscript (line 225) | def export_torchscript(model, im, file, optimize, prefix=colorstr("Torch...
function export_onnx (line 280) | def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colors...
function export_openvino (line 379) | def export_openvino(file, metadata, half, int8, data, prefix=colorstr("O...
function export_paddle (line 467) | def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePadd...
function export_coreml (line 515) | def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colo...
function export_engine (line 584) | def export_engine(
function export_saved_model (line 693) | def export_saved_model(
function export_pb (line 789) | def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
function export_tflite (line 826) | def export_tflite(
function export_edgetpu (line 901) | def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
function export_tfjs (line 962) | def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
function add_tflite_metadata (line 1023) | def add_tflite_metadata(file, metadata, num_outputs):
function pipeline_coreml (line 1084) | def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr(...
function run (line 1258) | def run(
function parse_opt (line 1464) | def parse_opt(known=False):
function main (line 1517) | def main(opt):
FILE: hubconf.py
function _create (line 16) | def _create(name, pretrained=True, channels=3, classes=80, autoshape=Tru...
function custom (line 107) | def custom(path="path/to/model.pt", autoshape=True, _verbose=True, devic...
function yolov5n (line 138) | def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
function yolov5s (line 175) | def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
function yolov5m (line 216) | def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
function yolov5l (line 249) | def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
function yolov5x (line 279) | def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
function yolov5n6 (line 309) | def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
function yolov5s6 (line 336) | def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
function yolov5m6 (line 373) | def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
function yolov5l6 (line 407) | def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
function yolov5x6 (line 439) | def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
FILE: models/common.py
function autopad (line 60) | def autopad(k, p=None, d=1):
class Conv (line 72) | class Conv(nn.Module):
method __init__ (line 77) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
method forward (line 84) | def forward(self, x):
method forward_fuse (line 88) | def forward_fuse(self, x):
class DWConv (line 93) | class DWConv(Conv):
method __init__ (line 96) | def __init__(self, c1, c2, k=1, s=1, d=1, act=True):
class DWConvTranspose2d (line 103) | class DWConvTranspose2d(nn.ConvTranspose2d):
method __init__ (line 106) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):
class TransformerLayer (line 113) | class TransformerLayer(nn.Module):
method __init__ (line 116) | def __init__(self, c, num_heads):
method forward (line 129) | def forward(self, x):
class TransformerBlock (line 136) | class TransformerBlock(nn.Module):
method __init__ (line 139) | def __init__(self, c1, c2, num_heads, num_layers):
method forward (line 151) | def forward(self, x):
class Bottleneck (line 162) | class Bottleneck(nn.Module):
method __init__ (line 165) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
method forward (line 175) | def forward(self, x):
class BottleneckCSP (line 182) | class BottleneckCSP(nn.Module):
method __init__ (line 185) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
method forward (line 199) | def forward(self, x):
class CrossConv (line 208) | class CrossConv(nn.Module):
method __init__ (line 211) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
method forward (line 223) | def forward(self, x):
class C3 (line 228) | class C3(nn.Module):
method __init__ (line 231) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
method forward (line 242) | def forward(self, x):
class C3x (line 247) | class C3x(C3):
method __init__ (line 250) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
class C3TR (line 259) | class C3TR(C3):
method __init__ (line 262) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
class C3SPP (line 271) | class C3SPP(C3):
method __init__ (line 274) | def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
class C3Ghost (line 283) | class C3Ghost(C3):
method __init__ (line 286) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
class SPP (line 293) | class SPP(nn.Module):
method __init__ (line 296) | def __init__(self, c1, c2, k=(5, 9, 13)):
method forward (line 306) | def forward(self, x):
class SPPF (line 316) | class SPPF(nn.Module):
method __init__ (line 319) | def __init__(self, c1, c2, k=5):
method forward (line 331) | def forward(self, x):
class Focus (line 341) | class Focus(nn.Module):
method __init__ (line 344) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
method forward (line 352) | def forward(self, x):
class GhostConv (line 358) | class GhostConv(nn.Module):
method __init__ (line 361) | def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
method forward (line 370) | def forward(self, x):
class GhostBottleneck (line 376) | class GhostBottleneck(nn.Module):
method __init__ (line 379) | def __init__(self, c1, c2, k=3, s=1):
method forward (line 394) | def forward(self, x):
class Contract (line 399) | class Contract(nn.Module):
method __init__ (line 402) | def __init__(self, gain=2):
method forward (line 409) | def forward(self, x):
class Expand (line 420) | class Expand(nn.Module):
method __init__ (line 423) | def __init__(self, gain=2):
method forward (line 432) | def forward(self, x):
class Concat (line 441) | class Concat(nn.Module):
method __init__ (line 444) | def __init__(self, dimension=1):
method forward (line 449) | def forward(self, x):
class DetectMultiBackend (line 454) | class DetectMultiBackend(nn.Module):
method __init__ (line 457) | def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), d...
method forward (line 687) | def forward(self, im, augment=False, visualize=False):
method from_numpy (line 771) | def from_numpy(self, x):
method warmup (line 775) | def warmup(self, imgsz=(1, 3, 640, 640)):
method _model_type (line 784) | def _model_type(p="path/to/model.pt"):
method _load_metadata (line 803) | def _load_metadata(f=Path("path/to/meta.yaml")):
class AutoShape (line 811) | class AutoShape(nn.Module):
method __init__ (line 822) | def __init__(self, model, verbose=True):
method _apply (line 836) | def _apply(self, fn):
method forward (line 851) | def forward(self, ims, size=640, augment=False, profile=False):
class Detections (line 921) | class Detections:
method __init__ (line 924) | def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shap...
method _run (line 942) | def _run(self, pprint=False, show=False, save=False, crop=False, rende...
method show (line 997) | def show(self, labels=True):
method save (line 1004) | def save(self, labels=True, save_dir="runs/detect/exp", exist_ok=False):
method crop (line 1012) | def crop(self, save=True, save_dir="runs/detect/exp", exist_ok=False):
method render (line 1020) | def render(self, labels=True):
method pandas (line 1025) | def pandas(self):
method tolist (line 1038) | def tolist(self):
method print (line 1056) | def print(self):
method __len__ (line 1060) | def __len__(self):
method __str__ (line 1064) | def __str__(self):
method __repr__ (line 1070) | def __repr__(self):
class Proto (line 1075) | class Proto(nn.Module):
method __init__ (line 1078) | def __init__(self, c1, c_=256, c2=32):
method forward (line 1086) | def forward(self, x):
class Classify (line 1091) | class Classify(nn.Module):
method __init__ (line 1094) | def __init__(
method forward (line 1107) | def forward(self, x):
FILE: models/experimental.py
class Sum (line 14) | class Sum(nn.Module):
method __init__ (line 17) | def __init__(self, n, weight=False):
method forward (line 27) | def forward(self, x):
class MixConv2d (line 40) | class MixConv2d(nn.Module):
method __init__ (line 43) | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
method forward (line 66) | def forward(self, x):
class Ensemble (line 73) | class Ensemble(nn.ModuleList):
method __init__ (line 76) | def __init__(self):
method forward (line 80) | def forward(self, x, augment=False, profile=False, visualize=False):
function attempt_load (line 89) | def attempt_load(weights, device=None, inplace=True, fuse=True):
FILE: models/tf.py
class TFBN (line 51) | class TFBN(keras.layers.Layer):
method __init__ (line 54) | def __init__(self, w=None):
method call (line 65) | def call(self, inputs):
class TFPad (line 70) | class TFPad(keras.layers.Layer):
method __init__ (line 73) | def __init__(self, pad):
method call (line 85) | def call(self, inputs):
class TFConv (line 90) | class TFConv(keras.layers.Layer):
method __init__ (line 93) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
method call (line 116) | def call(self, inputs):
class TFDWConv (line 121) | class TFDWConv(keras.layers.Layer):
method __init__ (line 124) | def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
method call (line 145) | def call(self, inputs):
class TFDWConvTranspose2d (line 150) | class TFDWConvTranspose2d(keras.layers.Layer):
method __init__ (line 153) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
method call (line 177) | def call(self, inputs):
class TFFocus (line 182) | class TFFocus(keras.layers.Layer):
method __init__ (line 185) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
method call (line 194) | def call(self, inputs):
class TFBottleneck (line 203) | class TFBottleneck(keras.layers.Layer):
method __init__ (line 206) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):
method call (line 218) | def call(self, inputs):
class TFCrossConv (line 225) | class TFCrossConv(keras.layers.Layer):
method __init__ (line 228) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
method call (line 236) | def call(self, inputs):
class TFConv2d (line 241) | class TFConv2d(keras.layers.Layer):
method __init__ (line 244) | def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
method call (line 260) | def call(self, inputs):
class TFBottleneckCSP (line 265) | class TFBottleneckCSP(keras.layers.Layer):
method __init__ (line 268) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
method call (line 284) | def call(self, inputs):
class TFC3 (line 293) | class TFC3(keras.layers.Layer):
method __init__ (line 296) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
method call (line 308) | def call(self, inputs):
class TFC3x (line 316) | class TFC3x(keras.layers.Layer):
method __init__ (line 319) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
method call (line 333) | def call(self, inputs):
class TFSPP (line 338) | class TFSPP(keras.layers.Layer):
method __init__ (line 341) | def __init__(self, c1, c2, k=(5, 9, 13), w=None):
method call (line 349) | def call(self, inputs):
class TFSPPF (line 355) | class TFSPPF(keras.layers.Layer):
method __init__ (line 358) | def __init__(self, c1, c2, k=5, w=None):
method call (line 366) | def call(self, inputs):
class TFDetect (line 376) | class TFDetect(keras.layers.Layer):
method __init__ (line 379) | def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):
method call (line 399) | def call(self, inputs):
method _make_grid (line 424) | def _make_grid(nx=20, ny=20):
class TFSegment (line 431) | class TFSegment(TFDetect):
method __init__ (line 434) | def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(64...
method call (line 446) | def call(self, x):
class TFProto (line 455) | class TFProto(keras.layers.Layer):
method __init__ (line 458) | def __init__(self, c1, c_=256, c2=32, w=None):
method call (line 466) | def call(self, inputs):
class TFUpsample (line 471) | class TFUpsample(keras.layers.Layer):
method __init__ (line 474) | def __init__(self, size, scale_factor, mode, w=None):
method call (line 488) | def call(self, inputs):
class TFConcat (line 493) | class TFConcat(keras.layers.Layer):
method __init__ (line 496) | def __init__(self, dimension=1, w=None):
method call (line 502) | def call(self, inputs):
function parse_model (line 507) | def parse_model(d, ch, model, imgsz):
class TFModel (line 587) | class TFModel:
method __init__ (line 590) | def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgs...
method predict (line 608) | def predict(
method _xywh2xyxy (line 650) | def _xywh2xyxy(xywh):
class AgnosticNMS (line 656) | class AgnosticNMS(keras.layers.Layer):
method call (line 659) | def call(self, input, topk_all, iou_thres, conf_thres):
method _nms (line 669) | def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):
function activations (line 704) | def activations(act=nn.SiLU):
function representative_dataset_gen (line 716) | def representative_dataset_gen(dataset, ncalib=100):
function run (line 727) | def run(
function parse_opt (line 753) | def parse_opt():
function main (line 768) | def main(opt):
FILE: models/yolo.py
class Detect (line 72) | class Detect(nn.Module):
method __init__ (line 79) | def __init__(self, nc=80, anchors=(), ch=(), inplace=True):
method forward (line 92) | def forward(self, x):
method _make_grid (line 118) | def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch...
class Segment (line 130) | class Segment(Detect):
method __init__ (line 133) | def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=T...
method forward (line 143) | def forward(self, x):
class BaseModel (line 152) | class BaseModel(nn.Module):
method forward (line 155) | def forward(self, x, profile=False, visualize=False):
method _forward_once (line 161) | def _forward_once(self, x, profile=False, visualize=False):
method _profile_one_layer (line 175) | def _profile_one_layer(self, m, x, dt):
method fuse (line 189) | def fuse(self):
method info (line 200) | def info(self, verbose=False, img_size=640):
method _apply (line 204) | def _apply(self, fn):
class DetectionModel (line 218) | class DetectionModel(BaseModel):
method __init__ (line 221) | def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None):
method forward (line 266) | def forward(self, x, augment=False, profile=False, visualize=False):
method _forward_augment (line 272) | def _forward_augment(self, x):
method _descale_pred (line 287) | def _descale_pred(self, p, flips, scale, img_size):
method _clip_augmented (line 304) | def _clip_augmented(self, y):
method _initialize_biases (line 317) | def _initialize_biases(self, cf=None):
class SegmentationModel (line 336) | class SegmentationModel(DetectionModel):
method __init__ (line 339) | def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None):
class ClassificationModel (line 346) | class ClassificationModel(BaseModel):
method __init__ (line 349) | def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):
method _from_detection_model (line 356) | def _from_detection_model(self, model, nc=1000, cutoff=10):
method _from_yaml (line 373) | def _from_yaml(self, cfg):
function parse_model (line 378) | def parse_model(d, ch):
FILE: segment/predict.py
function run (line 70) | def run(
function parse_opt (line 260) | def parse_opt():
function main (line 299) | def main(opt):
FILE: segment/train.py
function train (line 100) | def train(hyp, opt, device, callbacks):
function parse_opt (line 544) | def parse_opt(known=False):
function main (line 592) | def main(opt, callbacks=Callbacks()):
function run (line 749) | def run(**kwargs):
FILE: segment/val.py
function save_one_txt (line 73) | def save_one_txt(predn, save_conf, shape, file):
function save_one_json (line 85) | def save_one_json(predn, jdict, path, class_map, pred_masks):
function process_batch (line 116) | def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=No...
function run (line 155) | def run(
function parse_opt (line 447) | def parse_opt():
function main (line 482) | def main(opt):
FILE: train.py
function train (line 105) | def train(hyp, opt, device, callbacks):
function parse_opt (line 546) | def parse_opt(known=False):
function main (line 620) | def main(opt, callbacks=Callbacks()):
function generate_individual (line 889) | def generate_individual(input_ranges, individual_length):
function run (line 919) | def run(**kwargs):
FILE: utils/__init__.py
function emojis (line 9) | def emojis(str=""):
class TryExcept (line 14) | class TryExcept(contextlib.ContextDecorator):
method __init__ (line 17) | def __init__(self, msg=""):
method __enter__ (line 21) | def __enter__(self):
method __exit__ (line 25) | def __exit__(self, exc_type, value, traceback):
function threaded (line 34) | def threaded(func):
function join_threads (line 46) | def join_threads(verbose=False):
function notebook_init (line 59) | def notebook_init(verbose=True):
FILE: utils/activations.py
class SiLU (line 9) | class SiLU(nn.Module):
method forward (line 13) | def forward(x):
class Hardswish (line 21) | class Hardswish(nn.Module):
method forward (line 25) | def forward(x):
class Mish (line 33) | class Mish(nn.Module):
method forward (line 37) | def forward(x):
class MemoryEfficientMish (line 42) | class MemoryEfficientMish(nn.Module):
class F (line 45) | class F(torch.autograd.Function):
method forward (line 49) | def forward(ctx, x):
method backward (line 55) | def backward(ctx, grad_output):
method forward (line 62) | def forward(self, x):
class FReLU (line 67) | class FReLU(nn.Module):
method __init__ (line 70) | def __init__(self, c1, k=3): # ch_in, kernel
method forward (line 76) | def forward(self, x):
class AconC (line 84) | class AconC(nn.Module):
method __init__ (line 91) | def __init__(self, c1):
method forward (line 98) | def forward(self, x):
class MetaAconC (line 104) | class MetaAconC(nn.Module):
method __init__ (line 111) | def __init__(self, c1, k=1, s=1, r=16):
method forward (line 122) | def forward(self, x):
FILE: utils/augmentations.py
class Albumentations (line 20) | class Albumentations:
method __init__ (line 23) | def __init__(self, size=640):
method __call__ (line 50) | def __call__(self, im, labels, p=1.0):
function normalize (line 58) | def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
function denormalize (line 66) | def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
function augment_hsv (line 73) | def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
function hist_equalize (line 89) | def hist_equalize(im, clahe=True, bgr=False):
function replicate (line 100) | def replicate(im, labels):
function letterbox (line 120) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
function random_perspective (line 153) | def random_perspective(
function copy_paste (line 235) | def copy_paste(im, labels, segments, p=0.5):
function cutout (line 260) | def cutout(im, labels, p=0.5):
function mixup (line 290) | def mixup(im, labels, im2, labels2):
function box_candidates (line 301) | def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1...
function classify_albumentations (line 313) | def classify_albumentations(
function classify_transforms (line 358) | def classify_transforms(size=224):
class LetterBox (line 365) | class LetterBox:
method __init__ (line 368) | def __init__(self, size=(640, 640), auto=False, stride=32):
method __call__ (line 377) | def __call__(self, im):
class CenterCrop (line 392) | class CenterCrop:
method __init__ (line 395) | def __init__(self, size=640):
method __call__ (line 400) | def __call__(self, im):
class ToTensor (line 411) | class ToTensor:
method __init__ (line 414) | def __init__(self, half=False):
method __call__ (line 419) | def __call__(self, im):
FILE: utils/autoanchor.py
function check_anchor_order (line 17) | def check_anchor_order(m):
function check_anchors (line 28) | def check_anchors(dataset, model, thr=4.0, imgsz=640):
function kmean_anchors (line 66) | def kmean_anchors(dataset="./data/coco128.yaml", n=9, img_size=640, thr=...
FILE: utils/autobatch.py
function check_train_batch_size (line 13) | def check_train_batch_size(model, imgsz=640, amp=True):
function autobatch (line 19) | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
FILE: utils/callbacks.py
class Callbacks (line 7) | class Callbacks:
method __init__ (line 10) | def __init__(self):
method register_action (line 35) | def register_action(self, hook, name="", callback=None):
method get_registered_actions (line 47) | def get_registered_actions(self, hook=None):
method run (line 55) | def run(self, hook, *args, thread=False, **kwargs):
FILE: utils/dataloaders.py
function get_hash (line 75) | def get_hash(paths):
function exif_size (line 83) | def exif_size(img):
function exif_transpose (line 93) | def exif_transpose(image):
function seed_worker (line 120) | def seed_worker(worker_id):
class SmartDistributedSampler (line 132) | class SmartDistributedSampler(distributed.DistributedSampler):
method __iter__ (line 135) | def __iter__(self):
function create_dataloader (line 159) | def create_dataloader(
class InfiniteDataLoader (line 220) | class InfiniteDataLoader(dataloader.DataLoader):
method __init__ (line 226) | def __init__(self, *args, **kwargs):
method __len__ (line 234) | def __len__(self):
method __iter__ (line 238) | def __iter__(self):
class _RepeatSampler (line 244) | class _RepeatSampler:
method __init__ (line 251) | def __init__(self, sampler):
method __iter__ (line 255) | def __iter__(self):
class LoadScreenshots (line 261) | class LoadScreenshots:
method __init__ (line 264) | def __init__(self, source, img_size=640, stride=32, auto=True, transfo...
method __iter__ (line 297) | def __iter__(self):
method __next__ (line 301) | def __next__(self):
class LoadImages (line 318) | class LoadImages:
method __init__ (line 321) | def __init__(self, path, img_size=640, stride=32, auto=True, transform...
method __iter__ (line 358) | def __iter__(self):
method __next__ (line 363) | def __next__(self):
method _new_video (line 404) | def _new_video(self, path):
method _cv2_rotate (line 412) | def _cv2_rotate(self, im):
method __len__ (line 422) | def __len__(self):
class LoadStreams (line 427) | class LoadStreams:
method __init__ (line 430) | def __init__(self, sources="file.streams", img_size=640, stride=32, au...
method update (line 478) | def update(self, i, cap, stream):
method __iter__ (line 494) | def __iter__(self):
method __next__ (line 499) | def __next__(self):
method __len__ (line 518) | def __len__(self):
function img2label_paths (line 523) | def img2label_paths(img_paths):
class LoadImagesAndLabels (line 531) | class LoadImagesAndLabels(Dataset):
method __init__ (line 537) | def __init__(
method check_cache_ram (line 697) | def check_cache_ram(self, safety_margin=0.1, prefix=""):
method cache_labels (line 716) | def cache_labels(self, path=Path("./labels.cache"), prefix=""):
method __len__ (line 756) | def __len__(self):
method __getitem__ (line 766) | def __getitem__(self, index):
method load_image (line 842) | def load_image(self, i):
method cache_images_to_disk (line 866) | def cache_images_to_disk(self, i):
method load_mosaic (line 872) | def load_mosaic(self, index):
method load_mosaic9 (line 932) | def load_mosaic9(self, index):
method collate_fn (line 1014) | def collate_fn(batch):
method collate_fn4 (line 1022) | def collate_fn4(batch):
function flatten_recursive (line 1051) | def flatten_recursive(path=DATASETS_DIR / "coco128"):
function extract_boxes (line 1061) | def extract_boxes(path=DATASETS_DIR / "coco128"):
function autosplit (line 1099) | def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0...
function verify_image_label (line 1126) | def verify_image_label(args):
class HUBDatasetStats (line 1177) | class HUBDatasetStats:
method __init__ (line 1192) | def __init__(self, path="coco128.yaml", autodownload=False):
method _find_yaml (line 1213) | def _find_yaml(dir):
method _unzip (line 1225) | def _unzip(self, path):
method _hub_ops (line 1235) | def _hub_ops(self, f, max_dim=1920):
method get_json (line 1253) | def get_json(self, save=False, verbose=False):
method process_images (line 1291) | def process_images(self):
class ClassificationDataset (line 1305) | class ClassificationDataset(torchvision.datasets.ImageFolder):
method __init__ (line 1314) | def __init__(self, root, augment, imgsz, cache=False):
method __getitem__ (line 1325) | def __getitem__(self, i):
function create_classification_dataloader (line 1343) | def create_classification_dataloader(
FILE: utils/downloads.py
function is_url (line 13) | def is_url(url, check=True):
function gsutil_getsize (line 24) | def gsutil_getsize(url=""):
function url_getsize (line 33) | def url_getsize(url="https://ultralytics.com/images/bus.jpg"):
function curl_download (line 39) | def curl_download(url, filename, *, silent: bool = False) -> bool:
function safe_download (line 59) | def safe_download(file, url, url2=None, min_bytes=1e0, error_msg=""):
function attempt_download (line 86) | def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"):
FILE: utils/flask_rest_api/restapi.py
function predict (line 18) | def predict(model):
FILE: utils/general.py
function is_ascii (line 77) | def is_ascii(s=""):
function is_chinese (line 83) | def is_chinese(s="人工智能"):
function is_colab (line 88) | def is_colab():
function is_jupyter (line 93) | def is_jupyter():
function is_kaggle (line 107) | def is_kaggle():
function is_docker (line 112) | def is_docker() -> bool:
function is_writeable (line 123) | def is_writeable(dir, test=False):
function set_logging (line 140) | def set_logging(name=LOGGING_NAME, verbose=True):
function user_config_dir (line 174) | def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"):
class Profile (line 191) | class Profile(contextlib.ContextDecorator):
method __init__ (line 194) | def __init__(self, t=0.0, device: torch.device = None):
method __enter__ (line 200) | def __enter__(self):
method __exit__ (line 205) | def __exit__(self, type, value, traceback):
method time (line 210) | def time(self):
class Timeout (line 217) | class Timeout(contextlib.ContextDecorator):
method __init__ (line 220) | def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors...
method _timeout_handler (line 226) | def _timeout_handler(self, signum, frame):
method __enter__ (line 230) | def __enter__(self):
method __exit__ (line 236) | def __exit__(self, exc_type, exc_val, exc_tb):
class WorkingDirectory (line 244) | class WorkingDirectory(contextlib.ContextDecorator):
method __init__ (line 247) | def __init__(self, new_dir):
method __enter__ (line 252) | def __enter__(self):
method __exit__ (line 256) | def __exit__(self, exc_type, exc_val, exc_tb):
function methods (line 261) | def methods(instance):
function print_args (line 266) | def print_args(args: dict | None = None, show_file=True, show_func=False):
function init_seeds (line 281) | def init_seeds(seed=0, deterministic=False):
function intersect_dicts (line 299) | def intersect_dicts(da, db, exclude=()):
function get_default_args (line 306) | def get_default_args(func):
function get_latest_run (line 312) | def get_latest_run(search_dir="."):
function file_age (line 318) | def file_age(path=__file__):
function file_date (line 324) | def file_date(path=__file__):
function file_size (line 330) | def file_size(path):
function check_online (line 342) | def check_online():
function git_describe (line 359) | def git_describe(path=ROOT):
function check_git_status (line 373) | def check_git_status(repo="ultralytics/yolov5", branch="master"):
function check_git_info (line 402) | def check_git_info(path="."):
function check_python (line 420) | def check_python(minimum="3.8.0"):
function check_version (line 425) | def check_version(current="0.0.0", minimum="0.0.0", name="version ", pin...
function check_img_size (line 437) | def check_img_size(imgsz, s=32, floor=0):
function check_imshow (line 449) | def check_imshow(warn=False):
function check_suffix (line 465) | def check_suffix(file="yolov5s.pt", suffix=(".pt",), msg=""):
function check_yaml (line 476) | def check_yaml(file, suffix=(".yaml", ".yml")):
function check_file (line 481) | def check_file(file, suffix=""):
function check_font (line 511) | def check_font(font=FONT, progress=False):
function check_dataset (line 521) | def check_dataset(data, autodownload=True):
function check_amp (line 586) | def check_amp(model):
function yaml_load (line 614) | def yaml_load(file="data.yaml"):
function yaml_save (line 620) | def yaml_save(file="data.yaml", data=None):
function unzip_file (line 630) | def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX")):
function url2file (line 642) | def url2file(url):
function download (line 651) | def download(url, dir=".", unzip=True, delete=True, curl=False, threads=...
function make_divisible (line 698) | def make_divisible(x, divisor):
function clean_str (line 705) | def clean_str(s):
function one_cycle (line 712) | def one_cycle(y1=0.0, y2=1.0, steps=100):
function colorstr (line 720) | def colorstr(*input):
function labels_to_class_weights (line 750) | def labels_to_class_weights(labels, nc=80):
function labels_to_image_weights (line 769) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
function coco80_to_coco91_class (line 776) | def coco80_to_coco91_class():
function xyxy2xywh (line 869) | def xyxy2xywh(x):
function xywh2xyxy (line 879) | def xywh2xyxy(x):
function xywhn2xyxy (line 889) | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
function xyxy2xywhn (line 899) | def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
function xyn2xy (line 911) | def xyn2xy(x, w=640, h=640, padw=0, padh=0):
function segment2box (line 919) | def segment2box(segment, width=640, height=640):
function segments2boxes (line 930) | def segments2boxes(segments):
function resample_segments (line 939) | def resample_segments(segments, n=1000):
function scale_boxes (line 949) | def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
function scale_segments (line 965) | def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, nor...
function clip_boxes (line 984) | def clip_boxes(boxes, shape):
function clip_segments (line 996) | def clip_segments(segments, shape):
function non_max_suppression (line 1006) | def non_max_suppression(
function strip_optimizer (line 1118) | def strip_optimizer(f="best.pt", s=""):
function print_mutation (line 1138) | def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr...
function apply_classifier (line 1193) | def apply_classifier(x, model, img, im0):
function increment_path (line 1228) | def increment_path(path, exist_ok=False, sep="", mkdir=False):
function imread (line 1262) | def imread(filename, flags=cv2.IMREAD_COLOR):
function imwrite (line 1269) | def imwrite(filename, img):
function imshow (line 1278) | def imshow(path, im):
FILE: utils/loggers/__init__.py
function SummaryWriter (line 25) | def SummaryWriter(*args):
function _json_default (line 64) | def _json_default(value):
class Loggers (line 77) | class Loggers:
method __init__ (line 80) | def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, lo...
method remote_dataset (line 158) | def remote_dataset(self):
method on_train_start (line 170) | def on_train_start(self):
method on_pretrain_routine_start (line 175) | def on_pretrain_routine_start(self):
method on_pretrain_routine_end (line 180) | def on_pretrain_routine_end(self, labels, names):
method on_train_batch_end (line 193) | def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
method on_train_epoch_end (line 214) | def on_train_epoch_end(self, epoch):
method on_val_start (line 222) | def on_val_start(self):
method on_val_image_end (line 227) | def on_val_image_end(self, pred, predn, path, names, im):
method on_val_batch_end (line 234) | def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
method on_val_end (line 239) | def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusi...
method on_fit_epoch_end (line 251) | def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
method on_model_save (line 290) | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
method on_train_end (line 303) | def on_train_end(self, last, best, epoch, results):
method on_params_update (line 339) | def on_params_update(self, params: dict):
class GenericLogger (line 349) | class GenericLogger:
method __init__ (line 359) | def __init__(self, opt, console_logger, include=("tb", "wandb", "clear...
method log_metrics (line 394) | def log_metrics(self, metrics, epoch):
method log_images (line 413) | def log_images(self, files, name="Images", epoch=0):
method log_graph (line 431) | def log_graph(self, model, imgsz=(640, 640)):
method log_model (line 436) | def log_model(self, model_path, epoch=0, metadata=None):
method update_params (line 448) | def update_params(self, params):
function log_tensorboard_graph (line 456) | def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
function web_project_name (line 469) | def web_project_name(project):
FILE: utils/loggers/clearml/clearml_utils.py
function construct_dataset (line 23) | def construct_dataset(clearml_info_string):
class ClearmlLogger (line 65) | class ClearmlLogger:
method __init__ (line 75) | def __init__(self, opt, hyp):
method log_scalars (line 127) | def log_scalars(self, metrics, epoch):
method log_model (line 138) | def log_model(self, model_path, model_name, epoch=0):
method log_summary (line 150) | def log_summary(self, metrics):
method log_plot (line 159) | def log_plot(self, title, plot_path):
method log_debug_samples (line 173) | def log_debug_samples(self, files, title="Debug Samples"):
method log_image_with_boxes (line 188) | def log_image_with_boxes(self, image_path, boxes, class_names, image, ...
FILE: utils/loggers/comet/__init__.py
class CometLogger (line 66) | class CometLogger:
method __init__ (line 69) | def __init__(self, opt, hyp, run_id=None, job_type="Training", **exper...
method _get_experiment (line 170) | def _get_experiment(self, mode, experiment_id=None):
method log_metrics (line 203) | def log_metrics(self, log_dict, **kwargs):
method log_parameters (line 207) | def log_parameters(self, log_dict, **kwargs):
method log_asset (line 211) | def log_asset(self, asset_path, **kwargs):
method log_asset_data (line 215) | def log_asset_data(self, asset, **kwargs):
method log_image (line 219) | def log_image(self, img, **kwargs):
method log_model (line 223) | def log_model(self, path, opt, epoch, fitness_score, best_model=False):
method check_dataset (line 247) | def check_dataset(self, data_file):
method log_predictions (line 260) | def log_predictions(self, image, labelsn, path, shape, predn):
method preprocess_prediction (line 301) | def preprocess_prediction(self, image, labels, shape, pred):
method add_assets_to_artifact (line 321) | def add_assets_to_artifact(self, artifact, path, asset_path, split):
method upload_dataset_artifact (line 347) | def upload_dataset_artifact(self):
method download_dataset_artifact (line 372) | def download_dataset_artifact(self, artifact_path):
method update_data_paths (line 392) | def update_data_paths(self, data_dict):
method on_pretrain_routine_end (line 405) | def on_pretrain_routine_end(self, paths):
method on_train_start (line 418) | def on_train_start(self):
method on_train_epoch_start (line 422) | def on_train_epoch_start(self):
method on_train_epoch_end (line 426) | def on_train_epoch_end(self, epoch):
method on_train_batch_start (line 432) | def on_train_batch_start(self):
method on_train_batch_end (line 436) | def on_train_batch_end(self, log_dict, step):
method on_train_end (line 444) | def on_train_end(self, files, save_dir, last, best, epoch, results):
method on_val_start (line 472) | def on_val_start(self):
method on_val_batch_start (line 476) | def on_val_batch_start(self):
method on_val_batch_end (line 480) | def on_val_batch_end(self, batch_i, images, targets, paths, shapes, ou...
method on_val_end (line 499) | def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusi...
method on_fit_epoch_end (line 534) | def on_fit_epoch_end(self, result, epoch):
method on_model_save (line 538) | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
method on_params_update (line 543) | def on_params_update(self, params):
method finish_run (line 547) | def finish_run(self):
FILE: utils/loggers/comet/comet_utils.py
function download_model_checkpoint (line 21) | def download_model_checkpoint(opt, experiment):
function set_opt_parameters (line 69) | def set_opt_parameters(opt, experiment):
function check_comet_weights (line 99) | def check_comet_weights(opt):
function check_comet_resume (line 122) | def check_comet_resume(opt):
FILE: utils/loggers/comet/hpo.py
function get_args (line 29) | def get_args(known=False):
function run (line 90) | def run(parameters, opt):
FILE: utils/loggers/wandb/wandb_utils.py
class WandbLogger (line 33) | class WandbLogger:
method __init__ (line 45) | def __init__(self, opt, run_id=None, job_type="Training"):
method setup_training (line 82) | def setup_training(self, opt):
method log_model (line 113) | def log_model(self, path, opt, epoch, fitness_score, best_model=False):
method val_one_image (line 147) | def val_one_image(self, pred, predn, path, names, im):
method log (line 151) | def log(self, log_dict):
method end_epoch (line 161) | def end_epoch(self):
method finish_run (line 179) | def finish_run(self):
function all_logging_disabled (line 190) | def all_logging_disabled(highest_level=logging.CRITICAL):
FILE: utils/loss.py
function smooth_BCE (line 11) | def smooth_BCE(eps=0.1):
class BCEBlurWithLogitsLoss (line 18) | class BCEBlurWithLogitsLoss(nn.Module):
method __init__ (line 21) | def __init__(self, alpha=0.05):
method forward (line 29) | def forward(self, pred, true):
class FocalLoss (line 42) | class FocalLoss(nn.Module):
method __init__ (line 45) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
method forward (line 56) | def forward(self, pred, true):
class QFocalLoss (line 77) | class QFocalLoss(nn.Module):
method __init__ (line 80) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
method forward (line 89) | def forward(self, pred, true):
class ComputeLoss (line 108) | class ComputeLoss:
method __init__ (line 114) | def __init__(self, model, autobalance=False):
method __call__ (line 141) | def __call__(self, p, targets): # predictions, targets
method build_targets (line 193) | def build_targets(self, p, targets):
FILE: utils/metrics.py
function fitness (line 18) | def fitness(x):
function smooth (line 24) | def smooth(y, f=0.05):
function ap_per_class (line 32) | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir="....
function compute_ap (line 101) | def compute_ap(recall, precision):
class ConfusionMatrix (line 132) | class ConfusionMatrix:
method __init__ (line 135) | def __init__(self, nc, conf=0.25, iou_thres=0.45):
method process_batch (line 142) | def process_batch(self, detections, labels):
method tp_fp (line 190) | def tp_fp(self):
method plot (line 200) | def plot(self, normalize=True, save_dir="", names=()):
method print (line 232) | def print(self):
function bbox_iou (line 238) | def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, ...
function box_iou (line 282) | def box_iou(box1, box2, eps=1e-7):
function bbox_ioa (line 303) | def bbox_ioa(box1, box2, eps=1e-7):
function wh_iou (line 332) | def wh_iou(wh1, wh2, eps=1e-7):
function plot_pr_curve (line 346) | def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()):
function plot_mc_curve (line 371) | def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabe...
FILE: utils/plots.py
class Colors (line 31) | class Colors:
method __init__ (line 34) | def __init__(self):
method __call__ (line 65) | def __call__(self, i, bgr=False):
method hex2rgb (line 71) | def hex2rgb(h):
function feature_visualization (line 79) | def feature_visualization(x, module_type, stage, n=32, save_dir=Path("ru...
function hist2d (line 110) | def hist2d(x, y, n=100):
function butter_lowpass_filtfilt (line 122) | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
function output_to_target (line 139) | def output_to_target(output, max_det=300):
function plot_images (line 152) | def plot_images(images, targets, paths=None, fname="images.jpg", names=N...
function plot_lr_scheduler (line 216) | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""):
function plot_val_txt (line 233) | def plot_val_txt():
function plot_targets_txt (line 254) | def plot_targets_txt():
function plot_val_study (line 270) | def plot_val_study(file="", dir="", x=None):
function plot_labels (line 325) | def plot_labels(labels, names=(), save_dir=Path("")):
function imshow_cls (line 370) | def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=...
function plot_evolve (line 400) | def plot_evolve(evolve_csv="path/to/evolve.csv"):
function plot_results (line 430) | def plot_results(file="path/to/results.csv", dir=""):
function profile_idetection (line 460) | def profile_idetection(start=0, stop=0, labels=(), save_dir=""):
function save_one_box (line 494) | def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, squar...
FILE: utils/segment/augmentations.py
function mixup (line 14) | def mixup(im, labels, segments, im2, labels2, segments2):
function random_perspective (line 26) | def random_perspective(
FILE: utils/segment/dataloaders.py
function create_dataloader (line 21) | def create_dataloader(
class LoadImagesAndLabelsAndMasks (line 86) | class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/...
method __init__ (line 89) | def __init__(
method __getitem__ (line 130) | def __getitem__(self, index):
method load_mosaic (line 237) | def load_mosaic(self, index):
method collate_fn (line 298) | def collate_fn(batch):
function polygon2mask (line 307) | def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
function polygons2masks (line 326) | def polygons2masks(img_size, polygons, color, downsample_ratio=1):
function polygons2masks_overlap (line 340) | def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
FILE: utils/segment/general.py
function crop_mask (line 9) | def crop_mask(masks, boxes):
function process_mask_upsample (line 24) | def process_mask_upsample(protos, masks_in, bboxes, shape):
function process_mask (line 43) | def process_mask(protos, masks_in, bboxes, shape, upsample=False):
function process_mask_native (line 71) | def process_mask_native(protos, masks_in, bboxes, shape):
function scale_image (line 96) | def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
function mask_iou (line 120) | def mask_iou(mask1, mask2, eps=1e-7):
function masks_iou (line 137) | def masks_iou(mask1, mask2, eps=1e-7):
function masks2segments (line 154) | def masks2segments(masks, strategy="largest"):
FILE: utils/segment/loss.py
class ComputeLoss (line 14) | class ComputeLoss:
method __init__ (line 17) | def __init__(self, model, autobalance=False, overlap=False):
method __call__ (line 47) | def __call__(self, preds, targets, masks): # predictions, targets, model
method single_mask_loss (line 115) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
method build_targets (line 121) | def build_targets(self, p, targets):
FILE: utils/segment/metrics.py
function fitness (line 9) | def fitness(x):
function ap_per_class_box_and_mask (line 15) | def ap_per_class_box_and_mask(
class Metric (line 56) | class Metric:
method __init__ (line 59) | def __init__(self) -> None:
method ap50 (line 70) | def ap50(self):
method ap (line 79) | def ap(self):
method mp (line 88) | def mp(self):
method mr (line 97) | def mr(self):
method map50 (line 106) | def map50(self):
method map (line 115) | def map(self):
method mean_results (line 123) | def mean_results(self):
method class_result (line 127) | def class_result(self, i):
method get_maps (line 131) | def get_maps(self, nc):
method update (line 138) | def update(self, results):
class Metrics (line 151) | class Metrics:
method __init__ (line 154) | def __init__(self) -> None:
method update (line 159) | def update(self, results):
method mean_results (line 167) | def mean_results(self):
method class_result (line 171) | def class_result(self, i):
method get_maps (line 175) | def get_maps(self, nc):
method ap_class_index (line 182) | def ap_class_index(self):
FILE: utils/segment/plots.py
function plot_images_and_masks (line 19) | def plot_images_and_masks(images, targets, masks, paths=None, fname="ima...
function plot_results_with_masks (line 115) | def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):
FILE: utils/torch_utils.py
function smart_inference_mode (line 36) | def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9...
function smartCrossEntropyLoss (line 46) | def smartCrossEntropyLoss(label_smoothing=0.0):
function smart_DDP (line 55) | def smart_DDP(model):
function reshape_classifier_output (line 67) | def reshape_classifier_output(model, n=1000):
function torch_distributed_zero_first (line 91) | def torch_distributed_zero_first(local_rank: int):
function device_count (line 102) | def device_count():
function select_device (line 112) | def select_device(device="", batch_size=0, newline=True):
function time_sync (line 149) | def time_sync():
function profile (line 156) | def profile(input, ops, n=10, device=None):
function is_parallel (line 210) | def is_parallel(model):
function de_parallel (line 215) | def de_parallel(model):
function initialize_weights (line 220) | def initialize_weights(model):
function find_modules (line 235) | def find_modules(model, mclass=nn.Conv2d):
function sparsity (line 240) | def sparsity(model):
function prune (line 249) | def prune(model, amount=0.3):
function fuse_conv_and_bn (line 260) | def fuse_conv_and_bn(conv, bn):
function model_info (line 293) | def model_info(model, verbose=False, imgsz=640):
function scale_img (line 323) | def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,...
function copy_attr (line 337) | def copy_attr(a, b, include=(), exclude=()):
function smart_optimizer (line 346) | def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e...
function smart_hub_load (line 382) | def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs):
function smart_resume (line 394) | def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs...
class EarlyStopping (line 416) | class EarlyStopping:
method __init__ (line 419) | def __init__(self, patience=30):
method __call__ (line 426) | def __call__(self, epoch, fitness):
class ModelEMA (line 444) | class ModelEMA:
method __init__ (line 450) | def __init__(self, model, decay=0.9999, tau=2000, updates=0):
method update (line 460) | def update(self, model):
method update_attr (line 472) | def update_attr(self, model, include=(), exclude=("process_group", "re...
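
Example — smart_optimizer, ModelEMA, and EarlyStopping are typically wired into a training loop together: the EMA shadows the weights after every optimizer step, and the stopper halts after `patience` epochs without improvement. A hedged sketch; the model, loop body, and fitness value are placeholders:

import torch.nn as nn
from utils.torch_utils import EarlyStopping, ModelEMA, smart_optimizer

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.SiLU())
optimizer = smart_optimizer(model, name="SGD", lr=0.01, momentum=0.937, decay=5e-4)
ema = ModelEMA(model)                 # exponential moving average of model weights
stopper = EarlyStopping(patience=30)

for epoch in range(300):
    # ... forward / backward / optimizer.step() would happen here ...
    ema.update(model)                 # refresh EMA after each step
    fi = 0.0                          # placeholder: fitness from validation
    if stopper(epoch=epoch, fitness=fi):
        break                         # no improvement for `patience` epochs
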
FILE: utils/triton.py
class TritonRemoteModel (line 11) | class TritonRemoteModel:
method __init__ (line 18) | def __init__(self, url: str):
method runtime (line 50) | def runtime(self):
method __call__ (line 54) | def __call__(self, *args, **kwargs) -> torch.Tensor | tuple[torch.Tens...
method _create_inputs (line 68) | def _create_inputs(self, *args, **kwargs):
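
Example — TritonRemoteModel wraps a model served by NVIDIA Triton Inference Server behind a tensor-in, tensor-out callable. A hedged usage sketch; the URL and input shape are placeholders, and a running Triton server is assumed:

import torch
from utils.triton import TritonRemoteModel

model = TritonRemoteModel("http://localhost:8000")  # HTTP endpoint of the server
x = torch.zeros(1, 3, 640, 640)                     # dummy BCHW image batch
y = model(x)                                        # torch.Tensor or tuple of tensors
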
FILE: val.py
function save_one_txt (line 64) | def save_one_txt(predn, save_conf, shape, file):
function save_one_json (line 96) | def save_one_json(predn, jdict, path, class_map):
function process_batch (line 142) | def process_batch(detections, labels, iouv):
function run (line 185) | def run(
function parse_opt (line 467) | def parse_opt():
function main (line 545) | def main(opt):
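
Example — run() in val.py is callable from Python as well as from the CLI. A minimal sketch; the dataset and weights values are examples, and the keyword defaults mirror the parse_opt flags:

import val

results = val.run(
    data="data/coco128.yaml",   # dataset YAML, example value
    weights="yolov5s.pt",       # checkpoint to validate, example value
    imgsz=640,
    conf_thres=0.001,           # low confidence threshold for mAP computation
    iou_thres=0.6,              # NMS IoU threshold
)
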
Condensed preview — 143 files, each entry showing path, character count, and a content snippet; the full structured content runs 1,387K chars.
[
{
"path": ".dockerignore",
"chars": 3701,
"preview": "# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------"
},
{
"path": ".gitattributes",
"chars": 75,
"preview": "# this drop notebooks from GitHub language stats\n*.ipynb linguist-vendored\n"
},
{
"path": ".github/ISSUE_TEMPLATE/bug-report.yml",
"chars": 3032,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: 🐛 Bug Report\ndescription: \"Problems with Ultra"
},
{
"path": ".github/ISSUE_TEMPLATE/config.yml",
"chars": 588,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nblank_issues_enabled: true\ncontact_links:\n - name: "
},
{
"path": ".github/ISSUE_TEMPLATE/feature-request.yml",
"chars": 1866,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: 🚀 Feature Request\ndescription: \"Suggest an Ult"
},
{
"path": ".github/ISSUE_TEMPLATE/question.yml",
"chars": 1341,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: ❓ Question\ndescription: \"Ask an Ultralytics YO"
},
{
"path": ".github/dependabot.yml",
"chars": 601,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Dependabot for package version updates\n# https://d"
},
{
"path": ".github/workflows/ci-testing.yml",
"chars": 6937,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# YOLOv5 Continuous Integration (CI) GitHub Actions "
},
{
"path": ".github/workflows/cla.yml",
"chars": 1629,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Ultralytics Contributor License Agreement (CLA) ac"
},
{
"path": ".github/workflows/docker.yml",
"chars": 1754,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest images on DockerH"
},
{
"path": ".github/workflows/format.yml",
"chars": 4987,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Ultralytics Actions https://github.com/ultralytics"
},
{
"path": ".github/workflows/links.yml",
"chars": 3106,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Continuous Integration (CI) GitHub Actions tests b"
},
{
"path": ".github/workflows/merge-main-into-prs.yml",
"chars": 3161,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Automatically merges repository 'main' branch into"
},
{
"path": ".github/workflows/stale.yml",
"chars": 2453,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nname: Close stale issues\non:\n schedule:\n - cron:"
},
{
"path": ".gitignore",
"chars": 4010,
"preview": "# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------"
},
{
"path": "CITATION.cff",
"chars": 393,
"preview": "cff-version: 1.2.0\npreferred-citation:\n type: software\n message: If you use YOLOv5, please cite it as below.\n authors"
},
{
"path": "CONTRIBUTING.md",
"chars": 5357,
"preview": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralyt"
},
{
"path": "LICENSE",
"chars": 34523,
"preview": " GNU AFFERO GENERAL PUBLIC LICENSE\n Version 3, 19 November 2007\n\n Copyright (C)"
},
{
"path": "README.md",
"chars": 49901,
"preview": "<div align=\"center\">\n <p>\n <a href=\"https://platform.ultralytics.com/?utm_source=github&utm_medium=referral&utm_camp"
},
{
"path": "README.zh-CN.md",
"chars": 42754,
"preview": "<div align=\"center\">\n <p>\n <a href=\"https://platform.ultralytics.com/?utm_source=github&utm_medium=referral&utm_camp"
},
{
"path": "benchmarks.py",
"chars": 14123,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 benchmarks on all supported export for"
},
{
"path": "classify/predict.py",
"chars": 12119,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 classification inference on images, vi"
},
{
"path": "classify/train.py",
"chars": 16498,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTrain a YOLOv5 classifier model on a classificati"
},
{
"path": "classify/tutorial.ipynb",
"chars": 97502,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\n \"id\": \"t6MPjfT5NrKQ\"\n },\n \"source\": [\n \"<div a"
},
{
"path": "classify/val.py",
"chars": 8226,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nValidate a trained YOLOv5 classification model on"
},
{
"path": "data/Argoverse.yaml",
"chars": 2729,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Argoverse-HD dataset (ring-front-center camera) ht"
},
{
"path": "data/GlobalWheat2020.yaml",
"chars": 1886,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Global Wheat 2020 dataset http://www.global-wheat."
},
{
"path": "data/ImageNet.yaml",
"chars": 18866,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/inde"
},
{
"path": "data/ImageNet10.yaml",
"chars": 933,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/inde"
},
{
"path": "data/ImageNet100.yaml",
"chars": 2650,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/inde"
},
{
"path": "data/ImageNet1000.yaml",
"chars": 18868,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# ImageNet-1k dataset https://www.image-net.org/inde"
},
{
"path": "data/Objects365.yaml",
"chars": 9200,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Objects365 dataset https://www.objects365.org/ by "
},
{
"path": "data/SKU-110K.yaml",
"chars": 2337,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# SKU-110K retail items dataset https://github.com/e"
},
{
"path": "data/VOC.yaml",
"chars": 3495,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# PASCAL VOC dataset http://host.robots.ox.ac.uk/pas"
},
{
"path": "data/VisDrone.yaml",
"chars": 2975,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# VisDrone2019-DET dataset https://github.com/VisDro"
},
{
"path": "data/coco.yaml",
"chars": 2493,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# COCO 2017 dataset http://cocodataset.org by Micros"
},
{
"path": "data/coco128-seg.yaml",
"chars": 1905,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# COCO128-seg dataset https://www.kaggle.com/dataset"
},
{
"path": "data/coco128.yaml",
"chars": 1889,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# COCO128 dataset https://www.kaggle.com/datasets/ul"
},
{
"path": "data/hyps/hyp.Objects365.yaml",
"chars": 695,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for Objects365 training\n# python t"
},
{
"path": "data/hyps/hyp.VOC.yaml",
"chars": 1178,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for VOC training\n# python train.py"
},
{
"path": "data/hyps/hyp.no-augmentation.yaml",
"chars": 1677,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters when using Albumentations framewor"
},
{
"path": "data/hyps/hyp.scratch-high.yaml",
"chars": 1677,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for high-augmentation COCO trainin"
},
{
"path": "data/hyps/hyp.scratch-low.yaml",
"chars": 1685,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for low-augmentation COCO training"
},
{
"path": "data/hyps/hyp.scratch-med.yaml",
"chars": 1679,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Hyperparameters for medium-augmentation COCO train"
},
{
"path": "data/scripts/download_weights.sh",
"chars": 637,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download latest models from https://gi"
},
{
"path": "data/scripts/get_coco.sh",
"chars": 1576,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download COCO 2017 dataset http://coco"
},
{
"path": "data/scripts/get_coco128.sh",
"chars": 620,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download COCO128 dataset https://www.k"
},
{
"path": "data/scripts/get_imagenet.sh",
"chars": 1677,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset h"
},
{
"path": "data/scripts/get_imagenet10.sh",
"chars": 739,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset h"
},
{
"path": "data/scripts/get_imagenet100.sh",
"chars": 743,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset h"
},
{
"path": "data/scripts/get_imagenet1000.sh",
"chars": 747,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Download ILSVRC2012 ImageNet dataset h"
},
{
"path": "data/xView.yaml",
"chars": 5169,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# DIUx xView 2018 Challenge https://challenge.xviewd"
},
{
"path": "detect.py",
"chars": 23757,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 detection inference on images, videos,"
},
{
"path": "export.py",
"chars": 68500,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nExport a YOLOv5 PyTorch model to other formats. T"
},
{
"path": "hubconf.py",
"chars": 24040,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nPyTorch Hub models https://pytorch.org/hub/ultral"
},
{
"path": "models/__init__.py",
"chars": 67,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
},
{
"path": "models/common.py",
"chars": 52642,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Common modules.\"\"\"\n\nimport ast\nimport contextlib\ni"
},
{
"path": "models/experimental.py",
"chars": 5220,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Experimental modules.\"\"\"\n\nimport math\n\nimport nump"
},
{
"path": "models/hub/anchors.yaml",
"chars": 3360,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Default anchors for COCO data\n\n# P5 --------------"
},
{
"path": "models/hub/yolov3-spp.yaml",
"chars": 1612,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov3-tiny.yaml",
"chars": 1269,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov3.yaml",
"chars": 1603,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-bifpn.yaml",
"chars": 1463,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-fpn.yaml",
"chars": 1252,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-p2.yaml",
"chars": 1723,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-p34.yaml",
"chars": 1263,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-p6.yaml",
"chars": 1778,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-p7.yaml",
"chars": 2163,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5-panet.yaml",
"chars": 1447,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5l6.yaml",
"chars": 1858,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5m6.yaml",
"chars": 1860,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5n6.yaml",
"chars": 1860,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5s-LeakyReLU.yaml",
"chars": 1536,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\nactivation: "
},
{
"path": "models/hub/yolov5s-ghost.yaml",
"chars": 1523,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5s-transformer.yaml",
"chars": 1480,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5s6.yaml",
"chars": 1860,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/hub/yolov5x6.yaml",
"chars": 1860,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/segment/yolov5l-seg.yaml",
"chars": 1451,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/segment/yolov5m-seg.yaml",
"chars": 1453,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/segment/yolov5n-seg.yaml",
"chars": 1453,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/segment/yolov5s-seg.yaml",
"chars": 1452,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/segment/yolov5x-seg.yaml",
"chars": 1453,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/tf.py",
"chars": 33548,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTensorFlow, Keras and TFLite versions of YOLOv5\nA"
},
{
"path": "models/yolo.py",
"chars": 21055,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nYOLO-specific modules.\n\nUsage:\n $ python model"
},
{
"path": "models/yolov5l.yaml",
"chars": 1441,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/yolov5m.yaml",
"chars": 1443,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/yolov5n.yaml",
"chars": 1443,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/yolov5s.yaml",
"chars": 1443,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "models/yolov5x.yaml",
"chars": 1443,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multip"
},
{
"path": "pyproject.toml",
"chars": 5443,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Overview:\n# This pyproject.toml file manages the b"
},
{
"path": "requirements.txt",
"chars": 1763,
"preview": "# YOLOv5 requirements\n# Usage: pip install -r requirements.txt\n\n# Base -------------------------------------------------"
},
{
"path": "segment/predict.py",
"chars": 16311,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nRun YOLOv5 segmentation inference on images, vide"
},
{
"path": "segment/train.py",
"chars": 35233,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTrain a YOLOv5 segment model on a segment dataset"
},
{
"path": "segment/tutorial.ipynb",
"chars": 42652,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\n \"id\": \"t6MPjfT5NrKQ\"\n },\n \"source\": [\n \"<div a"
},
{
"path": "segment/val.py",
"chars": 24364,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nValidate a trained YOLOv5 segment model on a segm"
},
{
"path": "train.py",
"chars": 47224,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nTrain a YOLOv5 model on a custom dataset. Models "
},
{
"path": "tutorial.ipynb",
"chars": 40606,
"preview": "{\n \"nbformat\": 4,\n \"nbformat_minor\": 0,\n \"metadata\": {\n \"colab\": {\n \"name\": \"YOLOv5 Tutorial\",\n \"provenance\": []\n "
},
{
"path": "utils/__init__.py",
"chars": 3263,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"utils/initialization.\"\"\"\n\nimport contextlib\nimport"
},
{
"path": "utils/activations.py",
"chars": 4998,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Activation functions.\"\"\"\n\nimport torch\nimport torc"
},
{
"path": "utils/augmentations.py",
"chars": 18622,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Image augmentation functions.\"\"\"\n\nimport math\nimpo"
},
{
"path": "utils/autoanchor.py",
"chars": 7932,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"AutoAnchor utils.\"\"\"\n\nimport random\n\nimport numpy "
},
{
"path": "utils/autobatch.py",
"chars": 3055,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Auto-batch utils.\"\"\"\n\nfrom copy import deepcopy\n\ni"
},
{
"path": "utils/aws/__init__.py",
"chars": 67,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
},
{
"path": "utils/aws/mime.sh",
"chars": 843,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# AWS EC2 instance startup 'MIME' script https://aws"
},
{
"path": "utils/aws/resume.py",
"chars": 1315,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Resume all interrupted trainings in yolov5/ dir in"
},
{
"path": "utils/aws/userdata.sh",
"chars": 1316,
"preview": "#!/bin/bash\n# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# AWS EC2 instance startup script https:"
},
{
"path": "utils/callbacks.py",
"chars": 2708,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Callback utils.\"\"\"\n\nimport threading\n\n\nclass Callb"
},
{
"path": "utils/dataloaders.py",
"chars": 60332,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Dataloaders and dataset utils.\"\"\"\n\nimport contextl"
},
{
"path": "utils/docker/Dockerfile",
"chars": 2581,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest image on DockerHu"
},
{
"path": "utils/docker/Dockerfile-arm64",
"chars": 1592,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest-arm64 image on Do"
},
{
"path": "utils/docker/Dockerfile-cpu",
"chars": 1845,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# Builds ultralytics/yolov5:latest-cpu image on Dock"
},
{
"path": "utils/downloads.py",
"chars": 5211,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Download utils.\"\"\"\n\nimport logging\nimport subproce"
},
{
"path": "utils/flask_rest_api/README.md",
"chars": 4091,
"preview": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralyt"
},
{
"path": "utils/flask_rest_api/example_request.py",
"chars": 388,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Perform test request.\"\"\"\n\nimport pprint\n\nimport re"
},
{
"path": "utils/flask_rest_api/restapi.py",
"chars": 1595,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Run a Flask REST API exposing one or more YOLOv5s "
},
{
"path": "utils/general.py",
"chars": 51405,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"General utils.\"\"\"\n\nfrom __future__ import annotati"
},
{
"path": "utils/google_app_engine/Dockerfile",
"chars": 821,
"preview": "FROM gcr.io/google-appengine/python\n\n# Create a virtualenv for dependencies. This isolates these packages from\n# system-"
},
{
"path": "utils/google_app_engine/additional_requirements.txt",
"chars": 264,
"preview": "# add these requirements in your app on top of the existing ones\npip==26.0\nFlask==2.3.2\ngunicorn==23.0.0\nwerkzeug>=3.0.1"
},
{
"path": "utils/google_app_engine/app.yaml",
"chars": 242,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nruntime: custom\nenv: flex\n\nservice: yolov5app\n\nliven"
},
{
"path": "utils/loggers/__init__.py",
"chars": 20213,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Logging utils.\"\"\"\n\nimport json\nimport os\nimport wa"
},
{
"path": "utils/loggers/clearml/README.md",
"chars": 11895,
"preview": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralyt"
},
{
"path": "utils/loggers/clearml/__init__.py",
"chars": 67,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
},
{
"path": "utils/loggers/clearml/clearml_utils.py",
"chars": 9671,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Main Logger class for ClearML experiment tracking."
},
{
"path": "utils/loggers/clearml/hpo.py",
"chars": 5312,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nfrom clearml import Task\n\n# Connecting ClearML with "
},
{
"path": "utils/loggers/comet/README.md",
"chars": 13993,
"preview": "<a href=\"https://www.ultralytics.com/\"><img src=\"https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralyt"
},
{
"path": "utils/loggers/comet/__init__.py",
"chars": 21515,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport glob\nimport json\nimport logging\nimport os\nimp"
},
{
"path": "utils/loggers/comet/comet_utils.py",
"chars": 4783,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport logging\nimport os\nfrom urllib.parse import ur"
},
{
"path": "utils/loggers/comet/hpo.py",
"chars": 6953,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport argparse\nimport json\nimport logging\nimport os"
},
{
"path": "utils/loggers/wandb/__init__.py",
"chars": 67,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
},
{
"path": "utils/loggers/wandb/wandb_utils.py",
"chars": 8084,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\n# WARNING ⚠️ wandb is deprecated and will be removed"
},
{
"path": "utils/loss.py",
"chars": 11163,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Loss functions.\"\"\"\n\nimport torch\nimport torch.nn a"
},
{
"path": "utils/metrics.py",
"chars": 15620,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Model validation metrics.\"\"\"\n\nimport math\nimport w"
},
{
"path": "utils/plots.py",
"chars": 20644,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Plotting utils.\"\"\"\n\nimport contextlib\nimport math\n"
},
{
"path": "utils/segment/__init__.py",
"chars": 67,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n"
},
{
"path": "utils/segment/augmentations.py",
"chars": 3550,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Image augmentation functions.\"\"\"\n\nimport math\nimpo"
},
{
"path": "utils/segment/dataloaders.py",
"chars": 13685,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Dataloaders.\"\"\"\n\nimport os\nimport random\n\nimport c"
},
{
"path": "utils/segment/general.py",
"chars": 6084,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport cv2\nimport numpy as np\nimport torch\nimport to"
},
{
"path": "utils/segment/loss.py",
"chars": 9187,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.f"
},
{
"path": "utils/segment/metrics.py",
"chars": 5967,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Model validation metrics.\"\"\"\n\nimport numpy as np\n\n"
},
{
"path": "utils/segment/plots.py",
"chars": 6704,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\nimport contextlib\nimport math\nfrom pathlib import Pa"
},
{
"path": "utils/torch_utils.py",
"chars": 21638,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"PyTorch utils.\"\"\"\n\nimport math\nimport os\nimport pl"
},
{
"path": "utils/triton.py",
"chars": 3761,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"Utils to interact with the Triton Inference Server"
},
{
"path": "val.py",
"chars": 30402,
"preview": "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license\n\"\"\"\nValidate a trained YOLOv5 detection model on a de"
}
]
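
Example — each manifest entry above is a JSON object with "path", "chars", and "preview" keys, so the listing can be post-processed directly. A hedged sketch; "manifest.json" is a placeholder filename for the saved array:

import json

with open("manifest.json") as f:
    entries = json.load(f)

# e.g. list the ten largest files in the extraction by character count
for entry in sorted(entries, key=lambda e: e["chars"], reverse=True)[:10]:
    print(f"{entry['chars']:>7}  {entry['path']}")
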
About this extraction
This document contains the full source code of the ultralytics/yolov5 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 143 files (1.3 MB, approximately 378.0k tokens) and includes a symbol index of 691 extracted functions, classes, methods, constants, and types.
Extracted by GitExtract, a GitHub repo-to-text converter built by Nikandr Surkov.