Full Code of wdragondragon/apex-yolov5

Repository: wdragondragon/apex-yolov5
Branch: master
Commit: 31f8835805a3
Files: 244
Total size: 1.5 MB

Directory structure:
apex-yolov5/

├── .dockerignore
├── .gitattributes
├── .github/
│   └── workflows/
│       ├── cla.yml
│       ├── format.yml
│       └── merge-main-into-prs.yml
├── .gitignore
├── CITATION.cff
├── CONTRIBUTING.md
├── LICENSE
├── MouseHook.py
├── PID.py
├── README-yolo.md
├── README-yolo.zh-CN.md
├── README.md
├── ag.spec
├── ag_asyn.spec
├── apex_recoils/
│   ├── __init__.py
│   ├── core/
│   │   ├── GameWindowsStatus.py
│   │   ├── ReaSnowSelectGun.py
│   │   ├── SelectGun.py
│   │   ├── __init__.py
│   │   ├── image_comparator/
│   │   │   ├── DynamicSizeImageComparator.py
│   │   │   ├── ImageComparator.py
│   │   │   ├── LocalImageComparator.py
│   │   │   ├── NetImageComparator.py
│   │   │   └── __init__.py
│   │   ├── kmnet_listener/
│   │   │   ├── ToggleKeyListener.py
│   │   │   └── __init__.py
│   │   └── screentaker/
│   │       ├── CapScreenTaker.py
│   │       ├── LocalMssScreenTaker.py
│   │       ├── LocalScreenTaker.py
│   │       ├── SocketScreenTaker.py
│   │       └── __init__.py
│   └── net/
│       ├── __init__.py
│       └── socket/
│           ├── Client.py
│           ├── ReaSnowSelectGunSocket.py
│           ├── Server.py
│           ├── SocketMouseMover.py
│           └── __init__.py
├── apex_yolov5/
│   ├── Counter.py
│   ├── FrameRateMonitor.py
│   ├── KeyAndMouseListener.py
│   ├── KmBoxNetListener.py
│   ├── LogUtil.py
│   ├── RecoildsCore.py
│   ├── SystemTrayApp.py
│   ├── Tools.py
│   ├── __init__.py
│   ├── apex_model.py
│   ├── auxiliary.py
│   ├── check_run.pyi
│   ├── global_img_info.py
│   ├── grabscreen.py
│   ├── job_listener/
│   │   ├── JoyListener.py
│   │   ├── JoyToKey.py
│   │   ├── RockerMonitor.py
│   │   ├── S1SwitchMonitor.py
│   │   └── __init__.py
│   ├── log/
│   │   ├── LogFactory.py
│   │   ├── LogWindow.py
│   │   ├── Logger.py
│   │   └── __init__.py
│   ├── magnifying_glass.py
│   ├── mouse.py
│   ├── mouse_lock.py
│   ├── mouse_mover/
│   │   ├── FeiMover.py
│   │   ├── GHubMover.py
│   │   ├── IntentManager.py
│   │   ├── KmBoxMover.py
│   │   ├── KmBoxNetMover.py
│   │   ├── MouseMover.py
│   │   ├── MoverFactory.py
│   │   ├── PanNiMover.py
│   │   ├── Win32ApiMover.py
│   │   ├── WuYaMover.py
│   │   └── __init__.py
│   ├── socket/
│   │   ├── config.py
│   │   ├── socket_util.py
│   │   └── yolov5_handler.py
│   ├── window_layout/
│   │   ├── ai_toggle_layout.py
│   │   ├── anthropomorphic_config_layout.py
│   │   ├── auto_charged_energy_layout.py
│   │   ├── auto_gun_config_layout.py
│   │   ├── auto_save_config_layout.py
│   │   ├── model_config_layout.py
│   │   ├── mouse_config_layout.py
│   │   └── screenshot_area_layout.py
│   └── windows/
│       ├── DebugWindow.py
│       ├── DisclaimerWindow.py
│       ├── __init__.py
│       ├── aim_show_window.py
│       ├── circle_window.py
│       └── config_window.py
├── apex_yolov5_main.py
├── apex_yolov5_main_asyn.py
├── benchmarks.py
├── bez_test.py
├── check.py
├── classify/
│   ├── predict.py
│   ├── train.py
│   ├── tutorial.ipynb
│   └── val.py
├── client.py
├── client.spec
├── config/
│   └── ref.txt
├── data/
│   ├── Argoverse.yaml
│   ├── GlobalWheat2020.yaml
│   ├── ImageNet.yaml
│   ├── ImageNet10.yaml
│   ├── ImageNet100.yaml
│   ├── ImageNet1000.yaml
│   ├── Objects365.yaml
│   ├── SKU-110K.yaml
│   ├── VOC.yaml
│   ├── VisDrone.yaml
│   ├── coco.yaml
│   ├── coco128-seg.yaml
│   ├── coco128.yaml
│   ├── hyps/
│   │   ├── hyp.Objects365.yaml
│   │   ├── hyp.VOC.yaml
│   │   ├── hyp.no-augmentation.yaml
│   │   ├── hyp.scratch-high.yaml
│   │   ├── hyp.scratch-low.yaml
│   │   └── hyp.scratch-med.yaml
│   ├── scripts/
│   │   ├── download_weights.sh
│   │   ├── get_coco.sh
│   │   ├── get_coco128.sh
│   │   ├── get_imagenet.sh
│   │   ├── get_imagenet10.sh
│   │   ├── get_imagenet100.sh
│   │   └── get_imagenet1000.sh
│   └── xView.yaml
├── detect.py
├── export.py
├── hubconf.py
├── images/
│   ├── 1920x1080/
│   │   └── list.txt
│   ├── 1920x1200/
│   │   └── list.txt
│   ├── 2048x1152/
│   │   └── list.txt
│   ├── 2560x1440/
│   │   └── list.txt
│   ├── hop_up/
│   │   ├── 1920x1080/
│   │   │   └── list.txt
│   │   └── 2560x1440/
│   │       └── list.txt
│   └── scope/
│       ├── 1920x1080/
│       │   └── list.txt
│       └── 2560x1440/
│           └── list.txt
├── joy_test.py
├── lg.py
├── main.py
├── models/
│   ├── __init__.py
│   ├── common.py
│   ├── experimental.py
│   ├── hub/
│   │   ├── anchors.yaml
│   │   ├── yolov3-spp.yaml
│   │   ├── yolov3-tiny.yaml
│   │   ├── yolov3.yaml
│   │   ├── yolov5-bifpn.yaml
│   │   ├── yolov5-fpn.yaml
│   │   ├── yolov5-p2.yaml
│   │   ├── yolov5-p34.yaml
│   │   ├── yolov5-p6.yaml
│   │   ├── yolov5-p7.yaml
│   │   ├── yolov5-panet.yaml
│   │   ├── yolov5l6.yaml
│   │   ├── yolov5m6.yaml
│   │   ├── yolov5n6.yaml
│   │   ├── yolov5s-LeakyReLU.yaml
│   │   ├── yolov5s-ghost.yaml
│   │   ├── yolov5s-transformer.yaml
│   │   ├── yolov5s6.yaml
│   │   └── yolov5x6.yaml
│   ├── mydata.yaml
│   ├── segment/
│   │   ├── yolov5l-seg.yaml
│   │   ├── yolov5m-seg.yaml
│   │   ├── yolov5n-seg.yaml
│   │   ├── yolov5s-seg.yaml
│   │   └── yolov5x-seg.yaml
│   ├── tf.py
│   ├── yolo.py
│   ├── yolov5l.yaml
│   ├── yolov5m.yaml
│   ├── yolov5n.yaml
│   ├── yolov5s.yaml
│   └── yolov5x.yaml
├── pyproject.toml
├── requirements.txt
├── segment/
│   ├── predict.py
│   ├── train.py
│   ├── tutorial.ipynb
│   └── val.py
├── server.py
├── server.spec
├── setenv.py
├── setup.py
├── setup_check.py
├── train.py
├── trt.spec
├── tutorial.ipynb
├── utils/
│   ├── __init__.py
│   ├── activations.py
│   ├── augmentations.py
│   ├── autoanchor.py
│   ├── autobatch.py
│   ├── aws/
│   │   ├── __init__.py
│   │   ├── mime.sh
│   │   ├── resume.py
│   │   └── userdata.sh
│   ├── callbacks.py
│   ├── dataloaders.py
│   ├── docker/
│   │   ├── Dockerfile
│   │   ├── Dockerfile-arm64
│   │   └── Dockerfile-cpu
│   ├── downloads.py
│   ├── flask_rest_api/
│   │   ├── README.md
│   │   ├── example_request.py
│   │   └── restapi.py
│   ├── general.py
│   ├── google_app_engine/
│   │   ├── Dockerfile
│   │   ├── additional_requirements.txt
│   │   └── app.yaml
│   ├── image_util.py
│   ├── loggers/
│   │   ├── __init__.py
│   │   ├── clearml/
│   │   │   ├── README.md
│   │   │   ├── __init__.py
│   │   │   ├── clearml_utils.py
│   │   │   └── hpo.py
│   │   ├── comet/
│   │   │   ├── README.md
│   │   │   ├── __init__.py
│   │   │   ├── comet_utils.py
│   │   │   └── hpo.py
│   │   └── wandb/
│   │       ├── __init__.py
│   │       └── wandb_utils.py
│   ├── loss.py
│   ├── metrics.py
│   ├── plots.py
│   ├── segment/
│   │   ├── __init__.py
│   │   ├── augmentations.py
│   │   ├── dataloaders.py
│   │   ├── general.py
│   │   ├── loss.py
│   │   ├── metrics.py
│   │   └── plots.py
│   ├── torch_utils.py
│   └── triton.py
├── val.py
├── validate.spec
└── 训练命令.txt

================================================
FILE CONTENTS
================================================

================================================
FILE: .dockerignore
================================================
# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
.git
.cache
.idea
runs
output
coco
storage.googleapis.com

data/samples/*
**/results*.csv
*.jpg

# Neural Network weights -----------------------------------------------------------------------------------------------
**/*.pt
**/*.pth
**/*.onnx
**/*.engine
**/*.mlmodel
**/*.torchscript
**/*.torchscript.pt
**/*.tflite
**/*.h5
**/*.pb
*_saved_model/
*_web_model/
*_openvino_model/

# Below Copied From .gitignore -----------------------------------------------------------------------------------------


# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
wandb/
.installed.cfg
*.egg

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv*
venv*/
ENV*/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/


# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------

# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon
Icon?

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk


# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.html  # Bokeh Plots
.pg  # TensorFlow Frozen Graphs
.avi # videos

# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml

# Gradle:
.idea/**/gradle.xml
.idea/**/libraries

# CMake
cmake-build-debug/
cmake-build-release/

# Mongo Explorer plugin:
.idea/**/mongoSettings.xml

## File-based project format:
*.iws

## Plugin-specific files:

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties


================================================
FILE: .gitattributes
================================================
# this drops notebooks from GitHub language stats
*.ipynb linguist-vendored


================================================
FILE: .github/workflows/cla.yml
================================================
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA
# This workflow automatically requests Pull Request (PR) authors to sign the Ultralytics CLA before PRs can be merged

name: CLA Assistant
on:
  issue_comment:
    types:
      - created
  pull_request_target:
    types:
      - reopened
      - opened
      - synchronize

jobs:
  CLA:
    if: github.repository == 'ultralytics/yolov5'
    runs-on: ubuntu-latest
    steps:
      - name: CLA Assistant
        if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'
        uses: contributor-assistant/github-action@v2.4.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # must be repository secret token
          PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
        with:
          path-to-signatures: "signatures/version1/cla.json"
          path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document
          # branch should not be protected
          branch: "main"
          allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot*

          remote-organization-name: ultralytics
          remote-repository-name: cla
          custom-pr-sign-comment: "I have read the CLA Document and I sign the CLA"
          custom-allsigned-prcomment: All Contributors have signed the CLA. ✅
          #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign'


================================================
FILE: .github/workflows/format.yml
================================================
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Ultralytics Actions https://github.com/ultralytics/actions
# This workflow automatically formats code and documentation in PRs to official Ultralytics standards

name: Ultralytics Actions

on:
  push:
    branches: [main, master]
  pull_request_target:
    branches: [main, master]

jobs:
  format:
    runs-on: ubuntu-latest
    steps:
      - name: Run Ultralytics Formatting
        uses: ultralytics/actions@main
        with:
          token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, do not modify
          python: true # format Python code and docstrings
          markdown: true # format Markdown
          prettier: true # format YAML
          spelling: true # check spelling
          links: false # check broken links
          summary: true # print PR summary with GPT4 (requires 'openai_api_key' or 'openai_azure_api_key' and 'openai_azure_endpoint')
          openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }}
          openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }}


================================================
FILE: .github/workflows/merge-main-into-prs.yml
================================================
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
# Automatically merges repository 'main' branch into all open PRs to keep them up-to-date
# The action runs on updates to the main branch, so when one PR merges into main all other open PRs are updated

name: Merge main into PRs

on:
  workflow_dispatch:
  push:
    branches:
      - main
      - master
  
jobs:
  Merge:
    if: github.repository == 'ultralytics/yolov5'
    runs-on: ubuntu-latest
    steps:
    - name: Checkout repository
      uses: actions/checkout@v4
      with:
        fetch-depth: 0
    - uses: actions/setup-python@v5
      with:
        python-version: "3.11"
        cache: "pip" # caching pip dependencies
    - name: Install requirements
      run: |
        pip install pygithub
    - name: Merge main into PRs
      shell: python
      run: |
        from github import Github
        import os
        
        # Authenticate with the GitHub Token
        g = Github(os.getenv('GITHUB_TOKEN'))
        
        # Get the repository dynamically
        repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))
        
        # List all open pull requests
        open_pulls = repo.get_pulls(state='open', sort='created')
        
        for pr in open_pulls:
            # Compare PR head with main to see if it's behind
            try:
                # Merge main into the PR branch
                success = pr.update_branch()
                assert success, "Branch update failed"
                print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.")
            except Exception as e:
                print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}")
      env:
        GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
        GITHUB_REPOSITORY: ${{ github.repository }}
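
As a rough illustration of the PyGithub calls this workflow relies on, here is a dry-run sketch that lists open PRs behind the default branch instead of updating them. The token variable and repository name are assumptions for the example, not values taken from the workflow.

```python
# Dry-run sketch: list open PRs that are behind the default branch.
# Requires `pip install pygithub` and a GITHUB_TOKEN env var with repo access.
import os

from github import Github

g = Github(os.getenv("GITHUB_TOKEN"))
repo = g.get_repo("ultralytics/yolov5")  # illustrative repository name
default = repo.default_branch

for pr in repo.get_pulls(state="open", sort="created"):
    # base...head comparison: behind_by counts commits the PR head is missing.
    comparison = repo.compare(default, pr.head.sha)
    if comparison.behind_by:
        print(f"PR #{pr.number} ({pr.head.ref}) is {comparison.behind_by} commits behind {default}")
```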


================================================
FILE: .gitignore
================================================
# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
*.jpg
*.jpeg
*.png
*.bmp
*.tif
*.tiff
*.heic
*.JPG
*.JPEG
*.PNG
*.BMP
*.TIF
*.TIFF
*.HEIC
*.mp4
*.mov
*.MOV
*.avi
*.data
*.json
*.cfg
!setup.cfg
!cfg/yolov3*.cfg

storage.googleapis.com
runs/*
data/*
data/images/*
!data/*.yaml
!data/hyps
!data/scripts
!data/images
!data/images/zidane.jpg
!data/images/bus.jpg
!data/*.sh

results*.csv

# Datasets -------------------------------------------------------------------------------------------------------------
coco/
coco128/
VOC/

# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
*.m~
*.mat
!targets*.mat

# Neural Network weights -----------------------------------------------------------------------------------------------
*.weights
*.pt
*.pb
*.onnx
*.engine
*.mlmodel
*.torchscript
*.tflite
*.h5
*_saved_model/
*_web_model/
*_openvino_model/
*_paddle_model/
darknet53.conv.74
yolov3-tiny.conv.15

# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
/wandb/
.installed.cfg
*.egg


# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
#*.spec


# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv*
venv*/
ENV*/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/


# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------

# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon
Icon?

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk


# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.html  # Bokeh Plots
.pg  # TensorFlow Frozen Graphs
.avi # videos

# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml

# Gradle:
.idea/**/gradle.xml
.idea/**/libraries

# CMake
cmake-build-debug/
cmake-build-release/

# Mongo Explorer plugin:
.idea/**/mongoSettings.xml

## File-based project format:
*.iws

## Plugin-specific files:

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

apex_model/

================================================
FILE: CITATION.cff
================================================
cff-version: 1.2.0
preferred-citation:
  type: software
  message: If you use YOLOv5, please cite it as below.
  authors:
  - family-names: Jocher
    given-names: Glenn
    orcid: "https://orcid.org/0000-0001-5950-6979"
  title: "YOLOv5 by Ultralytics"
  version: 7.0
  doi: 10.5281/zenodo.3908559
  date-released: 2020-05-29
  license: AGPL-3.0
  url: "https://github.com/ultralytics/yolov5"


================================================
FILE: CONTRIBUTING.md
================================================
## Contributing to YOLOv5 🚀

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:

- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing a new feature
- Becoming a maintainer

YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be helping push the frontiers of what's possible in AI 😃!

## Submitting a Pull Request (PR) 🛠️

Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:

### 1. Select File to Update

Select `requirements.txt` to update by clicking on it in GitHub.

<p align="center"><img width="800" alt="PR_step1" src="https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png"></p>

### 2. Click 'Edit this file'

The button is in the top-right corner.

<p align="center"><img width="800" alt="PR_step2" src="https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png"></p>

### 3. Make Changes

Change the `matplotlib` version from `3.2.2` to `3.3`.

<p align="center"><img width="800" alt="PR_step3" src="https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png"></p>

### 4. Preview Changes and Submit PR

Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!

<p align="center"><img width="800" alt="PR_step4" src="https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png"></p>

### PR recommendations

To allow your work to be integrated as seamlessly as possible, we advise you to:

- ✅ Verify your PR is **up-to-date** with the `ultralytics/yolov5` `master` branch. If your PR is behind, you can update it by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.

<p align="center"><img width="751" alt="Screenshot 2022-08-29 at 22 47 15" src="https://user-images.githubusercontent.com/26833433/187295893-50ed9f44-b2c9-4138-a614-de69bd1753d7.png"></p>

- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.

<p align="center"><img width="751" alt="Screenshot 2022-08-29 at 22 47 03" src="https://user-images.githubusercontent.com/26833433/187296922-545c5498-f64a-4d8c-8300-5fa764360da6.png"></p>

- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee

## Submitting a Bug Report 🐛

If you spot a problem with YOLOv5, please submit a Bug Report!

For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need to get started.

When asking a question, you will get better help if you provide **code** that others can easily understand and use to **reproduce** the problem. Community members refer to this as creating a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/); a sketch follows the list below. Your code that reproduces the problem should be:

- ✅ **Minimal** – Use as little code as possible that still produces the same problem
- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
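
For this repository, such an example might look like the following sketch. It assumes the standard YOLOv5 PyTorch Hub entry point (this repo ships a `hubconf.py`); the model name and image URL are illustrative choices, not prescribed values.

```python
# Hypothetical minimum reproducible example for a YOLOv5 inference problem.
# Assumes torch and the packages in requirements.txt are installed.
import torch

# Load a small pretrained model via PyTorch Hub (served by the repo's hubconf.py).
model = torch.hub.load("ultralytics/yolov5", "yolov5s")

# Run inference on a publicly hosted sample image (URL is illustrative).
results = model("https://ultralytics.com/images/zidane.jpg")
results.print()  # print detections so the reported behavior is visible
```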

In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance, your code should be:

- ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits.
- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.

If you believe your problem meets all of the above criteria, please raise a new issue using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose it.

## License

By contributing, you agree that your contributions will be licensed under the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/).


================================================
FILE: LICENSE
================================================
                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

  A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate.  Many developers of free software are heartened and
encouraged by the resulting cooperation.  However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

  The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community.  It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server.  Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

  An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals.  This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU Affero General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Remote Network Interaction; Use with the GNU General Public License.

  Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software.  This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time.  Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source.  For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code.  There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.


================================================
FILE: MouseHook.py
================================================
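# NOTE: this module ships fully commented out. When enabled, it appears to
# load per-weapon recoil specs from specs.json and, while both mouse buttons
# are held, replay the recorded (time_point, x, y) deltas via set_intention().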
# import os.path as op
# import json
# import threading
# import time
#
# import pynput
# from pynput.mouse import Button
#
# from apex_yolov5 import auxiliary
# from apex_yolov5.KeyAndMouseListener import apex_mouse_listener
# from apex_yolov5.auxiliary import set_intention
#
#
# class MouseHook:
#     def __init__(self):
#         config_file_path = 'specs.json'
#         if op.exists(config_file_path):
#             with open(config_file_path) as file:
#                 self.specs_data = json.load(file)
#                 print("Loaded config file: {}".format(config_file_path))
#         else:
#             print("Config file does not exist: {}".format(config_file_path))
#
#     def get_config(self, name):
#         for spec in self.specs_data:
#             if spec['name'] == name:
#                 return spec
#         return None
#
#
# listener = pynput.mouse.Listener(
#     on_click=apex_mouse_listener.on_click)
# listener.start()
# threading.Thread(target=auxiliary.start).start()
# mouse_hook = MouseHook()
# spec = mouse_hook.get_config("car")
# print(spec)
#
# start_time = None
# pre_x, pre_y = 0, 0
# i = 0
# while True:
#     if apex_mouse_listener.is_press(Button.left) and apex_mouse_listener.is_press(Button.right):
#         if start_time is None:
#             start_time = time.time()
#         index = next(
#             (i for i, time_point in enumerate(spec['time_points']) if time_point >= (time.time() - start_time) * 1000),
#             None)
#         if index is not None and i < index:
#             print(str(index))
#             # get the x and y at the corresponding index
#             x_value = spec['x'][index] - pre_x
#             y_value = spec['y'][index] - pre_y
#             set_intention(-x_value, -y_value, 0, 0)
#             pre_x, pre_y = x_value, y_value
#             i = index
#     else:
#         start_time = None
#         pre_x, pre_y, i = 0, 0, 0
#     time.sleep(0.001)


================================================
FILE: PID.py
================================================
import time


class Pid:
    def __init__(self, kp, ki, kd):
        self.KP = kp
        self.KI = ki
        self.KD = kd
        self.now_val = 0
        self.sum_err = 0
        self.now_err = 0
        self.last_err = 0

    def cmd_pid(self, exp_val):
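        # Positional PID in discrete form (an assumed reading of this code):
        #   u[k] = KP * e[k] + KI * sum(e[0..k]) + KD * (e[k] - e[k-1])
        # with error e[k] = exp_val - now_val. The output u[k] is accumulated
        # into now_val, so repeated calls step the tracked value toward exp_val.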
        self.last_err = self.now_err
        self.now_err = exp_val - self.now_val
        self.sum_err += self.now_err

        # Apply the PID control law: proportional + integral + derivative terms
        control_output = self.KP * self.now_err + self.KI * self.sum_err + self.KD * (
                self.now_err - self.last_err)

        # Update the current value with the control output
        self.now_val += control_output

        return self.now_val


if __name__ == '__main__':
    # Sample character movement trajectory data, as (x, y) points
    trajectory_data = [(1, 2), (2, 4), (3, 6), (4, 8), (5, 10)]  # format: (x, y)

    # Initialize PID controllers for the x and y directions
    pid_controller_x = Pid(kp=0.2, ki=0.03, kd=0.15)
    pid_controller_y = Pid(kp=0.1, ki=0.01, kd=0.1)
    for i in range(1, 1000):
        start = time.time()
        x = i
        y = 2 * i
        predicted_x = pid_controller_x.cmd_pid(x)
        predicted_y = pid_controller_y.cmd_pid(y)
        print(
            f"The {i}th prediction, cost {int((time.time() - start) * 1000)} ms, Actual Trajectory: ({x + 1}, {2 * (x + 1)}), Predicted Trajectory: ({predicted_x}, {predicted_y})")


================================================
FILE: README-yolo.md
================================================
<div align="center">
  <p>
    <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
      <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
  </p>

[English](README.md) | [简体中文](README.zh-CN.md)
<br>

<div>
    <a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
    <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
    <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
    <br>
    <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
    <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
    <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
  </div>
  <br>

YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!

To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).

<div align="center">
  <a href="https://github.com/ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
  <a href="https://www.linkedin.com/company/ultralytics/" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
  <a href="https://twitter.com/ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
  <a href="https://youtube.com/ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
  <a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
  <a href="https://ultralytics.com/discord" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="2%" alt="" /></a>
</div>

</div>
<br>

## <div align="center">YOLOv8 🚀 NEW</div>

We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model
released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**.
YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
object detection, image segmentation and image classification tasks.

See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:

[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)

```bash
pip install ultralytics
```
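
Once installed, a minimal Python quickstart with the `ultralytics` package might look like this (a sketch; `yolov8n.pt` downloads automatically on first use):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # load a pretrained YOLOv8 nano model
results = model("https://ultralytics.com/images/bus.jpg")  # run inference
print(results[0].boxes)  # detected boxes: xyxy coordinates, confidence, class
```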

<div align="center">
  <a href="https://ultralytics.com/yolov8" target="_blank">
  <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png"></a>
</div>

## <div align="center">Documentation</div>

See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples.

<details open>
<summary>Install</summary>

Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
[**Python>=3.7.0**](https://www.python.org/) environment, including
[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).

```bash
git clone https://github.com/ultralytics/yolov5  # clone
cd yolov5
pip install -r requirements.txt  # install
```

</details>

<details>
<summary>Inference</summary>

YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

```python
import torch

# Model
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # or yolov5n - yolov5x6, custom

# Images
img = "https://ultralytics.com/images/zidane.jpg"  # or file, Path, PIL, OpenCV, numpy, list

# Inference
results = model(img)

# Results
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```
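
The returned `Detections` object can also be inspected programmatically; a minimal sketch, using the same `yolov5s` model and image as above:

```python
import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5s")
results = model("https://ultralytics.com/images/zidane.jpg")

# Detections as an (n, 6) tensor per image: x1, y1, x2, y2, confidence, class
print(results.xyxy[0])

# The same detections as a pandas DataFrame with human-readable class names
print(results.pandas().xyxy[0][["name", "confidence"]])
```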

</details>

<details>
<summary>Inference with detect.py</summary>

`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.

```bash
python detect.py --weights yolov5s.pt --source 0                               # webcam
                                               img.jpg                         # image
                                               vid.mp4                         # video
                                               screen                          # screenshot
                                               path/                           # directory
                                               list.txt                        # list of images
                                               list.streams                    # list of streams
                                               'path/*.jpg'                    # glob
                                               'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                               'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
```
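
`detect.py` can also be driven from Python: it exposes a `run()` helper whose keyword arguments mirror the CLI flags. A minimal sketch, assuming it is run from the repo root:

```python
import detect  # yolov5/detect.py

# Equivalent to: python detect.py --weights yolov5s.pt --source data/images/bus.jpg
detect.run(weights="yolov5s.pt", source="data/images/bus.jpg", conf_thres=0.4)
```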

</details>

<details>
<summary>Training</summary>

The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) training is proportionally faster). Use the
largest `--batch-size` possible, or pass `--batch-size -1` for
YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB.

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml  --batch-size 128
                                                                 yolov5s                    64
                                                                 yolov5m                    40
                                                                 yolov5l                    24
                                                                 yolov5x                    16
```
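
`train.py` likewise exposes a `run()` helper that accepts the CLI options as keyword arguments; a minimal sketch, assuming it is run from the repo root:

```python
import train  # yolov5/train.py

# Equivalent to: python train.py --data coco128.yaml --weights yolov5s.pt --img 640 --epochs 3
train.run(data="coco128.yaml", weights="yolov5s.pt", imgsz=640, epochs=3)
```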

<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">

</details>

<details open>
<summary>Tutorials</summary>

- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED
- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️
- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training)
- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW
- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀
- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW
- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation)
- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling)
- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity)
- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution)
- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers)
- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW
- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration)
- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW
- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW
- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW

</details>

## <div align="center">Integrations</div>

<br>
<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png"></a>
<br>
<br>

<div align="center">
  <a href="https://roboflow.com/?ref=ultralytics">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-roboflow.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
  <a href="https://cutt.ly/yolov5-readme-clearml">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-clearml.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
  <a href="https://bit.ly/yolov5-readme-comet2">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
  <a href="https://bit.ly/yolov5-neuralmagic">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" /></a>
</div>

|                                                           Roboflow                                                           |                                                            ClearML ⭐ NEW                                                            |                                                                        Comet ⭐ NEW                                                                         |                                           Neural Magic ⭐ NEW                                           |
| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

## <div align="center">Ultralytics HUB</div>

Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!

<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>

## <div align="center">Why YOLOv5</div>

YOLOv5 has been designed to be super easy to get started with and simple to learn. We prioritize real-world results.

<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png"></p>
<details>
  <summary>YOLOv5-P5 640 Figure</summary>

<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png"></p>
</details>
<details>
  <summary>Figure Notes</summary>

- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`

</details>

### Pretrained Checkpoints

| Model                                                                                           | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- |
| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt)              | 640                   | 28.0                 | 45.7              | **45**                       | **6.3**                       | **0.6**                        | **1.9**            | **4.5**                |
| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt)              | 640                   | 37.4                 | 56.8              | 98                           | 6.4                           | 0.9                            | 7.2                | 16.5                   |
| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt)              | 640                   | 45.4                 | 64.1              | 224                          | 8.2                           | 1.7                            | 21.2               | 49.0                   |
| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt)              | 640                   | 49.0                 | 67.3              | 430                          | 10.1                          | 2.7                            | 46.5               | 109.1                  |
| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt)              | 640                   | 50.7                 | 68.9              | 766                          | 12.1                          | 4.8                            | 86.7               | 205.7                  |
|                                                                                                 |                       |                      |                   |                              |                               |                                |                    |                        |
| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt)            | 1280                  | 36.0                 | 54.4              | 153                          | 8.1                           | 2.1                            | 3.2                | 4.6                    |
| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt)            | 1280                  | 44.8                 | 63.7              | 385                          | 8.2                           | 3.6                            | 12.6               | 16.8                   |
| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt)            | 1280                  | 51.3                 | 69.3              | 887                          | 11.1                          | 6.8                            | 35.7               | 50.0                   |
| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt)            | 1280                  | 53.7                 | 71.3              | 1784                         | 15.8                          | 10.5                           | 76.8               | 111.4                  |
| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [TTA] | 1280<br>1536          | 55.0<br>**55.8**     | 72.7<br>**72.7**  | 3136<br>-                    | 26.2<br>-                     | 19.4<br>-                      | 140.7<br>-         | 209.8<br>-             |

<details>
  <summary>Table Notes</summary>

- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`

</details>

## <div align="center">Segmentation</div>

Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials.

<details>
  <summary>Segmentation Checkpoints</summary>

<div align="center">
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png"></a>
</div>

We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.

| Model                                                                                      | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- |
| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640                   | 27.6                 | 23.4                  | 80:17                                         | **62.7**                       | **1.2**                        | **2.0**            | **7.1**                |
| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640                   | 37.6                 | 31.7                  | 88:16                                         | 173.3                          | 1.4                            | 7.6                | 26.4                   |
| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640                   | 45.0                 | 37.1                  | 108:36                                        | 427.0                          | 2.2                            | 22.0               | 70.8                   |
| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640                   | 49.0                 | 39.9                  | 66:43 (2x)                                    | 857.4                          | 2.9                            | 47.9               | 147.7                  |
| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640                   | **50.7**             | **41.4**              | 62:56 (3x)                                    | 1579.2                         | 4.5                            | 88.8               | 265.7                  |

- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official
- **Accuracy** values are for single-model single-scale on COCO dataset.<br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image). <br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`. <br>Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`

</details>

<details>
  <summary>Segmentation Usage Examples &nbsp;<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 segmentation training supports auto-download of the COCO128-seg segmentation dataset with the `--data coco128-seg.yaml` argument, and manual download of the COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` followed by `python train.py --data coco.yaml`.

```bash
# Single-GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```

### Val

Validate YOLOv5s-seg mask mAP on COCO dataset:

```bash
bash data/scripts/get_coco.sh --val --segments  # download COCO val segments split (780MB, 5000 images)
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate
```

### Predict

Use pretrained YOLOv5m-seg.pt to predict bus.jpg:

```bash
python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
```

```python
import torch

model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5m-seg.pt"
)  # load from PyTorch Hub (WARNING: inference not yet supported)
```

| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |
| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |

### Export

Export YOLOv5s-seg model to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```

</details>

## <div align="center">Classification</div>

YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.

<details>
  <summary>Classification Checkpoints</summary>

<br>

We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.

| Model                                                                                              | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TensorRT V100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt)         | 224                   | 64.6             | 85.4             | 7:59                                         | **3.3**                        | **0.5**                             | **2.5**            | **0.5**                |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt)         | 224                   | 71.5             | 90.2             | 8:09                                         | 6.6                            | 0.6                                 | 5.4                | 1.4                    |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt)         | 224                   | 75.9             | 92.9             | 10:06                                        | 15.5                           | 0.9                                 | 12.9               | 3.9                    |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt)         | 224                   | 78.0             | 94.0             | 11:56                                        | 26.9                           | 1.4                                 | 26.5               | 8.5                    |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt)         | 224                   | **79.0**         | **94.4**         | 15:04                                        | 54.3                           | 1.8                                 | 48.1               | 15.9                   |
|                                                                                                    |                       |                  |                  |                                              |                                |                                     |                    |                        |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt)               | 224                   | 70.3             | 89.5             | **6:47**                                     | 11.2                           | 0.5                                 | 11.7               | 3.7                    |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt)               | 224                   | 73.9             | 91.8             | 8:33                                         | 20.6                           | 0.9                                 | 21.8               | 7.4                    |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt)               | 224                   | 76.8             | 93.4             | 11:10                                        | 23.4                           | 1.0                                 | 25.6               | 8.5                    |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt)             | 224                   | 78.5             | 94.3             | 17:10                                        | 42.1                           | 1.9                                 | 44.5               | 15.9                   |
|                                                                                                    |                       |                  |                  |                                              |                                |                                     |                    |                        |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224                   | 75.1             | 92.4             | 13:03                                        | 12.5                           | 1.3                                 | 5.3                | 1.0                    |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224                   | 76.4             | 93.2             | 17:04                                        | 14.9                           | 1.6                                 | 7.8                | 1.5                    |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224                   | 76.6             | 93.4             | 17:10                                        | 15.9                           | 1.6                                 | 9.1                | 1.7                    |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224                   | 77.7             | 94.0             | 19:19                                        | 18.9                           | 1.9                                 | 12.2               | 2.4                    |

<details>
  <summary>Table Notes (click to expand)</summary>

- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2
- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224`
- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`. <br>Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`

</details>
</details>

<details>
  <summary>Classification Usage Examples &nbsp;<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 classification training supports auto-download of the MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST, for example, use `--data mnist`.

```bash
# Single-GPU
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```

### Val

Validate YOLOv5m-cls accuracy on ImageNet-1k dataset:

```bash
bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
```

### Predict

Use pretrained YOLOv5s-cls.pt to predict bus.jpg:

```bash
python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
```

```python
import torch

model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5s-cls.pt"
)  # load from PyTorch Hub
```

### Export

Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```
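
One way to consume the exported classifier is through ONNX Runtime; a minimal sketch (assumes `onnxruntime` is installed and the input is a normalized NCHW float32 batch):

```python
import numpy as np
import onnxruntime as ort  # assumption: pip install onnxruntime

# Load the exported classifier and run a dummy 224x224 RGB batch through it
session = ort.InferenceSession("yolov5s-cls.onnx")
input_name = session.get_inputs()[0].name
x = np.random.rand(1, 3, 224, 224).astype(np.float32)  # NCHW, values in 0-1
(scores,) = session.run(None, {input_name: x})
print(scores.shape)  # (1, 1000) ImageNet class scores
```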

</details>

## <div align="center">Environments</div>

Get started in seconds with our verified environments. Click each icon below for details.

<div align="center">
  <a href="https://bit.ly/yolov5-paperspace-notebook">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gradient.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://www.kaggle.com/ultralytics/yolov5">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://hub.docker.com/r/ultralytics/yolov5">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="10%" /></a>
</div>

## <div align="center">Contribute</div>

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!

<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

<a href="https://github.com/ultralytics/yolov5/graphs/contributors">
<img src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" /></a>

## <div align="center">License</div>

Ultralytics offers two licensing options to accommodate diverse use cases:

- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).

## <div align="center">Contact</div>

For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!

<br>
<div align="center">
  <a href="https://github.com/ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
  <a href="https://www.linkedin.com/company/ultralytics/" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
  <a href="https://twitter.com/ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
  <a href="https://youtube.com/ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
  <a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
  <a href="https://ultralytics.com/discord" style="text-decoration:none;">
    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="3%" alt="" /></a>
</div>

[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation


================================================
FILE: README-yolo.zh-CN.md
================================================
<div align="center">
  <p>
    <a href="http://www.ultralytics.com/blog/ultralytics-yolov8-turns-one-a-year-of-breakthroughs-and-innovations" target="_blank">
      <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png"></a>
    <!--
    <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
      <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
    -->
  </p>

[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)

<div>
    <a href="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml"><img src="https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg" alt="YOLOv5 CI"></a>
    <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
    <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
    <br>
    <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
    <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
    <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
  </div>
  <br>

YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com"> Ultralytics </a>open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!

To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).

<div align="center">
  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
  <a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="Ultralytics Instagram"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>

## <div align="center">YOLOv8 🚀 新品</div>

我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。

请查看 [YOLOv8 文档](https://docs.ultralytics.com)了解详细信息,并开始使用:

[![PyPI 版本](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![下载量](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)

```commandline
pip install ultralytics
```

<div align="center">
  <a href="https://ultralytics.com/yolov8" target="_blank">
  <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png"></a>
</div>

## <div align="center">文档</div>

有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com/yolov5/)。请参阅下面的快速入门示例。

<details open>
<summary>安装</summary>

克隆 repo,并要求在 [**Python>=3.8.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 。

```bash
git clone https://github.com/ultralytics/yolov5  # clone
cd yolov5
pip install -r requirements.txt  # install
```

</details>

<details>
<summary>Inference</summary>

YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

```python
import torch

# Model
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # or yolov5n - yolov5x6, custom

# Images
img = "https://ultralytics.com/images/zidane.jpg"  # or file, Path, PIL, OpenCV, numpy, list

# Inference
results = model(img)

# Results
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```

</details>

<details>
<summary>Inference with detect.py</summary>

`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.

```bash
python detect.py --weights yolov5s.pt --source 0                               # webcam
                                               img.jpg                         # image
                                               vid.mp4                         # video
                                               screen                          # screenshot
                                               path/                           # directory
                                               list.txt                        # list of images
                                               list.streams                    # list of streams
                                               'path/*.jpg'                    # glob
                                               'https://youtu.be/LNwODJXcvt4'  # YouTube
                                               'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
```

</details>

<details>
<summary>Training</summary>

The commands below reproduce YOLOv5 results on the [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset. The latest [models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data)
download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) training is proportionally faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB.

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml  --batch-size 128
                                                                 yolov5s                    64
                                                                 yolov5m                    40
                                                                 yolov5l                    24
                                                                 yolov5x                    16
```
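
If you are unsure what fits in GPU memory, the AutoBatch option mentioned above picks a batch size automatically; for example:

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size -1  # AutoBatch
```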

<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">

</details>

<details open>
<summary>Tutorials</summary>

- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED
- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️
- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training)
- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW
- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀
- [NVIDIA Jetson Platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW
- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation)
- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling)
- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity)
- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution)
- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers)
- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW
- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration)
- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW
- [YOLOv5 with Neural Magic's DeepSparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW
- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW

</details>

## <div align="center">Integrations</div>

<br>
<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png"></a>
<br>
<br>

<div align="center">
  <a href="https://roboflow.com/?ref=ultralytics">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-roboflow.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
  <a href="https://cutt.ly/yolov5-readme-clearml">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-clearml.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
  <a href="https://bit.ly/yolov5-readme-comet2">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="" />
  <a href="https://bit.ly/yolov5-neuralmagic">
    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" /></a>
</div>

| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
| :------: | :-----------: | :---------: | :----------------: |
| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

## <div align="center">Ultralytics HUB</div>

[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now!

<a align="center" href="https://bit.ly/ultralytics_hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>

## <div align="center">Why YOLOv5</div>

YOLOv5 has been designed to be super easy to get started with and simple to learn. We prioritize real-world results.

<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png"></p>
<details>
<summary>YOLOv5-P5 640 Figure</summary>

<p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png"></p>
</details>
<details>
<summary>Figure Notes</summary>

- **COCO AP val** denotes the mAP@0.5:0.95 metric measured on the 5,000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- **GPU Speed** measures average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch size 32.
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
- **Reproduce** with `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`

</details>

### Pretrained Checkpoints

| Model                                                                                          | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms)      | Speed<br><sup>V100 b1<br>(ms)      | Speed<br><sup>V100 b32<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ---------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | --------------------------------- | ---------------------------------- | ------------------------------- | ------------------ | ---------------------- |
| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt)             | 640                   | 28.0                 | 45.7              | **45**                            | **6.3**                            | **0.6**                         | **1.9**            | **4.5**                |
| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt)             | 640                   | 37.4                 | 56.8              | 98                                | 6.4                                | 0.9                             | 7.2                | 16.5                   |
| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt)             | 640                   | 45.4                 | 64.1              | 224                               | 8.2                                | 1.7                             | 21.2               | 49.0                   |
| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt)             | 640                   | 49.0                 | 67.3              | 430                               | 10.1                               | 2.7                             | 46.5               | 109.1                  |
| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt)             | 640                   | 50.7                 | 68.9              | 766                               | 12.1                               | 4.8                             | 86.7               | 205.7                  |
|                                                                                                |                       |                      |                   |                                   |                                    |                                 |                    |                        |
| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt)           | 1280                  | 36.0                 | 54.4              | 153                               | 8.1                                | 2.1                             | 3.2                | 4.6                    |
| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt)           | 1280                  | 44.8                 | 63.7              | 385                               | 8.2                                | 3.6                             | 12.6               | 16.8                   |
| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt)           | 1280                  | 51.3                 | 69.3              | 887                               | 11.1                               | 6.8                             | 35.7               | 50.0                   |
| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt)           | 1280                  | 53.7                 | 71.3              | 1784                              | 15.8                               | 10.5                            | 76.8               | 111.4                  |
| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+[TTA] | 1280<br>1536          | 55.0<br>**55.8**     | 72.7<br>**72.7**  | 3136<br>-                         | 26.2<br>-                          | 19.4<br>-                       | 140.7<br>-         | 209.8<br>-             |

<details>
<summary>Table Notes</summary>

- All checkpoints are trained for 300 epochs with default settings. The n and s models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyperparameters; all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce with `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS time (about 1 ms/img) is not included.<br>Reproduce with `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.<br>Reproduce with `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`

</details>

## <div align="center">Instance Segmentation Models ⭐ NEW</div>

Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials.

<details>
<summary>Segmentation Checkpoints</summary>

<br>

<div align="center">
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="800" src="https://user-images.githubusercontent.com/61612323/204180385-84f3aca9-a5e9-43d8-a617-dda7ca12e54a.png"></a>
</div>

We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. For easy reproducibility, we ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup).

| Model                                                                                      | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train time<br><sup>300 epochs<br>A100 (hours)   | Speed<br><sup>ONNX CPU<br>(ms)      | Speed<br><sup>TRT A100<br>(ms)      | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | ----------------------------------------------- | ----------------------------------- | ----------------------------------- | ------------------ | ---------------------- |
| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640                   | 27.6                 | 23.4                  | 80:17                                           | **62.7**                            | **1.2**                             | **2.0**            | **7.1**                |
| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640                   | 37.6                 | 31.7                  | 88:16                                           | 173.3                               | 1.4                                 | 7.6                | 26.4                   |
| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640                   | 45.0                 | 37.1                  | 108:36                                          | 427.0                               | 2.2                                 | 22.0               | 70.8                   |
| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640                   | 49.0                 | 39.9                  | 66:43 (2x)                                      | 857.4                               | 2.9                                 | 47.9               | 147.7                  |
| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640                   | **50.7**             | **41.4**              | 62:56 (3x)                                      | 1579.2                              | 4.5                                 | 88.8               | 265.7                  |

- All checkpoints are trained for 300 epochs with the SGD optimizer, `lr0=0.01` and `weight_decay=5e-5`, at image size 640.<br>Training runs are logged at https://wandb.ai/glenn-jocher/YOLOv5_v70_official
- **Accuracy** values are for single-model single-scale on the COCO dataset.<br>Reproduce with `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1 ms per image).<br>Reproduce with `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 is done with `export.py`.<br>Reproduce with `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`

</details>

<details>
  <summary>Segmentation Usage Examples &nbsp;<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 segmentation training supports auto-download of the COCO128-seg dataset: just pass the `--data coco128-seg.yaml` argument at launch. To download the dataset manually, run `bash data/scripts/get_coco.sh --train --val --segments`, then start training with `python train.py --data coco.yaml`.

```bash
# Single-GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```

### Val

Validate YOLOv5s-seg mask mAP on the COCO dataset:

```bash
bash data/scripts/get_coco.sh --val --segments  # download COCO val segments split (780MB, 5000 images)
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate
```

### Predict

Use a pretrained YOLOv5m-seg.pt to predict bus.jpg:

```bash
python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
```

```python
model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5m-seg.pt"
)  # load from PyTorch Hub (WARNING: inference not yet supported)
```

| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |
| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |

### Export

Export a YOLOv5s-seg model to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```
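
As a quick sanity check on the exported ONNX file, the following minimal sketch (assuming `onnxruntime` and `numpy` are installed, which this README does not require) runs a dummy tensor through it:

```python
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("yolov5s-seg.onnx", providers=["CPUExecutionProvider"])
# YOLOv5 exports take an NCHW float32 tensor; 640 matches the --img flag above
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)
outputs = session.run(None, {session.get_inputs()[0].name: dummy})
print([o.shape for o in outputs])  # detection output plus segmentation prototypes
```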

</details>

## <div align="center">Classification Models ⭐ NEW</div>

YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.

<details>
  <summary>Classification Checkpoints</summary>

<br>

We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. For easy reproducibility, we ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup).

| Model                                                                                              | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Training<br><sup>90 epochs<br>4xA100 (hours) | Speed<br><sup>ONNX CPU<br>(ms)      | Speed<br><sup>TensorRT V100<br>(ms)       | params<br><sup>(M) | FLOPs<br><sup>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ----------------------------------- | ---------------------------------------- | ---------------- | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt)         | 224                   | 64.6             | 85.4             | 7:59                                         | **3.3**                             | **0.5**                                  | **2.5**          | **0.5**                |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt)         | 224                   | 71.5             | 90.2             | 8:09                                         | 6.6                                 | 0.6                                      | 5.4              | 1.4                    |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt)         | 224                   | 75.9             | 92.9             | 10:06                                        | 15.5                                | 0.9                                      | 12.9             | 3.9                    |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt)         | 224                   | 78.0             | 94.0             | 11:56                                        | 26.9                                | 1.4                                      | 26.5             | 8.5                    |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt)         | 224                   | **79.0**         | **94.4**         | 15:04                                        | 54.3                                | 1.8                                      | 48.1             | 15.9                   |
|                                                                                                    |                       |                  |                  |                                              |                                     |                                          |                  |                        |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt)               | 224                   | 70.3             | 89.5             | **6:47**                                     | 11.2                                | 0.5                                      | 11.7             | 3.7                    |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt)               | 224                   | 73.9             | 91.8             | 8:33                                         | 20.6                                | 0.9                                      | 21.8             | 7.4                    |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt)               | 224                   | 76.8             | 93.4             | 11:10                                        | 23.4                                | 1.0                                      | 25.6             | 8.5                    |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt)             | 224                   | 78.5             | 94.3             | 17:10                                        | 42.1                                | 1.9                                      | 44.5             | 15.9                   |
|                                                                                                    |                       |                  |                  |                                              |                                     |                                          |                  |                        |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224                   | 75.1             | 92.4             | 13:03                                        | 12.5                                | 1.3                                      | 5.3              | 1.0                    |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224                   | 76.4             | 93.2             | 17:04                                        | 14.9                                | 1.6                                      | 7.8              | 1.5                    |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224                   | 76.6             | 93.4             | 17:10                                        | 15.9                                | 1.6                                      | 9.1              | 1.7                    |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224                   | 77.7             | 94.0             | 19:19                                        | 18.9                                | 1.9                                      | 12.2             | 2.4                    |

<details>
  <summary>Table Notes (click to expand)</summary>

- All checkpoints are trained for 90 epochs with the SGD optimizer, `lr0=0.001` and `weight_decay=5e-5`, at image size 224 with default settings.<br>Training runs are logged at https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2
- **Accuracy** values are for single-model single-scale on the [ImageNet-1k](https://www.image-net.org/index.php) dataset.<br>Reproduce with `python classify/val.py --data ../datasets/imagenet --img 224`
- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.<br>Reproduce with `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 is done with `export.py`.<br>Reproduce with `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
  </details>
  </details>

<details>
  <summary>Classification Usage Examples &nbsp;<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></summary>

### Train

YOLOv5 classification training supports auto-download of the MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof and ImageNet datasets via the `--data` argument. To start training on MNIST, for example, use `--data mnist`.

```bash
# Single-GPU
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128

# Multi-GPU DDP
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```

### Val

Validate YOLOv5m-cls accuracy on the ImageNet-1k dataset:

```bash
bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
```

### Predict

Use a pretrained YOLOv5s-cls.pt to predict bus.jpg:

```bash
python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
```

```python
model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")  # load from PyTorch Hub
```

### Export

Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```

</details>

## <div align="center">Environments</div>

Get started with YOLOv5 in seconds using our verified environments. Click each icon below for details.

<div align="center">
  <a href="https://bit.ly/yolov5-paperspace-notebook">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gradient.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://www.kaggle.com/ultralytics/yolov5">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://hub.docker.com/r/ultralytics/yolov5">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="10%" /></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="5%" alt="" />
  <a href="https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/">
    <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="10%" /></a>
</div>

## <div align="center">Contribute</div>

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you to all our contributors!

<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

<a href="https://github.com/ultralytics/yolov5/graphs/contributors">
<img src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" /></a>

## <div align="center">License</div>

Ultralytics offers two licensing options to accommodate diverse use cases:

- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).

## <div align="center">Contact</div>

For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!

<br>
<div align="center">
  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
  <a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>

[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation


================================================
FILE: README.md
================================================
# apex gun

A YOLOv5-based auto-aim tool built on Apex Legends character detection.

The open-source discussion group was created on 2024-04-25, QQ group number: 206666041. Please star the repo before joining.

Joining rules: please join only if you have some coding background. The group admins will not answer overly basic questions (though you may still hope other members will), and will not teach you how to use the project from scratch; we only help with implementation ideas or environment-setup questions. We only guarantee that the code runs; we are not responsible for environments that fail due to differences in individual installations.

Bugs in release packages may not be fixed promptly, so please do not pester the group owner with questions about release versions or usage; pull and build the latest code yourself.

All of the rules above are enforced at our discretion. Please do not become impatient or resort to personal attacks when nobody answers. We also do not accept paid "tutoring" in any form; if you have money, go buy a commercial cheat instead, thank you.

[![Star History Chart](https://api.star-history.com/svg?repos=wdragondragon/apex-yolov5&type=Date)](https://star-history.com/#wdragondragon/apex-yolov5&Date)

## Features

- [x] AI auto-aim with friend-or-foe recognition
- [x] Gun recognition and automatic firing
- [x] Smooth mouse movement
- [x] Customizable per-move mouse pixel step, with an adjustable movement multiplier for the detection image
- [x] Debug window showing bounding boxes around detected players
- [x] Socket-based dual-machine mode: the server does the computation while the client moves the mouse (see the sketch after this list)
- [x] Multi-server support: several servers can serve a single client simultaneously to speed up computation
- [x] [Machine-code-based usage authorization](https://github.com/wdragondragon/apex_vaildate.git)
- [x] [Automatic download/update](https://github.com/wdragondragon/ag_auto_update.git)
- [x] Automatic annotation at one-second intervals whenever a target is detected, feeding data back to improve the model
- [x] Saving configs and switching between multiple profiles
- [x] Line chart visualizing detection frame-rate fluctuations
- [x] Deliberate missed shots
- [x] Supports KmBox A, the Logitech driver, and the WuYa keyboard/mouse box
- [X] Dynamic detection area: fixes the problem that fixed parameters cannot handle both point-blank and long-range targets; the detection range now adapts to enemy size
- [X] Marking of the locked-on enemy
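
To illustrate the dual-machine item above: the real implementation lives in `apex_yolov5/socket` and `apex_recoils/net/socket`, but the division of labor is roughly the following sketch (hypothetical names and a made-up JSON message format, for illustration only):

```python
import json
import socket

def serve(host="0.0.0.0", port=50007):
    """Server side (the machine running YOLOv5): send aim offsets to the client."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((host, port))
        srv.listen(1)
        conn, _ = srv.accept()
        with conn:
            dx, dy = 12, -3  # offsets produced by inference would go here
            conn.sendall(json.dumps({"dx": dx, "dy": dy}).encode() + b"\n")

def run_client(host, port=50007, move_mouse=print):
    """Client side (the machine in game): apply each received offset to the mouse."""
    with socket.create_connection((host, port)) as sock:
        for line in sock.makefile():
            msg = json.loads(line)
            move_mouse(msg["dx"], msg["dy"])
```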

## Usage Notes

To raise the bar for cheating, and because this project is intended for technical sharing, no instructions for running it are provided.

Apart from the weight files, the project files are complete; study them yourself if you are able.

Or see the article [(YOLOv5 from scratch: auto-aim is no longer a pipe dream)](https://www.jianshu.com/p/84ad94250172).

## Other Projects
[Logitech shake-gun macro collection](https://github.com/wdragondragon/apex-shake-gun.git)

[OpenCV-based Apex gun recognition framework (with recoil control and shake-gun, auto recognition via a hardware converter)](https://github.com/wdragondragon/ApexRecoils.git)

[OpenCV-based Apex gun recognition framework (dynamic replacement of Logitech lua files)](https://github.com/wdragondragon/ApexAutomaticGunSelection.git)

## Join Us

You are welcome to join us to improve the existing code, optimize the model, or offer suggestions. We share all resources openly. Since many people add me, friend slots are currently reserved for contributors; for usage and sharing, please join the QQ group.

![wechat.png](wechat.png)


================================================
FILE: ag.spec
================================================
# -*- mode: python ; coding: utf-8 -*-


block_cipher = None

pathex = [
    'C:/Users/Administrator/PycharmProjects/yolov5'
]

hiddenimports = [
    'models.yolo',
    'utils',
    'utils.general',
    'models',
    'utils.aws',
    'utils.docker',
    'utils.flask_rest_api',
    'utils.google_app_engine',
    'utils.loggers',
    'utils.segment',
    'utils.loggers.clearml',
    'utils.loggers.comet',
    'utils.loggers.wandb',
    'models.hub',
    'segment',
    'apex_yolov5',
    'apex_yolov5.socket',
]

a = Analysis(
    ['apex_yolov5_main.py'],
    pathex=pathex,
    binaries=[(r'./utils/general.pyc',r'./utils')],
    datas=[(r'./config/global_config.json',r'./config')],
    hiddenimports=hiddenimports,  # use the full hidden-import list defined above
    hookspath=[],
    hooksconfig={},
    runtime_hooks=['setenv.py'],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='ag',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon='./images/ag.ico'
)
coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='ag'
)


================================================
FILE: ag_asyn.spec
================================================
# -*- mode: python ; coding: utf-8 -*-


block_cipher = None

pathex = [
    'C:/Users/Administrator/PycharmProjects/yolov5'
]

hiddenimports = [
    'models.yolo',
    'utils',
    'utils.general',
    'models',
    'utils.aws',
    'utils.docker',
    'utils.flask_rest_api',
    'utils.google_app_engine',
    'utils.loggers',
    'utils.segment',
    'utils.loggers.clearml',
    'utils.loggers.comet',
    'utils.loggers.wandb',
    'models.hub',
    'segment',
    'apex_yolov5',
    'apex_yolov5.socket',
]

a = Analysis(
    ['main.py'],
    pathex=pathex,
    binaries=[(r'./utils/general.pyc',r'./utils')],
    datas=[(r'./config/ref/global_config.json',r'./config/ref'),(r'./config/ref.txt',r'./config')],
    hiddenimports=hiddenimports + ['scipy.special._cdflib', 'wmi'],  # full list defined above plus extras
    hookspath=[],
    hooksconfig={},
    runtime_hooks=['setenv.py'],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='ag_asyn',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon='./images/ag.ico'
)
coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='ag_asyn'
)


================================================
FILE: apex_recoils/__init__.py
================================================


================================================
FILE: apex_recoils/core/GameWindowsStatus.py
================================================
import threading
import time

from apex_yolov5.Tools import Tools
from apex_yolov5.log import LogFactory


class GameWindowsStatus:
    """
        Game window status detection.
    """

    def __init__(self):
        self.status = False
        self.logger = LogFactory.getLogger(self.__class__)
        self.timing_get_status_thread()

    def timing_get_status_thread(self):
        """
            Start the detection loop in a new thread.
        """
        threading.Thread(target=self.timing_get_status).start()

    def timing_get_status(self):
        """
            Poll the window status.
        """
        while True:
            status = Tools.is_apex_windows()
            if self.status != status:
                self.status = status
                self.logger.print_log(f"窗口状态切换{self.status}")
            time.sleep(2)

    def get_game_windows_status(self):
        """
            Get the current status.
        """
        return self.status


game_status = None


def init():
    global game_status
    game_status = GameWindowsStatus()


def get_game_status():
    return game_status
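

# Usage sketch (illustrative): call init() once at startup, then poll the
# module-level singleton from elsewhere, e.g.
#
#   from apex_recoils.core import GameWindowsStatus
#   GameWindowsStatus.init()
#   if GameWindowsStatus.get_game_status().get_game_windows_status():
#       ...  # the Apex window is currently active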


================================================
FILE: apex_recoils/core/ReaSnowSelectGun.py
================================================
import json
import os.path as op

from apex_yolov5.Tools import Tools
from apex_yolov5.log import LogFactory
from apex_yolov5.mouse_mover import MoverFactory


class ReaSnowSelectGun:
    """
        Triggers key macros for converter-based automatic gun recognition.
    """

    def __init__(self, config_name='ReaSnowGun'):
        self.logger = LogFactory.getLogger(self.__class__)
        self.config_path = f".\\config\\{config_name}.json"

        if op.exists(self.config_path):
            with open(self.config_path, encoding='utf-8') as global_file:
                self.key_dict = json.load(global_file)
        else:
            self.key_dict = {}  # fall back to defaults when no config file exists

        self.no_macro_key = self.key_dict.get("close_key", "0x35")
        self.no_found_click_close_key = self.key_dict.get("no_found_click_close_key", True)
        self.auto_caps = self.key_dict.get("auto_caps", True)

        self.no_macro_key = Tools.convert_to_decimal(self.no_macro_key)

    def trigger_button(self, select_gun, select_scope, hot_pop):
        """

        :param select_gun:
        :param select_scope:
        :param hot_pop:
        :return:
        """
        if select_gun is None or select_scope is None:
            self.logger.print_log(f"未识别到枪械{',关闭宏' if self.no_found_click_close_key else ''}")
            if self.no_found_click_close_key:
                MoverFactory.mouse_mover().click_key(self.no_macro_key)
            return

        gun_scope_dict = self.key_dict.get(select_gun)
        if gun_scope_dict is None:
            self.logger.print_log(f"枪械[{select_gun}]没有数据{',关闭宏' if self.no_found_click_close_key else ''}")
            if self.no_found_click_close_key:
                MoverFactory.mouse_mover().click_key(self.no_macro_key)
            return

        if hot_pop is not None and hot_pop in gun_scope_dict:
            gun_scope_dict = gun_scope_dict[hot_pop]

        first_char = select_scope[0]

        caps_lock = True
        if "caps_" + first_char in gun_scope_dict:
            caps_lock = gun_scope_dict["caps_" + first_char]
        elif "caps" in gun_scope_dict:
            caps_lock = gun_scope_dict["caps"]

        if first_char in gun_scope_dict:
            scope_data = gun_scope_dict[first_char]
        else:
            scope_data = None
        if "0" in gun_scope_dict:
            scope_data = gun_scope_dict["0"]
            self.logger.print_log(f"枪械[{select_gun}使用通用数据]")
        if scope_data is not None:
            self.logger.print_log(f"枪械[{select_gun}]按下键位[{scope_data}]切换数据")
            MoverFactory.mouse_mover().click_key(Tools.convert_to_decimal(scope_data))
            if self.auto_caps:
                MoverFactory.mouse_mover().toggle_caps_lock(caps_lock)


================================================
FILE: apex_recoils/core/SelectGun.py
================================================
import threading
import time
import traceback

from apex_recoils.core.screentaker.LocalScreenTaker import LocalScreenTaker
from apex_yolov5.KeyAndMouseListener import KMCallBack
from apex_yolov5.log import LogFactory


class SelectGun:
    """
        Gun recognition.
    """

    def __init__(self, bbox, image_path, scope_bbox, scope_path, hop_up_bbox, hop_up_path,
                 refresh_buttons, has_turbocharger, image_comparator, screen_taker: LocalScreenTaker,
                 game_windows_status):
        super().__init__()
        self.logger = LogFactory.getLogger(self.__class__)
        self.on_key_map = dict()
        self.bbox = bbox
        self.image_path = image_path
        self.scope_bbox = scope_bbox
        self.scope_path = scope_path
        self.select_gun_sign = True
        self.current_gun = None
        self.current_scope = None
        self.current_hot_pop = None
        self.real_current_scope = None
        self.refresh_buttons = refresh_buttons
        self.has_turbocharger = has_turbocharger
        self.hop_up_bbox = hop_up_bbox
        self.hop_up_path = hop_up_path
        self.game_windows_status = game_windows_status
        self.call_back = []
        self.fail_time = 0
        self.image_comparator = image_comparator
        self.screen_taker = screen_taker
        for refresh_button in self.refresh_buttons:
            KMCallBack.connect(KMCallBack("k", refresh_button, self.select_gun_threading, False))

        threading.Thread(target=self.timing_execution).start()

    def timing_execution(self):
        """
            Recognize on a timer.
        """
        while True:
            try:
                if self.game_windows_status.get_game_windows_status():
                    if self.select_gun_with_sign(auto=True):
                        self.fail_time = 0
                    else:
                        self.fail_time += 1
                else:
                    self.fail_time = 0
            except Exception:
                traceback.print_exc()
            time.sleep(1 + self.fail_time / 5)

    def select_gun_threading(self, pressed=False, toggle=False):
        """

        :param pressed:
        :param toggle:
        :return:
        """
        if self.select_gun_sign:
            return
        threading.Thread(target=self.select_gun_with_sign, args=(pressed, toggle, False)).start()

    def select_gun_with_sign(self, pressed=False, toggle=False, auto=False):
        """

        :param pressed:
        :param toggle:
        :param auto:
        :return:
        """
        if self.select_gun_sign:
            return
        self.select_gun_sign = True
        start = time.time()
        result = self.select_gun(pressed, toggle, auto)
        self.logger.print_log(f"该次识别耗时:{int((time.time() - start) * 1000)}ms")
        self.select_gun_sign = False
        return result

    def get_images_from_bbox(self, bbox_list):
        """
        Get images from specified bounding boxes.

        :param bbox_list: List of bounding boxes [(x1, y1, x2, y2), ...]
        :return: Generator yielding images
        """
        # try:
        #     return list(ImageGrab.grab(bbox=bbox) for bbox in bbox_list)
        # except Exception as e:
        #     self.logger.print_log(f"Error in get_images_from_bbox: {e}")
        return self.screen_taker.get_images_from_bbox(bbox_list)

    def select_gun(self, pressed=False, toggle=False, auto=False):
        """
            Compare screenshots against reference images one by one; the best match becomes current_gun.
        :return:
        """
        if not self.game_windows_status.get_game_windows_status():
            return False
        gun_temp, score_temp = self.image_comparator.compare_with_path(self.image_path,
                                                                       self.get_images_from_bbox([self.bbox]), 0.9, 0.7)
        if gun_temp is None:
            self.logger.print_log("未找到枪械")
            self.current_gun = None
            self.current_scope = None
            self.current_hot_pop = None
            return False

        scope_temp, score_scope_temp = self.image_comparator.compare_with_path(self.scope_path,
                                                                               self.get_images_from_bbox(
                                                                                   self.scope_bbox), 0.9,
                                                                               0.4)
        self.real_current_scope = scope_temp
        if scope_temp is None:
            self.logger.print_log("未找到配件,默认为1倍")
            scope_temp = '1x'

        if gun_temp in self.has_turbocharger:
            hop_up_temp, score_hop_up_temp = self.image_comparator.compare_with_path(self.hop_up_path,
                                                                                     self.get_images_from_bbox(
                                                                                         self.hop_up_bbox),
                                                                                     0.9, 0.6)
        else:
            hop_up_temp = None
            score_hop_up_temp = 0

        if gun_temp == self.current_gun and scope_temp == self.current_scope and hop_up_temp == self.current_hot_pop:
            self.logger.print_log(
                "当前枪械搭配已经是: {}-{}-{}".format(self.current_gun, self.current_scope, self.current_hot_pop))
            if auto:
                return False
        else:
            self.current_scope = scope_temp
            self.current_gun = gun_temp
            self.current_hot_pop = hop_up_temp
            self.logger.print_log(
                "枪械: {},相似: {}-配件: {},相似: {}-hop_up: {},相似: {}".format(self.current_gun, score_temp,
                                                                                 self.current_scope, score_scope_temp,
                                                                                 self.current_hot_pop,
                                                                                 score_hop_up_temp))

        for func in self.call_back:
            func(self.current_gun, self.current_scope, self.current_hot_pop)
        return True

    def connect(self, func):
        self.call_back.append(func)

    def test(self):
        self.logger.print_log("自动识别初始化中,请稍后……")
        start = time.time()
        self.image_comparator.compare_with_path(self.image_path,
                                                self.get_images_from_bbox([self.bbox]), 0.9, 0.7)
        self.image_comparator.compare_with_path(self.scope_path,
                                                self.get_images_from_bbox(
                                                    self.scope_bbox), 0.9,
                                                0.4)
        self.image_comparator.compare_with_path(self.hop_up_path,
                                                self.get_images_from_bbox(
                                                    self.hop_up_bbox),
                                                0.9, 0.6)
        self.logger.print_log(f"自动识别初始化完毕,耗时[{int((time.time() - start) * 1000)}]")
        self.select_gun_sign = False


select_gun = None


def get_select_gun():
    return select_gun


================================================
FILE: apex_recoils/core/__init__.py
================================================


================================================
FILE: apex_recoils/core/image_comparator/DynamicSizeImageComparator.py
================================================
from apex_recoils.core.image_comparator.NetImageComparator import NetImageComparator
from apex_yolov5.log import LogFactory


class DynamicSizeImageComparator(NetImageComparator):
    """
        Network image comparison with dynamically sized fuzzy matching.
    """

    def __init__(self, base_path, screen_taker):
        super().__init__(base_path)
        self.image_cache = {}
        self.logger = LogFactory.getLogger(self.__class__)
        self.base_path = base_path
        self.screen_taker = screen_taker

    def compare_with_path(self, path, images, lock_score, discard_score):
        path = self.base_path + path
        image_info_arr = [image_info.split() for image_info in
                          self.read_file_from_url_and_cache(path, "list.txt")]
        select_name, score_temp = self.match_template(path, image_info_arr, threshold=discard_score)
        return select_name, score_temp

    def match_template(self, path, image_info_arr, threshold=0.8):
        for image_info in image_info_arr:
            image_path, x, y, w, h = image_info
            image_path = path + image_path
            box = (int(x), int(y), int(w), int(h))
            img = self.screen_taker.get_images_from_bbox([box])[0]
            score = super().compare_image(img, image_path)
            if score > threshold:
                return image_info[0].split(".")[0], score
        return "", 0.0

    def cache_image(self, base_path, line_content):
        arr = line_content.split()
        if len(arr) == 5:
            image_path, x, y, w, h = arr[0], arr[1], arr[2], arr[3], arr[4]
            image_path = base_path + image_path
        else:
            image_path = line_content
        super().cache_image("", image_path)


================================================
FILE: apex_recoils/core/image_comparator/ImageComparator.py
================================================
import concurrent.futures
import traceback
from io import BytesIO

import cv2
import numpy as np
from skimage.metrics import structural_similarity

from apex_yolov5.log import LogFactory


class ImageComparator:
    """
        Image comparison.
    """

    def __init__(self, base_path):
        # in-memory image cache
        self.image_cache = {}
        self.logger = LogFactory.getLogger(self.__class__)
        self.base_path = base_path

    def compare_image(self, img, path_image):
        """
            Compare a screenshot against a cached reference image and return the SSIM score.
        :param img:
        :param path_image:
        :return:
        """
        # fetch the reference image bytes from the in-memory cache
        try:
            downloaded_image = self.get_image_from_cache(path_image)

            if downloaded_image:
                downloaded_image.seek(0)
                image_a = cv2.imdecode(np.frombuffer(downloaded_image.getvalue(), dtype=np.uint8), cv2.IMREAD_COLOR)
                downloaded_image.close()
                image_b = np.array(img)
                gray_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
                gray_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)
                (score, diff) = structural_similarity(gray_a, gray_b, full=True)
                return score
            else:
                # treat a failed image load as no match
                return 0
        except Exception as e:
            print(e)
            traceback.print_exc()
            self.logger.print_log(f"对比图片错误:{path_image}")
            return 0
    def get_image_from_cache(self, url):
        """
            Fetch an image through the in-memory cache.
        """
        # load the image into the cache on first access, then return the cached bytes
        url = url.strip()
        if url not in self.image_cache:
            self.cache_image("", url)
        return BytesIO(self.image_cache[url])


    def compare_with_path(self, path, images, lock_score, discard_score):
        """
            Compare the captured images against every reference image under the given path.
        :param path:
        :param images:
        :param lock_score:
        :param discard_score:
        :return:
        """
        path = self.base_path + path
        select_name = ''
        score_temp = 0.0
        for img in images:
            for fileName in self.read_file_from_url_and_cache(path, "list.txt"):
                score = self.compare_image(img, path + fileName)
                if score > score_temp:
                    score_temp = score
                    select_name = fileName.split('.')[0]
                if score_temp > lock_score:
                    break
        if score_temp < discard_score:
            select_name = None
        return select_name, score_temp

    def read_file_from_url_and_cache(self, base_path, file_name):
        """
            Read the image list file and pre-cache every listed image.
        """
        images_path = self.read_file_from_url(base_path + file_name)
        if images_path is None:
            return None

        # cache every listed image concurrently using a thread pool
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # submit one caching task per image
            futures = [executor.submit(self.cache_image, base_path, image_path) for image_path in images_path]

            # wait for all caching tasks to finish
            concurrent.futures.wait(futures)

        return images_path

    def read_file_from_url(self, url):
        """
            Read a list file and return its lines. The base implementation returns an
            empty list; subclasses supply local-file or network behavior.
        :param url: location of the list file
        """
        return []

    def cache_image(self, base_path, url):
        """
            Load an image into the cache. The base implementation is a stub.
        :param base_path:
        :param url:
        :return:
        """
        self.logger.print_log("Image caching is not implemented in the base class.")


================================================
FILE: apex_recoils/core/image_comparator/LocalImageComparator.py
================================================
import os
import re

from apex_recoils.core.image_comparator.ImageComparator import ImageComparator
from apex_yolov5.log import LogFactory

net_file_cache = {}


class LocalImageComparator(ImageComparator):
    """
        Local image comparison.
    """

    def __init__(self, base_path):
        super().__init__(base_path)
        self.image_cache = {}
        self.logger = LogFactory.getLogger(self.__class__)
        self.base_path = base_path


    def read_file_from_url(self, filepath):
        """
        Read a local file and return its contents split into lines.
        :param filepath: local file path
        :return: list of lines, or None on failure
        """
        try:
            if filepath in net_file_cache:
                return net_file_cache[filepath]

            if not os.path.isfile(filepath):
                print(f"File not found: {filepath}")
                return None

            with open(filepath, 'r', encoding='utf-8') as f:
                text = f.read()

                lines = re.split(r'\r\n|\r|\n', text)
                net_file_cache[filepath] = lines
                return lines
        except Exception as e:
            print(f"An error occurred while reading local file: {e}")
            return None

    def cache_image(self, base_path, url):
        # return immediately if the image is already cached
        url = base_path + url
        url = url.strip()
        if url in self.image_cache:
            return
        self.logger.print_log(f"正在加载图片:{url.replace(self.base_path, '')}")
        if os.path.exists(url) and os.path.isfile(url):
            with open(url, 'rb') as f:
                self.image_cache[url] = f.read()
        else:
            # the file does not exist; log the failure
            self.logger.print_log(f"Failed to load image: {url}. Check that the file exists.")

================================================
FILE: apex_recoils/core/image_comparator/NetImageComparator.py
================================================
import re
import traceback
from io import BytesIO

import cv2
import numpy as np
import requests
from skimage.metrics import structural_similarity

from apex_recoils.core.image_comparator.ImageComparator import ImageComparator
from apex_yolov5.log import LogFactory

headers_list = [
    {
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; SM-G955U Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 10; SM-G981B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (iPad; CPU OS 13_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/87.0.4280.77 Mobile/15E148 Safari/604.1'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.109 Safari/537.36 CrKey/1.54.248666'
    }, {
        'user-agent': 'Mozilla/5.0 (X11; Linux aarch64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.188 Safari/537.36 CrKey/1.54.250320'
    }, {
        'user-agent': 'Mozilla/5.0 (BB10; Touch) AppleWebKit/537.10+ (KHTML, like Gecko) Version/10.0.9.2372 Mobile Safari/537.10+'
    }, {
        'user-agent': 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 2.1.0; en-US) AppleWebKit/536.2+ (KHTML like Gecko) Version/7.2.1.0 Safari/536.2+'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; U; Android 4.3; en-us; SM-N900T Build/JSS15J) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; U; Android 4.1; en-us; GT-N7100 Build/JRO03C) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; U; Android 4.0; en-us; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 7.0; SM-G950U Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; SM-G965U Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.111 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; SM-T837A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.80 Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; U; en-us; KFAPWI Build/JDQ39) AppleWebKit/535.19 (KHTML, like Gecko) Silk/3.13 Safari/535.19 Silk-Accelerated=true'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; U; Android 4.4.2; en-us; LGMS323 Build/KOT49I.MS32310c) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 550) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Mobile Safari/537.36 Edge/14.14263'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0.1; Moto G (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 10 Build/MOB31T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Nexus 5X Build/OPR4.170623.006) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 7.1.1; Nexus 6 Build/N6F26U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Nexus 6P Build/OPP3.170518.006) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 7 Build/MOB30X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 520)'
    }, {
        'user-agent': 'Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 9; Pixel 3 Build/PQ1A.181105.017.A1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.158 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 10; Pixel 4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 11; Pixel 3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.181 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Mobile Safari/537.36'
    }, {
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1'
    }, {
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    }, {
        'user-agent': 'Mozilla/5.0 (iPad; CPU OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1'
    }
]

net_file_cache = {}


class NetImageComparator(ImageComparator):
    def __init__(self, base_path):
        super().__init__(base_path)
        # Dictionary caching the raw bytes of downloaded images
        self.image_cache = {}
        self.logger = LogFactory.getLogger(self.__class__)
        self.base_path = base_path

    def read_file_from_url(self, url):
        """

        :param url:
        :return:
        """
        try:
            if url in net_file_cache:
                return net_file_cache[url]
            # Send a GET request for the file contents
            # headers = random.choice(headers_list)
            response = requests.get(url)
            response.encoding = 'utf-8'
            # Check whether the request succeeded
            if response.status_code == 200:
                # Split on any newline style and return the lines
                text = response.text
                lines = re.split(r'\r\n|\r|\n', text)
                net_file_cache[url] = lines
                return lines
            else:
                print(f"Failed to read file from URL. Status code: {response.status_code}")
                return None
        except Exception as e:
            print(f"An error occurred: {e}")
            return None

    def cache_image(self, base_path, url):
        # Already cached; nothing to do
        url = base_path + url
        url = url.strip()
        if url in self.image_cache:
            return
        self.logger.print_log(f"正在加载图片:{url.replace(self.base_path, '')}")
        # 发送GET请求获取图片的二进制数据
        # 发送GET请求获取文件内容
        # headers = random.choice(headers_list)
        response = requests.get(url)

        # Check whether the request succeeded
        if response.status_code == 200:
            # Keep the raw bytes; they are decoded into an image on use
            image_bytes = response.content
            # Add the bytes to the cache
            self.image_cache[url] = image_bytes
        else:
            # The request failed; log an error
            self.logger.print_log(f"Failed to download image: {url}. Status code: {response.status_code}")

    def get_image_from_cache(self, url):
        """
            缓存获取图片
        """
        # 如果图像已经在缓存中,直接返回缓存的图像
        url = url.strip()
        if url not in self.image_cache:
            self.cache_image("", url)
        return BytesIO(self.image_cache[url])

    def compare_image(self, img, path_image):
        # Download the image into memory
        try:
            downloaded_image = self.get_image_from_cache(path_image)

            if downloaded_image:
                downloaded_image.seek(0)
                image_a = cv2.imdecode(np.frombuffer(downloaded_image.getvalue(), dtype=np.uint8), cv2.IMREAD_COLOR)
                downloaded_image.close()
                image_b = np.array(img)
                gray_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
                gray_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)
                (score, diff) = structural_similarity(gray_a, gray_b, full=True)
                return score
            else:
                # Image download failed; treat as no match
                return 0
        except Exception as e:
            print(e)
            traceback.print_exc()
            self.logger.print_log(f"对比图片错误:{path_image}")
            return 0
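

if __name__ == "__main__":
    # Usage sketch (not part of the original file): score a local crop against
    # a remote template via SSIM. The URL and file names are hypothetical, and
    # both images must have identical dimensions because structural_similarity
    # requires same-shaped inputs (a mismatch is caught and scored 0 above).
    from PIL import Image

    comparator = NetImageComparator("https://example.com/apex/images/")
    crop = Image.open("weapon_crop.png")
    score = comparator.compare_image(crop, "https://example.com/apex/images/r301.png")
    print(f"SSIM score: {score:.3f}")  # 1.0 means identical grayscale content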


================================================
FILE: apex_recoils/core/image_comparator/__init__.py
================================================


================================================
FILE: apex_recoils/core/kmnet_listener/ToggleKeyListener.py
================================================
import time

from apex_recoils.core import GameWindowsStatus
from apex_yolov5.KmBoxNetListener import KmBoxNetListener
from apex_yolov5.Tools import Tools
from apex_yolov5.log import LogFactory
from apex_yolov5.mouse_mover import MoverFactory


class ToggleKeyListener:
    """
        监听kmnet 关于辅助开关键的实现
    """

    def __init__(self, km_box_net_listener: KmBoxNetListener, delayed_activation_key_list,
                 toggle_hold_key):
        import kmNet
        self.kmNet = kmNet
        self.logger = LogFactory.getLogger(self.__class__)
        self.km_box_net_listener = km_box_net_listener
        # Custom hold-delay conversion keys
        self.delayed_activation_key_status_map = {}
        self.delayed_activation_key_list = [(Tools.convert_to_decimal(key), value) for key, value in
                                            delayed_activation_key_list.items()]
        km_box_net_listener.connect(self.delayed_activation)

        # Custom toggle-to-hold keys
        self.key_status_map = {}
        self.toggle_hold_key = toggle_hold_key
        self.toggle_close_key = {}

        for key in self.toggle_hold_key:
            close_keys = self.toggle_hold_key[key]
            for close_key in close_keys:
                if close_key not in self.toggle_close_key:
                    self.toggle_close_key[close_key] = []
                if Tools.convert_to_decimal(key) is None:
                    continue
                self.toggle_close_key[close_key].append(key)

        self.mask_toggle_key()
        km_box_net_listener.connect(self.toggle_change)

    def mask_toggle_key(self):
        self.kmNet.unmask_all()
        for key in self.toggle_hold_key:
            self.kmNet.mask_keyboard(Tools.convert_to_decimal(key))
            self.key_status_map[key] = ToggleKey()

    def toggle_change(self):
        if not GameWindowsStatus.get_game_status().get_game_windows_status():
            return
        for key in self.toggle_hold_key:
            num_key = Tools.convert_to_decimal(key)
            if num_key is None:
                continue
            hold_status = self.kmNet.isdown_keyboard(num_key) == 1
            toggle_key_status = self.key_status_map[key]

            if not toggle_key_status.last_hold_status and hold_status:
                toggle_key_status.toggle()
                if toggle_key_status.toggle_status:
                    self.logger.print_log(f"启动长按" + key)
                    MoverFactory.mouse_mover().key_down(num_key)
                else:
                    self.logger.print_log(f"关闭长按" + key)
                    MoverFactory.mouse_mover().key_up(num_key)
            toggle_key_status.hold(hold_status)

        for close_key in self.toggle_close_key:
            num_close_key = Tools.convert_to_decimal(close_key)
            if num_close_key is None:
                continue
            hold_status = self.kmNet.isdown_keyboard(num_close_key) == 1
            if not hold_status:
                continue
            keys = self.toggle_close_key[close_key]
            for key in keys:
                if key not in self.key_status_map:
                    continue
                toggle_key_status = self.key_status_map[key]
                if toggle_key_status.toggle_status:
                    self.logger.print_log(f"关闭长按" + key)
                    MoverFactory.mouse_mover().key_up(Tools.convert_to_decimal(key))
                    toggle_key_status.toggle()

    def controller_toggle_hold_change(self, key):
        if key in self.toggle_close_key:
            keys = self.toggle_close_key[key]
            for key in keys:
                if key not in self.key_status_map:
                    continue
                toggle_key_status = self.key_status_map[key]
                if toggle_key_status.toggle_status:
                    self.logger.print_log(f"关闭长按" + key)
                    MoverFactory.mouse_mover().key_up(Tools.convert_to_decimal(key))
                    toggle_key_status.toggle()

    def delayed_activation(self):
        if not GameWindowsStatus.get_game_status().get_game_windows_status():
            return
        for key, delayed_param in self.delayed_activation_key_list:
            key_time = delayed_param["delay"] if "delay" in delayed_param else None
            up_deactivation = delayed_param["up_deactivation"]
            down_deactivation = delayed_param["down_deactivation"]
            click_key = delayed_param["click_key"] if "click_key" in delayed_param else None
            click_keys = delayed_param["click_keys"] if "click_keys" in delayed_param else None

            hold_status = self.kmNet.isdown_keyboard(key) == 1

            if hold_status:
                if click_keys is None:
                    if key not in self.delayed_activation_key_status_map:
                        self.delayed_activation_key_status_map[key] = DelayedActivationKey()

                    delayed_activation_key_status = self.delayed_activation_key_status_map[key]
                    if down_deactivation:
                        if (int((time.time() - delayed_activation_key_status.hold_time) * 1000) >= key_time
                                and not delayed_activation_key_status.handle):
                            delayed_activation_key_status.handle = True
                            self.logger.print_log(f"持续按下{key},{key_time}ms,转换器开关按下:[{click_key}]")
                            # 转换器切换键
                            MoverFactory.mouse_mover().click_key(Tools.convert_to_decimal(click_key))
                else:
                    if down_deactivation:
                        for click_key_item in click_keys:
                            key_time = click_key_item["delay"]
                            click_key = click_key_item["click_key"]
                            if key not in self.delayed_activation_key_status_map:
                                self.delayed_activation_key_status_map[key] = DelayedActivationKey()

                            delayed_activation_key_status = self.delayed_activation_key_status_map[key]
                            if (int((time.time() - delayed_activation_key_status.hold_time) * 1000) >= key_time
                                    and not delayed_activation_key_status.in_handle_list(key_time)):
                                delayed_activation_key_status.list_handle(key_time)
                                self.logger.print_log(f"持续按下{key},{key_time}ms,转换器开关按下:[{click_key}]")
                                # 转换器切换键
                                MoverFactory.mouse_mover().click_key(Tools.convert_to_decimal(click_key))
            else:
                if key in self.delayed_activation_key_status_map:
                    if up_deactivation:
                        delayed_activation_key_status = self.delayed_activation_key_status_map[key]
                        # Converter toggle key
                        if delayed_activation_key_status.handle:
                            self.logger.print_log(f"{key} released after a long hold; pressing converter toggle key: [{click_key}]")
                            MoverFactory.mouse_mover().click_key(Tools.convert_to_decimal(click_key))
                        else:
                            if click_keys is None:
                                if int((time.time() - delayed_activation_key_status.hold_time) * 1000) >= key_time:
                                    self.logger.print_log(f"按下{key}开关,转换器开关按下:[{click_key}]")
                                    MoverFactory.mouse_mover().click_key(Tools.convert_to_decimal(click_key))
                            else:
                                click_keys = sorted(click_keys, key=lambda x: x["delay"], reverse=True)
                                for click_key_item in click_keys:
                                    key_time = click_key_item["delay"]
                                    click_key = click_key_item["click_key"]
                                    if int((time.time() - delayed_activation_key_status.hold_time) * 1000) >= key_time:
                                        if click_key is not None:
                                            self.logger.print_log(
                                                f"Hold duration {key_time}ms matched; {key} toggled; pressing converter toggle key: [{click_key}]")
                                            MoverFactory.mouse_mover().click_key(Tools.convert_to_decimal(click_key))
                                        break
                    self.delayed_activation_key_status_map.pop(key)

    def destory(self):
        self.kmNet.unmask_all()


class DelayedActivationKey:
    """
        开关状态
    """

    def __init__(self):
        self.hold_time = time.time()
        self.handle = False
        self.handle_list = dict()

    def in_handle_list(self, delay):
        return delay in self.handle_list and self.handle_list[delay]

    def list_handle(self, delay):
        self.handle_list[delay] = True


class ToggleKey:
    """
        开关状态
    """

    def __init__(self):
        self.last_hold_status = False
        self.toggle_status = False

    def toggle(self):
        self.toggle_status = not self.toggle_status

    def hold(self, status):
        self.last_hold_status = status
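

if __name__ == "__main__":
    # Minimal sketch (not part of the original file) of the ToggleKey state
    # machine used by toggle_change: only a rising edge (up -> down) flips the
    # latched status.
    tk = ToggleKey()
    for pressed in (True, True, False, True):
        if not tk.last_hold_status and pressed:
            tk.toggle()
        tk.hold(pressed)
    print(tk.toggle_status)  # False: the two rising edges latched on, then off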


================================================
FILE: apex_recoils/core/kmnet_listener/__init__.py
================================================


================================================
FILE: apex_recoils/core/screentaker/CapScreenTaker.py
================================================
import cv2

from apex_yolov5.log import LogFactory


class CapScreenTaker:
    """
        本地截图
    """

    def __init__(self):
        self.logger = LogFactory.getLogger(self.__class__)
        self.cap = cv2.VideoCapture(0)  # video stream from the capture device
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        self.logger.print_log("使用视频采集卡")

    def get_images_from_bbox(self, bbox_list):
        frames = []
        ret, frame = self.cap.read()
        for monitor in bbox_list:
            frames.append(frame[monitor[1]: monitor[3], monitor[0]: monitor[2]])
        return list(frames)
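

if __name__ == "__main__":
    # Usage sketch (not part of the original file): crop two regions out of a
    # single capture-card frame. Assumes the device at index 0 delivers
    # frames; the bboxes are (x1, y1, x2, y2) and purely illustrative.
    taker = CapScreenTaker()
    left, right = taker.get_images_from_bbox([(0, 0, 416, 416), (500, 0, 916, 416)])
    print(left.shape, right.shape)  # numpy arrays in OpenCV BGR layout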


================================================
FILE: apex_recoils/core/screentaker/LocalMssScreenTaker.py
================================================
import mss

from apex_yolov5.log import LogFactory


class LocalMssScreenTaker:
    """
        本地截图
    """

    def __init__(self):
        self.logger = LogFactory.getLogger(self.__class__)

    def get_images_from_bbox(self, bbox_list):
        """
        Get images from specified bounding boxes.

        :param bbox_list: List of bounding boxes [(x1, y1, x2, y2), ...]
        :return: List of images
        """

        try:
            with mss.mss() as sct:
                return list(
                    sct.grab({'top': bbox[1], 'left': bbox[0], 'width': bbox[2] - bbox[0], 'height': bbox[3] - bbox[1]})
                    for bbox in bbox_list)
        except Exception as e:
            self.logger.print_log(f"Error in get_images_from_bbox: {e}")


================================================
FILE: apex_recoils/core/screentaker/LocalScreenTaker.py
================================================
from PIL import ImageGrab

from apex_yolov5.log import LogFactory


class LocalScreenTaker:
    """
        本地截图
    """

    def __init__(self):
        self.logger = LogFactory.getLogger(self.__class__)

    def get_images_from_bbox(self, bbox_list):
        """
        Get images from specified bounding boxes.

        :param bbox_list: List of bounding boxes [(x1, y1, x2, y2), ...]
        :return: List of images
        """

        try:
            return list(ImageGrab.grab(bbox=bbox) for bbox in bbox_list)
        except Exception as e:
            self.logger.print_log(f"Error in get_images_from_bbox: {e}")


================================================
FILE: apex_recoils/core/screentaker/SocketScreenTaker.py
================================================
from apex_recoils.net.socket.Client import Client
from apex_yolov5.log import LogFactory
from apex_yolov5.log.Logger import Logger


class SocketScreenTaker:
    """
        网络截图
    """

    def __init__(self, logger: Logger, socket_address=("127.0.0.1", 12345)):
        self.logger = LogFactory.getLogger(self.__class__)
        self.socket_address = socket_address
        self.client = Client(socket_address, "screen_taker")
        self.client.open()

    def get_images_from_bbox(self, bbox_list):
        try:
            return self.client.get_images_from_bbox(bbox_list)
        except Exception:
            self.client.close()
            self.open()

    def open(self):
        while not self.client.open_sign:
            try:
                self.client.open()
            except Exception:
                pass


================================================
FILE: apex_recoils/core/screentaker/__init__.py
================================================


================================================
FILE: apex_recoils/net/__init__.py
================================================


================================================
FILE: apex_recoils/net/socket/Client.py
================================================
import pickle  # serialize/deserialize payloads
import socket

from apex_yolov5.socket import socket_util

client_cache = {}


class Client:
    """
        识别客户端
    """

    def __init__(self, socket_address, client_type):
        self.socket_address = socket_address
        self.client_type = client_type
        self.client_socket = None
        self.open_sign = False

    def open(self):
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect(self.socket_address)
        data = pickle.dumps(self.client_type)
        socket_util.send(self.client_socket, data)
        self.open_sign = True

    def close(self):
        try:
            self.client_socket.close()
        except Exception:
            pass
        self.client_socket = None
        self.open_sign = False

    def compare_with_path(self, path, images, lock_score, discard_score):
        """

        :param path:
        :param images:
        :param lock_score:
        :param discard_score:
        :return:
        """
        data = (path, images, lock_score, discard_score)
        # data = {"type": "compare_with_path", "data": (path, images, lock_score, discard_score)}
        data = pickle.dumps(data)
        socket_util.send(self.client_socket, data)
        result_data = socket_util.recv(self.client_socket)
        result = pickle.loads(result_data)
        return result

    def key_trigger(self, select_gun, select_scope, hot_pop):
        """

        :param select_gun:
        :param select_scope:
        :param hot_pop:
        """
        data = (select_gun, select_scope, hot_pop)
        data = pickle.dumps(data)
        socket_util.send(self.client_socket, data)

    def mouse_mover(self, func_name, param):
        """

        :param func_name:
        :param param:
        :return:
        """
        data = (func_name, param)
        data = pickle.dumps(data)
        socket_util.send(self.client_socket, data)

    def get_images_from_bbox(self, bbox_list):
        """
            从服务获取截图,反向架构
        """
        data = bbox_list
        data = pickle.dumps(data)
        socket_util.send(self.client_socket, data)
        result_data = socket_util.recv(self.client_socket)
        result = pickle.loads(result_data)
        return result
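

if __name__ == "__main__":
    # Usage sketch (not part of the original file): fetch screenshots from a
    # remote Server. Assumes a server is already listening on this address.
    client = Client(("127.0.0.1", 12345), "screen_taker")
    client.open()
    images = client.get_images_from_bbox([(0, 0, 416, 416)])
    print(len(images))
    client.close()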


================================================
FILE: apex_recoils/net/socket/ReaSnowSelectGunSocket.py
================================================
import time

from apex_recoils.core.SelectGun import SelectGun
from apex_recoils.net.socket.Client import Client
from apex_yolov5.log import LogFactory


class ReaSnowSelectGunSocket:
    """
        通过网络socket触发按键
    """

    def __init__(self, select_gun: SelectGun, socket_address=("127.0.0.1", 12345)):
        self.logger = LogFactory.getLogger(self.__class__)
        self.client = Client(socket_address, "key_trigger")
        select_gun.connect(self.trigger_button)

    def trigger_button(self, select_gun, select_scope, hot_pop):
        """

        :param select_gun:
        :param select_scope:
        :param hot_pop:
        :return:
        """
        if select_gun is None or select_scope is None:
            return
        start = time.time()
        self.client.key_trigger(select_gun, select_scope, hot_pop)
        self.logger.print_log(f"该次按键触发耗时:{int(1000 * (time.time() - start))}ms")


================================================
FILE: apex_recoils/net/socket/Server.py
================================================
import pickle
import socket
import threading
import traceback

from apex_recoils.core.screentaker.LocalScreenTaker import LocalScreenTaker
from apex_yolov5.log import LogFactory
from apex_yolov5.socket import socket_util


class Server:
    """
        识别服务端
    """

    def __init__(self, server_address, screen_taker: LocalScreenTaker):
        self.logger = LogFactory.getLogger(self.__class__)
        self.server_address = server_address
        self.screen_taker = screen_taker
        self.server_socket = None
        self.buffer_size = 4096
        self.open()

    def open(self):
        """
            打开服务端
        """
        # 创建一个TCP/IP套接字
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # 绑定服务器地址和端口
        self.server_socket.bind(self.server_address)
        # 监听客户端连接
        self.server_socket.listen(1)

    def wait_client(self):
        """
            监听
        """
        while True:
            self.logger.print_log('等待客户端连接...')
            # 等待客户端连接
            client_socket, client_address = self.server_socket.accept()
            self.logger.print_log('客户端已连接:{}'.format(client_address))
            data = socket_util.recv(client_socket)
            data = pickle.loads(data)
            self.logger.print_log("客户端类型:{}".format(data))
            threading.Thread(target=self.listener, args=(client_socket, data)).start()

    def listener(self, client_socket, data_type):
        """

        :param data_type:
        :param client_socket:
        """
        try:
            while True:
                data = socket_util.recv(client_socket)
                data = pickle.loads(data)
                if data_type == "screen_taker":
                    images = self.screen_taker.get_images_from_bbox(data)
                    result_data = pickle.dumps(images)
                    socket_util.send(client_socket, result_data)
        except Exception as e:
            print(e)
            traceback.print_exc()
        finally:
            # Close the connection
            try:
                client_socket.close()
            except Exception as e:
                print(e)
                traceback.print_exc()
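

if __name__ == "__main__":
    # Usage sketch (not part of the original file): serve local screenshots
    # to "screen_taker" clients on port 12345.
    server = Server(("0.0.0.0", 12345), LocalScreenTaker())
    server.wait_client()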


================================================
FILE: apex_recoils/net/socket/SocketMouseMover.py
================================================
from apex_recoils.net.socket.Client import Client
from apex_yolov5.log.Logger import Logger
from apex_yolov5.mouse_mover.MouseMover import MouseMover

from apex_yolov5.log import LogFactory


class SocketMouseMover(MouseMover):
    def __init__(self, mouse_mover_param):
        super().__init__(mouse_mover_param)
        self.logger = LogFactory.getLogger(self.__class__)
        self.client = Client((mouse_mover_param["ip"], mouse_mover_param["port"]), "mouse_mover")
        self.listener = None
        self.toggle_key_listener = None
        self.server_mouse_mover = None

    def move_rp(self, x: int, y: int):
        self.client.mouse_mover("move_rp", (x, y))

    def move(self, x: int, y: int):
        self.client.mouse_mover("move", (x, y))

    def left_click(self):
        self.client.mouse_mover("left_click", ())

    def key_down(self, value):
        self.client.mouse_mover("key_down", (value,))

    def key_up(self, value):
        self.client.mouse_mover("key_up", (value,))

    def get_position(self):
        return super().get_position()

    def is_num_locked(self):
        return super().is_num_locked()

    def is_caps_locked(self):
        return super().is_caps_locked()

    def click_key(self, value):
        self.client.mouse_mover("click_key", (value,))

    def destroy(self):
        """
            销毁
        """
        self.listener.stop()
        self.toggle_key_listener.destory()
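

if __name__ == "__main__":
    # Usage sketch (not part of the original file): forward moves to a remote
    # mouse-mover server. The dict keys mirror the ones __init__ reads; the
    # address is illustrative, and the base class may expect further keys.
    mover = SocketMouseMover({"ip": "127.0.0.1", "port": 12345})
    mover.client.open()
    mover.move_rp(10, -5)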


================================================
FILE: apex_recoils/net/socket/__init__.py
================================================


================================================
FILE: apex_yolov5/Counter.py
================================================
class Counter:
    def __init__(self):
        self.count = 0

    def increase(self):
        self.count += 1
        return self.get_count()

    def reset(self):
        self.count = 0

    def get_count(self):
        return self.count


no_lock_counter = Counter()


def sure_no_aim(num):
    return no_lock_counter.increase() >= num


def reset_counter():
    no_lock_counter.reset()
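

if __name__ == "__main__":
    # Sketch (not part of the original file): sure_no_aim reports True only
    # after `num` consecutive increments since the last reset.
    reset_counter()
    print([sure_no_aim(3) for _ in range(4)])  # [False, False, True, True]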


================================================
FILE: apex_yolov5/FrameRateMonitor.py
================================================
import sys
import time
import traceback

from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.ticker import MultipleLocator

from apex_yolov5.Tools import Tools
from apex_yolov5.log import LogFactory


class FrameRateMonitor(QMainWindow):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.initUI()

    def initUI(self):
        self.setWindowTitle('Frame Rate Monitor')
        self.setGeometry(100, 100, 300, 200)
        self.setWindowFlags(Qt.WindowStaysOnTopHint)

        import matplotlib
        # Use SimHei (or another font with CJK glyph support) for Chinese text
        matplotlib.rcParams['font.sans-serif'] = ['SimHei']

        matplotlib.rcParams['font.family'] = 'SimHei'  # font family

        matplotlib.rcParams['font.size'] = 11  # font size

        layout = QVBoxLayout()

        # Create the Matplotlib figure and the frame-rate buffers
        self.figure = Figure()
        self.canvas = FigureCanvas(self.figure)
        self.ax = self.figure.add_subplot(111)
        self.ax.xaxis.set_major_locator(MultipleLocator(1))
        self.ax.yaxis.set_major_locator(MultipleLocator(1))
        self.ax.grid(True, linestyle='--', alpha=0.6)
        self.ax.set_facecolor('#f0f0f0')  # light-gray background
        self.frame_rate_data = []
        self.frame_rate_data_2 = []

        layout.setContentsMargins(5, 5, 5, 5)  # layout content margins
        layout.addWidget(self.canvas)

        central_widget = QWidget()
        central_widget.setLayout(layout)
        self.setCentralWidget(central_widget)

        self.queue = Tools.GetBlockQueue(name='frame_rate_queue', maxsize=1000)
        self.frame_rate_monitor_thread = FrameRateMonitorThread(self.queue)
        self.frame_rate_monitor_thread.signal.connect(self.update_frame_rate_plot)
        self.frame_rate_monitor_thread.start()

    def add_frame_rate_plot(self, frame_rate):
        self.queue.put(frame_rate)

    def update_frame_rate_plot(self, frame_rate):
        reasoning, screenshot = frame_rate  # (inference rate, capture rate)

        self.frame_rate_data.append(reasoning)
        if len(self.frame_rate_data) > 60:
            self.frame_rate_data.pop(0)

        self.frame_rate_data_2.append(screenshot)
        if len(self.frame_rate_data_2) > 60:
            self.frame_rate_data_2.pop(0)

        if self.config.frame_rate_monitor:
            # Clear the chart and plot the new data
            self.ax.clear()
            self.ax.plot(self.frame_rate_data, marker='o', linestyle='-', label='inference', markersize=3)
            self.ax.plot(self.frame_rate_data_2, marker='o', linestyle='-', label='capture', markersize=3)
            # self.ax.set_title('Frame rate monitor')
            # self.ax.set_xlabel('Elapsed time (s)', fontsize=12)
            # self.ax.set_ylabel('Frame rate', fontsize=12)

            self.ax.legend(loc='lower right')

            # Redraw the canvas
            self.canvas.draw()
        else:
            LogFactory.logger().print_log(f"截图频率:[{screenshot}],推理频率:[{reasoning}]")


class FrameRateMonitorThread(QThread):
    """
        使用信号槽来多线程更新ui
    """
    signal = pyqtSignal(object)

    def __init__(self, queue: Tools.GetBlockQueue):
        super().__init__()
        self.queue = queue

    def run(self):
        """
            避免多线程影响ui,在一个线程中启动队列消费打印
        """
        while True:
            try:
                data = self.queue.get()
                self.signal.emit(data)
            except Exception as e:
                print(e)
                traceback.print_exc()
                time.sleep(0.1)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    import matplotlib

    # Use SimHei (or another font with CJK glyph support) for Chinese text
    matplotlib.rcParams['font.sans-serif'] = ['SimHei']

    matplotlib.rcParams['font.family'] = 'SimHei'  # font family

    matplotlib.rcParams['font.size'] = 11  # font size

    # FrameRateMonitor requires a config object; a minimal stub (assumed here,
    # not in the original file) with the one attribute the plot code reads is
    # enough for a standalone test.
    from types import SimpleNamespace

    frame_rate_app = FrameRateMonitor(SimpleNamespace(frame_rate_monitor=True))
    frame_rate_app.show()
    sys.exit(app.exec_())


================================================
FILE: apex_yolov5/KeyAndMouseListener.py
================================================
import time

from pynput.mouse import Button

from apex_yolov5.Tools import Tools
from apex_yolov5.mouse_mover import MoverFactory
from apex_yolov5.socket.config import global_config


class KeyListener:

    def __init__(self):
        super().__init__()
        self.press_key = dict()
        self.refresh_button = global_config.refresh_button
        self.toggle_key_map = []

    def on_press(self, key):
        """
            键盘按下事件
        :param key:
        """
        key_name = self.get_key_name(key)

        if key_name is not None:
            self.press_key[key_name] = Tools.current_milli_time()

        if key_name in self.toggle_key_map:
            self.toggle_key_map.remove(key_name)
        else:
            self.toggle_key_map.append(key_name)
        for cb in KMCallBack.toggle_call_back:
            if cb.key_type == 'k' and cb.key == key_name and cb.is_press:
                cb.call_back(True, cb.key in self.toggle_key_map)

    # Release handler; pressing esc exits the listener
    def on_release(self, key):
        """
            键盘释放事件
        :param key:
        """
        key_name = self.get_key_name(key)
        if key_name is not None and key_name in self.press_key:
            self.press_key.pop(key_name)
        for cb in KMCallBack.toggle_call_back:
            if cb.key_type == 'k' and cb.key == key_name and not cb.is_press:
                cb.call_back(True, cb.key in self.toggle_key_map)

    def is_open(self, button):
        """
            Report the on/off state of a key used as a toggle switch
        :param button:
        :return:
        """
        return button in self.press_key

    def get_key_name(self, key):
        """
            Extract the key name from a pynput key object
        :param key:
        :return:
        """
        key_name = None
        if not hasattr(key, 'name') and hasattr(key, 'char') and key.char is not None:
            key_name = key.char
        elif hasattr(key, 'name') and key.name is not None:
            key_name = key.name
        return key_name


class MouseListener:
    def __init__(self):
        super().__init__()
        self.on_mouse_key_map = dict()
        self.toggle_mouse_key_map = []
        self.move_metering = None
        self.move_avg_x = 1
        self.move_avg_y = 1

    def on_move(self, x, y):
        if MoverFactory.mouse_mover() is None:
            return
        if self.move_metering is None:
            self.move_metering = (time.time(), (MoverFactory.mouse_mover().get_position()), 0, 0, 0, 0)
        pre_time, (pre_x, pre_y), metering_x, metering_y, move_time_x, move_time_y = self.move_metering
        now = time.time()
        abs_x = abs(pre_x - x)
        abs_y = abs(pre_y - y)
        if int((now - pre_time) * 1000) < 100:
            if abs_x > 0:
                move_time_x += 1
            if abs_y > 0:
                move_time_y += 1
            self.move_metering = (
                pre_time, (x, y), metering_x + abs_x, metering_y + abs_y, move_time_x, move_time_y)
        else:
            avg_x = 0 if move_time_x == 0 else metering_x / move_time_x
            avg_y = 0 if move_time_y == 0 else metering_y / move_time_y
            # print(
            #     f"Mouse movement in 1s: [{metering_x, metering_y}], moves: [{move_time_x, move_time_y}], average per move: [{avg_x, avg_y}]")
            self.move_metering = (time.time(), (x, y), abs_x, abs_y, 1 if abs_x > 0 else 0, 1 if abs_y > 0 else 0)
            self.move_avg_x = max(1, round(avg_x, 0))
            self.move_avg_y = max(1, round(avg_y, 0))

    def on_click(self, x, y, button, pressed):
        if pressed:
            if button in self.on_mouse_key_map:
                return
            self.on_mouse_key_map[button] = Tools.current_milli_time()
            if button.name in self.toggle_mouse_key_map:
                self.toggle_mouse_key_map.remove(button.name)
            else:
                self.toggle_mouse_key_map.append(button.name)
            for cb in KMCallBack.toggle_call_back:
                if cb.key_type == 'm' and cb.key == button.name and cb.is_press:
                    cb.call_back(pressed, cb.key in self.toggle_mouse_key_map)
            # print("左键按下")


================================================
SYMBOL INDEX (1282 symbols across 118 files)
================================================

FILE: PID.py
  class Pid (line 4) | class Pid():
    method __init__ (line 5) | def __init__(self, kp, ki, kd):
    method cmd_pid (line 14) | def cmd_pid(self, exp_val):

FILE: apex_recoils/core/GameWindowsStatus.py
  class GameWindowsStatus (line 8) | class GameWindowsStatus:
    method __init__ (line 13) | def __init__(self):
    method timing_get_status_thread (line 18) | def timing_get_status_thread(self):
    method timing_get_status (line 24) | def timing_get_status(self):
    method get_game_windows_status (line 35) | def get_game_windows_status(self):
  function init (line 45) | def init():
  function get_game_status (line 50) | def get_game_status():

FILE: apex_recoils/core/ReaSnowSelectGun.py
  class ReaSnowSelectGun (line 9) | class ReaSnowSelectGun:
    method __init__ (line 14) | def __init__(self, config_name='ReaSnowGun'):
    method trigger_button (line 38) | def trigger_button(self, select_gun, select_scope, hot_pop):

FILE: apex_recoils/core/SelectGun.py
  class SelectGun (line 10) | class SelectGun:
    method __init__ (line 15) | def __init__(self, bbox, image_path, scope_bbox, scope_path, hop_up_bb...
    method timing_execution (line 44) | def timing_execution(self):
    method select_gun_threading (line 62) | def select_gun_threading(self, pressed=False, toggle=False):
    method select_gun_with_sign (line 73) | def select_gun_with_sign(self, pressed=False, toggle=False, auto=False):
    method get_images_from_bbox (line 90) | def get_images_from_bbox(self, bbox_list):
    method select_gun (line 103) | def select_gun(self, pressed=False, toggle=False, auto=False):
    method connect (line 156) | def connect(self, func):
    method test (line 159) | def test(self):
  function get_select_gun (line 179) | def get_select_gun():

FILE: apex_recoils/core/image_comparator/DynamicSizeImageComparator.py
  class DynamicSizeImageComparator (line 5) | class DynamicSizeImageComparator(NetImageComparator):
    method __init__ (line 10) | def __init__(self, base_path, screen_taker):
    method compare_with_path (line 17) | def compare_with_path(self, path, images, lock_score, discard_score):
    method match_template (line 24) | def match_template(self, path, image_info_arr, threshold=0.8):
    method cache_image (line 35) | def cache_image(self, base_path, line_content):

FILE: apex_recoils/core/image_comparator/ImageComparator.py
  class ImageComparator (line 12) | class ImageComparator:
    method __init__ (line 17) | def __init__(self, base_path):
    method compare_image (line 23) | def compare_image(self, img, path_image):
    method get_image_from_cache (line 51) | def get_image_from_cache(self, url):
    method compare_with_path (line 62) | def compare_with_path(self, path, images, lock_score, discard_score):
    method read_file_from_url_and_cache (line 86) | def read_file_from_url_and_cache(self, base_path, file_name):
    method read_file_from_url (line 104) | def read_file_from_url(self, url):
    method cache_image (line 110) | def cache_image(self, base_path, url):

FILE: apex_recoils/core/image_comparator/LocalImageComparator.py
  class LocalImageComparator (line 10) | class LocalImageComparator(ImageComparator):
    method __init__ (line 15) | def __init__(self, base_path):
    method read_file_from_url (line 22) | def read_file_from_url(self, filepath):
    method cache_image (line 46) | def cache_image(self, base_path, url):

FILE: apex_recoils/core/image_comparator/NetImageComparator.py
  class NetImageComparator (line 94) | class NetImageComparator(ImageComparator):
    method __init__ (line 95) | def __init__(self, base_path):
    method read_file_from_url (line 102) | def read_file_from_url(self, url):
    method cache_image (line 129) | def cache_image(self, base_path, url):
    method get_image_from_cache (line 151) | def get_image_from_cache(self, url):
    method compare_image (line 161) | def compare_image(self, img, path_image):

FILE: apex_recoils/core/kmnet_listener/ToggleKeyListener.py
  class ToggleKeyListener (line 10) | class ToggleKeyListener:
    method __init__ (line 15) | def __init__(self, km_box_net_listener: KmBoxNetListener, delayed_acti...
    method mask_toggle_key (line 44) | def mask_toggle_key(self):
    method toggle_change (line 50) | def toggle_change(self):
    method controller_toggle_hold_change (line 87) | def controller_toggle_hold_change(self, key):
    method delayed_activation (line 99) | def delayed_activation(self):
    method destory (line 165) | def destory(self):
  class DelayedActivationKey (line 169) | class DelayedActivationKey:
    method __init__ (line 174) | def __init__(self):
    method in_handle_list (line 179) | def in_handle_list(self, delay):
    method list_handle (line 182) | def list_handle(self, delay):
  class ToggleKey (line 186) | class ToggleKey:
    method __init__ (line 191) | def __init__(self):
    method toggle (line 195) | def toggle(self):
    method hold (line 198) | def hold(self, status):

FILE: apex_recoils/core/screentaker/CapScreenTaker.py
  class CapScreenTaker (line 6) | class CapScreenTaker:
    method __init__ (line 11) | def __init__(self):
    method get_images_from_bbox (line 18) | def get_images_from_bbox(self, bbox_list):

FILE: apex_recoils/core/screentaker/LocalMssScreenTaker.py
  class LocalMssScreenTaker (line 6) | class LocalMssScreenTaker:
    method __init__ (line 11) | def __init__(self):
    method get_images_from_bbox (line 14) | def get_images_from_bbox(self, bbox_list):

FILE: apex_recoils/core/screentaker/LocalScreenTaker.py
  class LocalScreenTaker (line 6) | class LocalScreenTaker:
    method __init__ (line 11) | def __init__(self):
    method get_images_from_bbox (line 14) | def get_images_from_bbox(self, bbox_list):

FILE: apex_recoils/core/screentaker/SocketScreenTaker.py
  class SocketScreenTaker (line 6) | class SocketScreenTaker:
    method __init__ (line 11) | def __init__(self, logger: Logger, socket_address=("127.0.0.1", 12345)):
    method get_images_from_bbox (line 17) | def get_images_from_bbox(self, bbox_list):
    method open (line 24) | def open(self):

FILE: apex_recoils/net/socket/Client.py
  class Client (line 9) | class Client:
    method __init__ (line 14) | def __init__(self, socket_address, client_type):
    method open (line 20) | def open(self):
    method close (line 27) | def close(self):
    method compare_with_path (line 35) | def compare_with_path(self, path, images, lock_score, discard_score):
    method key_trigger (line 52) | def key_trigger(self, select_gun, select_scope, hot_pop):
    method mouse_mover (line 63) | def mouse_mover(self, func_name, param):
    method get_images_from_bbox (line 74) | def get_images_from_bbox(self, bbox_list):

FILE: apex_recoils/net/socket/ReaSnowSelectGunSocket.py
  class ReaSnowSelectGunSocket (line 8) | class ReaSnowSelectGunSocket:
    method __init__ (line 13) | def __init__(self, select_gun: SelectGun, socket_address=("127.0.0.1",...
    method trigger_button (line 18) | def trigger_button(self, select_gun, select_scope, hot_pop):

FILE: apex_recoils/net/socket/Server.py
  class Server (line 11) | class Server:
    method __init__ (line 16) | def __init__(self, server_address, screen_taker: LocalScreenTaker):
    method open (line 24) | def open(self):
    method wait_client (line 35) | def wait_client(self):
    method listener (line 49) | def listener(self, client_socket, data_type):

FILE: apex_recoils/net/socket/SocketMouseMover.py
  class SocketMouseMover (line 8) | class SocketMouseMover(MouseMover):
    method __init__ (line 9) | def __init__(self, mouse_mover_param):
    method move_rp (line 17) | def move_rp(self, x: int, y: int):
    method move (line 20) | def move(self, x: int, y: int):
    method left_click (line 23) | def left_click(self):
    method key_down (line 26) | def key_down(self, value):
    method key_up (line 29) | def key_up(self, value):
    method get_position (line 32) | def get_position(self):
    method is_num_locked (line 35) | def is_num_locked(self):
    method is_caps_locked (line 38) | def is_caps_locked(self):
    method click_key (line 41) | def click_key(self, value):
    method destroy (line 44) | def destroy(self):

FILE: apex_yolov5/Counter.py
  class Counter (line 1) | class Counter:
    method __init__ (line 2) | def __init__(self):
    method increase (line 5) | def increase(self):
    method reset (line 9) | def reset(self):
    method get_count (line 12) | def get_count(self):
  function sure_no_aim (line 19) | def sure_no_aim(num):
  function reset_counter (line 23) | def reset_counter():

FILE: apex_yolov5/FrameRateMonitor.py
  class FrameRateMonitor (line 15) | class FrameRateMonitor(QMainWindow):
    method __init__ (line 16) | def __init__(self, config):
    method initUI (line 21) | def initUI(self):
    method add_frame_rate_plot (line 59) | def add_frame_rate_plot(self, frame_rate):
    method update_frame_rate_plot (line 62) | def update_frame_rate_plot(self, frame_rate):
  class FrameRateMonitorThread (line 90) | class FrameRateMonitorThread(QThread):
    method __init__ (line 96) | def __init__(self, queue: Tools.GetBlockQueue):
    method run (line 100) | def run(self):

FILE: apex_yolov5/KeyAndMouseListener.py
  class KeyListener (line 10) | class KeyListener:
    method __init__ (line 12) | def __init__(self):
    method on_press (line 18) | def on_press(self, key):
    method on_release (line 37) | def on_release(self, key):
    method is_open (line 49) | def is_open(self, button):
    method get_key_name (line 57) | def get_key_name(self, key):
  class MouseListener (line 71) | class MouseListener:
    method __init__ (line 72) | def __init__(self):
    method on_move (line 80) | def on_move(self, x, y):
    method on_click (line 105) | def on_click(self, x, y, button, pressed):
    method on_scroll (line 127) | def on_scroll(self, x, y, dx, dy):
    method watch_release (line 130) | def watch_release(self):
    method is_press (line 133) | def is_press(self, button):
    method is_toggle (line 136) | def is_toggle(self, button):
    method press_time (line 139) | def press_time(self, button):
    method get_aim_status (line 145) | def get_aim_status(self):
  class KMCallBack (line 152) | class KMCallBack:
    method __init__ (line 158) | def __init__(self, key_type, key, call_back, is_press=True):
    method connect (line 166) | def connect(callback):
    method remove (line 174) | def remove(key_type, key, is_press=True):

FILE: apex_yolov5/KmBoxNetListener.py
  class KmBoxNetListener (line 9) | class KmBoxNetListener:
    method __init__ (line 10) | def __init__(self, km_box_net_mover: KmBoxNetMover):
    method km_box_net_start (line 21) | def km_box_net_start(self):
    method stop (line 81) | def stop(self):
    method connect (line 84) | def connect(self, func):
    method connect_mouse_listner (line 91) | def connect_mouse_listner(self, func):

FILE: apex_yolov5/LogUtil.py
  class LogUtil (line 1) | class LogUtil:
    method __init__ (line 3) | def __init__(self):
    method set_time (line 6) | def set_time(self, use_time_type, use_time):
    method print_time (line 9) | def print_time(self, print_count):

FILE: apex_yolov5/RecoildsCore.py
  class RecoilsConfig (line 17) | class RecoilsConfig:
    method __init__ (line 22) | def __init__(self):
    method load (line 27) | def load(self):
    method get_config (line 41) | def get_config(self, name):
    method read_file_from_url (line 53) | def read_file_from_url(url):
  class RecoilsListener (line 77) | class RecoilsListener:
    method __init__ (line 82) | def __init__(self,
    method start (line 92) | def start(self):
    method run (line 99) | def run(self):
    method handle_serial (line 155) | def handle_serial(self, spec, start_time, num):
    method handle_intermittent (line 166) | def handle_intermittent(self, spec, num):
    method move_index_xy (line 187) | def move_index_xy(self, spec, current_index, point):
  function merge_x_y (line 215) | def merge_x_y(x, y, time_points_x, time_points_y):

FILE: apex_yolov5/SystemTrayApp.py
  class SystemTrayApp (line 7) | class SystemTrayApp:
    method __init__ (line 8) | def __init__(self, main_window, config):
    method init_ui (line 26) | def init_ui(self):
    method show_app (line 44) | def show_app(self):
    method hide_app (line 50) | def hide_app(self):
    method change_icon (line 55) | def change_icon(self, open_status):
    method tray_activated (line 62) | def tray_activated(self, reason):
    method exit_app (line 70) | def exit_app(self):

FILE: apex_yolov5/Tools.py
  class Tools (line 16) | class Tools:
    method get_resolution (line 18) | def get_resolution():
    method compare_image (line 25) | def compare_image(img, path_image):
    method current_milli_time (line 38) | def current_milli_time():
    method copy_file (line 42) | def copy_file(source_path, target_path):
    method is_apex_windows (line 51) | def is_apex_windows():
    method convert_to_decimal (line 57) | def convert_to_decimal(input_str):
    class FixedSizeQueue (line 72) | class FixedSizeQueue:
      method __init__ (line 73) | def __init__(self, max_size):
      method push (line 76) | def push(self, item):
      method pop (line 79) | def pop(self):
      method size (line 82) | def size(self):
      method get_last (line 85) | def get_last(self):
    class GetBlockQueue (line 89) | class GetBlockQueue:
      method __init__ (line 90) | def __init__(self, name, maxsize=1):
      method get (line 95) | def get(self):
      method put (line 99) | def put(self, data):
      method clear (line 112) | def clear(self):

FILE: apex_yolov5/apex_model.py
  function load_model (line 11) | def load_model():

FILE: apex_yolov5/auxiliary.py
  function set_intention (line 33) | def set_intention(x, y, lead_x, lead_y, random_deviation, base_sign=0, m...
  function get_intention (line 63) | def get_intention():
  function incr_executed_intention (line 67) | def incr_executed_intention(move_x, move_y):
  function get_executed_intention (line 84) | def get_executed_intention():
  function set_click (line 88) | def set_click():
  function get_lock_mode (line 101) | def get_lock_mode():
  function get_lock_mode_shoot (line 143) | def get_lock_mode_shoot():
  function start (line 147) | def start():
  function random_move (line 244) | def random_move(x, y, move_step, move_step_max, move_optimization=True):
  function split_coordinate (line 277) | def split_coordinate(x, y, move_step_temp, move_step_y_temp):
  function calculate_distance (line 289) | def calculate_distance(x, y):
  function find_range_index (line 295) | def find_range_index(ranges, num):
  function calculate_percentage_value (line 303) | def calculate_percentage_value(arr, m, n, based_on_character_box):
  function find_range_index_2 (line 321) | def find_range_index_2(ranges, num):

FILE: apex_yolov5/check_run.pyi
  function check (line 1) | def check(validate_type) -> None:
  function open_check (line 8) | def open_check(val_type=None):
  function auth (line 12) | def auth(func):

FILE: apex_yolov5/global_img_info.py
  class ImgInfo (line 1) | class ImgInfo:
    method __init__ (line 2) | def __init__(self):
    method set_img_origin (line 8) | def set_img_origin(self, img_origin, img_data):
    method set_img_origin_2 (line 14) | def set_img_origin_2(self, img_origin, img_data, shot_width, shot_heig...
  function set_current_img (line 24) | def set_current_img(img_origin, img_data):
  function set_current_img_2 (line 30) | def set_current_img_2(img_origin, img_data, shot_width, shot_height):
  function get_current_img (line 36) | def get_current_img():

FILE: apex_yolov5/grabscreen.py
  function grab_screen (line 20) | def grab_screen(region=None):
  function loop_screen (line 56) | def loop_screen(region=None, shot_width=416, shot_height=416):
  function grab_screen_int_array (line 63) | def grab_screen_int_array(region=None):
  function get_img_from_cap (line 96) | def get_img_from_cap(monitor):
  function grab_screen_int_array2 (line 108) | def grab_screen_int_array2(sct, monitor=None):
  function save_screen_to_file (line 125) | def save_screen_to_file(j=None, i=None):
  function save_rescreen_and_aims_to_file_with_thread (line 140) | def save_rescreen_and_aims_to_file_with_thread(img_origin, img, aims):
  function save_rescreen_and_aims_to_file (line 155) | def save_rescreen_and_aims_to_file(img_origin, img, aims):
  function save_img_and_aims_to_file (line 175) | def save_img_and_aims_to_file(img, aims):
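
grab_screen_int_array2(sct, monitor=None) implies an mss-based capture path. A minimal sketch of a region grab in that style, assuming the usual mss monitor-dict format; the 416x416 size mirrors the loop_screen defaults above:

    import numpy as np
    import mss

    def grab_region(left, top, width, height):
        """Grab one screen region and return a BGR numpy array (sketch)."""
        with mss.mss() as sct:
            monitor = {"left": left, "top": top, "width": width, "height": height}
            shot = sct.grab(monitor)     # raw BGRA screenshot
            img = np.array(shot)         # (H, W, 4) uint8
            return img[:, :, :3]         # drop alpha -> BGR for OpenCV/YOLO

    frame = grab_region(0, 0, 416, 416)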

FILE: apex_yolov5/job_listener/JoyListener.py
  class JoyListener (line 10) | class JoyListener:
    method __init__ (line 15) | def __init__(self):
    method start (line 24) | def start(self, main_windows):
    method aync (line 43) | def aync(self):
    method is_press (line 81) | def is_press(self, value):
    method connect_axis (line 91) | def connect_axis(self, func):
    method connect_button (line 98) | def connect_button(self, func):
    method connect_joystick (line 105) | def connect_joystick(self, py_type, func):
    method stop (line 114) | def stop(self):
  function get_joy_listener (line 124) | def get_joy_listener():

FILE: apex_yolov5/job_listener/JoyToKey.py
  class JoyToKey (line 5) | class JoyToKey:
    method __init__ (line 10) | def __init__(self, joy_to_key_map, c1_mouse_mover):
    method init_status_map (line 17) | def init_status_map(self):
    method axis_to_key (line 25) | def axis_to_key(self, axis, value):
    method all_hold (line 62) | def all_hold(self, current):

FILE: apex_yolov5/job_listener/RockerMonitor.py
  class RockerMonitor (line 9) | class RockerMonitor:
    method __init__ (line 14) | def __init__(self, joy_listener: JoyListener):
    method monitor (line 21) | def monitor(self, joystick, event):

FILE: apex_yolov5/job_listener/S1SwitchMonitor.py
  class S1SwitchMonitor (line 13) | class S1SwitchMonitor:
    method __init__ (line 18) | def __init__(self, joy_listener: JoyListener,
    method monitor (line 41) | def monitor(self, joystick, event):
    method monitor_thread (line 56) | def monitor_thread(self, joystick, scene, key_map):
    method time_out (line 133) | def time_out(self, start_time, detect_time):
    method finish_scence (line 136) | def finish_scence(self, scene):

FILE: apex_yolov5/log/LogFactory.py
  function init_logger (line 10) | def init_logger():
  function logger (line 15) | def logger():
  function getLogger (line 19) | def getLogger(cls):
  function prefix_search (line 33) | def prefix_search(full_path):
  class MultipleLogger (line 47) | class MultipleLogger(Logger):
    method __init__ (line 48) | def __init__(self, cls):
    method print_log (line 52) | def print_log(self, text, log_type="default"):

FILE: apex_yolov5/log/LogWindow.py
  class LogWindow (line 12) | class LogWindow(QMainWindow, Logger):
    method __new__ (line 19) | def __new__(cls, *args, **kwargs):
    method __init__ (line 24) | def __init__(self):
    method init_ui (line 40) | def init_ui(self):
    method print_log (line 57) | def print_log(self, log, log_type="default"):
    method closeEvent (line 65) | def closeEvent(self, event):
    method real_print (line 73) | def real_print(self, log_data):
    method add_log_tab (line 87) | def add_log_tab(self, log_type):
  class PrintLogThread (line 99) | class PrintLogThread(QThread):
    method __init__ (line 105) | def __init__(self, log_queue: Tools.GetBlockQueue):
    method run (line 109) | def run(self):

FILE: apex_yolov5/log/Logger.py
  class Logger (line 7) | class Logger:
    method print_log (line 12) | def print_log(self, text, log_type="default"):

FILE: apex_yolov5/magnifying_glass.py
  class MagnifyingGlassWindows (line 8) | class MagnifyingGlassWindows(QMainWindow):
    method __init__ (line 9) | def __init__(self):
    method set_image (line 23) | def set_image(self, img_data):

FILE: apex_yolov5/mouse.py
  class MOUSEINPUT (line 15) | class MOUSEINPUT(Structure):
  class _INPUTunion (line 24) | class _INPUTunion(Union):
  class INPUT (line 28) | class INPUT(Structure):
  function SendInput (line 33) | def SendInput(*inputs):
  function Input (line 41) | def Input(structure):
  function MouseInput (line 45) | def MouseInput(flags, x, y, data):
  function Mouse (line 49) | def Mouse(flags, x=0, y=0, data=0):
  function mouse_xy (line 53) | def mouse_xy(x, y):  # for import
  function mouse_down (line 59) | def mouse_down(key = 1):  # for import
  function mouse_up (line 68) | def mouse_up(key = 1):  # for import
  function mouse_close (line 77) | def mouse_close():  # for import
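
MOUSEINPUT, _INPUTunion and INPUT are the standard ctypes scaffolding for Win32 SendInput. A self-contained sketch of that pattern for a relative mouse move, not the repository's exact code (its union very likely carries keyboard input as well):

    import ctypes
    from ctypes import Structure, Union, c_long, c_ulong

    MOUSEEVENTF_MOVE = 0x0001

    class MOUSEINPUT(Structure):
        _fields_ = [("dx", c_long), ("dy", c_long),
                    ("mouseData", c_ulong), ("dwFlags", c_ulong),
                    ("time", c_ulong),
                    ("dwExtraInfo", ctypes.POINTER(c_ulong))]  # pointer-sized, matches ULONG_PTR

    class _INPUTunion(Union):
        _fields_ = [("mi", MOUSEINPUT)]

    class INPUT(Structure):
        _fields_ = [("type", c_ulong), ("union", _INPUTunion)]

    def mouse_move_relative(dx, dy):
        """Inject one relative mouse move via SendInput."""
        mi = MOUSEINPUT(dx, dy, 0, MOUSEEVENTF_MOVE, 0, None)
        inp = INPUT(0, _INPUTunion(mi=mi))  # type 0 == INPUT_MOUSE
        ctypes.windll.user32.SendInput(1, ctypes.byref(inp), ctypes.sizeof(INPUT))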

FILE: apex_yolov5/mouse_lock.py
  function lock (line 21) | def lock(aims, mouse, screen_width, screen_height, shot_width, shot_heig...
  function in_moving_raduis (line 130) | def in_moving_raduis(targetRealX, targetRealY, shot_width, shot_height, ...
  function in_delayed (line 144) | def in_delayed(width, height, targetRealX, targetRealY, screenCenterX, s...
  function average_target_proportion (line 155) | def average_target_proportion(target_size):
  function calculate_average (line 163) | def calculate_average():
  function lead_time_xy (line 186) | def lead_time_xy(targetRealX, targetRealY, current_mouse_x, current_mous...
  function lead_time_one (line 206) | def lead_time_one(name, target_real,
  function previous_movements (line 239) | def previous_movements(queue, current_quadrant, lead_time_decision_frame):
  function determine_quadrant (line 260) | def determine_quadrant(move):

FILE: apex_yolov5/mouse_mover/FeiMover.py
  class FeiMover (line 7) | class FeiMover(MouseMover):
    method __init__ (line 8) | def __init__(self, mouse_mover_param):
    method move_rp (line 19) | def move_rp(self, short_x: int, short_y: int, re_cut_size=0):
    method move (line 22) | def move(self, short_x: int, short_y: int):
    method left_click (line 25) | def left_click(self):
    method click_key (line 28) | def click_key(self, value):
    method init_dll (line 31) | def init_dll(self):

FILE: apex_yolov5/mouse_mover/GHubMover.py
  class GHubMover (line 7) | class GHubMover(MouseMover):
    method __init__ (line 8) | def __init__(self, mouse_mover_param):
    method move_rp (line 21) | def move_rp(self, x: int, y: int, re_cut_size=0):
    method move (line 24) | def move(self, x: int, y: int):
    method left_click (line 27) | def left_click(self):
    method click_mouse_button (line 30) | def click_mouse_button(self, button):
    method press_mouse_button (line 35) | def press_mouse_button(self, button):
    method release_mouse_button (line 40) | def release_mouse_button(self, button):

FILE: apex_yolov5/mouse_mover/IntentManager.py
  class IntentManager (line 10) | class IntentManager:
    method __init__ (line 15) | def __init__(self, mouse_mover: MouseMover):
    method set_intention (line 22) | def set_intention(self, x, y):
    method start (line 36) | def start(self):
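
set_intention/start read like a latest-wins intention buffer drained by a background thread that forwards moves to the wrapped MouseMover. A guesswork sketch of that contract; the class name, field names, and 1 ms cadence are all assumptions:

    import threading
    import time

    class IntentManagerSketch:
        def __init__(self, mouse_mover):
            self.mouse_mover = mouse_mover
            self._lock = threading.Lock()
            self._intent = None                # pending (x, y) offset, if any

        def set_intention(self, x, y):
            with self._lock:
                self._intent = (x, y)          # newer intent replaces older one

        def start(self):
            threading.Thread(target=self._run, daemon=True).start()

        def _run(self):
            while True:
                with self._lock:
                    intent, self._intent = self._intent, None
                if intent:
                    self.mouse_mover.move(*intent)
                time.sleep(0.001)              # assumed polling cadence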

FILE: apex_yolov5/mouse_mover/KmBoxMover.py
  class KmBoxMover (line 7) | class KmBoxMover(MouseMover):
    method __init__ (line 9) | def __init__(self, mouse_mover_param):
    method left_click (line 26) | def left_click(self):
    method left (line 31) | def left(self, vk_key: int):
    method move_rp (line 38) | def move_rp(self, short_x: int, short_y: int, re_cut_size=0):
    method move (line 41) | def move(self, short_x: int, short_y: int):

FILE: apex_yolov5/mouse_mover/KmBoxNetMover.py
  class KmBoxNetMover (line 6) | class KmBoxNetMover(MouseMover):
    method __init__ (line 8) | def __init__(self, mouse_mover_param):
    method left_click (line 26) | def left_click(self):
    method left (line 31) | def left(self, vk_key: int):
    method move_rp (line 39) | def move_rp(self, short_x: int, short_y: int, re_cut_size=0):
    method move (line 42) | def move(self, short_x: int, short_y: int):
    method destroy (line 54) | def destroy(self):
    method click_key (line 63) | def click_key(self, value):
    method key_down (line 67) | def key_down(self, value):
    method key_up (line 70) | def key_up(self, value):

FILE: apex_yolov5/mouse_mover/MouseMover.py
  class PointAPI (line 6) | class PointAPI(Structure):
  class MouseMover (line 14) | class MouseMover:
    method __init__ (line 19) | def __init__(self, mouse_mover_param):
    method move_rp (line 22) | def move_rp(self, x: int, y: int, re_cut_size=0):
    method move (line 31) | def move(self, x: int, y: int):
    method left_click (line 39) | def left_click(self):
    method get_position (line 46) | def get_position(self):
    method is_num_locked (line 54) | def is_num_locked(self):
    method is_caps_locked (line 69) | def is_caps_locked(self):
    method destroy (line 84) | def destroy(self):
    method move_test (line 90) | def move_test(self, x: int, y: int):
    method mouse_click (line 93) | def mouse_click(self, key, press):
    method left_down (line 110) | def left_down(self):
    method left_up (line 116) | def left_up(self):
    method right_down (line 122) | def right_down(self):
    method right_up (line 128) | def right_up(self):
    method click_key (line 134) | def click_key(self, value):
    method key_down (line 142) | def key_down(self, value):
    method key_up (line 148) | def key_up(self, value):
    method toggle_caps_lock (line 154) | def toggle_caps_lock(self, lock_status):
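
MouseMover is the base class every concrete mover in this package (Win32ApiMover, GHubMover, KmBoxMover, KmBoxNetMover, PanNiMover, WuYaMover, FeiMover) overrides for move/move_rp/left_click while inheriting the Win32 helpers. PointAPI plus get_position points at the classic GetCursorPos pattern, and is_caps_locked at GetKeyState; a sketch of both:

    import ctypes
    from ctypes import Structure, c_long, byref

    class PointAPI(Structure):
        _fields_ = [("x", c_long), ("y", c_long)]  # Win32 POINT layout

    def get_position():
        """Current cursor position via GetCursorPos."""
        pt = PointAPI()
        ctypes.windll.user32.GetCursorPos(byref(pt))
        return pt.x, pt.y

    def is_caps_locked():
        """Low-order bit of GetKeyState reports the toggle state."""
        return bool(ctypes.windll.user32.GetKeyState(0x14) & 1)  # 0x14 == VK_CAPITAL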

FILE: apex_yolov5/mouse_mover/MoverFactory.py
  function init_mover (line 19) | def init_mover(mouse_model, mouse_mover_params):
  function reload_mover (line 49) | def reload_mover(mouse_model, mouse_mover_params):
  function mouse_mover (line 55) | def mouse_mover():
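
init_mover/mouse_mover suggest a factory that maps the configured mouse_model string to one of the MouseMover subclasses and caches the instance. A plausible shape; the key strings are invented for illustration, and only two of the movers are wired up here:

    from apex_yolov5.mouse_mover.Win32ApiMover import Win32ApiMover
    from apex_yolov5.mouse_mover.GHubMover import GHubMover

    _MOVERS = {"win32": Win32ApiMover, "ghub": GHubMover}  # assumed key names
    _current = None

    def init_mover(mouse_model, mouse_mover_params):
        global _current
        _current = _MOVERS[mouse_model](mouse_mover_params)
        return _current

    def mouse_mover():
        return _current  # accessor the rest of the code calls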

FILE: apex_yolov5/mouse_mover/PanNiMover.py
  class PanNiMover (line 10) | class PanNiMover(MouseMover):
    method __init__ (line 11) | def __init__(self, mouse_mover_param):
    method __del__ (line 42) | def __del__(self):
    method OpenDevice (line 45) | def OpenDevice(self, pid, vid):
    method OpenDeviceByID (line 52) | def OpenDeviceByID(self, vid, pid):
    method _getVersion (line 79) | def _getVersion(self):
    method write_cmd (line 83) | def write_cmd(self, cmd, dat=None):
    method read_data_timeout_promise (line 105) | def read_data_timeout_promise(self, cmd, timeout=None):
    method read_data_timeout (line 120) | def read_data_timeout(self, timeout=None):
    method GetChipID (line 138) | def GetChipID(self):
    method GetStorageSize (line 151) | def GetStorageSize(self):
    method SetWaitRespon (line 163) | def SetWaitRespon(self, wait):
    method Close (line 172) | def Close(self):
    method mouse_event (line 185) | def mouse_event(self, e, x=0, y=0, extra1=0, extra2=0):
    method key_event (line 271) | def key_event(self, e, key):
    method DelayRandom (line 286) | def DelayRandom(delay_min, delay_max):
    method GetScanCodeFromKeyName (line 301) | def GetScanCodeFromKeyName(keyname):
    method move_rp (line 334) | def move_rp(self, x: int, y: int, re_cut_size=0):
    method move (line 337) | def move(self, x: int, y: int):
    method left_click (line 344) | def left_click(self):
    method mouse_click (line 349) | def mouse_click(self, key, press):
    method left_down (line 352) | def left_down(self):
    method left_up (line 355) | def left_up(self):
    method right_down (line 358) | def right_down(self):
    method right_up (line 361) | def right_up(self):
    method click_key (line 364) | def click_key(self, value):
    method key_down (line 369) | def key_down(self, value):
    method key_up (line 372) | def key_up(self, value):
  class GUID (line 382) | class GUID(Structure):
  class SP_DEVICE_INTERFACE_DATA (line 389) | class SP_DEVICE_INTERFACE_DATA(Structure):
  function SP_DATA_A_factory (line 396) | def SP_DATA_A_factory(length):
  class HID (line 403) | class HID:
    method __init__ (line 408) | def __init__(self):
    method __del__ (line 418) | def __del__(self):
    method enum_device (line 421) | def enum_device(self):
    method open (line 480) | def open(self, path):
    method close (line 492) | def close(self):
    method write (line 501) | def write(self, data):
    method read (line 514) | def read(self, len, timeout):

FILE: apex_yolov5/mouse_mover/Win32ApiMover.py
  class Win32ApiMover (line 15) | class Win32ApiMover(MouseMover):
    method __init__ (line 17) | def __init__(self, mouse_mover_param):
    method move_rp (line 22) | def move_rp(self, x: int, y: int, re_cut_size=0):
    method move (line 30) | def move(self, x, y):
    method left_click (line 33) | def left_click(self):
    method move_test (line 37) | def move_test(self, x: int, y: int):
    method split_coordinates (line 40) | def split_coordinates(self, x, y):
    method left_click (line 57) | def left_click(self):
    method left_down (line 61) | def left_down(self):
    method left_up (line 64) | def left_up(self):
    method right_down (line 67) | def right_down(self):
    method right_up (line 70) | def right_up(self):

FILE: apex_yolov5/mouse_mover/WuYaMover.py
  class WuYaMover (line 9) | class WuYaMover(MouseMover):
    method __init__ (line 10) | def __init__(self, mouse_mover_param):
    method move_rp (line 34) | def move_rp(self, short_x: int, short_y: int, re_cut_size=0):
    method move (line 37) | def move(self, short_x: int, short_y: int):
    method left_click (line 40) | def left_click(self):

FILE: apex_yolov5/socket/config.py
  function get_all_config_file_name (line 51) | def get_all_config_file_name(directory=config_ref_path):
  function read_config_file_name (line 69) | def read_config_file_name(file_path=use_ref_path, default="global_config"):
  function writer_config_file_name (line 83) | def writer_config_file_name(file_path=use_ref_path, content="global_conf...
  function read_config (line 94) | def read_config():
  function copy_config (line 108) | def copy_config(target):
  class Config (line 120) | class Config:
    method __init__ (line 126) | def __init__(self):
    method update (line 130) | def update(self):
    method init (line 134) | def init(self):
    method sign_shot_xy (line 392) | def sign_shot_xy(self, averager=(0, 0, 0, 0)):
    method change_shot_xy (line 396) | def change_shot_xy(self):
    method reset_shot_xy (line 427) | def reset_shot_xy(self):
    method increase_shot_xy (line 434) | def increase_shot_xy(self, step=8):
    method reduce_shot_xy (line 445) | def reduce_shot_xy(self, step=8):
    method update_shot_xy (line 456) | def update_shot_xy(self):
    method update_shot_other_data (line 460) | def update_shot_other_data(self):
    method get_config (line 471) | def get_config(config, pattern=None, default=None):
    method set_config (line 487) | def set_config(self, key, value):
    method save_config (line 490) | def save_config(self):

FILE: apex_yolov5/socket/socket_util.py
  function send (line 1) | def send(send_socket, byte_array, buffer_size=4096):
  function recv (line 8) | def recv(recv_socket, buffer_size=4096):
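
send/recv with a buffer_size parameter commonly mean length-prefixed framing: a 4-byte size header followed by the payload in chunks. Whether this module frames messages exactly this way is an assumption; a self-contained sketch (the _recv_exact helper is invented for this sketch):

    import struct

    def send(send_socket, byte_array, buffer_size=4096):
        send_socket.sendall(struct.pack(">I", len(byte_array)))  # 4-byte length header
        for i in range(0, len(byte_array), buffer_size):
            send_socket.sendall(byte_array[i:i + buffer_size])

    def recv(recv_socket, buffer_size=4096):
        (length,) = struct.unpack(">I", _recv_exact(recv_socket, 4))
        return _recv_exact(recv_socket, length, buffer_size)

    def _recv_exact(sock, n, buffer_size=4096):
        """Read exactly n bytes or raise (helper invented for this sketch)."""
        data = bytearray()
        while len(data) < n:
            chunk = sock.recv(min(buffer_size, n - len(data)))
            if not chunk:
                raise ConnectionError("socket closed mid-message")
            data.extend(chunk)
        return bytes(data)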

FILE: apex_yolov5/socket/yolov5_handler.py
  function reload_model (line 14) | def reload_model():
  function get_aims (line 21) | def get_aims(img0):

FILE: apex_yolov5/window_layout/ai_toggle_layout.py
  class AiToggleLayout (line 7) | class AiToggleLayout:
    method __init__ (line 8) | def __init__(self, config, main_window, parent_layout, system_tray):
    method add_layout (line 14) | def add_layout(self):
    method init_form_config (line 49) | def init_form_config(self):
    method handle_ai_toggled (line 61) | def handle_ai_toggled(self, checked):
    method handle_recoils_toggle (line 64) | def handle_recoils_toggle(self, checked):
    method handle_ai_middle_toggle_switch (line 68) | def handle_ai_middle_toggle_switch(self, checked):
    method handle_middle_toggled (line 71) | def handle_middle_toggled(self, pressed, toggle):
    method save_config (line 77) | def save_config(self):

FILE: apex_yolov5/window_layout/anthropomorphic_config_layout.py
  class AnthropomorphicConfigLayout (line 6) | class AnthropomorphicConfigLayout:
    method __init__ (line 7) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 12) | def add_layout(self):
    method lead_time_toggle_check (line 103) | def lead_time_toggle_check(self, checked):
    method delayed_aiming_toggle_check (line 109) | def delayed_aiming_toggle_check(self, checked):
    method init_form_config (line 115) | def init_form_config(self):
    method save_config (line 133) | def save_config(self):

FILE: apex_yolov5/window_layout/auto_charged_energy_layout.py
  class AutoChargedEnergyLayout (line 5) | class AutoChargedEnergyLayout:
    method __init__ (line 6) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 11) | def add_layout(self):
    method update_storage_interval_label (line 45) | def update_storage_interval_label(self, value):
    method init_form_config (line 49) | def init_form_config(self):
    method save_config (line 55) | def save_config(self):

FILE: apex_yolov5/window_layout/auto_gun_config_layout.py
  class AutoGunConfigLayout (line 5) | class AutoGunConfigLayout:
    method __init__ (line 6) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 11) | def add_layout(self):
    method init_form_config (line 72) | def init_form_config(self):
    method add_refresh_button_item (line 80) | def add_refresh_button_item(self):
    method delete_refresh_button_item (line 86) | def delete_refresh_button_item(self):
    method addGun (line 92) | def addGun(self):
    method removeGun (line 100) | def removeGun(self):

FILE: apex_yolov5/window_layout/auto_save_config_layout.py
  class AutoSaveConfigLayout (line 5) | class AutoSaveConfigLayout:
    method __init__ (line 6) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 11) | def add_layout(self):
    method init_form_config (line 44) | def init_form_config(self):

FILE: apex_yolov5/window_layout/model_config_layout.py
  class ModelConfigLayout (line 7) | class ModelConfigLayout:
    method __init__ (line 8) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 13) | def add_layout(self):
    method init_form_config (line 58) | def init_form_config(self):
    method selection_changed (line 71) | def selection_changed(self, index):
    method update_slieder_value (line 81) | def update_slieder_value(self, value):
    method update_iou_thres_value (line 86) | def update_iou_thres_value(self, value):

FILE: apex_yolov5/window_layout/mouse_config_layout.py
  class MouseConfigLayout (line 8) | class MouseConfigLayout:
    method __init__ (line 10) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 15) | def add_layout(self):
    method init_form_config (line 277) | def init_form_config(self):
    method update_aim_delay_slider (line 377) | def update_aim_delay_slider(self, value):
    method selection_changed (line 383) | def selection_changed(self, index):
    method selection_aim_model_changed (line 391) | def selection_aim_model_changed(self, index):
    method handle_toggled (line 394) | def handle_toggled(self, checked):
    method disable_silder_toggled (line 400) | def disable_silder_toggled(self, checked):
    method joy_move_toggled (line 411) | def joy_move_toggled(self, checked):
    method update_move_step_label (line 418) | def update_move_step_label(self, value):
    method update_move_step_y_label (line 424) | def update_move_step_y_label(self, value):
    method update_move_path_nx_label (line 430) | def update_move_path_nx_label(self, value):
    method update_move_path_ny_label (line 434) | def update_move_path_ny_label(self, value):
    method update_aim_move_step_label (line 438) | def update_aim_move_step_label(self, value):
    method update_aim_move_step_y_label (line 444) | def update_aim_move_step_y_label(self, value):
    method update_aim_move_path_nx_label (line 450) | def update_aim_move_path_nx_label(self, value):
    method update_aim_move_path_ny_label (line 454) | def update_aim_move_path_ny_label(self, value):
    method update_mouse_move_frequency_label (line 458) | def update_mouse_move_frequency_label(self, value):
    method update_re_cut_size_label (line 462) | def update_re_cut_size_label(self, value):
    method move_crosshair (line 466) | def move_crosshair(self, value):
    method paintEvent (line 472) | def paintEvent(self, event):
    method save_config (line 477) | def save_config(self):

FILE: apex_yolov5/window_layout/screenshot_area_layout.py
  class ScreenshotAreaLayout (line 10) | class ScreenshotAreaLayout:
    method __init__ (line 11) | def __init__(self, config, main_window, parent_layout):
    method add_layout (line 16) | def add_layout(self):
    method init_form_config (line 182) | def init_form_config(self):
    method delete_extra_zero (line 218) | def delete_extra_zero(self, n):
    method dynamic_screenshot_toggle (line 224) | def dynamic_screenshot_toggle(self, checked):
    method show_circle_toggle (line 246) | def show_circle_toggle(self, checked):
    method show_aim_toggle (line 254) | def show_aim_toggle(self, checked):
    method update_inner_rect_size (line 262) | def update_inner_rect_size(self):
    method update_inner_circle_size (line 268) | def update_inner_circle_size(self):
    method check_multi_stage_aiming_speed (line 277) | def check_multi_stage_aiming_speed(self, speed_up, multi_stage_aiming_...
    method save_config (line 303) | def save_config(self):
  class RectView (line 339) | class RectView(QGraphicsView):
    method __init__ (line 340) | def __init__(self, parent=None, outer_rect_size=(192, 108), inner_rect...
    method center_inner_rect (line 360) | def center_inner_rect(self):
    method center_inner_circle (line 365) | def center_inner_circle(self):
    method center_inner_circle_aim (line 369) | def center_inner_circle_aim(self):
    method resize_inner_rect (line 373) | def resize_inner_rect(self, width, height):
    method resize_inner_circle (line 378) | def resize_inner_circle(self, radius):
    method resize_inner_circle_aim (line 382) | def resize_inner_circle_aim(self, radius):

FILE: apex_yolov5/windows/DebugWindow.py
  class DebugWindow (line 13) | class DebugWindow(QMainWindow):
    method __new__ (line 17) | def __new__(cls, *args, **kwargs):
    method __init__ (line 22) | def __init__(self):
    method init_ui (line 37) | def init_ui(self):
    method set_image (line 53) | def set_image(self, img_data, bboxes):
    method show_image (line 56) | def show_image(self, data):
    method eventFilter (line 82) | def eventFilter(self, obj, event):
  class ShowImageThread (line 90) | class ShowImageThread(QThread):
    method __init__ (line 96) | def __init__(self, queue: Tools.GetBlockQueue):
    method run (line 100) | def run(self):

FILE: apex_yolov5/windows/DisclaimerWindow.py
  class DisclaimerWindow (line 7) | class DisclaimerWindow(QWidget):
    method __init__ (line 8) | def __init__(self, main_window):
    method initUI (line 13) | def initUI(self):
    method set_disclaimer_text (line 24) | def set_disclaimer_text(self):
    method show_disclaimer_message (line 40) | def show_disclaimer_message(self):
    method check_and_accept (line 53) | def check_and_accept(self):

FILE: apex_yolov5/windows/aim_show_window.py
  class AimShowWindows (line 9) | class AimShowWindows(QMainWindow):
    method __init__ (line 10) | def __init__(self, config):
    method update_box (line 22) | def update_box(self, left_top_xy, bbox):
    method clear_box (line 27) | def clear_box(self):
    method paintEvent (line 32) | def paintEvent(self, event):
  function get_aim_show_window (line 68) | def get_aim_show_window():
  function destory_aim_show_window (line 75) | def destory_aim_show_window():

FILE: apex_yolov5/windows/circle_window.py
  class CircleWindow (line 10) | class CircleWindow(QMainWindow):
    method __init__ (line 11) | def __init__(self, config):
    method update_circle (line 27) | def update_circle(self, pressed=False, toggle=False):
    method update_circle_auto_change (line 37) | def update_circle_auto_change(self, radius):
    method init_form_config (line 42) | def init_form_config(self):
    method paintEvent (line 54) | def paintEvent(self, event):
    method close (line 60) | def close(self):
  function get_circle_window (line 68) | def get_circle_window():
  function destory_circle_window (line 75) | def destory_circle_window():

FILE: apex_yolov5/windows/config_window.py
  class ConfigWindow (line 28) | class ConfigWindow(QMainWindow):
    method __new__ (line 33) | def __new__(cls, *args, **kwargs):
    method __init__ (line 38) | def __init__(self, config, title=None):
    method create_menus (line 74) | def create_menus(self):
    method mouse_performance_test (line 113) | def mouse_performance_test(self):
    method mouse_performance_test_threading (line 116) | def mouse_performance_test_threading(self):
    method open_disclaimer_window (line 126) | def open_disclaimer_window(self):
    method showFileDialog (line 129) | def showFileDialog(self):
    method detect_threading (line 136) | def detect_threading(self, file_path):
    method open_read_ref_glass_window (line 155) | def open_read_ref_glass_window(self):
    method open_new_ref_glass_window (line 189) | def open_new_ref_glass_window(self):
    method init_form_config (line 220) | def init_form_config(self):
    method open_config_window (line 230) | def open_config_window(self):
    method open_magnifying_glass_window (line 235) | def open_magnifying_glass_window(self):
    method open_frame_rate_monitor (line 240) | def open_frame_rate_monitor(self):
    method add_frame_rate_plot (line 245) | def add_frame_rate_plot(self, frame_rate):
    method initUI (line 249) | def initUI(self):
    method handle_toggled (line 275) | def handle_toggled(self, checked):
    method set_image (line 278) | def set_image(self, img_data, bboxes):
    method eventFilter (line 281) | def eventFilter(self, obj, event):
    method saveConfig (line 288) | def saveConfig(self):
    method changeEvent (line 297) | def changeEvent(self, event):
    method closeEvent (line 303) | def closeEvent(self, event):

FILE: apex_yolov5_main.py
  function main (line 17) | def main(log_window):

FILE: apex_yolov5_main_asyn.py
  function handle (line 21) | def handle(log_window):
  function main (line 82) | def main():

FILE: benchmarks.py
  function run (line 52) | def run(
  function test (line 116) | def test(
  function parse_opt (line 151) | def parse_opt():
  function main (line 169) | def main(opt):

FILE: check.py
  function check_files (line 8) | def check_files(directory):
  function class_change (line 24) | def class_change():
  function check_label_image (line 63) | def check_label_image():
  function delete_label (line 109) | def delete_label():
  function split_label_image (line 131) | def split_label_image():
  function class_change_1 (line 173) | def class_change_1():
  function classification (line 194) | def classification():

FILE: classify/predict.py
  function run (line 68) | def run(
  function parse_opt (line 206) | def parse_opt():
  function main (line 232) | def main(opt):

FILE: classify/train.py
  function train (line 78) | def train(opt, device):
  function parse_opt (line 309) | def parse_opt(known=False):
  function main (line 339) | def main(opt):
  function run (line 363) | def run(**kwargs):

FILE: classify/val.py
  function run (line 53) | def run(
  function parse_opt (line 149) | def parse_opt():
  function main (line 169) | def main(opt):

FILE: client.py
  function main (line 24) | def main():

FILE: detect.py
  function run (line 72) | def run(
  function parse_opt (line 278) | def parse_opt():
  function main (line 318) | def main(opt):

FILE: export.py
  class iOSModel (line 93) | class iOSModel(torch.nn.Module):
    method __init__ (line 94) | def __init__(self, model, im):
    method forward (line 107) | def forward(self, x):
  function export_formats (line 113) | def export_formats():
  function try_export (line 132) | def try_export(inner_func):
  function export_torchscript (line 151) | def export_torchscript(model, im, file, optimize, prefix=colorstr("Torch...
  function export_onnx (line 169) | def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colors...
  function export_openvino (line 226) | def export_openvino(file, metadata, half, int8, data, prefix=colorstr("O...
  function export_paddle (line 282) | def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePadd...
  function export_coreml (line 299) | def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("Cor...
  function export_engine (line 324) | def export_engine(model, im, file, half, dynamic, simplify, workspace=4,...
  function export_saved_model (line 394) | def export_saved_model(
  function export_pb (line 457) | def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
  function export_tflite (line 474) | def export_tflite(
  function export_edgetpu (line 510) | def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
  function export_tfjs (line 552) | def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
  function add_tflite_metadata (line 589) | def add_tflite_metadata(file, metadata, num_outputs):
  function pipeline_coreml (line 626) | def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML P...
  function run (line 760) | def run(
  function parse_opt (line 896) | def parse_opt(known=False):
  function main (line 942) | def main(opt):

FILE: hubconf.py
  function _create (line 16) | def _create(name, pretrained=True, channels=3, classes=80, autoshape=Tru...
  function custom (line 86) | def custom(path="path/to/model.pt", autoshape=True, _verbose=True, devic...
  function yolov5n (line 91) | def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
  function yolov5s (line 98) | def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
  function yolov5m (line 105) | def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
  function yolov5l (line 112) | def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
  function yolov5x (line 119) | def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _ve...
  function yolov5n6 (line 126) | def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
  function yolov5s6 (line 133) | def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
  function yolov5m6 (line 140) | def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
  function yolov5l6 (line 147) | def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
  function yolov5x6 (line 154) | def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _v...
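
These are the standard Ultralytics torch.hub entry points, so the vendored repo can be loaded the same way as upstream YOLOv5:

    import torch

    # Load from this checkout (run from the repo root) to avoid a network fetch,
    # or pull the upstream repo from GitHub.
    model = torch.hub.load(".", "yolov5s", source="local")
    # model = torch.hub.load("ultralytics/yolov5", "yolov5s")

    results = model("https://ultralytics.com/images/zidane.jpg")
    results.print()  # Detections.print(), listed under models/common.py below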

FILE: joy_test.py
  class TextPrint (line 11) | class TextPrint:
    method __init__ (line 12) | def __init__(self):
    method print (line 16) | def print(self, screen, textString):
    method reset (line 21) | def reset(self):
    method indent (line 26) | def indent(self):
    method unindent (line 29) | def unindent(self):

FILE: lg.py
  function press_mouse_button (line 19) | def press_mouse_button(button):
  function release_mouse_button (line 25) | def release_mouse_button(button):
  function click_mouse_button (line 31) | def click_mouse_button(button):
  function press_key (line 37) | def press_key(code):
  function release_key (line 43) | def release_key(code):
  function click_key (line 49) | def click_key(code):
  function mouse_xy (line 55) | def mouse_xy(x, y, abs_move=False):

FILE: main.py
  function main (line 31) | def main():

FILE: models/common.py
  function autopad (line 60) | def autopad(k, p=None, d=1):
  class Conv (line 73) | class Conv(nn.Module):
    method __init__ (line 77) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
    method forward (line 84) | def forward(self, x):
    method forward_fuse (line 88) | def forward_fuse(self, x):
  class DWConv (line 93) | class DWConv(Conv):
    method __init__ (line 95) | def __init__(self, c1, c2, k=1, s=1, d=1, act=True):
  class DWConvTranspose2d (line 102) | class DWConvTranspose2d(nn.ConvTranspose2d):
    method __init__ (line 104) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):
  class TransformerLayer (line 111) | class TransformerLayer(nn.Module):
    method __init__ (line 113) | def __init__(self, c, num_heads):
    method forward (line 127) | def forward(self, x):
  class TransformerBlock (line 134) | class TransformerBlock(nn.Module):
    method __init__ (line 136) | def __init__(self, c1, c2, num_heads, num_layers):
    method forward (line 148) | def forward(self, x):
  class Bottleneck (line 159) | class Bottleneck(nn.Module):
    method __init__ (line 161) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
    method forward (line 171) | def forward(self, x):
  class BottleneckCSP (line 178) | class BottleneckCSP(nn.Module):
    method __init__ (line 180) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    method forward (line 194) | def forward(self, x):
  class CrossConv (line 203) | class CrossConv(nn.Module):
    method __init__ (line 205) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
    method forward (line 218) | def forward(self, x):
  class C3 (line 223) | class C3(nn.Module):
    method __init__ (line 225) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    method forward (line 236) | def forward(self, x):
  class C3x (line 241) | class C3x(C3):
    method __init__ (line 243) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class C3TR (line 252) | class C3TR(C3):
    method __init__ (line 254) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class C3SPP (line 263) | class C3SPP(C3):
    method __init__ (line 265) | def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
  class C3Ghost (line 274) | class C3Ghost(C3):
    method __init__ (line 276) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class SPP (line 283) | class SPP(nn.Module):
    method __init__ (line 285) | def __init__(self, c1, c2, k=(5, 9, 13)):
    method forward (line 293) | def forward(self, x):
  class SPPF (line 303) | class SPPF(nn.Module):
    method __init__ (line 305) | def __init__(self, c1, c2, k=5):
    method forward (line 318) | def forward(self, x):
  class Focus (line 328) | class Focus(nn.Module):
    method __init__ (line 330) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
    method forward (line 338) | def forward(self, x):
  class GhostConv (line 344) | class GhostConv(nn.Module):
    method __init__ (line 346) | def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
    method forward (line 355) | def forward(self, x):
  class GhostBottleneck (line 361) | class GhostBottleneck(nn.Module):
    method __init__ (line 363) | def __init__(self, c1, c2, k=3, s=1):
    method forward (line 376) | def forward(self, x):
  class Contract (line 381) | class Contract(nn.Module):
    method __init__ (line 383) | def __init__(self, gain=2):
    method forward (line 390) | def forward(self, x):
  class Expand (line 401) | class Expand(nn.Module):
    method __init__ (line 403) | def __init__(self, gain=2):
    method forward (line 413) | def forward(self, x):
  class Concat (line 424) | class Concat(nn.Module):
    method __init__ (line 426) | def __init__(self, dimension=1):
    method forward (line 431) | def forward(self, x):
  class DetectMultiBackend (line 438) | class DetectMultiBackend(nn.Module):
    method __init__ (line 440) | def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), d...
    method forward (line 657) | def forward(self, im, augment=False, visualize=False):
    method from_numpy (line 739) | def from_numpy(self, x):
    method warmup (line 743) | def warmup(self, imgsz=(1, 3, 640, 640)):
    method _model_type (line 752) | def _model_type(p="path/to/model.pt"):
    method _load_metadata (line 772) | def _load_metadata(f=Path("path/to/meta.yaml")):
  class AutoShape (line 780) | class AutoShape(nn.Module):
    method __init__ (line 790) | def __init__(self, model, verbose=True):
    method _apply (line 804) | def _apply(self, fn):
    method forward (line 820) | def forward(self, ims, size=640, augment=False, profile=False):
  class Detections (line 891) | class Detections:
    method __init__ (line 893) | def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shap...
    method _run (line 911) | def _run(self, pprint=False, show=False, save=False, crop=False, rende...
    method show (line 966) | def show(self, labels=True):
    method save (line 974) | def save(self, labels=True, save_dir="runs/detect/exp", exist_ok=False):
    method crop (line 983) | def crop(self, save=True, save_dir="runs/detect/exp", exist_ok=False):
    method render (line 992) | def render(self, labels=True):
    method pandas (line 997) | def pandas(self):
    method tolist (line 1011) | def tolist(self):
    method print (line 1030) | def print(self):
    method __len__ (line 1034) | def __len__(self):
    method __str__ (line 1038) | def __str__(self):
    method __repr__ (line 1044) | def __repr__(self):
  class Proto (line 1049) | class Proto(nn.Module):
    method __init__ (line 1051) | def __init__(self, c1, c_=256, c2=32):
    method forward (line 1059) | def forward(self, x):
  class Classify (line 1064) | class Classify(nn.Module):
    method __init__ (line 1066) | def __init__(
    method forward (line 1076) | def forward(self, x):
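
Conv is the building block most layers in this file compose: Conv2d + BatchNorm2d + SiLU with "same"-shape padding computed by autopad. Since the file tracks upstream Ultralytics YOLOv5, the upstream definition is a faithful reference:

    import torch.nn as nn

    def autopad(k, p=None, d=1):
        """'Same'-shape padding for kernel k and dilation d."""
        if d > 1:
            k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]
        if p is None:
            p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
        return p

    class Conv(nn.Module):
        """Conv2d + BatchNorm2d + activation."""
        default_act = nn.SiLU()

        def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
            super().__init__()
            self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
            self.bn = nn.BatchNorm2d(c2)
            self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

        def forward(self, x):
            return self.act(self.bn(self.conv(x)))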

FILE: models/experimental.py
  class Sum (line 13) | class Sum(nn.Module):
    method __init__ (line 16) | def __init__(self, n, weight=False):
    method forward (line 26) | def forward(self, x):
  class MixConv2d (line 39) | class MixConv2d(nn.Module):
    method __init__ (line 42) | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
    method forward (line 65) | def forward(self, x):
  class Ensemble (line 72) | class Ensemble(nn.ModuleList):
    method __init__ (line 75) | def __init__(self):
    method forward (line 79) | def forward(self, x, augment=False, profile=False, visualize=False):
  function attempt_load (line 88) | def attempt_load(weights, device=None, inplace=True, fuse=True):

FILE: models/tf.py
  class TFBN (line 51) | class TFBN(keras.layers.Layer):
    method __init__ (line 53) | def __init__(self, w=None):
    method call (line 64) | def call(self, inputs):
  class TFPad (line 69) | class TFPad(keras.layers.Layer):
    method __init__ (line 71) | def __init__(self, pad):
    method call (line 84) | def call(self, inputs):
  class TFConv (line 89) | class TFConv(keras.layers.Layer):
    method __init__ (line 91) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 115) | def call(self, inputs):
  class TFDWConv (line 120) | class TFDWConv(keras.layers.Layer):
    method __init__ (line 122) | def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
    method call (line 144) | def call(self, inputs):
  class TFDWConvTranspose2d (line 149) | class TFDWConvTranspose2d(keras.layers.Layer):
    method __init__ (line 151) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
    method call (line 176) | def call(self, inputs):
  class TFFocus (line 181) | class TFFocus(keras.layers.Layer):
    method __init__ (line 183) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 193) | def call(self, inputs):
  class TFBottleneck (line 203) | class TFBottleneck(keras.layers.Layer):
    method __init__ (line 205) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):
    method call (line 218) | def call(self, inputs):
  class TFCrossConv (line 225) | class TFCrossConv(keras.layers.Layer):
    method __init__ (line 227) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
    method call (line 235) | def call(self, inputs):
  class TFConv2d (line 240) | class TFConv2d(keras.layers.Layer):
    method __init__ (line 242) | def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
    method call (line 258) | def call(self, inputs):
  class TFBottleneckCSP (line 263) | class TFBottleneckCSP(keras.layers.Layer):
    method __init__ (line 265) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 282) | def call(self, inputs):
  class TFC3 (line 291) | class TFC3(keras.layers.Layer):
    method __init__ (line 293) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 306) | def call(self, inputs):
  class TFC3x (line 315) | class TFC3x(keras.layers.Layer):
    method __init__ (line 317) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 332) | def call(self, inputs):
  class TFSPP (line 337) | class TFSPP(keras.layers.Layer):
    method __init__ (line 339) | def __init__(self, c1, c2, k=(5, 9, 13), w=None):
    method call (line 347) | def call(self, inputs):
  class TFSPPF (line 353) | class TFSPPF(keras.layers.Layer):
    method __init__ (line 355) | def __init__(self, c1, c2, k=5, w=None):
    method call (line 365) | def call(self, inputs):
  class TFDetect (line 375) | class TFDetect(keras.layers.Layer):
    method __init__ (line 377) | def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):
    method call (line 397) | def call(self, inputs):
    method _make_grid (line 422) | def _make_grid(nx=20, ny=20):
  class TFSegment (line 429) | class TFSegment(TFDetect):
    method __init__ (line 431) | def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(64...
    method call (line 443) | def call(self, x):
  class TFProto (line 452) | class TFProto(keras.layers.Layer):
    method __init__ (line 453) | def __init__(self, c1, c_=256, c2=32, w=None):
    method call (line 463) | def call(self, inputs):
  class TFUpsample (line 468) | class TFUpsample(keras.layers.Layer):
    method __init__ (line 470) | def __init__(self, size, scale_factor, mode, w=None):
    method call (line 485) | def call(self, inputs):
  class TFConcat (line 490) | class TFConcat(keras.layers.Layer):
    method __init__ (line 492) | def __init__(self, dimension=1, w=None):
    method call (line 498) | def call(self, inputs):
  function parse_model (line 503) | def parse_model(d, ch, model, imgsz):
  class TFModel (line 583) | class TFModel:
    method __init__ (line 585) | def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgs...
    method predict (line 605) | def predict(
    method _xywh2xyxy (line 646) | def _xywh2xyxy(xywh):
  class AgnosticNMS (line 654) | class AgnosticNMS(keras.layers.Layer):
    method call (line 656) | def call(self, input, topk_all, iou_thres, conf_thres):
    method _nms (line 666) | def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):
  function activations (line 701) | def activations(act=nn.SiLU):
  function representative_dataset_gen (line 713) | def representative_dataset_gen(dataset, ncalib=100):
  function run (line 726) | def run(
  function parse_opt (line 751) | def parse_opt():
  function main (line 766) | def main(opt):

FILE: models/yolo.py
  class Detect (line 72) | class Detect(nn.Module):
    method __init__ (line 78) | def __init__(self, nc=80, anchors=(), ch=(), inplace=True):
    method forward (line 91) | def forward(self, x):
    method _make_grid (line 117) | def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch...
  class Segment (line 129) | class Segment(Detect):
    method __init__ (line 131) | def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=T...
    method forward (line 141) | def forward(self, x):
  class BaseModel (line 150) | class BaseModel(nn.Module):
    method forward (line 153) | def forward(self, x, profile=False, visualize=False):
    method _forward_once (line 159) | def _forward_once(self, x, profile=False, visualize=False):
    method _profile_one_layer (line 173) | def _profile_one_layer(self, m, x, dt):
    method fuse (line 187) | def fuse(self):
    method info (line 198) | def info(self, verbose=False, img_size=640):
    method _apply (line 202) | def _apply(self, fn):
  class DetectionModel (line 216) | class DetectionModel(BaseModel):
    method __init__ (line 218) | def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None):
    method forward (line 259) | def forward(self, x, augment=False, profile=False, visualize=False):
    method _forward_augment (line 265) | def _forward_augment(self, x):
    method _descale_pred (line 280) | def _descale_pred(self, p, flips, scale, img_size):
    method _clip_augmented (line 297) | def _clip_augmented(self, y):
    method _initialize_biases (line 310) | def _initialize_biases(self, cf=None):
  class SegmentationModel (line 330) | class SegmentationModel(DetectionModel):
    method __init__ (line 332) | def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None):
  class ClassificationModel (line 337) | class ClassificationModel(BaseModel):
    method __init__ (line 339) | def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):
    method _from_detection_model (line 346) | def _from_detection_model(self, model, nc=1000, cutoff=10):
    method _from_yaml (line 363) | def _from_yaml(self, cfg):
  function parse_model (line 368) | def parse_model(d, ch):

FILE: segment/predict.py
  function run (line 70) | def run(
  function parse_opt (line 259) | def parse_opt():
  function main (line 298) | def main(opt):

FILE: segment/train.py
  function train (line 98) | def train(hyp, opt, device, callbacks):
  function parse_opt (line 539) | def parse_opt(known=False):
  function main (line 588) | def main(opt, callbacks=Callbacks()):
  function run (line 745) | def run(**kwargs):

FILE: segment/val.py
  function save_one_txt (line 73) | def save_one_txt(predn, save_conf, shape, file):
  function save_one_json (line 85) | def save_one_json(predn, jdict, path, class_map, pred_masks):
  function process_batch (line 116) | def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=No...
  function run (line 154) | def run(
  function parse_opt (line 445) | def parse_opt():
  function main (line 480) | def main(opt):

FILE: server.py
  function main (line 31) | def main(log_window):

FILE: train.py
  function train (line 103) | def train(hyp, opt, device, callbacks):
  function parse_opt (line 513) | def parse_opt(known=False):
  function main (line 568) | def main(opt, callbacks=Callbacks()):
  function generate_individual (line 824) | def generate_individual(input_ranges, individual_length):
  function run (line 833) | def run(**kwargs):

FILE: utils/__init__.py
  function emojis (line 9) | def emojis(str=""):
  class TryExcept (line 14) | class TryExcept(contextlib.ContextDecorator):
    method __init__ (line 16) | def __init__(self, msg=""):
    method __enter__ (line 20) | def __enter__(self):
    method __exit__ (line 24) | def __exit__(self, exc_type, value, traceback):
  function threaded (line 33) | def threaded(func):
  function join_threads (line 44) | def join_threads(verbose=False):
  function notebook_init (line 58) | def notebook_init(verbose=True):

FILE: utils/activations.py
  class SiLU (line 9) | class SiLU(nn.Module):
    method forward (line 11) | def forward(x):
  class Hardswish (line 20) | class Hardswish(nn.Module):
    method forward (line 22) | def forward(x):
  class Mish (line 31) | class Mish(nn.Module):
    method forward (line 35) | def forward(x):
  class MemoryEfficientMish (line 40) | class MemoryEfficientMish(nn.Module):
    class F (line 41) | class F(torch.autograd.Function):
      method forward (line 43) | def forward(ctx, x):
      method backward (line 49) | def backward(ctx, grad_output):
    method forward (line 56) | def forward(self, x):
  class FReLU (line 61) | class FReLU(nn.Module):
    method __init__ (line 64) | def __init__(self, c1, k=3):  # ch_in, kernel
    method forward (line 70) | def forward(self, x):
  class AconC (line 79) | class AconC(nn.Module):
    method __init__ (line 87) | def __init__(self, c1):
    method forward (line 94) | def forward(self, x):
  class MetaAconC (line 100) | class MetaAconC(nn.Module):
    method __init__ (line 108) | def __init__(self, c1, k=1, s=1, r=16):
    method forward (line 119) | def forward(self, x):
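
These classes exist to provide export-friendly (ONNX/CoreML-safe) versions of activations; the upstream YOLOv5 forms they implement are:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SiLU(nn.Module):
        @staticmethod
        def forward(x):
            return x * torch.sigmoid(x)                      # SiLU / Swish

    class Hardswish(nn.Module):
        @staticmethod
        def forward(x):
            return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0     # export-safe Hardswish

    class Mish(nn.Module):
        @staticmethod
        def forward(x):
            return x * F.softplus(x).tanh()                  # Mish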

FILE: utils/augmentations.py
  class Albumentations (line 20) | class Albumentations:
    method __init__ (line 22) | def __init__(self, size=640):
    method __call__ (line 49) | def __call__(self, im, labels, p=1.0):
  function normalize (line 57) | def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
  function denormalize (line 66) | def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
  function augment_hsv (line 73) | def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
  function hist_equalize (line 89) | def hist_equalize(im, clahe=True, bgr=False):
  function replicate (line 100) | def replicate(im, labels):
  function letterbox (line 121) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
  function random_perspective (line 154) | def random_perspective(
  function copy_paste (line 244) | def copy_paste(im, labels, segments, p=0.5):
  function cutout (line 270) | def cutout(im, labels, p=0.5):
  function mixup (line 301) | def mixup(im, labels, im2, labels2):
  function box_candidates (line 313) | def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1...
  function classify_albumentations (line 326) | def classify_albumentations(
  function classify_transforms (line 370) | def classify_transforms(size=224):
  class LetterBox (line 377) | class LetterBox:
    method __init__ (line 379) | def __init__(self, size=(640, 640), auto=False, stride=32):
    method __call__ (line 388) | def __call__(self, im):
  class CenterCrop (line 404) | class CenterCrop:
    method __init__ (line 406) | def __init__(self, size=640):
    method __call__ (line 411) | def __call__(self, im):
  class ToTensor (line 423) | class ToTensor:
    method __init__ (line 425) | def __init__(self, half=False):
    method __call__ (line 430) | def __call__(self, im):
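
letterbox() is the resize-and-pad step that feeds fixed-size inputs to the network while preserving aspect ratio. A simplified but behavior-compatible sketch (upstream adds auto/stride handling and a scale-up switch):

    import cv2
    import numpy as np

    def letterbox_simple(im, new_shape=(640, 640), color=(114, 114, 114)):
        """Scale im to fit new_shape (h, w), then pad borders with gray 114."""
        h, w = im.shape[:2]
        r = min(new_shape[0] / h, new_shape[1] / w)          # scale ratio
        new_unpad = (int(round(w * r)), int(round(h * r)))   # (width, height) for cv2
        dw = new_shape[1] - new_unpad[0]                     # total horizontal padding
        dh = new_shape[0] - new_unpad[1]                     # total vertical padding
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = dh // 2, dh - dh // 2
        left, right = dw // 2, dw - dw // 2
        im = cv2.copyMakeBorder(im, top, bottom, left, right,
                                cv2.BORDER_CONSTANT, value=color)
        return im, r, (left, top)                            # image, ratio, pad offsets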

FILE: utils/autoanchor.py
  function check_anchor_order (line 17) | def check_anchor_order(m):
  function check_anchors (line 28) | def check_anchors(dataset, model, thr=4.0, imgsz=640):
  function kmean_anchors (line 65) | def kmean_anchors(dataset="./data/coco128.yaml", n=9, img_size=640, thr=...

FILE: utils/autobatch.py
  function check_train_batch_size (line 13) | def check_train_batch_size(model, imgsz=640, amp=True):
  function autobatch (line 19) | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):

FILE: utils/callbacks.py
  class Callbacks (line 7) | class Callbacks:
    method __init__ (line 10) | def __init__(self):
    method register_action (line 35) | def register_action(self, hook, name="", callback=None):
    method get_registered_actions (line 48) | def get_registered_actions(self, hook=None):
    method run (line 57) | def run(self, hook, *args, thread=False, **kwargs):
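
Callbacks is upstream YOLOv5's hook registry: training code fires named hooks and loggers register callables against them. A minimal sketch of the pattern with a reduced hook list:

    import threading

    class CallbacksSketch:
        def __init__(self):
            self._callbacks = {"on_train_start": [], "on_fit_epoch_end": []}  # subset of hooks

        def register_action(self, hook, name="", callback=None):
            assert hook in self._callbacks and callable(callback)
            self._callbacks[hook].append({"name": name, "callback": callback})

        def run(self, hook, *args, thread=False, **kwargs):
            for logger in self._callbacks[hook]:
                if thread:
                    threading.Thread(target=logger["callback"], args=args,
                                     kwargs=kwargs, daemon=True).start()
                else:
                    logger["callback"](*args, **kwargs)

    cb = CallbacksSketch()
    cb.register_action("on_train_start", name="hello", callback=lambda: print("training started"))
    cb.run("on_train_start")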

FILE: utils/dataloaders.py
  function get_hash (line 76) | def get_hash(paths):
  function exif_size (line 84) | def exif_size(img):
  function exif_transpose (line 94) | def exif_transpose(image):
  function seed_worker (line 121) | def seed_worker(worker_id):
  class SmartDistributedSampler (line 134) | class SmartDistributedSampler(distributed.DistributedSampler):
    method __iter__ (line 135) | def __iter__(self):
  function create_dataloader (line 159) | def create_dataloader(
  class InfiniteDataLoader (line 218) | class InfiniteDataLoader(dataloader.DataLoader):
    method __init__ (line 225) | def __init__(self, *args, **kwargs):
    method __len__ (line 233) | def __len__(self):
    method __iter__ (line 237) | def __iter__(self):
  class _RepeatSampler (line 243) | class _RepeatSampler:
    method __init__ (line 251) | def __init__(self, sampler):
    method __iter__ (line 255) | def __iter__(self):
  class LoadScreenshots (line 261) | class LoadScreenshots:
    method __init__ (line 263) | def __init__(self, source, img_size=640, stride=32, auto=True, transfo...
    method __iter__ (line 297) | def __iter__(self):
    method __next__ (line 301) | def __next__(self):
  class LoadImages (line 318) | class LoadImages:
    method __init__ (line 321) | def __init__(self, path, img_size=640, stride=32, auto=True, transform...
    method __iter__ (line 360) | def __iter__(self):
    method __next__ (line 365) | def __next__(self):
    method _new_video (line 408) | def _new_video(self, path):
    method _cv2_rotate (line 418) | def _cv2_rotate(self, im):
    method __len__ (line 428) | def __len__(self):
  class LoadStreams (line 433) | class LoadStreams:
    method __init__ (line 435) | def __init__(self, sources="file.streams", img_size=640, stride=32, au...
    method update (line 483) | def update(self, i, cap, stream):
    method __iter__ (line 499) | def __iter__(self):
    method __next__ (line 504) | def __next__(self):
    method __len__ (line 523) | def __len__(self):
  function img2label_paths (line 528) | def img2label_paths(img_paths):
  class LoadImagesAndLabels (line 536) | class LoadImagesAndLabels(Dataset):
    method __init__ (line 541) | def __init__(
    method check_cache_ram (line 699) | def check_cache_ram(self, safety_margin=0.1, prefix=""):
    method cache_labels (line 718) | def cache_labels(self, path=Path("./labels.cache"), prefix=""):
    method __len__ (line 758) | def __len__(self):
    method __getitem__ (line 768) | def __getitem__(self, index):
    method load_image (line 845) | def load_image(self, i):
    method cache_images_to_disk (line 870) | def cache_images_to_disk(self, i):
    method load_mosaic (line 876) | def load_mosaic(self, index):
    method load_mosaic9 (line 936) | def load_mosaic9(self, index):
    method collate_fn (line 1018) | def collate_fn(batch):
    method collate_fn4 (line 1026) | def collate_fn4(batch):
  function flatten_recursive (line 1055) | def flatten_recursive(path=DATASETS_DIR / "coco128"):
  function extract_boxes (line 1067) | def extract_boxes(path=DATASETS_DIR / "coco128"):
  function autosplit (line 1106) | def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0...
  function verify_image_label (line 1132) | def verify_image_label(args):
  class HUBDatasetStats (line 1184) | class HUBDatasetStats:
    method __init__ (line 1200) | def __init__(self, path="coco128.yaml", autodownload=False):
    method _find_yaml (line 1221) | def _find_yaml(dir):
    method _unzip (line 1233) | def _unzip(self, path):
    method _hub_ops (line 1243) | def _hub_ops(self, f, max_dim=1920):
    method get_json (line 1261) | def get_json(self, save=False, verbose=False):
    method process_images (line 1299) | def process_images(self):
  class ClassificationDataset (line 1315) | class ClassificationDataset(torchvision.datasets.ImageFolder):
    method __init__ (line 1325) | def __init__(self, root, augment, imgsz, cache=False):
    method __getitem__ (line 1336) | def __getitem__(self, i):
  function create_classification_dataloader (line 1354) | def create_classification_dataloader(
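
Usage sketch for this module's simplest loader — a minimal, hedged example assuming the standard YOLOv5 LoadImages semantics (letterboxed RGB CHW output); the "data/images" source path is illustrative:

    import torch
    from utils.dataloaders import LoadImages

    # Each iteration yields (path, letterboxed CHW image, original BGR image,
    # video capture handle or None, log string).
    dataset = LoadImages("data/images", img_size=640, stride=32, auto=True)
    for path, im, im0, cap, s in dataset:
        im = torch.from_numpy(im).float() / 255.0  # uint8 -> float32 in [0, 1]
        if im.ndimension() == 3:
            im = im.unsqueeze(0)                   # add a batch dimension
        # feed `im` to a detection model here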

FILE: utils/downloads.py
  function is_url (line 13) | def is_url(url, check=True):
  function gsutil_getsize (line 24) | def gsutil_getsize(url=""):
  function url_getsize (line 34) | def url_getsize(url="https://ultralytics.com/images/bus.jpg"):
  function curl_download (line 40) | def curl_download(url, filename, *, silent: bool = False) -> bool:
  function safe_download (line 60) | def safe_download(file, url, url2=None, min_bytes=1e0, error_msg=""):
  function attempt_download (line 88) | def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"):
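
A short sketch of fetching pretrained weights via this module; the filename is illustrative, and the call falls back to the named GitHub release assets when the file is missing locally:

    from utils.downloads import attempt_download

    # Returns the local path, downloading from the ultralytics/yolov5 v7.0
    # release assets only if yolov5s.pt is not already present.
    weights = attempt_download("yolov5s.pt", repo="ultralytics/yolov5", release="v7.0")
    print(weights)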

FILE: utils/flask_rest_api/restapi.py
  function predict (line 18) | def predict(model):
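
A hedged client-side sketch for this Flask handler, assuming the stock YOLOv5 example's route and multipart field name (both are assumptions; only predict(model) is indexed above):

    import requests

    # POST an image as multipart form data and receive JSON detections.
    url = "http://localhost:5000/v1/object-detection/yolov5s"  # assumed route
    with open("zidane.jpg", "rb") as f:                        # illustrative image
        response = requests.post(url, files={"image": f})      # assumed field name
    print(response.json())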

FILE: utils/general.py
  function is_ascii (line 75) | def is_ascii(s=""):
  function is_chinese (line 81) | def is_chinese(s="人工智能"):
  function is_colab (line 86) | def is_colab():
  function is_jupyter (line 91) | def is_jupyter():
  function is_kaggle (line 105) | def is_kaggle():
  function is_docker (line 110) | def is_docker() -> bool:
  function is_writeable (line 121) | def is_writeable(dir, test=False):
  function set_logging (line 138) | def set_logging(name=LOGGING_NAME, verbose=True):
  function user_config_dir (line 172) | def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"):
  class Profile (line 190) | class Profile(contextlib.ContextDecorator):
    method __init__ (line 192) | def __init__(self, t=0.0, device: torch.device = None):
    method __enter__ (line 198) | def __enter__(self):
    method __exit__ (line 203) | def __exit__(self, type, value, traceback):
    method time (line 208) | def time(self):
  class Timeout (line 215) | class Timeout(contextlib.ContextDecorator):
    method __init__ (line 217) | def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors...
    method _timeout_handler (line 223) | def _timeout_handler(self, signum, frame):
    method __enter__ (line 227) | def __enter__(self):
    method __exit__ (line 233) | def __exit__(self, exc_type, exc_val, exc_tb):
  class WorkingDirectory (line 241) | class WorkingDirectory(contextlib.ContextDecorator):
    method __init__ (line 243) | def __init__(self, new_dir):
    method __enter__ (line 248) | def __enter__(self):
    method __exit__ (line 252) | def __exit__(self, exc_type, exc_val, exc_tb):
  function methods (line 257) | def methods(instance):
  function print_args (line 262) | def print_args(args: Optional[dict] = None, show_file=True, show_func=Fa...
  function init_seeds (line 277) | def init_seeds(seed=0, deterministic=False):
  function intersect_dicts (line 296) | def intersect_dicts(da, db, exclude=()):
  function get_default_args (line 303) | def get_default_args(func):
  function get_latest_run (line 309) | def get_latest_run(search_dir="."):
  function file_age (line 315) | def file_age(path=__file__):
  function file_date (line 321) | def file_date(path=__file__):
  function file_size (line 327) | def file_size(path):
  function check_online (line 339) | def check_online():
  function git_describe (line 356) | def git_describe(path=ROOT):
  function check_git_status (line 371) | def check_git_status(repo="ultralytics/yolov5", branch="master"):
  function check_git_info (line 400) | def check_git_info(path="."):
  function check_python (line 418) | def check_python(minimum="3.8.0"):
  function check_version (line 423) | def check_version(current="0.0.0", minimum="0.0.0", name="version ", pin...
  function check_img_size (line 435) | def check_img_size(imgsz, s=32, floor=0):
  function check_imshow (line 447) | def check_imshow(warn=False):
  function check_suffix (line 463) | def check_suffix(file="yolov5s.pt", suffix=(".pt",), msg=""):
  function check_yaml (line 474) | def check_yaml(file, suffix=(".yaml", ".yml")):
  function check_file (line 479) | def check_file(file, suffix=""):
  function check_font (line 509) | def check_font(font=FONT, progress=False):
  function check_dataset (line 519) | def check_dataset(data, autodownload=True):
  function check_amp (line 585) | def check_amp(model):
  function yaml_load (line 613) | def yaml_load(file="data.yaml"):
  function yaml_save (line 619) | def yaml_save(file="data.yaml", data=None):
  function unzip_file (line 629) | def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX")):
  function url2file (line 641) | def url2file(url):
  function download (line 651) | def download(url, dir=".", unzip=True, delete=True, curl=False, threads=...
  function make_divisible (line 698) | def make_divisible(x, divisor):
  function clean_str (line 705) | def clean_str(s):
  function one_cycle (line 712) | def one_cycle(y1=0.0, y2=1.0, steps=100):
  function colorstr (line 721) | def colorstr(*input):
  function labels_to_class_weights (line 752) | def labels_to_class_weights(labels, nc=80):
  function labels_to_image_weights (line 771) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  function coco80_to_coco91_class (line 778) | def coco80_to_coco91_class():
  function xyxy2xywh (line 872) | def xyxy2xywh(x):
  function xywh2xyxy (line 882) | def xywh2xyxy(x):
  function xywhn2xyxy (line 892) | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
  function xyxy2xywhn (line 902) | def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
  function xyn2xy (line 914) | def xyn2xy(x, w=640, h=640, padw=0, padh=0):
  function segment2box (line 922) | def segment2box(segment, width=640, height=640):
  function segments2boxes (line 933) | def segments2boxes(segments):
  function resample_segments (line 942) | def resample_segments(segments, n=1000):
  function scale_boxes (line 952) | def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
  function scale_segments (line 968) | def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, nor...
  function clip_boxes (line 987) | def clip_boxes(boxes, shape):
  function clip_segments (line 999) | def clip_segments(segments, shape):
  function non_max_suppression (line 1009) | def non_max_suppression(
  function strip_optimizer (line 1132) | def strip_optimizer(f="best.pt", s=""):
  function print_mutation (line 1153) | def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr...
  function apply_classifier (line 1208) | def apply_classifier(x, model, img, im0):
  function increment_path (line 1243) | def increment_path(path, exist_ok=False, sep="", mkdir=False):
  function imread (line 1278) | def imread(filename, flags=cv2.IMREAD_COLOR):
  function imwrite (line 1285) | def imwrite(filename, img):
  function imshow (line 1294) | def imshow(path, im):
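
The box utilities above chain together in the usual YOLOv5 post-processing order; a runnable sketch with dummy predictions (shapes are illustrative):

    import torch
    from utils.general import non_max_suppression, scale_boxes, xyxy2xywh

    # Dummy raw output shaped like a detection head: (batch, anchors, xywh + conf + 80 classes).
    pred = torch.rand(1, 100, 85)
    pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)

    im_shape, im0_shape = (640, 640), (1080, 1920)  # letterboxed vs. original image sizes
    for det in pred:                                # one (n, 6) tensor per image
        if len(det):
            # Map boxes from the letterboxed frame back to the original image,
            # then convert xyxy corners to xywh centers.
            det[:, :4] = scale_boxes(im_shape, det[:, :4], im0_shape).round()
            xywh = xyxy2xywh(det[:, :4])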

FILE: utils/image_util.py
  function crop_and_restore_image (line 5) | def crop_and_restore_image(image, x, y, w, h):
  function crop_center (line 28) | def crop_center(image, target_width, target_height):
  function crop_center_xy (line 56) | def crop_center_xy(image, target_width, target_height, xyxy):
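
These helpers are specific to this repository and only their signatures are indexed; as a rough guess at the crop_center semantics, a hypothetical NumPy re-implementation (name and behavior assumed from the parameters) would be:

    import numpy as np

    def center_crop(image: np.ndarray, target_width: int, target_height: int) -> np.ndarray:
        # Hypothetical equivalent: cut a (target_height, target_width) window
        # out of the middle of the image.
        h, w = image.shape[:2]
        x = max((w - target_width) // 2, 0)
        y = max((h - target_height) // 2, 0)
        return image[y:y + target_height, x:x + target_width]

    print(center_crop(np.zeros((1080, 1920, 3), dtype=np.uint8), 640, 640).shape)  # (640, 640, 3)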

FILE: utils/loggers/__init__.py
  function _json_default (line 60) | def _json_default(value):
  class Loggers (line 74) | class Loggers:
    method __init__ (line 76) | def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, lo...
    method remote_dataset (line 154) | def remote_dataset(self):
    method on_train_start (line 166) | def on_train_start(self):
    method on_pretrain_routine_start (line 171) | def on_pretrain_routine_start(self):
    method on_pretrain_routine_end (line 176) | def on_pretrain_routine_end(self, labels, names):
    method on_train_batch_end (line 189) | def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
    method on_train_epoch_end (line 210) | def on_train_epoch_end(self, epoch):
    method on_val_start (line 218) | def on_val_start(self):
    method on_val_image_end (line 223) | def on_val_image_end(self, pred, predn, path, names, im):
    method on_val_batch_end (line 230) | def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
    method on_val_end (line 235) | def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusi...
    method on_fit_epoch_end (line 247) | def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
    method on_model_save (line 286) | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
    method on_train_end (line 299) | def on_train_end(self, last, best, epoch, results):
    method on_params_update (line 335) | def on_params_update(self, params: dict):
  class GenericLogger (line 345) | class GenericLogger:
    method __init__ (line 355) | def __init__(self, opt, console_logger, include=("tb", "wandb", "clear...
    method log_metrics (line 390) | def log_metrics(self, metrics, epoch):
    method log_images (line 409) | def log_images(self, files, name="Images", epoch=0):
    method log_graph (line 427) | def log_graph(self, model, imgsz=(640, 640)):
    method log_model (line 432) | def log_model(self, model_path, epoch=0, metadata=None):
    method update_params (line 444) | def update_params(self, params):
  function log_tensorboard_graph (line 452) | def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
  function web_project_name (line 465) | def web_project_name(project):

FILE: utils/loggers/clearml/clearml_utils.py
  function construct_dataset (line 23) | def construct_dataset(clearml_info_string):
  class ClearmlLogger (line 67) | class ClearmlLogger:
    method __init__ (line 78) | def __init__(self, opt, hyp):
    method log_scalars (line 132) | def log_scalars(self, metrics, epoch):
    method log_model (line 144) | def log_model(self, model_path, model_name, epoch=0):
    method log_summary (line 157) | def log_summary(self, metrics):
    method log_plot (line 167) | def log_plot(self, title, plot_path):
    method log_debug_samples (line 182) | def log_debug_samples(self, files, title="Debug Samples"):
    method log_image_with_boxes (line 198) | def log_image_with_boxes(self, image_path, boxes, class_names, image, ...

FILE: utils/loggers/comet/__init__.py
  class CometLogger (line 66) | class CometLogger:
    method __init__ (line 69) | def __init__(self, opt, hyp, run_id=None, job_type="Training", **exper...
    method _get_experiment (line 169) | def _get_experiment(self, mode, experiment_id=None):
    method log_metrics (line 202) | def log_metrics(self, log_dict, **kwargs):
    method log_parameters (line 206) | def log_parameters(self, log_dict, **kwargs):
    method log_asset (line 210) | def log_asset(self, asset_path, **kwargs):
    method log_asset_data (line 214) | def log_asset_data(self, asset, **kwargs):
    method log_image (line 218) | def log_image(self, img, **kwargs):
    method log_model (line 222) | def log_model(self, path, opt, epoch, fitness_score, best_model=False):
    method check_dataset (line 246) | def check_dataset(self, data_file):
    method log_predictions (line 259) | def log_predictions(self, image, labelsn, path, shape, predn):
    method preprocess_prediction (line 300) | def preprocess_prediction(self, image, labels, shape, pred):
    method add_assets_to_artifact (line 320) | def add_assets_to_artifact(self, artifact, path, asset_path, split):
    method upload_dataset_artifact (line 346) | def upload_dataset_artifact(self):
    method download_dataset_artifact (line 371) | def download_dataset_artifact(self, artifact_path):
    method update_data_paths (line 391) | def update_data_paths(self, data_dict):
    method on_pretrain_routine_end (line 404) | def on_pretrain_routine_end(self, paths):
    method on_train_start (line 417) | def on_train_start(self):
    method on_train_epoch_start (line 421) | def on_train_epoch_start(self):
    method on_train_epoch_end (line 425) | def on_train_epoch_end(self, epoch):
    method on_train_batch_start (line 431) | def on_train_batch_start(self):
    method on_train_batch_end (line 435) | def on_train_batch_end(self, log_dict, step):
    method on_train_end (line 443) | def on_train_end(self, files, save_dir, last, best, epoch, results):
    method on_val_start (line 471) | def on_val_start(self):
    method on_val_batch_start (line 475) | def on_val_batch_start(self):
    method on_val_batch_end (line 479) | def on_val_batch_end(self, batch_i, images, targets, paths, shapes, ou...
    method on_val_end (line 498) | def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusi...
    method on_fit_epoch_end (line 533) | def on_fit_epoch_end(self, result, epoch):
    method on_model_save (line 537) | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
    method on_params_update (line 542) | def on_params_update(self, params):
    method finish_run (line 546) | def finish_run(self):

FILE: utils/loggers/comet/comet_utils.py
  function download_model_checkpoint (line 21) | def download_model_checkpoint(opt, experiment):
  function set_opt_parameters (line 69) | def set_opt_parameters(opt, experiment):
  function check_comet_weights (line 100) | def check_comet_weights(opt):
  function check_comet_resume (line 126) | def check_comet_resume(opt):

FILE: utils/loggers/comet/hpo.py
  function get_args (line 29) | def get_args(known=False):
  function run (line 90) | def run(parameters, opt):

FILE: utils/loggers/wandb/wandb_utils.py
  class WandbLogger (line 33) | class WandbLogger:
    method __init__ (line 46) | def __init__(self, opt, run_id=None, job_type="Training"):
    method setup_training (line 86) | def setup_training(self, opt):
    method log_model (line 119) | def log_model(self, path, opt, epoch, fitness_score, best_model=False):
    method val_one_image (line 154) | def val_one_image(self, pred, predn, path, names, im):
    method log (line 158) | def log(self, log_dict):
    method end_epoch (line 169) | def end_epoch(self):
    method finish_run (line 188) | def finish_run(self):
  function all_logging_disabled (line 199) | def all_logging_disabled(highest_level=logging.CRITICAL):

FILE: utils/loss.py
  function smooth_BCE (line 11) | def smooth_BCE(eps=0.1):
  class BCEBlurWithLogitsLoss (line 16) | class BCEBlurWithLogitsLoss(nn.Module):
    method __init__ (line 18) | def __init__(self, alpha=0.05):
    method forward (line 26) | def forward(self, pred, true):
  class FocalLoss (line 39) | class FocalLoss(nn.Module):
    method __init__ (line 41) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 52) | def forward(self, pred, true):
  class QFocalLoss (line 73) | class QFocalLoss(nn.Module):
    method __init__ (line 75) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 84) | def forward(self, pred, true):
  class ComputeLoss (line 103) | class ComputeLoss:
    method __init__ (line 107) | def __init__(self, model, autobalance=False):
    method __call__ (line 134) | def __call__(self, p, targets):  # predictions, targets
    method build_targets (line 191) | def build_targets(self, p, targets):
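
A minimal training-step sketch for ComputeLoss, assuming the stock models/yolov5n.yaml config is present and the hyperparameter keys match the usual YOLOv5 set:

    import torch
    from models.yolo import DetectionModel
    from utils.loss import ComputeLoss

    # Build a small model and attach the hyperparameters ComputeLoss reads.
    model = DetectionModel(cfg="models/yolov5n.yaml", ch=3, nc=80)
    model.hyp = {"box": 0.05, "cls": 0.5, "cls_pw": 1.0, "obj": 1.0,
                 "obj_pw": 1.0, "fl_gamma": 0.0, "anchor_t": 4.0, "label_smoothing": 0.0}
    compute_loss = ComputeLoss(model)

    imgs = torch.zeros(2, 3, 640, 640)                    # dummy batch
    targets = torch.tensor([[0, 0, 0.5, 0.5, 0.2, 0.2]])  # (image, class, x, y, w, h), normalized
    preds = model(imgs)                                   # raw feature maps in train mode
    loss, loss_items = compute_loss(preds, targets)       # scalar loss, (box, obj, cls) items
    loss.backward()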

FILE: utils/metrics.py
  function fitness (line 15) | def fitness(x):
  function smooth (line 21) | def smooth(y, f=0.05):
  function ap_per_class (line 29) | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir="....
  function compute_ap (line 98) | def compute_ap(recall, precision):
  class ConfusionMatrix (line 126) | class ConfusionMatrix:
    method __init__ (line 128) | def __init__(self, nc, conf=0.25, iou_thres=0.45):
    method process_batch (line 135) | def process_batch(self, detections, labels):
    method tp_fp (line 182) | def tp_fp(self):
    method plot (line 192) | def plot(self, normalize=True, save_dir="", names=()):
    method print (line 224) | def print(self):
  function bbox_iou (line 230) | def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, ...
  function box_iou (line 276) | def box_iou(box1, box2, eps=1e-7):
  function bbox_ioa (line 298) | def bbox_ioa(box1, box2, eps=1e-7):
  function wh_iou (line 324) | def wh_iou(wh1, wh2, eps=1e-7):
  function plot_pr_curve (line 338) | def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()):
  function plot_mc_curve (line 363) | def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabe...
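
A small worked example of bbox_iou, which both the loss and the NMS path rely on; the boxes are illustrative center-format tensors:

    import torch
    from utils.metrics import bbox_iou

    a = torch.tensor([[50.0, 50.0, 20.0, 20.0]])  # (x_center, y_center, w, h)
    b = torch.tensor([[60.0, 50.0, 20.0, 20.0]])  # same box shifted by half its width
    print(bbox_iou(a, a, xywh=True))              # ~1.0: identical boxes
    print(bbox_iou(a, b, xywh=True, CIoU=True))   # complete IoU adds a center-distance penalty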

FILE: utils/plots.py
  class Colors (line 31) | class Colors:
    method __init__ (line 33) | def __init__(self):
    method __call__ (line 64) | def __call__(self, i, bgr=False):
    method hex2rgb (line 70) | def hex2rgb(h):
  function feature_visualization (line 78) | def feature_visualization(x, module_type, stage, n=32, save_dir=Path("ru...
  function hist2d (line 108) | def hist2d(x, y, n=100):
  function butter_lowpass_filtfilt (line 121) | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
  function output_to_target (line 135) | def output_to_target(output, max_det=300):
  function plot_images (line 148) | def plot_images(images, targets, paths=None, fname="images.jpg", names=N...
  function plot_lr_scheduler (line 212) | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""):
  function plot_val_txt (line 229) | def plot_val_txt():
  function plot_targets_txt (line 251) | def plot_targets_txt():
  function plot_val_study (line 268) | def plot_val_study(file="", dir="", x=None):
  function plot_labels (line 324) | def plot_labels(labels, names=(), save_dir=Path("")):
  function imshow_cls (line 369) | def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=...
  function plot_evolve (line 399) | def plot_evolve(evolve_csv="path/to/evolve.csv"):
  function plot_results (line 430) | def plot_results(file="path/to/results.csv", dir=""):
  function profile_idetection (line 461) | def profile_idetection(start=0, stop=0, labels=(), save_dir=""):
  function save_one_box (line 496) | def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, squar...

FILE: utils/segment/augmentations.py
  function mixup (line 14) | def mixup(im, labels, segments, im2, labels2, segments2):
  function random_perspective (line 27) | def random_perspective(

FILE: utils/segment/dataloaders.py
  function create_dataloader (line 21) | def create_dataloader(
  class LoadImagesAndLabelsAndMasks (line 84) | class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels):  # for training/...
    method __init__ (line 85) | def __init__(
    method __getitem__ (line 125) | def __getitem__(self, index):
    method load_mosaic (line 233) | def load_mosaic(self, index):
    method collate_fn (line 294) | def collate_fn(batch):
  function polygon2mask (line 303) | def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
  function polygons2masks (line 323) | def polygons2masks(img_size, polygons, color, downsample_ratio=1):
  function polygons2masks_overlap (line 338) | def polygons2masks_overlap(img_size, segments, downsample_ratio=1):

FILE: utils/segment/general.py
  function crop_mask (line 9) | def crop_mask(masks, boxes):
  function process_mask_upsample (line 26) | def process_mask_upsample(protos, masks_in, bboxes, shape):
  function process_mask (line 44) | def process_mask(protos, masks_in, bboxes, shape, upsample=False):
  function process_mask_native (line 71) | def process_mask_native(protos, masks_in, bboxes, shape):
  function scale_image (line 94) | def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
  function mask_iou (line 122) | def mask_iou(mask1, mask2, eps=1e-7):
  function masks_iou (line 135) | def masks_iou(mask1, mask2, eps=1e-7):
  function masks2segments (line 148) | def masks2segments(masks, strategy="largest"):

FILE: utils/segment/loss.py
  class ComputeLoss (line 14) | class ComputeLoss:
    method __init__ (line 16) | def __init__(self, model, autobalance=False, overlap=False):
    method __call__ (line 48) | def __call__(self, preds, targets, masks):  # predictions, targets, model
    method single_mask_loss (line 117) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
    method build_targets (line 123) | def build_targets(self, p, targets):

FILE: utils/segment/metrics.py
  function fitness (line 9) | def fitness(x):
  function ap_per_class_box_and_mask (line 15) | def ap_per_class_box_and_mask(
  class Metric (line 56) | class Metric:
    method __init__ (line 57) | def __init__(self) -> None:
    method ap50 (line 65) | def ap50(self):
    method ap (line 75) | def ap(self):
    method mp (line 83) | def mp(self):
    method mr (line 93) | def mr(self):
    method map50 (line 103) | def map50(self):
    method map (line 113) | def map(self):
    method mean_results (line 122) | def mean_results(self):
    method class_result (line 126) | def class_result(self, i):
    method get_maps (line 130) | def get_maps(self, nc):
    method update (line 137) | def update(self, results):
  class Metrics (line 150) | class Metrics:
    method __init__ (line 153) | def __init__(self) -> None:
    method update (line 157) | def update(self, results):
    method mean_results (line 165) | def mean_results(self):
    method class_result (line 169) | def class_result(self, i):
    method get_maps (line 173) | def get_maps(self, nc):
    method ap_class_index (line 180) | def ap_class_index(self):

FILE: utils/segment/plots.py
  function plot_images_and_masks (line 19) | def plot_images_and_masks(images, targets, masks, paths=None, fname="ima...
  function plot_results_with_masks (line 115) | def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):

FILE: utils/torch_utils.py
  function smart_inference_mode (line 36) | def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9...
  function smartCrossEntropyLoss (line 45) | def smartCrossEntropyLoss(label_smoothing=0.0):
  function smart_DDP (line 56) | def smart_DDP(model):
  function reshape_classifier_output (line 68) | def reshape_classifier_output(model, n=1000):
  function torch_distributed_zero_first (line 92) | def torch_distributed_zero_first(local_rank: int):
  function device_count (line 103) | def device_count():
  function select_device (line 113) | def select_device(device="", batch_size=0, newline=True):
  function time_sync (line 161) | def time_sync():
  function profile (line 168) | def profile(input, ops, n=10, device=None):
  function is_parallel (line 221) | def is_parallel(model):
  function de_parallel (line 226) | def de_parallel(model):
  function initialize_weights (line 231) | def initialize_weights(model):
  function find_modules (line 246) | def find_modules(model, mclass=nn.Conv2d):
  function sparsity (line 251) | def sparsity(model):
  function prune (line 262) | def prune(model, amount=0.3):
  function fuse_conv_and_bn (line 273) | def fuse_conv_and_bn(conv, bn):
  function model_info (line 307) | def model_info(model, verbose=False, imgsz=640):
  function scale_img (line 338) | def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,...
  function copy_attr (line 352) | def copy_attr(a, b, include=(), exclude=()):
  function smart_optimizer (line 361) | def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e...
  function smart_hub_load (line 398) | def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs):
  function smart_resume (line 410) | def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs...
  class EarlyStopping (line 432) | class EarlyStopping:
    method __init__ (line 434) | def __init__(self, patience=30):
    method __call__ (line 441) | def __call__(self, epoch, fitness):
  class ModelEMA (line 459) | class ModelEMA:
    method __init__ (line 465) | def __init__(self, model, decay=0.9999, tau=2000, updates=0):
    method update (line 475) | def update(self, model):
    method update_attr (line 487) | def update_attr(self, model, include=(), exclude=("process_group", "re...
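
A sketch of the device-selection and EMA helpers following the signatures above; the linear layer stands in for a real detection model:

    import torch.nn as nn
    from utils.torch_utils import select_device, ModelEMA

    device = select_device("")            # "" -> first CUDA device if available, else CPU
    model = nn.Linear(10, 2).to(device)   # placeholder model

    ema = ModelEMA(model, decay=0.9999, tau=2000)
    for _ in range(3):                    # in a real loop: after each optimizer.step()
        ema.update(model)                 # exponential moving average of the weights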

FILE: utils/triton.py
  class TritonRemoteModel (line 10) | class TritonRemoteModel:
    method __init__ (line 18) | def __init__(self, url: str):
    method runtime (line 54) | def runtime(self):
    method __call__ (line 58) | def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typi...
    method _create_inputs (line 73) | def _create_inputs(self, *args, **kwargs):
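
TritonRemoteModel wraps a Triton Inference Server endpoint behind a tensor-in/tensor-out call; a hedged sketch assuming the tritonclient package is installed and a server with one deployed model is reachable at the URL below:

    import torch
    from utils.triton import TritonRemoteModel

    # The constructor queries the server's model repository (HTTP or gRPC,
    # chosen from the URL scheme) and binds to the model it finds there.
    model = TritonRemoteModel("http://localhost:8000")
    y = model(torch.zeros(1, 3, 640, 640))  # positional tensors map onto the model inputs
    print(type(y))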

FILE: val.py
  function save_one_txt (line 64) | def save_one_txt(predn, save_conf, shape, file):
  function save_one_json (line 74) | def save_one_json(predn, jdict, path, class_map):
  function process_batch (line 94) | def process_batch(detections, labels, iouv):
  function run (line 121) | def run(
  function parse_opt (line 365) | def parse_opt():
  function main (line 398) | def main(opt):
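
val.py doubles as an importable API; a minimal programmatic sketch assuming the standard YOLOv5 run() keywords (dataset and weights are illustrative stock choices):

    from val import run

    # Evaluate pretrained weights on COCO128; returns (metrics tuple, per-class mAPs, timings).
    results, maps, times = run(
        data="data/coco128.yaml",
        weights="yolov5s.pt",
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,
    )
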
Condensed preview — 244 files, each showing path, character count, and a content snippet (full structured content: 1,652K chars).
[
  {
    "path": ".dockerignore",
    "chars": 3701,
    "preview": "# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------"
  },
  {
    "path": ".gitattributes",
    "chars": 75,
    "preview": "# this drop notebooks from GitHub language stats\n*.ipynb linguist-vendored\n"
  },
  {
    "path": ".github/workflows/cla.yml",
    "chars": 1634,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralyti"
  },
  {
    "path": ".github/workflows/format.yml",
    "chars": 1060,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Ultralytics Actions https://github.com/ultralytics/actions\n# This workflow au"
  },
  {
    "path": ".github/workflows/merge-main-into-prs.yml",
    "chars": 1776,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Automatically merges repository 'main' branch into all open PRs to keep them "
  },
  {
    "path": ".gitignore",
    "chars": 4012,
    "preview": "# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------"
  },
  {
    "path": "CITATION.cff",
    "chars": 393,
    "preview": "cff-version: 1.2.0\npreferred-citation:\n  type: software\n  message: If you use YOLOv5, please cite it as below.\n  authors"
  },
  {
    "path": "CONTRIBUTING.md",
    "chars": 4946,
    "preview": "## Contributing to YOLOv5 🚀\n\nWe love your input! We want to make contributing to YOLOv5 as easy and transparent as possi"
  },
  {
    "path": "LICENSE",
    "chars": 34523,
    "preview": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C)"
  },
  {
    "path": "MouseHook.py",
    "chars": 1881,
    "preview": "# import os.path as op\n# import json\n# import threading\n# import time\n#\n# import pynput\n# from pynput.mouse import Butto"
  },
  {
    "path": "PID.py",
    "chars": 1278,
    "preview": "import time\n\n\nclass Pid():\n    def __init__(self, kp, ki, kd):\n        self.KP = kp\n        self.KI = ki\n        self.KD"
  },
  {
    "path": "README-yolo.md",
    "chars": 41687,
    "preview": "<div align=\"center\">\n  <p>\n    <a align=\"center\" href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n      <img width"
  },
  {
    "path": "README-yolo.zh-CN.md",
    "chars": 38076,
    "preview": "<div align=\"center\">\n  <p>\n    <a href=\"http://www.ultralytics.com/blog/ultralytics-yolov8-turns-one-a-year-of-breakthro"
  },
  {
    "path": "README.md",
    "chars": 1518,
    "preview": "# apex gun\n\n基于yolov5的apex英雄目标检测自动瞄准器\n\n开源交流群新建于2024-04-25,群号:206666041,加群前请先star。\n\n进群细则:请具有一定代码基础的人再进群,本群各管理都不会对一些过于基础的问题"
  },
  {
    "path": "ag.spec",
    "chars": 1425,
    "preview": "# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\npathex = [\n    'C:/Users/Administrator/PycharmProjects/yol"
  },
  {
    "path": "ag_asyn.spec",
    "chars": 1495,
    "preview": "# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\npathex = [\n    'C:/Users/Administrator/PycharmProjects/yol"
  },
  {
    "path": "apex_recoils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_recoils/core/GameWindowsStatus.py",
    "chars": 1019,
    "preview": "import threading\nimport time\n\nfrom apex_yolov5.Tools import Tools\nfrom apex_yolov5.log import LogFactory\n\n\nclass GameWin"
  },
  {
    "path": "apex_recoils/core/ReaSnowSelectGun.py",
    "chars": 2906,
    "preview": "import json\nimport os.path as op\n\nfrom apex_yolov5.Tools import Tools\nfrom apex_yolov5.log import LogFactory\nfrom apex_y"
  },
  {
    "path": "apex_recoils/core/SelectGun.py",
    "chars": 7239,
    "preview": "import threading\nimport time\nimport traceback\n\nfrom apex_recoils.core.screentaker.LocalScreenTaker import LocalScreenTak"
  },
  {
    "path": "apex_recoils/core/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_recoils/core/image_comparator/DynamicSizeImageComparator.py",
    "chars": 1691,
    "preview": "from apex_recoils.core.image_comparator.NetImageComparator import NetImageComparator\nfrom apex_yolov5.log import LogFact"
  },
  {
    "path": "apex_recoils/core/image_comparator/ImageComparator.py",
    "chars": 3422,
    "preview": "import concurrent.futures\n\nfrom apex_yolov5.log import LogFactory\nimport concurrent.futures\nimport traceback\nfrom io imp"
  },
  {
    "path": "apex_recoils/core/image_comparator/LocalImageComparator.py",
    "chars": 1709,
    "preview": "import os\nimport re\n\nfrom apex_recoils.core.image_comparator.ImageComparator import ImageComparator\nfrom apex_yolov5.log"
  },
  {
    "path": "apex_recoils/core/image_comparator/NetImageComparator.py",
    "chars": 9496,
    "preview": "import re\nimport traceback\nfrom io import BytesIO\n\nimport cv2\nimport numpy as np\nimport requests\nfrom skimage.metrics im"
  },
  {
    "path": "apex_recoils/core/image_comparator/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_recoils/core/kmnet_listener/ToggleKeyListener.py",
    "chars": 9147,
    "preview": "import time\n\nfrom apex_recoils.core import GameWindowsStatus\nfrom apex_yolov5.KmBoxNetListener import KmBoxNetListener\nf"
  },
  {
    "path": "apex_recoils/core/kmnet_listener/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_recoils/core/screentaker/CapScreenTaker.py",
    "chars": 630,
    "preview": "import cv2\n\nfrom apex_yolov5.log import LogFactory\n\n\nclass CapScreenTaker:\n    \"\"\"\n        本地截图\n    \"\"\"\n\n    def __init_"
  },
  {
    "path": "apex_recoils/core/screentaker/LocalMssScreenTaker.py",
    "chars": 778,
    "preview": "import mss\n\nfrom apex_yolov5.log import LogFactory\n\n\nclass LocalMssScreenTaker:\n    \"\"\"\n        本地截图\n    \"\"\"\n\n    def __"
  },
  {
    "path": "apex_recoils/core/screentaker/LocalScreenTaker.py",
    "chars": 635,
    "preview": "from PIL import ImageGrab\n\nfrom apex_yolov5.log import LogFactory\n\n\nclass LocalScreenTaker:\n    \"\"\"\n        本地截图\n    \"\"\""
  },
  {
    "path": "apex_recoils/core/screentaker/SocketScreenTaker.py",
    "chars": 808,
    "preview": "from apex_recoils.net.socket.Client import Client\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.log.Logger imp"
  },
  {
    "path": "apex_recoils/core/screentaker/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_recoils/net/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_recoils/net/socket/Client.py",
    "chars": 2283,
    "preview": "import pickle  # 用于序列化/反序列化数据\nimport socket\n\nfrom apex_yolov5.socket import socket_util\n\nclient_cache = {}\n\n\nclass Clien"
  },
  {
    "path": "apex_recoils/net/socket/ReaSnowSelectGunSocket.py",
    "chars": 913,
    "preview": "import time\n\nfrom apex_recoils.core.SelectGun import SelectGun\nfrom apex_recoils.net.socket.Client import Client\nfrom ap"
  },
  {
    "path": "apex_recoils/net/socket/Server.py",
    "chars": 2183,
    "preview": "import pickle\nimport socket\nimport threading\nimport traceback\n\nfrom apex_recoils.core.screentaker.LocalScreenTaker impor"
  },
  {
    "path": "apex_recoils/net/socket/SocketMouseMover.py",
    "chars": 1394,
    "preview": "from log.Logger import Logger\nfrom mouse_mover.MouseMover import MouseMover\nfrom net.socket.Client import Client\n\nfrom a"
  },
  {
    "path": "apex_recoils/net/socket/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_yolov5/Counter.py",
    "chars": 390,
    "preview": "class Counter:\n    def __init__(self):\n        self.count = 0\n\n    def increase(self):\n        self.count += 1\n        r"
  },
  {
    "path": "apex_yolov5/FrameRateMonitor.py",
    "chars": 3956,
    "preview": "import sys\nimport time\nimport traceback\n\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QA"
  },
  {
    "path": "apex_yolov5/KeyAndMouseListener.py",
    "chars": 6214,
    "preview": "import time\n\nfrom pynput.mouse import Button\n\nfrom apex_yolov5.Tools import Tools\nfrom apex_yolov5.mouse_mover import Mo"
  },
  {
    "path": "apex_yolov5/KmBoxNetListener.py",
    "chars": 3767,
    "preview": "import time\nimport traceback\n\nfrom pynput.mouse import Button\n\nfrom apex_yolov5.mouse_mover.KmBoxNetMover import KmBoxNe"
  },
  {
    "path": "apex_yolov5/LogUtil.py",
    "chars": 418,
    "preview": "class LogUtil:\n\n    def __init__(self):\n        self.use_time_dict = dict()\n\n    def set_time(self, use_time_type, use_t"
  },
  {
    "path": "apex_yolov5/RecoildsCore.py",
    "chars": 8779,
    "preview": "import json\nimport os.path as op\nimport threading\nimport time\n\nimport requests\nfrom pynput.mouse import Button\n\nfrom ape"
  },
  {
    "path": "apex_yolov5/SystemTrayApp.py",
    "chars": 2266,
    "preview": "import os\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QSystemTrayIcon, QMenu, QAction\n\n\nclass SystemTrayA"
  },
  {
    "path": "apex_yolov5/Tools.py",
    "chars": 3435,
    "preview": "import ctypes\nimport os\nimport threading\nimport time\nfrom io import BytesIO\nfrom shutil import copyfile\n\nimport cv2\nimpo"
  },
  {
    "path": "apex_yolov5/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_yolov5/apex_model.py",
    "chars": 1259,
    "preview": "from torch.cuda import is_available\n\nfrom apex_yolov5.socket.config import global_config\nfrom models.common import Detec"
  },
  {
    "path": "apex_yolov5/auxiliary.py",
    "chars": 13533,
    "preview": "import math\nimport random\nimport threading\nimport time\nimport traceback\n\nfrom pynput.mouse import Button\n\nfrom apex_reco"
  },
  {
    "path": "apex_yolov5/check_run.pyi",
    "chars": 136,
    "preview": "def check(validate_type) -> None:\n    \"\"\"\n        监权\n    \"\"\"\n    ...\n\n\ndef open_check(val_type=None):\n    ...\n\n\ndef auth"
  },
  {
    "path": "apex_yolov5/global_img_info.py",
    "chars": 1043,
    "preview": "class ImgInfo:\n    def __init__(self):\n        self.img_origin = None\n        self.shot_width = None\n        self.shot_h"
  },
  {
    "path": "apex_yolov5/grabscreen.py",
    "chars": 7315,
    "preview": "import os\nimport threading\nimport time\nimport traceback\nfrom datetime import datetime\n\nimport cv2\nimport mss\nimport mss."
  },
  {
    "path": "apex_yolov5/job_listener/JoyListener.py",
    "chars": 3362,
    "preview": "import threading\nimport traceback\n\nimport pygame\nfrom PyQt5.QtWidgets import QMessageBox\n\nfrom apex_yolov5.log import Lo"
  },
  {
    "path": "apex_yolov5/job_listener/JoyToKey.py",
    "chars": 2235,
    "preview": "from apex_yolov5.Tools import Tools\nfrom apex_yolov5.log import LogFactory\n\n\nclass JoyToKey:\n    \"\"\"\n        jtk\n    \"\"\""
  },
  {
    "path": "apex_yolov5/job_listener/RockerMonitor.py",
    "chars": 2064,
    "preview": "import time\n\nimport pygame\n\nfrom apex_yolov5.job_listener.JoyListener import JoyListener\nfrom apex_yolov5.log import Log"
  },
  {
    "path": "apex_yolov5/job_listener/S1SwitchMonitor.py",
    "chars": 6120,
    "preview": "import threading\nimport time\n\nimport pygame\n\nfrom apex_recoils.core.image_comparator.DynamicSizeImageComparator import D"
  },
  {
    "path": "apex_yolov5/job_listener/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_yolov5/log/LogFactory.py",
    "chars": 1359,
    "preview": "import json\nimport os.path\n\nfrom apex_yolov5.log.LogWindow import LogWindow\nfrom apex_yolov5.log.Logger import Logger\n\nc"
  },
  {
    "path": "apex_yolov5/log/LogWindow.py",
    "chars": 3242,
    "preview": "import os\nimport time\nimport traceback\n\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QMa"
  },
  {
    "path": "apex_yolov5/log/Logger.py",
    "chars": 790,
    "preview": "import inspect\nimport os\n\nmax_length = 0\n\n\nclass Logger:\n    \"\"\"\n        日志抽象\n    \"\"\"\n\n    def print_log(self, text, log"
  },
  {
    "path": "apex_yolov5/log/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_yolov5/magnifying_glass.py",
    "chars": 1268,
    "preview": "import cv2\nfrom PyQt5.QtGui import QImage, QPixmap, QPainter\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, Q"
  },
  {
    "path": "apex_yolov5/mouse.py",
    "chars": 1910,
    "preview": "from ctypes import windll, c_long, c_ulong, Structure, Union, c_int, POINTER, sizeof, CDLL\nfrom os import path\n\n\nbasedir"
  },
  {
    "path": "apex_yolov5/mouse_lock.py",
    "chars": 11485,
    "preview": "import math\nimport random\nimport traceback\n\nfrom apex_yolov5.KeyAndMouseListener import apex_mouse_listener\nfrom apex_yo"
  },
  {
    "path": "apex_yolov5/mouse_mover/FeiMover.py",
    "chars": 3015,
    "preview": "import ctypes\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseMover import MouseMover\n\n\nclass "
  },
  {
    "path": "apex_yolov5/mouse_mover/GHubMover.py",
    "chars": 1177,
    "preview": "from ctypes import CDLL\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseMover import MouseMove"
  },
  {
    "path": "apex_yolov5/mouse_mover/IntentManager.py",
    "chars": 2008,
    "preview": "import threading\nimport time\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseMover import Mous"
  },
  {
    "path": "apex_yolov5/mouse_mover/KmBoxMover.py",
    "chars": 1508,
    "preview": "import ctypes\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseMover import MouseMover\n\n\nclass "
  },
  {
    "path": "apex_yolov5/mouse_mover/KmBoxNetMover.py",
    "chars": 1875,
    "preview": "import traceback\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseMover import MouseMover\n\n\nclas"
  },
  {
    "path": "apex_yolov5/mouse_mover/MouseMover.py",
    "chars": 3254,
    "preview": "from ctypes import Structure, c_ulong, byref, windll\nimport win32api\nimport win32con\n\n\nclass PointAPI(Structure):\n    \"\""
  },
  {
    "path": "apex_yolov5/mouse_mover/MoverFactory.py",
    "chars": 2510,
    "preview": "import threading\n\nfrom apex_recoils.core.kmnet_listener.ToggleKeyListener import ToggleKeyListener\nfrom apex_yolov5.KmBo"
  },
  {
    "path": "apex_yolov5/mouse_mover/PanNiMover.py",
    "chars": 16508,
    "preview": "import ctypes\nimport random\nimport sys\nimport time\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover."
  },
  {
    "path": "apex_yolov5/mouse_mover/Win32ApiMover.py",
    "chars": 2178,
    "preview": "from ctypes import windll\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseMover import MouseMo"
  },
  {
    "path": "apex_yolov5/mouse_mover/WuYaMover.py",
    "chars": 1425,
    "preview": "from ctypes import *\n\nimport win32com.client\n\nfrom apex_yolov5.log import LogFactory\nfrom apex_yolov5.mouse_mover.MouseM"
  },
  {
    "path": "apex_yolov5/mouse_mover/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_yolov5/socket/config.py",
    "chars": 25496,
    "preview": "import json\nimport os\nimport os.path as op\nimport shutil\n\nimport jsonpath as jsonpath\nimport pynput\n\nfrom apex_yolov5.Co"
  },
  {
    "path": "apex_yolov5/socket/socket_util.py",
    "chars": 819,
    "preview": "def send(send_socket, byte_array, buffer_size=4096):\n    send_socket.sendall(str(len(byte_array)).encode('utf-8'))\n    r"
  },
  {
    "path": "apex_yolov5/socket/yolov5_handler.py",
    "chars": 2123,
    "preview": "import time\nimport numpy as np\nfrom torch import from_numpy, tensor\n\nfrom apex_yolov5 import apex_model\nfrom apex_yolov5"
  },
  {
    "path": "apex_yolov5/window_layout/ai_toggle_layout.py",
    "chars": 3528,
    "preview": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QVBoxLayout, QLabel, QCheckBox, QHBoxLayout, QComboBox, QLineEdi"
  },
  {
    "path": "apex_yolov5/window_layout/anthropomorphic_config_layout.py",
    "chars": 8675,
    "preview": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIntValidator, QDoubleValidator\nfrom PyQt5.QtWidgets import QVBoxLay"
  },
  {
    "path": "apex_yolov5/window_layout/auto_charged_energy_layout.py",
    "chars": 3204,
    "preview": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QVBoxLayout, QLabel, QCheckBox, QHBoxLayout, QSlider, QLineEdit\n"
  },
  {
    "path": "apex_yolov5/window_layout/auto_gun_config_layout.py",
    "chars": 5024,
    "preview": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLabel, QListWidget, QLineEdit, QPushB"
  },
  {
    "path": "apex_yolov5/window_layout/auto_save_config_layout.py",
    "chars": 2037,
    "preview": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QVBoxLayout, QCheckBox, QLabel\n\n\nclass AutoSaveConfigLayout:\n   "
  },
  {
    "path": "apex_yolov5/window_layout/model_config_layout.py",
    "chars": 3772,
    "preview": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QVBoxLayout, QComboBox, QLabel, QHBoxLayout, QSlider\n\nfrom apex_"
  },
  {
    "path": "apex_yolov5/window_layout/mouse_config_layout.py",
    "chars": 26333,
    "preview": "from PyQt5.QtCore import Qt, QPoint\nfrom PyQt5.QtGui import QPixmap, QPainter\nfrom PyQt5.QtWidgets import QHBoxLayout, Q"
  },
  {
    "path": "apex_yolov5/window_layout/screenshot_area_layout.py",
    "chars": 22779,
    "preview": "from PyQt5.QtCore import Qt, QRectF\nfrom PyQt5.QtGui import QIntValidator, QColor\nfrom PyQt5.QtWidgets import QVBoxLayou"
  },
  {
    "path": "apex_yolov5/windows/DebugWindow.py",
    "chars": 3611,
    "preview": "import time\nimport traceback\n\nfrom PyQt5.QtCore import QPoint, QRect, QEvent, QThread, pyqtSignal, Qt\nfrom PyQt5.QtGui i"
  },
  {
    "path": "apex_yolov5/windows/DisclaimerWindow.py",
    "chars": 2065,
    "preview": "import sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QMessageBox, QVBoxLayout, QWidget, QCheckBox\n\n\nclass"
  },
  {
    "path": "apex_yolov5/windows/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "apex_yolov5/windows/aim_show_window.py",
    "chars": 2896,
    "preview": "from PyQt5.QtGui import QPainter, QPen, QColor, QPixmap\nfrom PyQt5.QtWidgets import QMainWindow\nfrom PyQt5.QtCore import"
  },
  {
    "path": "apex_yolov5/windows/circle_window.py",
    "chars": 2860,
    "preview": "from PyQt5.QtCore import Qt, QPoint\nfrom PyQt5.QtGui import QPainter, QPen\nfrom PyQt5.QtWidgets import QMainWindow\n\nfrom"
  },
  {
    "path": "apex_yolov5/windows/config_window.py",
    "chars": 12549,
    "preview": "import os\nimport threading\nimport time\n\nfrom PyQt5.QtCore import Qt, QEvent\nfrom PyQt5.QtWidgets import QMainWindow, QVB"
  },
  {
    "path": "apex_yolov5_main.py",
    "chars": 3934,
    "preview": "import time\nimport traceback\n\nimport cv2\nimport mss\nimport numpy as np\n\nfrom apex_yolov5 import global_img_info\nfrom ape"
  },
  {
    "path": "apex_yolov5_main_asyn.py",
    "chars": 4419,
    "preview": "import time\nimport traceback\n\nimport cv2\nimport mss\nimport numpy as np\n\nfrom apex_yolov5 import global_img_info\nfrom ape"
  },
  {
    "path": "benchmarks.py",
    "chars": 8054,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nRun YOLOv5 benchmarks on all supported export formats.\n\nFormat             "
  },
  {
    "path": "bez_test.py",
    "chars": 1089,
    "preview": "from celluloid import Camera  # 保存动图时用,pip install celluloid\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nP0 = np"
  },
  {
    "path": "check.py",
    "chars": 7870,
    "preview": "import hashlib\nimport os\nimport os\nimport shutil\n\n\n# 打印不在标注类别里的txt\ndef check_files(directory):\n    # 遍历指定目录\n    for file"
  },
  {
    "path": "classify/predict.py",
    "chars": 11987,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nRun YOLOv5 classification inference on images, videos, directories, globs, "
  },
  {
    "path": "classify/train.py",
    "chars": 16342,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nTrain a YOLOv5 classifier model on a classification dataset.\n\nUsage - Singl"
  },
  {
    "path": "classify/tutorial.ipynb",
    "chars": 103614,
    "preview": "{\n  \"cells\": [\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"t6MPjfT5NrKQ\"\n      },\n      \"sou"
  },
  {
    "path": "classify/val.py",
    "chars": 8089,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nValidate a trained YOLOv5 classification model on a classification dataset."
  },
  {
    "path": "client.py",
    "chars": 3305,
    "preview": "import pickle\nimport socket\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport mss\nfrom PyQt5.QtWidgets im"
  },
  {
    "path": "client.spec",
    "chars": 1387,
    "preview": "# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\npathex = [\n    'C:/Users/Administrator/PycharmProjects/yol"
  },
  {
    "path": "config/ref.txt",
    "chars": 13,
    "preview": "global_config"
  },
  {
    "path": "data/Argoverse.yaml",
    "chars": 2702,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengti"
  },
  {
    "path": "data/GlobalWheat2020.yaml",
    "chars": 1857,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saska"
  },
  {
    "path": "data/ImageNet.yaml",
    "chars": 18840,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford Universit"
  },
  {
    "path": "data/ImageNet10.yaml",
    "chars": 906,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford Universit"
  },
  {
    "path": "data/ImageNet100.yaml",
    "chars": 2623,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford Universit"
  },
  {
    "path": "data/ImageNet1000.yaml",
    "chars": 18842,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford Universit"
  },
  {
    "path": "data/Objects365.yaml",
    "chars": 9173,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Objects365 dataset https://www.objects365.org/ by Megvii\n# Example usage: pyt"
  },
  {
    "path": "data/SKU-110K.yaml",
    "chars": 2310,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Tra"
  },
  {
    "path": "data/VOC.yaml",
    "chars": 3466,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Ox"
  },
  {
    "path": "data/VisDrone.yaml",
    "chars": 2940,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tian"
  },
  {
    "path": "data/coco.yaml",
    "chars": 2464,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# COCO 2017 dataset http://cocodataset.org by Microsoft\n# Example usage: python"
  },
  {
    "path": "data/coco128-seg.yaml",
    "chars": 1837,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 ima"
  },
  {
    "path": "data/coco128.yaml",
    "chars": 1821,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images "
  },
  {
    "path": "data/hyps/hyp.Objects365.yaml",
    "chars": 668,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Hyperparameters for Objects365 training\n# python train.py --weights yolov5m.p"
  },
  {
    "path": "data/hyps/hyp.VOC.yaml",
    "chars": 1151,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Hyperparameters for VOC training\n# python train.py --batch 128 --weights yolo"
  },
  {
    "path": "data/hyps/hyp.no-augmentation.yaml",
    "chars": 1650,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Hyperparameters when using Albumentations frameworks\n# python train.py --hyp "
  },
  {
    "path": "data/hyps/hyp.scratch-high.yaml",
    "chars": 1650,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Hyperparameters for high-augmentation COCO training from scratch\n# python tra"
  },
  {
    "path": "data/hyps/hyp.scratch-low.yaml",
    "chars": 1658,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Hyperparameters for low-augmentation COCO training from scratch\n# python trai"
  },
  {
    "path": "data/hyps/hyp.scratch-med.yaml",
    "chars": 1652,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Hyperparameters for medium-augmentation COCO training from scratch\n# python t"
  },
  {
    "path": "data/scripts/download_weights.sh",
    "chars": 612,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download latest models from https://github.com/ultralytics/yol"
  },
  {
    "path": "data/scripts/get_coco.sh",
    "chars": 1544,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download COCO 2017 dataset http://cocodataset.org\n# Example us"
  },
  {
    "path": "data/scripts/get_coco128.sh",
    "chars": 596,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download COCO128 dataset https://www.kaggle.com/ultralytics/co"
  },
  {
    "path": "data/scripts/get_imagenet.sh",
    "chars": 1649,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# E"
  },
  {
    "path": "data/scripts/get_imagenet10.sh",
    "chars": 711,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# E"
  },
  {
    "path": "data/scripts/get_imagenet100.sh",
    "chars": 715,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# E"
  },
  {
    "path": "data/scripts/get_imagenet1000.sh",
    "chars": 719,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n# Download ILSVRC2012 ImageNet dataset https://image-net.org\n# E"
  },
  {
    "path": "data/xView.yaml",
    "chars": 5140,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National"
  },
  {
    "path": "detect.py",
    "chars": 16965,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nRun YOLOv5 detection inference on images, videos, directories, globs, YouTu"
  },
  {
    "path": "export.py",
    "chars": 42867,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nExport a YOLOv5 PyTorch model to other formats. TensorFlow exports authored"
  },
  {
    "path": "hubconf.py",
    "chars": 8758,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nPyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5\n\nUsage:\n    i"
  },
  {
    "path": "images/1920x1080/list.txt",
    "chars": 246,
    "preview": "3030.png\ncar.png\nEVA-8.png\nG7.png\nlstart.png\np2020.png\nR99.png\nR-301.png\nre-45.png\n三重.png\n专注.png\n充能步枪.png\n克雷贝尔.png\n和平捍卫者"
  },
  {
    "path": "images/1920x1200/list.txt",
    "chars": 246,
    "preview": "3030.jpg\ncar.jpg\nEVA-8.jpg\nG7.jpg\nlstart.jpg\np2020.jpg\nR99.jpg\nR-301.jpg\nre-45.jpg\n三重.jpg\n专注.jpg\n充能步枪.jpg\n克雷贝尔.jpg\n和平捍卫者"
  },
  {
    "path": "images/2048x1152/list.txt",
    "chars": 246,
    "preview": "3030.png\ncar.png\nEVA-8.png\nG7.png\nlstart.png\np2020.png\nR99.png\nR-301.png\nre-45.png\n三重.png\n专注.png\n充能步枪.png\n克雷贝尔.png\n和平捍卫者"
  },
  {
    "path": "images/2560x1440/list.txt",
    "chars": 246,
    "preview": "3030.png\ncar.png\nEVA-8.png\nG7.png\nlstart.png\np2020.png\nR99.png\nR-301.png\nre-45.png\n三重.png\n专注.png\n充能步枪.png\n克雷贝尔.png\n和平捍卫者"
  },
  {
    "path": "images/hop_up/1920x1080/list.txt",
    "chars": 16,
    "preview": "turbocharger.png"
  },
  {
    "path": "images/hop_up/2560x1440/list.txt",
    "chars": 16,
    "preview": "turbocharger.png"
  },
  {
    "path": "images/scope/1920x1080/list.txt",
    "chars": 111,
    "preview": "1x-2xVariableHolo.png\n1xClassic.png\n1xDigitalThreat.png\n1xHolo.png\n2xBruiser.png\n3xRanger.png\n4xVariableAOG.png"
  },
  {
    "path": "images/scope/2560x1440/list.txt",
    "chars": 111,
    "preview": "1x-2xVariableHolo.png\n1xClassic.png\n1xDigitalThreat.png\n1xHolo.png\n2xBruiser.png\n3xRanger.png\n4xVariableAOG.png"
  },
  {
    "path": "joy_test.py",
    "chars": 4090,
    "preview": "import pygame\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n\n# This is a simple class that will help "
  },
  {
    "path": "lg.py",
    "chars": 867,
    "preview": "from ctypes import CDLL\n\ngmok = False\ngm = None\n\n\n# try:\n#     gm = CDLL(r'./ghub_device1.dll')\n#     gmok = gm.device_o"
  },
  {
    "path": "main.py",
    "chars": 5198,
    "preview": "import sys\nimport threading\n\nimport pynput\nfrom PyQt5.QtWidgets import QApplication\n\nfrom apex_recoils.core.image_compar"
  },
  {
    "path": "models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "models/common.py",
    "chars": 50557,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"Common modules.\"\"\"\n\nimport ast\nimport contextlib\nimport json\nimport math\nimp"
  },
  {
    "path": "models/experimental.py",
    "chars": 5150,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"Experimental modules.\"\"\"\n\nimport math\n\nimport numpy as np\nimport torch\nimpor"
  },
  {
    "path": "models/hub/anchors.yaml",
    "chars": 3333,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n# Default anchors for COCO data\n\n# P5 -----------------------------------------"
  },
  {
    "path": "models/hub/yolov3-spp.yaml",
    "chars": 1586,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov3-tiny.yaml",
    "chars": 1243,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov3.yaml",
    "chars": 1577,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5-bifpn.yaml",
    "chars": 1437,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5-fpn.yaml",
    "chars": 1226,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5-p2.yaml",
    "chars": 1697,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5-p34.yaml",
    "chars": 1237,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/hub/yolov5-p6.yaml",
    "chars": 1752,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5-p7.yaml",
    "chars": 2137,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5-panet.yaml",
    "chars": 1421,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5l6.yaml",
    "chars": 1832,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/hub/yolov5m6.yaml",
    "chars": 1834,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.67 # model depth mul"
  },
  {
    "path": "models/hub/yolov5n6.yaml",
    "chars": 1834,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/hub/yolov5s-LeakyReLU.yaml",
    "chars": 1510,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\nactivation: nn.LeakyReLU(0.1) # <-----"
  },
  {
    "path": "models/hub/yolov5s-ghost.yaml",
    "chars": 1497,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/hub/yolov5s-transformer.yaml",
    "chars": 1454,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/hub/yolov5s6.yaml",
    "chars": 1834,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/hub/yolov5x6.yaml",
    "chars": 1834,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.33 # model depth mul"
  },
  {
    "path": "models/mydata.yaml",
    "chars": 220,
    "preview": "train: F:/GameHelper2/yolov5/data/mydata/images/train\nval: F:/GameHelper2/yolov5/data/mydata/images/val\n\n# Classes\nnc: 1"
  },
  {
    "path": "models/segment/yolov5l-seg.yaml",
    "chars": 1425,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/segment/yolov5m-seg.yaml",
    "chars": 1427,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.67 # model depth mul"
  },
  {
    "path": "models/segment/yolov5n-seg.yaml",
    "chars": 1427,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/segment/yolov5s-seg.yaml",
    "chars": 1426,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/segment/yolov5x-seg.yaml",
    "chars": 1427,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.33 # model depth mul"
  },
  {
    "path": "models/tf.py",
    "chars": 31980,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nTensorFlow, Keras and TFLite versions of YOLOv5\nAuthored by https://github."
  },
  {
    "path": "models/yolo.py",
    "chars": 20528,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nYOLO-specific modules.\n\nUsage:\n    $ python models/yolo.py --cfg yolov5s.ya"
  },
  {
    "path": "models/yolov5l.yaml",
    "chars": 1415,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.0 # model depth mult"
  },
  {
    "path": "models/yolov5m.yaml",
    "chars": 1417,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.67 # model depth mul"
  },
  {
    "path": "models/yolov5n.yaml",
    "chars": 1417,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/yolov5s.yaml",
    "chars": 1417,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 0.33 # model depth mul"
  },
  {
    "path": "models/yolov5x.yaml",
    "chars": 1417,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Parameters\nnc: 80 # number of classes\ndepth_multiple: 1.33 # model depth mul"
  },
  {
    "path": "pyproject.toml",
    "chars": 5375,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\n# Overview:\n# This pyproject.toml file manages the build, packaging, and distr"
  },
  {
    "path": "requirements.txt",
    "chars": 1780,
    "preview": "# YOLOv5 requirements\n# Usage: pip install -r requirements.txt\n\n# Base -------------------------------------------------"
  },
  {
    "path": "segment/predict.py",
    "chars": 16159,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nRun YOLOv5 segmentation inference on images, videos, directories, streams, "
  },
  {
    "path": "segment/train.py",
    "chars": 35059,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nTrain a YOLOv5 segment model on a segment dataset Models and datasets downl"
  },
  {
    "path": "segment/tutorial.ipynb",
    "chars": 43396,
    "preview": "{\n  \"cells\": [\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"t6MPjfT5NrKQ\"\n      },\n      \"sou"
  },
  {
    "path": "segment/val.py",
    "chars": 24125,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nValidate a trained YOLOv5 segment model on a segment dataset.\n\nUsage:\n    $"
  },
  {
    "path": "server.py",
    "chars": 6538,
    "preview": "import pickle\nimport socket\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport cv2\nimport numpy as np\nimpo"
  },
  {
    "path": "server.spec",
    "chars": 1423,
    "preview": "# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\npathex = [\n    'C:/Users/Administrator/PycharmProjects/yol"
  },
  {
    "path": "setenv.py",
    "chars": 134,
    "preview": "import os\n\nprint(\"配置cuda环境变量:CUDA_MODULE_LOADING=LAZY\")\nos.environ[\"CUDA_MODULE_LOADING\"] = \"LAZY\"\nos.environ[\"VALIDATE_"
  },
  {
    "path": "setup.py",
    "chars": 175,
    "preview": "from distutils.core import setup\nfrom Cython.Build import cythonize\n\nsetup(\n    name='yolov5 app',\n    ext_modules=cytho"
  },
  {
    "path": "setup_check.py",
    "chars": 173,
    "preview": "from distutils.core import setup\nfrom Cython.Build import cythonize\n\nsetup(\n    name='check_run',\n    ext_modules=cython"
  },
  {
    "path": "train.py",
    "chars": 39772,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"\nTrain a YOLOv5 model on a custom dataset. Models and datasets download auto"
  },
  {
    "path": "trt.spec",
    "chars": 1178,
    "preview": "# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\npathex = [\n    'C:/Users/Administrator/PycharmProjects/yol"
  },
  {
    "path": "tutorial.ipynb",
    "chars": 41311,
    "preview": "{\n  \"nbformat\": 4,\n  \"nbformat_minor\": 0,\n  \"metadata\": {\n    \"colab\": {\n      \"name\": \"YOLOv5 Tutorial\",\n      \"provena"
  },
  {
    "path": "utils/__init__.py",
    "chars": 3117,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"utils/initialization.\"\"\"\n\nimport contextlib\nimport platform\nimport threading"
  },
  {
    "path": "utils/activations.py",
    "chars": 4603,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"Activation functions.\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn"
  },
  {
    "path": "utils/augmentations.py",
    "chars": 18633,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"Image augmentation functions.\"\"\"\n\nimport math\nimport random\n\nimport cv2\nimpo"
  },
  {
    "path": "utils/autoanchor.py",
    "chars": 7436,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"AutoAnchor utils.\"\"\"\n\nimport random\n\nimport numpy as np\nimport torch\nimport "
  },
  {
    "path": "utils/autobatch.py",
    "chars": 3029,
    "preview": "# Ultralytics YOLOv5 🚀, AGPL-3.0 license\n\"\"\"Auto-batch utils.\"\"\"\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport t"
  }
]

// ... and 44 more files (download for full content)
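
Each manifest entry above follows the same small schema: a "path" relative to the repository root, a "chars" count for the file's size in characters, and a short "preview" of its opening text. A minimal Python sketch for querying the manifest follows; it assumes the JSON array has been saved to a local file (the name "manifest.json" is hypothetical) and that the trailing "// ..." comment line has been stripped, since JSON itself does not allow comments.

import json

# Load the saved manifest; each element is a dict with
# "path", "chars", and "preview" keys, as listed above.
with open("manifest.json", encoding="utf-8") as f:
    entries = json.load(f)

# Ten largest files by character count.
for entry in sorted(entries, key=lambda e: e["chars"], reverse=True)[:10]:
    print(f'{entry["chars"]:>8}  {entry["path"]}')

# Total size of the listed files.
total = sum(e["chars"] for e in entries)
print(f"{total:,} characters across {len(entries)} files")

Sorting by "chars" is a quick way to find the files that dominate the token budget (here, models/common.py and segment/train.py stand out) before deciding what to feed to a model.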

About this extraction

This page contains the full source code of the wdragondragon/apex-yolov5 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 244 files (1.5 MB), approximately 434.8k tokens, and a symbol index with 1282 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
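
At roughly 434.8k tokens, the full extraction exceeds most models' context windows, so it is worth measuring and chunking the text before pasting it in. The sketch below uses tiktoken's cl100k_base encoding as an approximation (the exact count depends on the target model's tokenizer), and "apex-yolov5.txt" is a hypothetical name for the downloaded file.

import tiktoken

# Approximate the token count with a general-purpose encoding.
enc = tiktoken.get_encoding("cl100k_base")
with open("apex-yolov5.txt", encoding="utf-8") as f:
    text = f.read()

tokens = enc.encode(text)
print(f"{len(tokens):,} tokens (cl100k_base approximation)")

# Split into fixed-size chunks that fit a hypothetical 128k-token window,
# leaving headroom for the prompt and the model's reply.
budget = 100_000
chunks = [enc.decode(tokens[i:i + budget])
          for i in range(0, len(tokens), budget)]
print(f"{len(chunks)} chunks of at most {budget:,} tokens each")

Fixed-size chunking is the simplest approach; splitting on the per-file boundaries from the manifest instead would keep each source file intact within a chunk.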

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.
