main 2d1f448d75da cached
118 files
95.5 MB
1.1M tokens
634 symbols
1 requests
Download .txt
Showing preview only (4,490K chars total). Download the full file or copy to clipboard to get everything.
Repository: Ai-trainee/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI
Branch: main
Commit: 2d1f448d75da
Files: 118
Total size: 95.5 MB

Directory structure:
gitextract_f02wz10a/

├── .github/
│   └── workflows/
│       ├── jekyll-gh-pages.yml
│       └── main.yml
├── .idea/
│   ├── Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.iml
│   ├── inspectionProfiles/
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── vcs.xml
│   └── workspace.xml
├── MouseLabel.py
├── README.md
├── apprcc.qrc
├── apprcc_rc.py
├── config/
│   ├── fold.json
│   ├── ip.json
│   └── setting.json
├── data/
│   ├── doc/
│   │   ├── LICENSE
│   │   ├── README_Parameter adjustment.md
│   │   └── README_cn.md
│   ├── regn_mysql.sql
│   ├── run/
│   │   └── exp52/
│   │       ├── hyp.yaml
│   │       └── opt.yaml
│   └── scripts/
│       ├── download_weights.sh
│       ├── get_coco.sh
│       └── get_coco128.sh
├── detect.py
├── dialog/
│   ├── rtsp_dialog.py
│   ├── rtsp_dialog.ui
│   └── rtsp_win.py
├── hubconf.py
├── login_lj.py
├── main.py
├── main_win/
│   ├── login.py
│   ├── login.ui
│   ├── win.py
│   └── win.ui
├── models/
│   ├── __init__.py
│   ├── common.py
│   ├── experimental.py
│   ├── hub/
│   │   ├── anchors.yaml
│   │   ├── yolov3-spp.yaml
│   │   ├── yolov3-tiny.yaml
│   │   ├── yolov3.yaml
│   │   ├── yolov5-bifpn.yaml
│   │   ├── yolov5-fpn.yaml
│   │   ├── yolov5-p2.yaml
│   │   ├── yolov5-p34.yaml
│   │   ├── yolov5-p6.yaml
│   │   ├── yolov5-p7.yaml
│   │   ├── yolov5-panet.yaml
│   │   ├── yolov5l6.yaml
│   │   ├── yolov5m6.yaml
│   │   ├── yolov5n6.yaml
│   │   ├── yolov5s-ghost.yaml
│   │   ├── yolov5s-transformer.yaml
│   │   ├── yolov5s6.yaml
│   │   └── yolov5x6.yaml
│   ├── tf.py
│   └── yolo.py
├── pt/
│   ├── best.engine
│   ├── best.onnx
│   ├── best.pt
│   └── yolov5s.pt
├── rc_apprcc.py
├── requirements.txt
├── setup-database.bat
└── utils/
    ├── CustomMessageBox.py
    ├── __init__.py
    ├── activations.py
    ├── augmentations.py
    ├── autoanchor.py
    ├── autobatch.py
    ├── aws/
    │   ├── __init__.py
    │   ├── mime.sh
    │   ├── resume.py
    │   └── userdata.sh
    ├── benchmarks.py
    ├── cal_fps.py
    ├── callbacks.py
    ├── capnums.py
    ├── datasets.py
    ├── downloads.py
    ├── flask_rest_api/
    │   ├── README.md
    │   ├── example_request.py
    │   └── restapi.py
    ├── general.py
    ├── google_app_engine/
    │   ├── Dockerfile
    │   ├── additional_requirements.txt
    │   └── app.yaml
    ├── google_utils.py
    ├── loggers/
    │   ├── __init__.py
    │   └── wandb/
    │       ├── README.md
    │       ├── __init__.py
    │       ├── log_dataset.py
    │       ├── sweep.py
    │       ├── sweep.yaml
    │       └── wandb_utils.py
    ├── loss.py
    ├── metrics.py
    ├── plots.py
    ├── torch_utils.py
    ├── tt100k_to_voc-main/
    │   ├── 1.py
    │   ├── 1_build_voc_dir.py
    │   ├── 2_json2xml.py
    │   ├── 3_delete_jpg_and_xml.py
    │   ├── 4_spilt_data.py
    │   ├── 5_label.py
    │   ├── Not_TT45_list_train.txt
    │   ├── Not_TT45_list_val.txt
    │   ├── README.md
    │   ├── TT100K_VOC_classes.json
    │   ├── __init__.py
    │   ├── annotations_all.json
    │   └── 新建 Internet 快捷方式.url
    └── wandb_logging/
        ├── __init__.py
        ├── log_dataset.py
        ├── sweep.py
        ├── sweep.yaml
        └── wandb_utils.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/workflows/jekyll-gh-pages.yml
================================================
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll with GitHub Pages dependencies preinstalled

on:
  # Runs on pushes targeting the default branch
  push:
    branches: ["main"]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  # Build job: renders the repository root with Jekyll into ./_site
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Setup Pages
        uses: actions/configure-pages@v3
      - name: Build with Jekyll
        uses: actions/jekyll-build-pages@v1
        with:
          source: ./
          destination: ./_site
      - name: Upload artifact
        # No `path` given, so the action uploads its default directory (_site),
        # matching the Jekyll build destination above.
        uses: actions/upload-pages-artifact@v1

  # Deployment job: publishes the artifact produced by `build` to GitHub Pages
  deploy:
    environment:
      name: github-pages
      # Exposes the deployed site URL in the Actions UI
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v2


================================================
FILE: .github/workflows/main.yml
================================================
name: Sync with Gitee

on:
  schedule:
    - cron: '0 0 * * *' # Runs daily at midnight (UTC)
  # Allow manual triggering from the Actions tab
  workflow_dispatch:

jobs:
  sync:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout repository
      uses: actions/checkout@v2
      with:
        # Full history is required: force-pushing from the default shallow
        # clone (fetch-depth 1) fails when mirroring to another remote.
        fetch-depth: 0

    - name: Sync with Gitee
      # SECURITY FIX: the Gitee access token was previously hard-coded in
      # plain text inside the remote URL, exposing it to anyone who can read
      # this repository. Store the token as the repository secret
      # GITEE_TOKEN (Settings -> Secrets and variables -> Actions) instead.
      run: |
        git remote add gitee "https://AItrainee:${{ secrets.GITEE_TOKEN }}@gitee.com/AItrainee/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.git"
        git push --force gitee main


================================================
FILE: .idea/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.iml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="Python 3.9" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>

================================================
FILE: .idea/inspectionProfiles/profiles_settings.xml
================================================
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

================================================
FILE: .idea/misc.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="Python 3.9" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
</project>

================================================
FILE: .idea/modules.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.iml" filepath="$PROJECT_DIR$/.idea/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.iml" />
    </modules>
  </component>
</project>

================================================
FILE: .idea/vcs.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="" vcs="Git" />
  </component>
</project>

================================================
FILE: .idea/workspace.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="AutoImportSettings">
    <option name="autoReloadType" value="SELECTIVE" />
  </component>
  <component name="ChangeListManager">
    <list default="true" id="f03b60a6-ddde-4a79-8cf9-7033249259c2" name="更改" comment="--">
      <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/README.md" beforeDir="false" afterPath="$PROJECT_DIR$/README.md" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/data/doc/README_cn.md" beforeDir="false" afterPath="$PROJECT_DIR$/data/doc/README_cn.md" afterDir="false" />
    </list>
    <option name="SHOW_DIALOG" value="false" />
    <option name="HIGHLIGHT_CONFLICTS" value="true" />
    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
    <option name="LAST_RESOLUTION" value="IGNORE" />
  </component>
  <component name="FileTemplateManagerImpl">
    <option name="RECENT_TEMPLATES">
      <list>
        <option value="Python Script" />
      </list>
    </option>
  </component>
  <component name="Git.Settings">
    <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
  </component>
  <component name="MarkdownSettingsMigration">
    <option name="stateVersion" value="1" />
  </component>
  <component name="ProjectColorInfo">{
  &quot;associatedIndex&quot;: 8
}</component>
  <component name="ProjectId" id="2WD2F48ZVWiZitToGltG3S1bWuI" />
  <component name="ProjectLevelVcsManager">
    <ConfirmationsSetting value="2" id="Add" />
  </component>
  <component name="ProjectViewState">
    <option name="hideEmptyMiddlePackages" value="true" />
    <option name="showLibraryContents" value="true" />
  </component>
  <component name="PropertiesComponent">{
  &quot;keyToString&quot;: {
    &quot;ASKED_ADD_EXTERNAL_FILES&quot;: &quot;true&quot;,
    &quot;Python.main.executor&quot;: &quot;Run&quot;,
    &quot;RunOnceActivity.OpenProjectViewOnStart&quot;: &quot;true&quot;,
    &quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
    &quot;SHARE_PROJECT_CONFIGURATION_FILES&quot;: &quot;true&quot;,
    &quot;WebServerToolWindowFactoryState&quot;: &quot;false&quot;,
    &quot;git-widget-placeholder&quot;: &quot;main&quot;,
    &quot;ignore.virus.scanning.warn.message&quot;: &quot;true&quot;,
    &quot;last_opened_file_path&quot;: &quot;D:/Desktop/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI/data/doc&quot;,
    &quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
    &quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
    &quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
    &quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
    &quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
    &quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
  }
}</component>
  <component name="RecentsManager">
    <key name="CopyFile.RECENT_KEYS">
      <recent name="D:\Desktop\Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI\data\doc" />
    </key>
    <key name="MoveFile.RECENT_KEYS">
      <recent name="D:\Videos\Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI\data" />
      <recent name="D:\Desktop\Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI\data" />
      <recent name="D:\Desktop\Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI\doc" />
      <recent name="D:\Desktop\Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI\utils" />
    </key>
  </component>
  <component name="RunManager">
    <configuration name="main" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
      <module name="Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI" />
      <option name="ENV_FILES" value="" />
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
      <option name="IS_MODULE_SDK" value="true" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/main.py" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <option name="EMULATE_TERMINAL" value="false" />
      <option name="MODULE_MODE" value="false" />
      <option name="REDIRECT_INPUT" value="false" />
      <option name="INPUT_FILE" value="" />
      <method v="2" />
    </configuration>
    <recent_temporary>
      <list>
        <item itemvalue="Python.main" />
      </list>
    </recent_temporary>
  </component>
  <component name="SharedIndexes">
    <attachedChunks>
      <set>
        <option value="bundled-python-sdk-50da183f06c8-2887949eec09-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-233.13135.95" />
      </set>
    </attachedChunks>
  </component>
  <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
  <component name="TaskManager">
    <task active="true" id="Default" summary="默认任务">
      <changelist id="f03b60a6-ddde-4a79-8cf9-7033249259c2" name="更改" comment="" />
      <created>1696252727514</created>
      <option name="number" value="Default" />
      <option name="presentableId" value="Default" />
      <updated>1696252727514</updated>
      <workItem from="1696252728043" duration="2530000" />
      <workItem from="1696261111531" duration="868000" />
      <workItem from="1696262025621" duration="1703000" />
      <workItem from="1704811391519" duration="74000" />
      <workItem from="1704811482802" duration="3115000" />
      <workItem from="1704863966587" duration="2899000" />
      <workItem from="1704869713795" duration="74000" />
      <workItem from="1704871606268" duration="662000" />
      <workItem from="1704890086406" duration="1606000" />
      <workItem from="1704894596663" duration="163000" />
      <workItem from="1704901928535" duration="1361000" />
      <workItem from="1704903395969" duration="611000" />
      <workItem from="1704941012512" duration="3228000" />
    </task>
    <task id="LOCAL-00001" summary="调整的项目结构,修改了README,完善了数据库链接">
      <option name="closed" value="true" />
      <created>1704814255043</created>
      <option name="number" value="00001" />
      <option name="presentableId" value="LOCAL-00001" />
      <option name="project" value="LOCAL" />
      <updated>1704814255043</updated>
    </task>
    <task id="LOCAL-00002" summary="--">
      <option name="closed" value="true" />
      <created>1704814625700</created>
      <option name="number" value="00002" />
      <option name="presentableId" value="LOCAL-00002" />
      <option name="project" value="LOCAL" />
      <updated>1704814625701</updated>
    </task>
    <task id="LOCAL-00003" summary="--">
      <option name="closed" value="true" />
      <created>1704864957385</created>
      <option name="number" value="00003" />
      <option name="presentableId" value="LOCAL-00003" />
      <option name="project" value="LOCAL" />
      <updated>1704864957385</updated>
    </task>
    <task id="LOCAL-00004" summary="--">
      <option name="closed" value="true" />
      <created>1704868411937</created>
      <option name="number" value="00004" />
      <option name="presentableId" value="LOCAL-00004" />
      <option name="project" value="LOCAL" />
      <updated>1704868411937</updated>
    </task>
    <task id="LOCAL-00005" summary="--">
      <option name="closed" value="true" />
      <created>1704871865860</created>
      <option name="number" value="00005" />
      <option name="presentableId" value="LOCAL-00005" />
      <option name="project" value="LOCAL" />
      <updated>1704871865860</updated>
    </task>
    <task id="LOCAL-00006" summary="--">
      <option name="closed" value="true" />
      <created>1704871909975</created>
      <option name="number" value="00006" />
      <option name="presentableId" value="LOCAL-00006" />
      <option name="project" value="LOCAL" />
      <updated>1704871909975</updated>
    </task>
    <task id="LOCAL-00007" summary="--">
      <option name="closed" value="true" />
      <created>1704872270968</created>
      <option name="number" value="00007" />
      <option name="presentableId" value="LOCAL-00007" />
      <option name="project" value="LOCAL" />
      <updated>1704872270968</updated>
    </task>
    <task id="LOCAL-00008" summary="--">
      <option name="closed" value="true" />
      <created>1704903333985</created>
      <option name="number" value="00008" />
      <option name="presentableId" value="LOCAL-00008" />
      <option name="project" value="LOCAL" />
      <updated>1704903333985</updated>
    </task>
    <task id="LOCAL-00009" summary="--">
      <option name="closed" value="true" />
      <created>1704903407211</created>
      <option name="number" value="00009" />
      <option name="presentableId" value="LOCAL-00009" />
      <option name="project" value="LOCAL" />
      <updated>1704903407211</updated>
    </task>
    <option name="localTasksCounter" value="10" />
    <servers />
  </component>
  <component name="TypeScriptGeneratedFilesManager">
    <option name="version" value="3" />
  </component>
  <component name="Vcs.Log.Tabs.Properties">
    <option name="TAB_STATES">
      <map>
        <entry key="MAIN">
          <value>
            <State />
          </value>
        </entry>
      </map>
    </option>
  </component>
  <component name="VcsManagerConfiguration">
    <option name="ADD_EXTERNAL_FILES_SILENTLY" value="true" />
    <MESSAGE value="调整的项目结构,修改了README,完善了数据库链接" />
    <MESSAGE value="--" />
    <option name="LAST_COMMIT_MESSAGE" value="--" />
  </component>
  <component name="com.intellij.coverage.CoverageDataManagerImpl">
    <SUITE FILE_PATH="coverage/Traffic_Sign_Recognition_PyQt5_YOLOv5_GUI$main.coverage" NAME="main 覆盖结果" MODIFIED="1704894741551" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  </component>
</project>

================================================
FILE: MouseLabel.py
================================================
from PyQt5.QtWidgets import QLabel
from PyQt5.QtCore import pyqtSignal


class LabelMouse(QLabel):
    """QLabel subclass that emits ``double_clicked`` when double-clicked.

    Connect a slot to :attr:`double_clicked` to react to the user
    double-clicking the label (e.g. to toggle an enlarged view).
    """

    # Emitted once for every mouse double-click on the label.
    double_clicked = pyqtSignal()

    def mouseDoubleClickEvent(self, event):
        """Re-emit any double-click as the custom ``double_clicked`` signal."""
        self.double_clicked.emit()

    def mouseMoveEvent(self, event):
        """Triggered when the mouse moves over the label.

        Bug fix: the override previously omitted the required ``event``
        parameter. Qt always invokes this handler with a QMouseEvent
        argument, so every mouse move over the widget raised a TypeError.
        """
        print('当鼠标划过标签label2时触发事件')


class Label_click_Mouse(QLabel):
    """QLabel subclass that emits ``clicked`` on any mouse press.

    Makes a plain label behave like a lightweight button: connect a slot
    to :attr:`clicked` to handle the press.
    """

    # Emitted once for every mouse-button press on the label.
    clicked = pyqtSignal()

    def mousePressEvent(self, event):
        """Re-emit any mouse press as the custom ``clicked`` signal."""
        self.clicked.emit()

================================================
FILE: README.md
================================================
<h1 align="center">Road Sign Recognition Project Based on YOLOv5 (YOLOv5 GUI)</h1>

<p align="center">
  <a href="README.md">English</a> |
  <a href="data/doc/README_cn.md">简体中文</a>
</p>

<p align="center">
  <a href="data/doc/README_Parameter adjustment.md">训练策略</a>
</p>


This system is a road sign recognition application leveraging YOLOv5🚀 😊. It employs a MySQL database 💽, PyQt5 for the interface design 🎨, PyTorch deep learning framework⚡. Additionally, it incorporates CSS styles 🌈.

The system comprises five key modules:

1. System Login Module 🔑: Responsible for user authentication.
2. Initialization Parameter Module 📋: Provides settings for initializing YOLOv5 model parameters.
3. Sign Recognition Module 🔍: The core functionality responsible for recognizing road signs and updating the database with the results.
4. Database Module 💾: Consists of two sub-modules - basic database operations and data analysis.
5. Image Processing Module 🖼️: Handles the processing of individual images and associated data.

The entire system is designed to support various data input methods and model switching. Additionally, it offers image enhancement techniques such as mosaic and mixup 📈.

![00013.jpg](data/doc/maxre1sdefault.jpg)
## Screenshots

* ### Sign Recognition Module
`The three checkboxes in the lower left corner are: save results, enable database entry, and model visual analysis.`

  ![img.png](data/doc/img.png)
* ### Image Processing and Data Augmentation Module

`The right column performs batch image data augmentation with custom parameters (applying the checked augmentation methods, each with a given probability, to all images in a folder).`

![img_1.png](data/doc/img_1.png)
* ### Parameter Initialization Module
`Model basic parameters Select Configure`

  ![img_2.png](data/doc/img_2.png)
* ### Database Module
  ![img_3.png](data/doc/img_3.png)
* ### Data Analysis Module
  ![img_4.png](data/doc/img_4.png)
* ### Login Interface
  ![img_5.png](data/doc/img_5.png)
  






### Video Demo

[YOUTUBE DEMO: Road Sign Recognition System Based on YOLOV5](https://youtu.be/qoHaXvp_Gxk?si=xIAm1UXCLTjR8kUD)
[BiliBili Demo: Road Sign Recognition System Based on YOLOV5](https://www.bilibili.com/video/BV1Ck4y1Y7Bk/?spm_id_from=333.999.0.0&vd_source=40d9cda43378fbc89cd5184e09bf1272)

### Install Dependencies

To install the required dependencies, run:

```bash
pip install -r requirements.txt
```
---
## **Quick Start**

### 1. **Setting Up the Database**

To run the application, you need to set up your MySQL database. Follow these steps to prepare your database:

- **Automatic Database Creation (Optional)**:
    - If you prefer an automated setup, a batch script is provided. Run the **`setup-database.bat`** script to create the database. This requires MySQL to be installed and configured on your system.
- **Manual Database Creation**:
    - Alternatively, you can manually create the database in MySQL. Import and execute the **`data/regn_mysql.sql`** file in your MySQL environment to set up the necessary database and tables.

### 2. **Configuring Database Connection in Code**
After setting up the database, update the connection settings in the code to match the authentication information for your local database (these four variables are at the beginning of the code, around line 59, as follows). P.S. These credentials are used twice in the code (around lines 111 and 1783).
```python
# Database connection settings as global variables
DB_HOST = 'localhost'    # Database host
DB_USER = 'root'         # Database user
DB_PASSWORD = '1234'     # Database password
DB_NAME = 'traffic_sign_recognition'  # Database name
```

### **Note on Cryptography Package**

If you encounter a **`RuntimeError: 'cryptography' package is required for sha256_password or caching_sha2_password auth methods`**, 
This is because the database authentication has gone wrong and the database needs to be properly created and the password entered.
This error will also be reported if you do not have the mysql service started locally, so make sure your mysql service is started.

### 3. Run `main.py`.

### 4. Enter your account and password to log in

Here are the default login credentials:

| Username | Password |
|----------|----------|
| admin    | 123456   |
| 1        | 2        |

Or modify the main function in main.py: remove the logon logic to enter the system directly without authentication.

---
## Project Structure

- `pt` folder: Contains the YOLOv5 model file `best.pt` for road sign recognition.
- `main_win` folder: Contains `login.py` for the login UI and `win.py` for the main UI.
- `dialog` folder: Contains the RTSP pop-up interface.
- `apprcc_rc.py`: The resource file for the project.
- `login_lj.py`: Implements the login logic for the UI.
- `data/run/exp52`: The YOLOv5 road sign recognition model trained for 300 epochs.
- `utils/tt100k_to_voc-main` folder: Tool for converting JSON annotations to YOLO format.
- `result`: Folder to save inference results.
- `run`: Folder to save training logs and outputs.
- Dataset: Download from [TT100k : Traffic-Sign Detection and Classification in the Wild](https://cg.cs.tsinghua.edu.cn/traffic-sign/).
- Database files: Located in the `data` folder, see `regn_mysql.sql` for setup.

> Since this project was done while I was learning YOLOv5 (quite a while ago), the main logic is concentrated in the main.py file. In other words, I didn't modularize different functions, and I didn't have a clear division of module structure. Now I want to divide it into modules, but I'm too lazy,  ha ha :smile:. If you're interested, you can modularize it so it's clearer.
## Acknowledgements

- For converting the TT100K dataset to VOC format and selecting more than 100 images and XMLs for each category, see this [CSDN blog post](https://blog.csdn.net/Hankerchen/article/details/120727299?spm=1001.2014.3001.5502).
- The PyQt5-YOLOv5 integration was inspired by this [GitHub repository](https://github.com/Javacr/PyQt5-YOLOv5).

## Contact Us
> WeChat:AIGCSD
> 
> email: 2545197649@qq.com
> 
> Official Accounts:AI进修生

## Star History

Track the GitHub star history of this project:

![Star History Chart](https://api.star-history.com/svg?repos=Ai-trainee/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI&type=Date)



================================================
FILE: apprcc.qrc
================================================
<RCC>
  <qresource prefix="img">
    <file>icon/yjtp-modified.png</file>
    <file>icon/yjtp.png</file>
    <file>C:/Users/25451/Pictures/头像壁纸/QQ图片20210731101509.jpg</file>
    <file>icon/img.png</file>
    <file>icon/3.1关注-选中 (1).png</file>
    <file>icon/3.1关注-选中.png</file>
    <file>icon/3.1关注-选中.png</file>
    <file>icon/结束 (1).png</file>
    <file>icon/预警信息 (1).png</file>
    <file>icon/wnt_预警报警.png</file>
    <file>icon/预警.png</file>
    <file>icon/29道路.png</file>
    <file>icon/道路运输经营许可证.png</file>
    <file>icon/6-医疗-神经网络 (2).png</file>
    <file>icon/结束.png</file>
    <file>icon/播放 (1).png</file>
    <file>icon/最小化 (3).png</file>
    <file>icon/关闭 (4).png</file>
    <file>icon/最小化 (2).png</file>
    <file>icon/取消全屏_o.png</file>
    <file>icon/全屏 (1).png</file>
    <file>icon/关闭 (2).png</file>
    <file>icon/全屏.png</file>
    <file>icon/最小化 (1).png</file>
    <file>icon/关闭 (1).png</file>
    <file>icon/24gl-fullScreenEnter2.png</file>
    <file>icon/链接.png</file>
    <file>icon/摄像头 (4).png</file>
    <file>icon/文件夹-打开-没文件.png</file>
    <file>icon/摄像头 (3).png</file>
    <file>icon/摄像头 (2).png</file>
    <file>icon/摄像头 (1).png</file>
    <file>icon/摄像头.png</file>
    <file>icon/folder-open-line.png</file>
    <file>icon/6-医疗-神经网络 (1).png</file>
    <file>icon/保存硬盘_save-one.png</file>
    <file>icon/监控摄像头_surveillance-cameras-two.png</file>
    <file>icon/摄像头_camera-five.png</file>
    <file>icon/摄像头_camera-one.png</file>
    <file>icon/扫码识别.png</file>
    <file>icon/圆形选中.png</file>
    <file>icon/圆点.png</file>
    <file>icon/选择.png</file>
    <file>icon/播放.png</file>
    <file>icon/人工智能机器人*.png</file>
    <file>icon/button-off.png</file>
    <file>icon/button-on.png</file>
    <file>icon/暂停.png</file>
    <file>icon/笑脸.png</file>
    <file>icon/终止.png</file>
    <file>icon/下拉_白色.png</file>
    <file>icon/正方形.png</file>
    <file>icon/实时视频流解析.png</file>
    <file>icon/运行.png</file>
    <file>icon/conan.png</file>
    <file>icon/还原.png</file>
    <file>icon/doctor.png</file>
    <file>icon/圆.png</file>
    <file>icon/evil.png</file>
    <file>icon/关闭.png</file>
    <file>icon/箭头_列表收起.png</file>
    <file>icon/箭头_列表展开.png</file>
    <file>icon/最小化.png</file>
    <file>icon/background.jpg</file>
    <file>icon/背景.png</file>
    <file>icon/打开.png</file>
    <file>icon/摄像头关.png</file>
    <file>icon/摄像头开.png</file>
    <file>icon/数据探索.png</file>
    <file>icon/停止.png</file>
    <file>icon/图片1.png</file>
    <file>icon/赞停.png</file>
  </qresource>
</RCC>


================================================
FILE: apprcc_rc.py
================================================
[File too large to display: 19.3 MB]

================================================
FILE: config/fold.json
================================================
{
  "open_fold": "D:/Videos/OBS_Video"
}

================================================
FILE: config/ip.json
================================================
{
  "ip": "rtsp://admin:admin888@192.168.1.67:555"
}


================================================
FILE: config/setting.json
================================================
{
  "iou": 0.41,
  "conf": 0.46,
  "rate": 1,
  "check": 0,
  "savecheck": 0
}

================================================
FILE: data/doc/LICENSE
================================================
GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

================================================
FILE: data/doc/README_Parameter adjustment.md
================================================
由于一些原因,后期再上传,同时包括(readme详细操作)


================================================
FILE: data/doc/README_cn.md
================================================
<h1 align="center">基于YOLOv5的道路标志识别项目(yolov5界面GUI)</h1>
<p align="center">
  <a href="../../README.md">English</a> |
  <a href="README_cn.md">简体中文</a>
</p>


这是一个关于yolov5的道路标志识别项目,使用Pyqt5开发界面,Yolov5训练模型,数据库Mysql,包含五个模块:初始化参数、标志识别、数据库、数据分析和图像处理。

## 软件截图

* ### 标志识别模块
`左下角三个勾选框分别是结果保存、启动数据库录入、以及模型可视化分析`

  ![img.png](img.png)

  
  
* ### 图像处理与数据增强模块
`右侧栏是自定义参数的批量图像数据增强(按一定概率对一个文件夹所有图片使用勾选的数据增强方法)`

  ![img_1.png](img_1.png)

* ### 初始化参数模块
`  模型基本参数勾选配置`

  ![img_2.png](img_2.png)


* ### 数据库模块
  ![img_3.png](img_3.png)
  
 
* ### 数据分析模块
  ![img_4.png](img_4.png)
* ### 登录界面
  ![img_5.png](img_5.png)

### 演示视频

[基于YOLOV5的道路标志识别系统](https://www.bilibili.com/video/BV1Ck4y1Y7Bk/?spm_id_from=333.999.0.0&vd_source=40d9cda43378fbc89cd5184e09bf1272)


### 安装依赖

pip install -r requirements.txt

---
## **快速开始**

### 1. **设置数据库**

为了运行应用程序,您需要设置您的 MySQL 数据库。按照以下步骤准备您的数据库:

- **自动数据库创建(可选)**:
    - 如果您喜欢自动设置,我们提供了一个批处理脚本。运行 **`setup_database.bat`** 脚本来创建数据库。这需要在您的系统上安装并配置 MySQL。
- **手动数据库创建**:
    - 或者,您可以在 MySQL 中手动创建数据库。在您的 MySQL 环境中导入并执行 **`data/regn_mysql.sql`** 文件,以设置必要的数据库和表。

### 2. **在代码中配置数据库连接**

设置数据库之后,请更新代码中的连接设置,请更改成你本地数据库的身份验证信息(这4个变量在代码的开头 , 大约在59行,具体如下);附:这些身份验证信息在代码中被两次调用(大约第111行和第1783行)

```python
# 数据库连接设置作为全局变量
DB_HOST = 'localhost'    # 数据库主机
DB_USER = 'root'         # 数据库用户
DB_PASSWORD = '1234'     # 数据库密码
DB_NAME = 'traffic_sign_recognition'  # 数据库名
```

### **关于数据库链接的注意事项**

如果遇到 **`RuntimeError: 'cryptography' package is required for sha256_password or caching_sha2_password auth methods`** 错误,这是因为数据库身份验证出错了,需要正确地创建数据库并输入密码。
并且如果你本地没有启动mysql服务,也会报这个错,所以请确保你的mysql服务已经启动。
### 3. 运行 `main.py`。

### 4. 输入您的账号和密码以登录。

以下是默认的登录凭据:

| 用户名   | 密码     |
|----------|----------|
| admin    | 123456   |
| 1        | 2        |

或者修改`main.py`中的主函数:删除登录逻辑,以直接进入系统而无需进行身份验证。

---

## 项目模块

- `pt`文件夹:存放模型(best.pt是道路标志识别模型)
- `main_win`文件夹:`login.py`(登录ui)、`win.py`(主ui)
- `dialog`文件夹:rtsp弹出界面
- `apprcc_rc.py`:资源文件
- `login_lj.py`:界面登录逻辑文件
- `data/run/exp52`:300轮训练后的道路标志识别模型
- `utils/tt100k_to_voc-main`文件夹:json转yolo格式
- `result`保存一些推理文件,`run`保存训练文件
- 数据集:[TT100k : Traffic-Sign Detection and Classification in the Wild](https://cg.cs.tsinghua.edu.cn/traffic-sign/)
- 数据库文件:`data`文件夹下的`regn_mysql.sql`


> 由于这个项目是在我学习YOLOv5时完成的(已经过了很长一段时间),因此主要的逻辑代码都集中在main.py文件中。换句话说,我没有将不同功能模块化,没有进行模块结构的清晰划分。虽然现在我想给它划分一下模块结构,但是我还是太懒了,嘻嘻 :smile:。如果您有兴趣,可以将其模块化,这样它就会更加清晰。



## 致谢

- [将TT100K数据集转成VOC格式,并且用Python脚本选出45类超过100张的图片和XML](https://blog.csdn.net/Hankerchen/article/details/120727299?spm=1001.2014.3001.5502)
- https://github.com/Javacr/PyQt5-YOLOv5

## 联系我们
> 微信:AIGC004
> 
> 邮箱: 2545197649@qq.com
> 
> 公众号:AI进修生


## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=Ai-trainee/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI&type=Date)](https://star-history.com/#Ai-trainee/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI&Date)


================================================
FILE: data/regn_mysql.sql
================================================
/*
SQLyog Community v13.2.0 (64 bit)
MySQL - 8.0.32 : Database - traffic_sign_recognition
*********************************************************************
*/

/*!40101 SET NAMES utf8 */;

/*!40101 SET SQL_MODE=''*/;

/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
CREATE DATABASE /*!32312 IF NOT EXISTS*/`traffic_sign_recognition` /*!40100 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci */ /*!80016 DEFAULT ENCRYPTION='N' */;

USE `traffic_sign_recognition`;

/*Table structure for table `detection_results` */

DROP TABLE IF EXISTS `detection_results`;

-- One row per recognized traffic-sign occurrence, written by the GUI when the
-- "database logging" checkbox is enabled.
CREATE TABLE `detection_results` (
  `id` int NOT NULL AUTO_INCREMENT,            -- surrogate primary key
  `sign_type` varchar(255) NOT NULL,           -- TT100K class code, e.g. 'pl40', 'pn', 'i2', 'w32' (see sample rows below)
  `sign_count` int NOT NULL,                   -- count of this sign type in the detected frame (sample data is almost always 1)
  `detection_time` varchar(255) DEFAULT NULL,  -- NOTE(review): stored as text in 'YYYY-MM-DD HH:MM:SS' form, not DATETIME — confirm the app depends on the string type before changing
  `additional_info` text,                      -- free-form metadata; sample rows hold 'Confidence: 0.91'-style strings or weather notes
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=456 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;

/*Data for the table `detection_results` */

insert  into `detection_results`(`id`,`sign_type`,`sign_count`,`detection_time`,`additional_info`) values 
(1,'stop',5,'2022-10-02 15:30:00','Weather condition: sunny'),
(3,'pn',1,'2023-04-09 20:00:01','Confidence: 0.91'),
(4,'pl40',1,'2023-04-09 20:00:01','Confidence: 0.93'),
(5,'i2',1,'2023-04-09 20:00:01','Confidence: 0.53'),
(6,'i4',1,'2023-04-09 20:00:01','Confidence: 0.88'),
(8,'pl40',1,'2023-04-09 20:00:01','Confidence: 0.93'),
(14,'pn',1,'2023-04-09 20:00:01','Confidence: 0.90'),
(15,'pl40',1,'2023-04-09 20:00:01','Confidence: 0.94'),
(16,'i2',1,'2023-04-09 20:00:01','Confidence: 0.95'),
(17,'i4',1,'2023-04-09 20:00:02','Confidence: 0.89'),
(18,'pn',1,'2023-04-09 20:00:02','Confidence: 0.90'),
(19,'pl40',1,'2023-04-09 20:00:02','Confidence: 0.94'),
(20,'i2',1,'2023-04-09 20:00:02','Confidence: 0.96'),
(21,'pn',1,'2023-04-09 20:00:02','Confidence: 0.89'),
(22,'i4',1,'2023-04-09 20:00:02','Confidence: 0.91'),
(23,'pl40',1,'2023-04-09 20:00:02','Confidence: 0.94'),
(24,'i2',1,'2023-04-09 20:00:02','Confidence: 0.94'),
(25,'pl40',1,'2023-04-09 20:00:02','Confidence: 0.92'),
(26,'i4',1,'2023-04-09 20:00:02','Confidence: 0.92'),
(27,'pn',1,'2023-04-09 20:00:02','Confidence: 0.93'),
(28,'i2',1,'2023-04-09 20:00:02','Confidence: 0.96'),
(29,'pn',1,'2023-04-09 20:00:02','Confidence: 0.91'),
(30,'i4',1,'2023-04-09 20:00:02','Confidence: 0.92'),
(31,'pl40',1,'2023-04-09 20:00:02','Confidence: 0.94'),
(32,'i2',1,'2023-04-09 20:00:02','Confidence: 0.97'),
(33,'pn',1,'2023-04-09 20:00:03','Confidence: 0.92'),
(34,'i4',1,'2023-04-09 20:00:03','Confidence: 0.92'),
(35,'pl40',1,'2023-04-09 20:00:03','Confidence: 0.94'),
(36,'i2',1,'2023-04-09 20:00:03','Confidence: 0.96'),
(37,'pn',1,'2023-04-09 20:00:03','Confidence: 0.89'),
(38,'i4',1,'2023-04-09 20:00:03','Confidence: 0.93'),
(39,'pl40',1,'2023-04-09 20:00:03','Confidence: 0.94'),
(40,'i2',1,'2023-04-09 20:00:03','Confidence: 0.96'),
(41,'pn',1,'2023-04-09 20:00:03','Confidence: 0.88'),
(42,'i4',1,'2023-04-09 20:00:03','Confidence: 0.93'),
(43,'pl40',1,'2023-04-09 20:00:03','Confidence: 0.94'),
(44,'i2',1,'2023-04-09 20:00:03','Confidence: 0.96'),
(45,'pn',1,'2023-04-09 20:00:03','Confidence: 0.90'),
(46,'i4',1,'2023-04-09 20:00:03','Confidence: 0.94'),
(47,'pl40',1,'2023-04-09 20:00:03','Confidence: 0.95'),
(48,'i2',1,'2023-04-09 20:00:03','Confidence: 0.96'),
(49,'pn',1,'2023-04-09 20:00:04','Confidence: 0.91'),
(50,'i4',1,'2023-04-09 20:00:04','Confidence: 0.93'),
(51,'pl40',1,'2023-04-09 20:00:04','Confidence: 0.94'),
(52,'i2',1,'2023-04-09 20:00:04','Confidence: 0.96'),
(53,'pn',1,'2023-04-09 20:00:04','Confidence: 0.92'),
(54,'i4',1,'2023-04-09 20:00:04','Confidence: 0.93'),
(55,'pl40',1,'2023-04-09 20:00:04','Confidence: 0.94'),
(56,'i2',1,'2023-04-09 20:00:04','Confidence: 0.95'),
(57,'pn',1,'2023-04-09 20:00:04','Confidence: 0.91'),
(58,'i4',1,'2023-04-09 20:00:04','Confidence: 0.93'),
(59,'i2',1,'2023-04-09 20:00:04','Confidence: 0.93'),
(60,'pl40',1,'2023-04-09 20:00:04','Confidence: 0.95'),
(61,'pn',1,'2023-04-09 20:00:04','Confidence: 0.93'),
(62,'i4',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(63,'pl40',1,'2023-04-09 20:00:05','Confidence: 0.95'),
(64,'i2',1,'2023-04-09 20:00:05','Confidence: 0.96'),
(65,'i4',1,'2023-04-09 20:00:05','Confidence: 0.92'),
(66,'pn',1,'2023-04-09 20:00:05','Confidence: 0.92'),
(67,'i2',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(68,'pl40',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(69,'pn',1,'2023-04-09 20:00:05','Confidence: 0.92'),
(70,'i4',1,'2023-04-09 20:00:05','Confidence: 0.93'),
(71,'pl40',1,'2023-04-09 20:00:05','Confidence: 0.95'),
(72,'i2',1,'2023-04-09 20:00:05','Confidence: 0.95'),
(73,'pn',1,'2023-04-09 20:00:05','Confidence: 0.93'),
(74,'i4',1,'2023-04-09 20:00:05','Confidence: 0.93'),
(75,'pl40',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(76,'i2',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(77,'i4',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(78,'pn',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(79,'i2',1,'2023-04-09 20:00:05','Confidence: 0.94'),
(80,'pl40',1,'2023-04-09 20:00:06','Confidence: 0.95'),
(81,'i4',1,'2023-04-09 20:00:06','Confidence: 0.92'),
(82,'pn',1,'2023-04-09 20:00:06','Confidence: 0.94'),
(83,'pl40',1,'2023-04-09 20:00:06','Confidence: 0.95'),
(84,'i2',1,'2023-04-09 20:00:06','Confidence: 0.95'),
(85,'i2',1,'2023-04-09 20:00:06','Confidence: 0.70'),
(86,'pl40',1,'2023-04-09 20:00:06','Confidence: 0.91'),
(87,'i4',1,'2023-04-09 20:00:06','Confidence: 0.91'),
(88,'pn',1,'2023-04-09 20:00:06','Confidence: 0.94'),
(89,'i2',2,'2023-04-09 20:00:06','Confidence: 0.97'),
(90,'p26',1,'2023-04-09 20:00:06','Confidence: 0.62'),
(91,'i4',1,'2023-04-09 20:00:06','Confidence: 0.93'),
(92,'i2',1,'2023-04-09 20:00:06','Confidence: 0.94'),
(93,'pn',1,'2023-04-09 20:00:06','Confidence: 0.94'),
(94,'i4',1,'2023-04-09 20:00:06','Confidence: 0.91'),
(95,'i2',1,'2023-04-09 20:00:06','Confidence: 0.94'),
(96,'pn',1,'2023-04-09 20:00:06','Confidence: 0.94'),
(97,'i2',1,'2023-04-09 20:00:06','Confidence: 0.92'),
(98,'pn',1,'2023-04-09 20:00:06','Confidence: 0.93'),
(99,'i4',1,'2023-04-09 20:00:06','Confidence: 0.93'),
(100,'i2',1,'2023-04-09 20:00:07','Confidence: 0.91'),
(101,'i4',1,'2023-04-09 20:00:07','Confidence: 0.92'),
(102,'pn',1,'2023-04-09 20:00:07','Confidence: 0.93'),
(103,'i5',1,'2023-04-09 20:00:07','Confidence: 0.53'),
(104,'i2',1,'2023-04-09 20:00:07','Confidence: 0.89'),
(105,'i4',1,'2023-04-09 20:00:07','Confidence: 0.91'),
(106,'i4',1,'2023-04-09 20:00:07','Confidence: 0.89'),
(107,'i2',1,'2023-04-09 20:00:07','Confidence: 0.93'),
(108,'i2',1,'2023-04-09 20:00:07','Confidence: 0.77'),
(109,'i4',1,'2023-04-09 20:00:07','Confidence: 0.80'),
(110,'pl40',1,'2023-04-09 20:00:12','Confidence: 0.80'),
(111,'pl40',1,'2023-04-09 20:00:12','Confidence: 0.78'),
(112,'pl40',1,'2023-04-09 20:00:13','Confidence: 0.43'),
(113,'pl40',1,'2023-04-09 20:00:13','Confidence: 0.57'),
(114,'pr40',1,'2023-04-09 20:00:13','Confidence: 0.76'),
(115,'ph4',1,'2023-04-09 20:00:13','Confidence: 0.42'),
(116,'p19',1,'2023-04-09 20:00:13','Confidence: 0.47'),
(117,'pl20',1,'2023-04-09 20:00:13','Confidence: 0.69'),
(118,'ph4',1,'2023-04-09 20:00:13','Confidence: 0.46'),
(119,'pl20',1,'2023-04-09 20:00:13','Confidence: 0.60'),
(120,'pl20',1,'2023-04-09 20:00:14','Confidence: 0.55'),
(121,'p19',1,'2023-04-09 20:00:14','Confidence: 0.43'),
(122,'pl20',1,'2023-04-09 20:00:14','Confidence: 0.43'),
(123,'p10',1,'2023-04-09 20:00:14','Confidence: 0.43'),
(124,'pl40',1,'2023-04-09 20:00:14','Confidence: 0.44'),
(125,'ph4',1,'2023-04-09 20:00:14','Confidence: 0.46'),
(126,'p6',1,'2023-04-09 20:00:14','Confidence: 0.48'),
(127,'p19',1,'2023-04-09 20:00:14','Confidence: 0.58'),
(128,'p10',1,'2023-04-09 20:00:14','Confidence: 0.44'),
(129,'ph4',1,'2023-04-09 20:00:14','Confidence: 0.60'),
(130,'pm20',1,'2023-04-09 20:00:14','Confidence: 0.71'),
(131,'p10',1,'2023-04-09 20:00:15','Confidence: 0.57'),
(132,'pl40',1,'2023-04-09 20:00:15','Confidence: 0.80'),
(133,'p10',1,'2023-04-09 20:00:15','Confidence: 0.68'),
(134,'pl40',1,'2023-04-09 20:00:15','Confidence: 0.81'),
(135,'pn',1,'2023-04-09 20:25:49','Confidence: 0.50'),
(136,'pl40',1,'2023-04-09 20:25:49','Confidence: 0.89'),
(137,'pl40',1,'2023-04-09 20:25:49','Confidence: 0.91'),
(138,'pn',1,'2023-04-09 20:25:49','Confidence: 0.52'),
(139,'pl40',1,'2023-04-09 20:25:49','Confidence: 0.92'),
(140,'pn',1,'2023-04-09 20:25:49','Confidence: 0.82'),
(141,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.92'),
(142,'pn',1,'2023-04-09 20:25:50','Confidence: 0.82'),
(143,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.91'),
(144,'pn',1,'2023-04-09 20:25:50','Confidence: 0.45'),
(145,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.85'),
(146,'pn',1,'2023-04-09 20:25:50','Confidence: 0.82'),
(147,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.91'),
(148,'pn',1,'2023-04-09 20:25:50','Confidence: 0.85'),
(149,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.91'),
(150,'pn',1,'2023-04-09 20:25:50','Confidence: 0.90'),
(151,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.90'),
(152,'pn',1,'2023-04-09 20:25:50','Confidence: 0.89'),
(153,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.91'),
(154,'pl40',1,'2023-04-09 20:25:50','Confidence: 0.89'),
(155,'pn',1,'2023-04-09 20:25:50','Confidence: 0.89'),
(156,'pn',1,'2023-04-09 20:25:51','Confidence: 0.78'),
(157,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.89'),
(158,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.89'),
(159,'pn',1,'2023-04-09 20:25:51','Confidence: 0.90'),
(160,'pn',1,'2023-04-09 20:25:51','Confidence: 0.88'),
(161,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.92'),
(162,'pn',1,'2023-04-09 20:25:51','Confidence: 0.86'),
(163,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.91'),
(164,'pn',1,'2023-04-09 20:25:51','Confidence: 0.78'),
(165,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.91'),
(166,'pn',1,'2023-04-09 20:25:51','Confidence: 0.90'),
(167,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.91'),
(168,'pn',1,'2023-04-09 20:25:51','Confidence: 0.90'),
(169,'pl40',1,'2023-04-09 20:25:51','Confidence: 0.91'),
(170,'pn',1,'2023-04-09 20:25:52','Confidence: 0.91'),
(171,'pl40',1,'2023-04-09 20:25:52','Confidence: 0.93'),
(172,'i2',1,'2023-04-09 20:25:52','Confidence: 0.53'),
(173,'i4',1,'2023-04-09 20:25:52','Confidence: 0.88'),
(174,'pn',1,'2023-04-09 20:25:52','Confidence: 0.91'),
(175,'pl40',1,'2023-04-09 20:25:52','Confidence: 0.93'),
(176,'pn',1,'2023-04-09 20:25:52','Confidence: 0.86'),
(177,'i4',1,'2023-04-09 20:25:52','Confidence: 0.88'),
(178,'pl40',1,'2023-04-09 20:25:52','Confidence: 0.91'),
(179,'i2',1,'2023-04-09 20:25:52','Confidence: 0.94'),
(180,'i4',1,'2023-04-09 20:25:52','Confidence: 0.87'),
(181,'pn',1,'2023-04-09 20:25:52','Confidence: 0.90'),
(182,'pl40',1,'2023-04-09 20:25:52','Confidence: 0.94'),
(183,'i2',1,'2023-04-09 20:25:52','Confidence: 0.95'),
(184,'i4',1,'2023-04-09 20:25:52','Confidence: 0.89'),
(185,'pn',1,'2023-04-09 20:25:52','Confidence: 0.90'),
(186,'pl40',1,'2023-04-09 20:25:52','Confidence: 0.94'),
(187,'i2',1,'2023-04-09 20:25:52','Confidence: 0.96'),
(188,'pn',1,'2023-04-09 20:25:53','Confidence: 0.89'),
(189,'i4',1,'2023-04-09 20:25:53','Confidence: 0.91'),
(190,'pl40',1,'2023-04-09 20:25:53','Confidence: 0.94'),
(191,'i2',1,'2023-04-09 20:25:53','Confidence: 0.94'),
(192,'pl40',1,'2023-04-09 20:25:53','Confidence: 0.92'),
(193,'i4',1,'2023-04-09 20:25:53','Confidence: 0.92'),
(194,'pn',1,'2023-04-09 20:25:53','Confidence: 0.93'),
(195,'i2',1,'2023-04-09 20:25:53','Confidence: 0.96'),
(196,'pn',1,'2023-04-09 20:25:53','Confidence: 0.91'),
(197,'i4',1,'2023-04-09 20:25:53','Confidence: 0.92'),
(198,'pl40',1,'2023-04-09 20:25:53','Confidence: 0.94'),
(199,'i2',1,'2023-04-09 20:25:53','Confidence: 0.97'),
(200,'pn',1,'2023-04-09 20:25:53','Confidence: 0.92'),
(201,'i4',1,'2023-04-09 20:25:53','Confidence: 0.92'),
(202,'pl40',1,'2023-04-09 20:25:53','Confidence: 0.94'),
(203,'i2',1,'2023-04-09 20:25:53','Confidence: 0.96'),
(204,'pn',1,'2023-04-09 20:25:53','Confidence: 0.89'),
(205,'i4',1,'2023-04-09 20:25:53','Confidence: 0.93'),
(206,'pl40',1,'2023-04-09 20:25:53','Confidence: 0.94'),
(207,'i2',1,'2023-04-09 20:25:54','Confidence: 0.96'),
(208,'pn',1,'2023-04-09 20:25:54','Confidence: 0.88'),
(209,'i4',1,'2023-04-09 20:25:54','Confidence: 0.93'),
(210,'pl40',1,'2023-04-09 20:25:54','Confidence: 0.94'),
(211,'i2',1,'2023-04-09 20:25:54','Confidence: 0.96'),
(212,'pn',1,'2023-04-09 20:25:54','Confidence: 0.90'),
(213,'i4',1,'2023-04-09 20:25:54','Confidence: 0.94'),
(214,'pl40',1,'2023-04-09 20:25:54','Confidence: 0.95'),
(215,'i2',1,'2023-04-09 20:25:54','Confidence: 0.96'),
(216,'pn',1,'2023-04-09 20:25:54','Confidence: 0.91'),
(217,'i4',1,'2023-04-09 20:25:54','Confidence: 0.93'),
(218,'pl40',1,'2023-04-09 20:25:54','Confidence: 0.94'),
(219,'i2',1,'2023-04-09 20:25:54','Confidence: 0.96'),
(220,'pn',1,'2023-04-09 20:25:54','Confidence: 0.92'),
(221,'i4',1,'2023-04-09 20:25:55','Confidence: 0.93'),
(222,'pl40',1,'2023-04-09 20:25:55','Confidence: 0.94'),
(223,'i2',1,'2023-04-09 20:25:55','Confidence: 0.95'),
(224,'pn',1,'2023-04-09 20:25:55','Confidence: 0.91'),
(225,'i4',1,'2023-04-09 20:25:55','Confidence: 0.93'),
(226,'i2',1,'2023-04-09 20:25:55','Confidence: 0.93'),
(227,'pl40',1,'2023-04-09 20:25:55','Confidence: 0.95'),
(228,'pn',1,'2023-04-09 20:25:55','Confidence: 0.93'),
(229,'i4',1,'2023-04-09 20:25:55','Confidence: 0.94'),
(230,'pl40',1,'2023-04-09 20:25:55','Confidence: 0.95'),
(231,'i2',1,'2023-04-09 20:25:55','Confidence: 0.96'),
(232,'i4',1,'2023-04-09 20:25:55','Confidence: 0.92'),
(233,'pn',1,'2023-04-09 20:25:55','Confidence: 0.92'),
(234,'i2',1,'2023-04-09 20:25:55','Confidence: 0.94'),
(235,'pl40',1,'2023-04-09 20:25:55','Confidence: 0.94'),
(236,'pn',1,'2023-04-09 20:25:55','Confidence: 0.92'),
(237,'i4',1,'2023-04-09 20:25:55','Confidence: 0.93'),
(238,'pl40',1,'2023-04-09 20:25:56','Confidence: 0.95'),
(239,'i2',1,'2023-04-09 20:25:56','Confidence: 0.95'),
(240,'pn',1,'2023-04-09 20:25:56','Confidence: 0.93'),
(241,'i4',1,'2023-04-09 20:25:56','Confidence: 0.93'),
(242,'pl40',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(243,'i2',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(244,'i4',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(245,'pn',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(246,'i2',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(247,'pl40',1,'2023-04-09 20:25:56','Confidence: 0.95'),
(248,'i4',1,'2023-04-09 20:25:56','Confidence: 0.92'),
(249,'pn',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(250,'pl40',1,'2023-04-09 20:25:56','Confidence: 0.95'),
(251,'i2',1,'2023-04-09 20:25:56','Confidence: 0.95'),
(252,'i2',1,'2023-04-09 20:25:56','Confidence: 0.70'),
(253,'pl40',1,'2023-04-09 20:25:56','Confidence: 0.91'),
(254,'i4',1,'2023-04-09 20:25:56','Confidence: 0.91'),
(255,'pn',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(256,'i2',2,'2023-04-09 20:25:56','Confidence: 0.97'),
(257,'p26',1,'2023-04-09 20:25:56','Confidence: 0.62'),
(258,'i4',1,'2023-04-09 20:25:56','Confidence: 0.93'),
(259,'i2',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(260,'pn',1,'2023-04-09 20:25:56','Confidence: 0.94'),
(261,'i4',1,'2023-04-09 20:25:57','Confidence: 0.91'),
(262,'i2',1,'2023-04-09 20:25:57','Confidence: 0.94'),
(263,'pn',1,'2023-04-09 20:25:57','Confidence: 0.94'),
(264,'i2',1,'2023-04-09 20:25:57','Confidence: 0.92'),
(265,'pn',1,'2023-04-09 20:25:57','Confidence: 0.93'),
(266,'i4',1,'2023-04-09 20:25:57','Confidence: 0.93'),
(267,'i2',1,'2023-04-09 20:25:57','Confidence: 0.91'),
(268,'i4',1,'2023-04-09 20:25:57','Confidence: 0.92'),
(269,'pn',1,'2023-04-09 20:25:57','Confidence: 0.93'),
(270,'i5',1,'2023-04-09 20:25:57','Confidence: 0.53'),
(271,'i2',1,'2023-04-09 20:25:57','Confidence: 0.89'),
(272,'i4',1,'2023-04-09 20:25:57','Confidence: 0.91'),
(273,'i4',1,'2023-04-09 20:25:57','Confidence: 0.89'),
(274,'i2',1,'2023-04-09 20:25:57','Confidence: 0.93'),
(275,'i2',1,'2023-04-09 20:25:57','Confidence: 0.77'),
(276,'i4',1,'2023-04-09 20:25:57','Confidence: 0.80'),
(277,'pl40',1,'2023-04-09 20:26:02','Confidence: 0.35'),
(278,'pl40',1,'2023-04-09 20:26:02','Confidence: 0.80'),
(279,'pl40',1,'2023-04-09 20:26:02','Confidence: 0.78'),
(280,'pl40',1,'2023-04-09 20:26:02','Confidence: 0.43'),
(281,'pl40',1,'2023-04-09 20:26:02','Confidence: 0.57'),
(282,'pr40',1,'2023-04-09 20:26:03','Confidence: 0.76'),
(283,'ph4',1,'2023-04-09 20:26:03','Confidence: 0.42'),
(284,'p19',1,'2023-04-09 20:26:03','Confidence: 0.47'),
(285,'pl20',1,'2023-04-09 20:26:03','Confidence: 0.69'),
(286,'ph4',1,'2023-04-09 20:26:03','Confidence: 0.46'),
(287,'pl20',1,'2023-04-09 20:26:03','Confidence: 0.60'),
(288,'p10',1,'2023-04-09 20:26:03','Confidence: 0.37'),
(289,'pl20',1,'2023-04-09 20:26:04','Confidence: 0.55'),
(290,'p19',1,'2023-04-09 20:26:04','Confidence: 0.43'),
(291,'pl20',1,'2023-04-09 20:26:04','Confidence: 0.43'),
(292,'pl40',1,'2023-04-09 20:26:04','Confidence: 0.38'),
(293,'p10',1,'2023-04-09 20:26:04','Confidence: 0.43'),
(294,'pl40',1,'2023-04-09 20:26:04','Confidence: 0.44'),
(295,'ph4',1,'2023-04-09 20:26:04','Confidence: 0.46'),
(296,'p6',1,'2023-04-09 20:26:04','Confidence: 0.48'),
(297,'p19',1,'2023-04-09 20:26:04','Confidence: 0.58'),
(298,'p10',1,'2023-04-09 20:26:04','Confidence: 0.44'),
(299,'ph4',1,'2023-04-09 20:26:04','Confidence: 0.60'),
(300,'pm20',1,'2023-04-09 20:26:04','Confidence: 0.71'),
(301,'p10',1,'2023-04-09 20:26:04','Confidence: 0.37'),
(302,'p10',1,'2023-04-09 20:26:05','Confidence: 0.57'),
(303,'pl40',1,'2023-04-09 20:26:05','Confidence: 0.80'),
(304,'p10',1,'2023-04-09 20:26:05','Confidence: 0.68'),
(305,'pl40',1,'2023-04-09 20:26:05','Confidence: 0.81'),
(306,'p6',1,'2023-04-09 20:26:05','Confidence: 0.37'),
(307,'w32',1,'2023-04-09 20:26:05','Confidence: 0.44'),
(308,'pl70',1,'2023-04-09 20:26:05','Confidence: 0.49'),
(309,'p10',1,'2023-04-09 20:26:05','Confidence: 0.55'),
(310,'pl40',1,'2023-04-09 20:26:05','Confidence: 0.64'),
(311,'pl40',1,'2023-04-09 20:26:05','Confidence: 0.81'),
(312,'w32',1,'2023-04-09 20:26:05','Confidence: 0.39'),
(313,'p10',1,'2023-04-09 20:26:05','Confidence: 0.84'),
(314,'pl40',1,'2023-04-09 20:26:05','Confidence: 0.88'),
(315,'w32',1,'2023-04-09 20:26:05','Confidence: 0.48'),
(316,'pl20',1,'2023-04-09 20:26:06','Confidence: 0.49'),
(317,'w32',1,'2023-04-09 20:26:06','Confidence: 0.43'),
(318,'p19',1,'2023-04-09 20:26:06','Confidence: 0.82'),
(319,'pl40',1,'2023-04-09 20:26:06','Confidence: 0.92'),
(320,'p23',1,'2023-04-09 20:26:06','Confidence: 0.40'),
(321,'p19',1,'2023-04-09 20:26:06','Confidence: 0.68'),
(322,'pl40',1,'2023-04-09 20:26:06','Confidence: 0.83'),
(323,'pl40',1,'2023-04-09 20:26:06','Confidence: 0.37'),
(324,'w32',1,'2023-04-09 20:26:06','Confidence: 0.40'),
(325,'p10',1,'2023-04-09 20:26:06','Confidence: 0.75'),
(326,'pl60',1,'2023-04-09 20:26:06','Confidence: 0.86'),
(327,'w32',1,'2023-04-09 20:26:06','Confidence: 0.53'),
(328,'p10',1,'2023-04-09 20:26:06','Confidence: 0.53'),
(329,'p6',1,'2023-04-09 20:26:06','Confidence: 0.53'),
(330,'pm30',1,'2023-04-09 20:26:06','Confidence: 0.55'),
(331,'pl60',1,'2023-04-09 20:26:06','Confidence: 0.81'),
(332,'p6',1,'2023-04-09 20:26:06','Confidence: 0.43'),
(333,'w32',1,'2023-04-09 20:26:07','Confidence: 0.58'),
(334,'pl40',1,'2023-04-09 20:26:07','Confidence: 0.64'),
(335,'w32',1,'2023-04-09 20:26:07','Confidence: 0.51'),
(336,'pm20',1,'2023-04-09 20:26:07','Confidence: 0.82'),
(337,'p10',1,'2023-04-09 20:26:07','Confidence: 0.86'),
(338,'w32',1,'2023-04-09 20:26:07','Confidence: 0.41'),
(339,'pl60',1,'2023-04-09 20:26:07','Confidence: 0.66'),
(340,'p19',1,'2023-04-09 20:26:07','Confidence: 0.87'),
(341,'p10',1,'2023-04-09 20:26:07','Confidence: 0.43'),
(342,'w32',1,'2023-04-09 20:26:07','Confidence: 0.47'),
(343,'pl40',1,'2023-04-09 20:26:07','Confidence: 0.53'),
(344,'p6',1,'2023-04-09 20:26:07','Confidence: 0.69'),
(345,'pl40',1,'2023-04-09 20:26:07','Confidence: 0.43'),
(346,'p23',1,'2023-04-09 20:26:07','Confidence: 0.53'),
(347,'pm30',1,'2023-04-09 20:26:07','Confidence: 0.44'),
(348,'pm20',1,'2023-04-09 20:26:07','Confidence: 0.88'),
(349,'p23',1,'2023-04-09 20:26:08','Confidence: 0.48'),
(350,'p19',1,'2023-04-09 20:26:08','Confidence: 0.61'),
(351,'pm20',1,'2023-04-09 20:26:08','Confidence: 0.87'),
(352,'w32',1,'2023-04-09 20:26:08','Confidence: 0.40'),
(353,'p19',1,'2023-04-09 20:26:08','Confidence: 0.89'),
(354,'pm20',1,'2023-04-09 20:26:08','Confidence: 0.90'),
(355,'p19',1,'2023-04-09 20:26:08','Confidence: 0.40'),
(356,'w32',1,'2023-04-09 20:26:08','Confidence: 0.49'),
(357,'p10',1,'2023-04-09 20:26:08','Confidence: 0.52'),
(358,'p23',1,'2023-04-09 20:26:08','Confidence: 0.61'),
(359,'pm20',1,'2023-04-09 20:26:08','Confidence: 0.74'),
(360,'pm30',1,'2023-04-09 20:26:08','Confidence: 0.80'),
(362,'w55',1,'2023-05-12 19:24:42','Confidence: 0.90'),
(363,'w55',1,'2023-05-12 19:24:45','Confidence: 0.92'),
(364,'w55',1,'2023-05-12 19:24:45','Confidence: 0.90'),
(365,'w55',1,'2023-05-12 19:24:45','Confidence: 0.93'),
(366,'w55',1,'2023-05-12 19:24:45','Confidence: 0.90'),
(367,'w55',1,'2023-05-12 19:24:45','Confidence: 0.93'),
(368,'w13',1,'2023-05-12 19:24:46','Confidence: 0.81'),
(369,'w13',1,'2023-05-12 19:24:46','Confidence: 0.69'),
(370,'w13',1,'2023-05-12 19:24:46','Confidence: 0.80'),
(371,'w55',1,'2023-05-12 19:24:46','Confidence: 0.74'),
(372,'pl40',1,'2023-05-25 17:27:24','Confidence: 0.80'),
(373,'pl40',1,'2023-05-25 17:27:25','Confidence: 0.78'),
(374,'pl40',1,'2023-05-25 17:27:25','Confidence: 0.43'),
(375,'pl40',1,'2023-05-25 17:27:25','Confidence: 0.57'),
(376,'pr40',1,'2023-05-25 17:27:25','Confidence: 0.76'),
(377,'ph4',1,'2023-05-25 17:27:25','Confidence: 0.42'),
(378,'p19',1,'2023-05-25 17:27:25','Confidence: 0.47'),
(379,'pl20',1,'2023-05-25 17:27:26','Confidence: 0.69'),
(380,'ph4',1,'2023-05-25 17:27:26','Confidence: 0.46'),
(381,'pl20',1,'2023-05-25 17:27:26','Confidence: 0.60'),
(382,'pl20',1,'2023-05-25 17:27:26','Confidence: 0.55'),
(383,'p19',1,'2023-05-25 17:27:26','Confidence: 0.43'),
(384,'pl20',1,'2023-05-25 17:27:26','Confidence: 0.43'),
(385,'p10',1,'2023-05-25 17:27:26','Confidence: 0.43'),
(386,'pl40',1,'2023-05-25 17:27:26','Confidence: 0.44'),
(387,'ph4',1,'2023-05-25 17:27:26','Confidence: 0.46'),
(388,'p6',1,'2023-05-25 17:27:26','Confidence: 0.48'),
(389,'p19',1,'2023-05-25 17:27:26','Confidence: 0.58'),
(390,'p10',1,'2023-05-25 17:27:27','Confidence: 0.44'),
(391,'ph4',1,'2023-05-25 17:27:27','Confidence: 0.60'),
(392,'pm20',1,'2023-05-25 17:27:27','Confidence: 0.71'),
(393,'p10',1,'2023-05-25 17:27:27','Confidence: 0.57'),
(394,'pl40',1,'2023-05-25 17:27:27','Confidence: 0.80'),
(395,'p10',1,'2023-05-25 17:27:27','Confidence: 0.68'),
(396,'pl40',1,'2023-05-25 17:27:27','Confidence: 0.81'),
(397,'w32',1,'2023-05-25 17:27:27','Confidence: 0.44'),
(398,'pl70',1,'2023-05-25 17:27:27','Confidence: 0.49'),
(399,'p10',1,'2023-05-25 17:27:27','Confidence: 0.55'),
(400,'pl40',1,'2023-05-25 17:27:27','Confidence: 0.64'),
(401,'pl40',1,'2023-05-25 17:27:27','Confidence: 0.81'),
(402,'p10',1,'2023-05-25 17:27:27','Confidence: 0.84'),
(403,'pl40',1,'2023-05-25 17:27:27','Confidence: 0.88'),
(404,'w32',1,'2023-05-25 17:27:27','Confidence: 0.48'),
(405,'pl20',1,'2023-05-25 17:27:27','Confidence: 0.49'),
(406,'w32',1,'2023-05-25 17:27:27','Confidence: 0.43'),
(407,'p19',1,'2023-05-25 17:27:27','Confidence: 0.82'),
(408,'pl40',1,'2023-05-25 17:27:27','Confidence: 0.92'),
(409,'p19',1,'2023-05-25 17:27:28','Confidence: 0.68'),
(410,'pl40',1,'2023-05-25 17:27:28','Confidence: 0.83'),
(411,'p10',1,'2023-05-25 17:27:28','Confidence: 0.75'),
(412,'pl60',1,'2023-05-25 17:27:28','Confidence: 0.86'),
(413,'w32',1,'2023-05-25 17:27:28','Confidence: 0.53'),
(414,'p10',1,'2023-05-25 17:27:28','Confidence: 0.53'),
(415,'p6',1,'2023-05-25 17:27:28','Confidence: 0.53'),
(416,'pm30',1,'2023-05-25 17:27:28','Confidence: 0.55'),
(417,'pl60',1,'2023-05-25 17:27:28','Confidence: 0.81'),
(418,'p6',1,'2023-05-25 17:27:35','Confidence: 0.43'),
(419,'w32',1,'2023-05-25 17:27:35','Confidence: 0.58'),
(420,'pl40',1,'2023-05-25 17:27:35','Confidence: 0.64'),
(421,'w32',1,'2023-05-25 17:27:35','Confidence: 0.51'),
(422,'pm20',1,'2023-05-25 17:27:36','Confidence: 0.82'),
(423,'p10',1,'2023-05-25 17:27:36','Confidence: 0.86'),
(424,'w32',1,'2023-05-25 17:27:36','Confidence: 0.41'),
(425,'pl60',1,'2023-05-25 17:27:36','Confidence: 0.66'),
(426,'p19',1,'2023-05-25 17:27:36','Confidence: 0.87'),
(427,'p10',1,'2023-05-25 17:27:36','Confidence: 0.43'),
(428,'w32',1,'2023-05-25 17:27:36','Confidence: 0.47'),
(429,'pl40',1,'2023-05-25 17:27:36','Confidence: 0.53'),
(430,'p6',1,'2023-05-25 17:27:36','Confidence: 0.69'),
(431,'pl40',1,'2023-05-25 17:27:36','Confidence: 0.43'),
(432,'p23',1,'2023-05-25 17:27:36','Confidence: 0.53'),
(433,'pm30',1,'2023-05-25 17:27:37','Confidence: 0.44'),
(434,'pm20',1,'2023-05-25 17:27:37','Confidence: 0.88'),
(435,'p23',1,'2023-05-25 17:27:37','Confidence: 0.48'),
(436,'p19',1,'2023-05-25 17:27:37','Confidence: 0.61'),
(437,'pm20',1,'2023-05-25 17:27:37','Confidence: 0.87'),
(438,'p19',1,'2023-05-25 17:27:37','Confidence: 0.89'),
(439,'pm20',1,'2023-05-25 17:27:37','Confidence: 0.90'),
(440,'w32',1,'2023-05-25 17:27:37','Confidence: 0.49'),
(441,'p10',1,'2023-05-25 17:27:37','Confidence: 0.52'),
(442,'p23',1,'2023-05-25 17:27:37','Confidence: 0.61'),
(443,'pm20',1,'2023-05-25 17:27:37','Confidence: 0.74'),
(444,'pm30',1,'2023-05-25 17:27:37','Confidence: 0.80'),
(445,'p23',1,'2023-05-25 17:27:37','Confidence: 0.55'),
(446,'w32',1,'2023-05-25 17:27:37','Confidence: 0.56'),
(447,'pm20',1,'2023-05-25 17:27:37','Confidence: 0.75'),
(448,'p19',1,'2023-05-25 17:27:37','Confidence: 0.76'),
(449,'p19',1,'2023-05-25 17:27:38','Confidence: 0.59'),
(450,'pm20',1,'2023-05-25 17:27:38','Confidence: 0.78'),
(451,'p23',1,'2023-05-25 17:27:38','Confidence: 0.84'),
(452,'pl40',1,'2023-05-25 17:27:38','Confidence: 0.45'),
(453,'w32',1,'2023-05-25 17:27:38','Confidence: 0.50'),
(454,'pm20',1,'2023-05-25 17:27:38','Confidence: 0.85'),
(455,'p19',1,'2023-05-25 17:27:38','Confidence: 0.91');

/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;


================================================
FILE: data/run/exp52/hyp.yaml
================================================
lr0: 0.001
lrf: 0.01
momentum: 0.937
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 0.05
cls: 0.5
cls_pw: 1.0
obj: 1.0
obj_pw: 1.0
iou_t: 0.2
anchor_t: 4.0
fl_gamma: 0.0
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
mosaic: 1.0
mixup: 0.0
copy_paste: 0.0


================================================
FILE: data/run/exp52/opt.yaml
================================================
weights: runs\train\exp51\weights\best.pt
cfg: ''
data: E:\Desktop\yolov5-6.0\data\1.yaml
hyp: data\hyps\hyp.scratch-low.yaml
epochs: 20
batch_size: 8
imgsz: 1280
rect: false
resume: false
nosave: false
noval: false
noautoanchor: false
evolve: null
bucket: ''
cache: null
image_weights: true
device: ''
multi_scale: false
single_cls: false
adam: true
sync_bn: false
workers: 4
project: runs\train
name: exp
exist_ok: false
quad: false
linear_lr: false
label_smoothing: 0.0
patience: 100
freeze: 0
save_period: -1
local_rank: -1
entity: null
upload_dataset: false
bbox_interval: -1
artifact_alias: latest
save_dir: runs\train\exp52


================================================
FILE: data/scripts/download_weights.sh
================================================
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download latest models from https://github.com/ultralytics/yolov5/releases
# Example usage: bash path/to/download_weights.sh
# parent
# └── yolov5
#     ├── yolov5s.pt  ← downloads here
#     ├── yolov5m.pt
#     └── ...

# Feed an inline Python script to the interpreter via a heredoc.
# NOTE(review): must be run from the repo root so `utils.downloads` is importable.
python - <<EOF
from utils.downloads import attempt_download

# Base model sizes, then the corresponding P6 (1280-input) variants.
models = ['n', 's', 'm', 'l', 'x']
models.extend([x + '6' for x in models])  # add P6 models

# attempt_download fetches each weights file only if it is not already present.
for x in models:
    attempt_download(f'yolov5{x}.pt')

EOF


================================================
FILE: data/scripts/get_coco.sh
================================================
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download COCO 2017 dataset http://cocodataset.org
# Example usage: bash data/scripts/get_coco.sh
# parent
# ├── yolov5
# └── datasets
#     └── coco  ← downloads here

# Download/unzip labels
# Each download-unzip-remove pipeline is backgrounded with `&` so labels and
# image archives transfer in parallel; `wait` at the bottom joins them all.
d='../datasets' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &

# Download/unzip images
d='../datasets/coco/images' # unzip directory
url=http://images.cocodataset.org/zips/
f1='train2017.zip' # 19G, 118k images
f2='val2017.zip'   # 1G, 5k images
f3='test2017.zip'  # 7G, 41k images (optional)
# NOTE: $f3 (test set) is intentionally excluded from the loop; add it here if needed.
for f in $f1 $f2; do
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &
done
wait # finish background tasks


================================================
FILE: data/scripts/get_coco128.sh
================================================
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
# Example usage: bash data/scripts/get_coco128.sh
# parent
# ├── yolov5
# └── datasets
#     └── coco128  ← downloads here

# Download/unzip images and labels
d='../datasets' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco128.zip' # or 'coco128-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
# Download, extract, and delete the archive; backgrounded, joined by `wait`.
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &

wait # finish background tasks


================================================
FILE: detect.py
================================================
"""Run inference with a YOLOv5 model on images, videos, directories, streams

Usage:
    $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640
"""

import argparse
import os
import sys
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn

# Make the repo root importable before the project-local imports below.
# NOTE(review): this is duplicated by the FILE/ROOT setup after the imports;
# only one of the two path-insertion blocks is actually needed.
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix())  # add yolov5/ to path

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
    apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import select_device, load_classifier, time_sync

# Resolve the YOLOv5 root directory and ensure it is on sys.path.
# (Requires `import os` at the top of the file; the original used os.path.relpath
# without importing os, which raised NameError at import time.)
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # shorten ROOT to a path relative to the CWD

@torch.no_grad()
def run(weights='yolov5s.pt',  # model.pt path(s)
        source='data/images',  # file/dir/URL/glob, 0 for webcam
        imgsz=640,  # inference size (pixels)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project='runs/detect',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        ):
    """Run YOLOv5 inference on images, videos, directories, or streams.

    Loads the model, iterates the chosen dataloader, applies NMS, then
    optionally draws/saves/streams the detections. Results are written under
    ``project/name`` (incremented unless ``exist_ok``). Runs inside
    ``torch.no_grad()``, so no gradients are tracked.
    """
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    # Webcam/stream mode: numeric source ('0'), a .txt list of streams, or a URL.
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Initialize
    set_logging()
    device = select_device(device)
    half &= device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check image size
    names = model.module.names if hasattr(model, 'module') else model.names  # get class names
    if half:
        model.half()  # to FP16

    # Second-stage classifier (disabled by default; flip `classify` to enable)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet50', n=2)  # initialize
        # Bug fix: load_state_dict() returns _IncompatibleKeys, not the module,
        # so the original `.load_state_dict(...).to(device).eval()` chain would
        # raise AttributeError. Load the weights first, then move/eval the model.
        modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model'])
        modelc.to(device).eval()

    # Dataloader
    if webcam:
        view_img = check_imshow()
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
        bs = len(dataset)  # batch_size
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride)
        bs = 1  # batch_size
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once (warmup)
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension

        # Inference
        t1 = time_sync()
        pred = model(img,
                     augment=augment,
                     visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0]

        # Apply NMS
        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
        t2 = time_sync()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy() if save_crop else im0  # for save_crop
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or save_crop or view_img:  # Add bbox to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness)
                        if save_crop:
                            save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)

            # Print time (inference + NMS)
            print(f'{s}Done. ({t2 - t1:.3f}s)')

            # Stream results
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video: inherit source fps/size
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream: assume 30 fps, frame-sized output
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")

    if update:
        strip_optimizer(weights)  # update model (to fix SourceChangeWarning)

    print(f'Done. ({time.time() - t0:.3f}s)')


def parse_opt():
    """Build and parse the detect.py command-line interface.

    Returns an argparse.Namespace whose attribute names mirror the keyword
    parameters of run().
    """
    # Each entry is (flags, kwargs) forwarded verbatim to add_argument();
    # list order fixes the --help ordering.
    specs = [
        (('--weights',), dict(nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')),
        (('--source',), dict(type=str, default='imgs/GUI_new.png', help='file/dir/URL/glob, 0 for webcam')),
        (('--imgsz', '--img', '--img-size'), dict(type=int, default=640, help='inference size (pixels)')),
        (('--conf-thres',), dict(type=float, default=0.25, help='confidence threshold')),
        (('--iou-thres',), dict(type=float, default=0.45, help='NMS IoU threshold')),
        (('--max-det',), dict(type=int, default=1000, help='maximum detections per image')),
        (('--device',), dict(default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')),
        (('--view-img',), dict(action='store_true', help='show results')),
        (('--save-txt',), dict(action='store_true', help='save results to *.txt')),
        (('--save-conf',), dict(action='store_true', help='save confidences in --save-txt labels')),
        (('--save-crop',), dict(action='store_true', help='save cropped prediction boxes')),
        (('--nosave',), dict(action='store_true', help='do not save images/videos')),
        (('--classes',), dict(nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')),
        (('--agnostic-nms',), dict(action='store_true', help='class-agnostic NMS')),
        (('--augment',), dict(action='store_true', help='augmented inference')),
        (('--visualize',), dict(action='store_true', help='visualize features')),
        (('--update',), dict(action='store_true', help='update all models')),
        (('--project',), dict(default='runs/detect', help='save results to project/name')),
        (('--name',), dict(default='exp', help='save results to project/name')),
        (('--exist-ok',), dict(action='store_true', help='existing project/name ok, do not increment')),
        (('--line-thickness',), dict(default=3, type=int, help='bounding box thickness (pixels)')),
        (('--hide-labels',), dict(default=False, action='store_true', help='hide labels')),
        (('--hide-conf',), dict(default=False, action='store_true', help='hide confidences')),
        (('--half',), dict(action='store_true', help='use FP16 half-precision inference')),
    ]
    parser = argparse.ArgumentParser()
    for flags, kwargs in specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()


def main(opt):
    """Echo the parsed options, verify the environment, then run inference."""
    summary = ', '.join(f'{k}={v}' for k, v in vars(opt).items())
    print(colorstr('detect: ') + summary)
    check_requirements(exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == "__main__":
    main(parse_opt())


================================================
FILE: dialog/rtsp_dialog.py
================================================
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'rtsp_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_Form(object):
    """pyuic5-generated form for the RTSP-address dialog (rtsp_dialog.ui).

    Regenerate with pyuic5 instead of hand-editing; manual changes are lost.
    """

    def setupUi(self, Form):
        # Build a single-row dialog: "rtsp address:" label, URL line edit,
        # and a confirm button in one horizontal layout.
        Form.setObjectName("Form")
        Form.resize(783, 40)
        Form.setMinimumSize(QtCore.QSize(0, 40))
        Form.setMaximumSize(QtCore.QSize(16777215, 41))
        icon = QtGui.QIcon()
        # Icon comes from the compiled Qt resource module (apprcc_rc).
        icon.addPixmap(QtGui.QPixmap(":/img/icon/实时视频流解析.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Form.setWindowIcon(icon)
        Form.setStyleSheet("#Form{background:rgba(120,120,120,255)}")
        self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout.setContentsMargins(-1, 5, -1, 5)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Static prompt label.
        self.label = QtWidgets.QLabel(Form)
        self.label.setMinimumSize(QtCore.QSize(0, 30))
        self.label.setMaximumSize(QtCore.QSize(16777215, 30))
        self.label.setStyleSheet("QLabel{font-family: \"Microsoft YaHei\";\n"
"font-size: 18px;\n"
"font-weight: bold;\n"
"color:white;}")
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        # Line edit where the user types the RTSP URL.
        self.rtspEdit = QtWidgets.QLineEdit(Form)
        self.rtspEdit.setMinimumSize(QtCore.QSize(0, 31))
        self.rtspEdit.setStyleSheet("background-color: rgb(207, 207, 207);")
        self.rtspEdit.setObjectName("rtspEdit")
        self.horizontalLayout.addWidget(self.rtspEdit)
        # Confirm button; styles cover normal/pressed/hover/focus states.
        self.rtspButton = QtWidgets.QPushButton(Form)
        self.rtspButton.setStyleSheet("QPushButton{font-family: \"Microsoft YaHei\";\n"
"font-size: 18px;\n"
"font-weight: bold;\n"
"color:white;\n"
"text-align: center center;\n"
"padding-left: 5px;\n"
"padding-right: 5px;\n"
"padding-top: 4px;\n"
"padding-bottom: 4px;\n"
"border-style: solid;\n"
"border-width: 0px;\n"
"border-color: rgba(255, 255, 255, 255);\n"
"border-radius: 3px;\n"
"background-color: rgba(255,255,255,30);}\n"
"\n"
"QPushButton:focus{outline: none;}\n"
"\n"
"QPushButton::pressed{font-family: \"Microsoft YaHei\";\n"
"                     font-size: 16px;\n"
"                     font-weight: bold;\n"
"                     color:rgb(200,200,200);\n"
"                     text-align: center center;\n"
"                     padding-left: 5px;\n"
"                     padding-right: 5px;\n"
"                     padding-top: 4px;\n"
"                     padding-bottom: 4px;\n"
"                     border-style: solid;\n"
"                     border-width: 0px;\n"
"                     border-color: rgba(255, 255, 255, 255);\n"
"                     border-radius: 3px;\n"
"                     background-color:  rgba(255,255,255,150);}\n"
"\n"
"QPushButton::hover {\n"
"border-style: solid;\n"
"border-width: 0px;\n"
"border-radius: 0px;\n"
"background-color: rgba(255,255,255,50);}")
        self.rtspButton.setObjectName("rtspButton")
        self.horizontalLayout.addWidget(self.rtspButton)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Apply the translatable UI strings (window title and widget texts).
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "rtsp address:"))
        self.rtspButton.setText(_translate("Form", "confirm"))
import apprcc_rc


================================================
FILE: dialog/rtsp_dialog.ui
================================================
<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
 <class>Form</class>
 <widget class="QWidget" name="Form">
  <property name="geometry">
   <rect>
    <x>0</x>
    <y>0</y>
    <width>783</width>
    <height>40</height>
   </rect>
  </property>
  <property name="minimumSize">
   <size>
    <width>0</width>
    <height>40</height>
   </size>
  </property>
  <property name="maximumSize">
   <size>
    <width>16777215</width>
    <height>41</height>
   </size>
  </property>
  <property name="windowTitle">
   <string>Form</string>
  </property>
  <property name="windowIcon">
   <iconset resource="../apprcc.qrc">
    <normaloff>:/img/icon/实时视频流解析.png</normaloff>:/img/icon/实时视频流解析.png</iconset>
  </property>
  <property name="styleSheet">
   <string notr="true">#Form{background:rgba(120,120,120,255)}</string>
  </property>
  <layout class="QHBoxLayout" name="horizontalLayout">
   <property name="topMargin">
    <number>5</number>
   </property>
   <property name="bottomMargin">
    <number>5</number>
   </property>
   <item>
    <widget class="QLabel" name="label">
     <property name="minimumSize">
      <size>
       <width>0</width>
       <height>30</height>
      </size>
     </property>
     <property name="maximumSize">
      <size>
       <width>16777215</width>
       <height>30</height>
      </size>
     </property>
     <property name="styleSheet">
      <string notr="true">QLabel{font-family: &quot;Microsoft YaHei&quot;;
font-size: 18px;
font-weight: bold;
color:white;}</string>
     </property>
     <property name="text">
      <string>rtsp address:</string>
     </property>
    </widget>
   </item>
   <item>
    <widget class="QLineEdit" name="rtspEdit">
     <property name="minimumSize">
      <size>
       <width>0</width>
       <height>31</height>
      </size>
     </property>
     <property name="styleSheet">
      <string notr="true">background-color: rgb(207, 207, 207);</string>
     </property>
    </widget>
   </item>
   <item>
    <widget class="QPushButton" name="rtspButton">
     <property name="styleSheet">
      <string notr="true">QPushButton{font-family: &quot;Microsoft YaHei&quot;;
font-size: 18px;
font-weight: bold;
color:white;
text-align: center center;
padding-left: 5px;
padding-right: 5px;
padding-top: 4px;
padding-bottom: 4px;
border-style: solid;
border-width: 0px;
border-color: rgba(255, 255, 255, 255);
border-radius: 3px;
background-color: rgba(255,255,255,30);}

QPushButton:focus{outline: none;}

QPushButton::pressed{font-family: &quot;Microsoft YaHei&quot;;
                     font-size: 16px;
                     font-weight: bold;
                     color:rgb(200,200,200);
                     text-align: center center;
                     padding-left: 5px;
                     padding-right: 5px;
                     padding-top: 4px;
                     padding-bottom: 4px;
                     border-style: solid;
                     border-width: 0px;
                     border-color: rgba(255, 255, 255, 255);
                     border-radius: 3px;
                     background-color:  rgba(255,255,255,150);}

QPushButton::hover {
border-style: solid;
border-width: 0px;
border-radius: 0px;
background-color: rgba(255,255,255,50);}</string>
     </property>
     <property name="text">
      <string>confirm</string>
     </property>
    </widget>
   </item>
  </layout>
 </widget>
 <resources>
  <include location="../apprcc.qrc"/>
 </resources>
 <connections/>
</ui>


================================================
FILE: dialog/rtsp_win.py
================================================
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from dialog.rtsp_dialog import Ui_Form


class Window(QWidget, Ui_Form):
    """QWidget host for the generated RTSP-address form (Ui_Form)."""

    def __init__(self):
        # Python-3 zero-argument super() (file already uses Python-3-only PyQt5).
        super().__init__()
        self.setupUi(self)


if __name__ == '__main__':
    # Manual smoke test: show the RTSP-address dialog on its own.
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())


================================================
FILE: hubconf.py
================================================
"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/

Usage:
    import torch
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
"""

import torch


def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """Creates a specified YOLOv5 model

    Arguments:
        name (str): name of model, i.e. 'yolov5s'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
        autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
        verbose (bool): print all information to screen
        device (str, torch.device, None): device to use for model parameters

    Returns:
        YOLOv5 pytorch model

    Raises:
        Exception: wraps any underlying load/download failure with a hint to
            retry with ``force_reload=True``.
    """
    # Imports are local so `import hubconf` stays cheap and dependency-light.
    from pathlib import Path

    from models.yolo import Model, attempt_load
    from utils.general import check_requirements, set_logging
    from utils.google_utils import attempt_download
    from utils.torch_utils import select_device

    file = Path(__file__).absolute()
    check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python'))
    set_logging(verbose=verbose)

    # A '.pt' name is treated as a (possibly relative) checkpoint path as-is;
    # otherwise the checkpoint is looked up next to this file.
    save_dir = Path('') if str(name).endswith('.pt') else file.parent
    path = (save_dir / name).with_suffix('.pt')  # checkpoint path
    try:
        # Default to GPU 0 when available and no device was requested.
        device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)

        if pretrained and channels == 3 and classes == 80:
            # Standard COCO configuration: load the full checkpoint directly.
            model = attempt_load(path, map_location=device)  # download/load FP32 model
        else:
            # Custom channels/classes: build from YAML, then transfer matching weights.
            cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0]  # model.yaml path
            model = Model(cfg, channels, classes)  # create model
            if pretrained:
                ckpt = torch.load(attempt_download(path), map_location=device)  # load
                msd = model.state_dict()  # model state_dict
                csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
                # Keep only tensors whose shapes match the freshly built model.
                csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape}  # filter
                model.load_state_dict(csd, strict=False)  # load
                if len(ckpt['model'].names) == classes:
                    model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        return model.to(device)

    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
        raise Exception(s) from e


def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
    """Load a custom/local YOLOv5 model from an explicit checkpoint path."""
    model = _create(path, autoshape=autoshape, verbose=verbose, device=device)
    return model


def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-small model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5s', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-medium model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5m', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-large model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5l', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-xlarge model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5x', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-small-P6 model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5s6', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-medium-P6 model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5m6', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-large-P6 model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5l6', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """YOLOv5-xlarge-P6 model (https://github.com/ultralytics/yolov5)."""
    return _create('yolov5x6', pretrained=pretrained, channels=channels, classes=classes,
                   autoshape=autoshape, verbose=verbose, device=device)


if __name__ == '__main__':
    # Smoke test: build a pretrained yolov5s and run batched inference on a
    # mix of input types accepted by the autoshape wrapper.
    model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)  # pretrained
    # model = custom(path='path/to/model.pt')  # custom

    # Verify inference
    import cv2
    import numpy as np
    from PIL import Image

    imgs = ['data/images/zidane.jpg',  # filename
            'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg',  # URI
            cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
            Image.open('data/images/bus.jpg'),  # PIL
            np.zeros((320, 640, 3))]  # numpy

    results = model(imgs)  # batched inference
    results.print()
    results.save()


================================================
FILE: login_lj.py
================================================
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox

from main_win.login import Ui_Dialog


class Login(QtWidgets.QDialog, Ui_Dialog):
    """Login dialog: validates a username/password pair against a fixed list
    and optionally remembers the credentials between sessions via QSettings."""

    def __init__(self):
        super(Login, self).__init__()
        self.setupUi(self)

        # Pre-fill the input fields with any previously saved credentials.
        self.load_credentials()

        # Run the validation when the login button is pressed.
        # BUGFIX: the original also called self.accept() here, marking the
        # dialog accepted before the user had a chance to log in; removed.
        self.pushButton.clicked.connect(self.login)

    def login(self):
        """Validate the entered username/password; accept the dialog on success."""
        username = self.lineEdit.text()
        password = self.lineEdit_2.text()

        # SECURITY NOTE(review): credentials are hard-coded in plain text;
        # replace with hashed credentials in a real deployment.
        if (username == "admin" and password == "123456") or \
                (username == "1" and password == "2") or \
                (username == "user2" and password == "password2"):
            QMessageBox.information(self, "提示", "登录成功!")
            self.save_credentials()  # persist credentials if "remember me" is checked
            self.accept()  # close the dialog with QDialog.Accepted
        else:
            QMessageBox.warning(self, "警告", "用户名或密码错误!")

    def save_credentials(self):
        """Persist the credentials when the checkbox is ticked, else clear them.

        SECURITY NOTE(review): the password is stored in plain text in QSettings.
        """
        settings = QtCore.QSettings()
        if self.checkBox.isChecked():
            settings.setValue("username", self.lineEdit.text())
            settings.setValue("password", self.lineEdit_2.text())
        else:
            settings.setValue("username", "")
            settings.setValue("password", "")

    def load_credentials(self):
        """Restore any saved credentials into the input fields."""
        settings = QtCore.QSettings()
        username = settings.value("username", "")
        password = settings.value("password", "")
        self.lineEdit.setText(username)
        self.lineEdit_2.setText(password)
        # Tick "remember me" only when both fields were previously saved.
        self.checkBox.setChecked(bool(username and password))


if __name__ == "__main__":
    # Manual test: show the login dialog modally and report the result.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = Login()
    if Dialog.exec_() == QtWidgets.QDialog.Accepted:
        print("登录成功!")
    else:
        print("登录失败!")
    # NOTE(review): app.exec_() after the dialog has closed starts an event
    # loop with no visible windows — confirm this trailing loop is intended.
    sys.exit(app.exec_())


================================================
FILE: main.py
================================================
import sys
import json
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import os
import time
from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadWebcam
from utils.CustomMessageBox import MessageBox
from utils.general import check_img_size, check_imshow, non_max_suppression, \
    scale_coords, increment_path
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device
from utils.capnums import Camera
from dialog.rtsp_win import Window
import datetime
from pathlib import Path
from PyQt5.QtGui import QTransform
from PyQt5.QtWidgets import QHeaderView
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QMessageBox
import pymysql
from PyQt5.QtCore import  QAbstractTableModel
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QFileDialog, QMenu
from main_win.win import Ui_mainWindow
from PyQt5.QtCore import Qt, QPoint, QTimer, QThread, pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QLineEdit, QVBoxLayout, QLabel, QDialogButtonBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QInputDialog
from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog
import seaborn as sns

sns.set(style='darkgrid', palette='pastel')  # global seaborn styling for the analysis charts
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # make ROOT relative to the current working directory

# Module-level detection options. These are mutated at runtime by the UI
# checkboxes and read inside DetThread.run() via `global` statements.
visualize = False  # dump feature-map visualizations into the run folder
databases = False  # log every detection into MySQL
agnostic_nms=False  # class-agnostic NMS
half=False # use FP16 half-precision inference
imgsz=1280  # inference image size (pixels); adjusted to the model stride in run()
save_crop = False  # save cropped detection boxes
augment = False  # augmented inference
classes = None  # optional class-id filter for NMS
project = ROOT / 'result' # save results to project/name
hide_labels=False  # hide labels
hide_conf=False  # hide confidences
line_thickness=3  # bounding box thickness (pixels)

# Database connection settings as global variables.
# SECURITY NOTE(review): credentials are hard-coded in plain text; move them
# to a config file or environment variables for production use.
DB_HOST = 'localhost'
DB_USER = 'root'
DB_PASSWORD = '1234'
DB_NAME = 'traffic_sign_recognition'


#检测线程
class DetThread(QThread):
    """Detection worker thread.

    Qt signals used to communicate with the UI thread:
        send_img       -- annotated output frame (np.ndarray)
        send_raw       -- raw input frame (np.ndarray)
        send_statistic -- per-class detection counts for the frame (dict)
        send_msg       -- status messages: detecting/pause/stop/finished/error
        send_percent   -- progress-bar value (int)
        send_fps       -- frame-rate readout (str)
    """
    send_img = pyqtSignal(np.ndarray)
    send_raw = pyqtSignal(np.ndarray)
    send_statistic = pyqtSignal(dict)
    # emit: detecting/pause/stop/finished/error msg
    send_msg = pyqtSignal(str)
    send_percent = pyqtSignal(int)
    send_fps = pyqtSignal(str)

    def __init__(self):
        super(DetThread, self).__init__()
        self.weights = './yolov5s.pt'           # weights requested by the UI
        self.current_weight = './yolov5s.pt'    # weights currently loaded
        self.source = '0'                       # video source: file path, cam index or stream URL
        self.conf_thres = 0.25                  # confidence threshold
        self.iou_thres = 0.45                   # NMS IoU threshold
        self.jump_out = False                   # set True (by UI) to abort the loop
        self.is_continue = True                 # False pauses the loop
        self.percent_length = 1000              # progress-bar full-scale value
        self.rate_check = True                  # whether to throttle frame emission
        self.rate = 100                         # emission rate (Hz) when throttled
        self.save_fold = './result'             # auto-save folder (falsy disables saving)

    def get_db_connection(self, db_host=DB_HOST, db_user=DB_USER, db_password=DB_PASSWORD, db_name=DB_NAME):
        """Open a MySQL connection.

        BUGFIX: the original accepted these parameters but ignored them and
        always used the module globals; the arguments are now honoured
        (defaults preserve the old behaviour).
        """
        return pymysql.connect(
            host=db_host,
            user=db_user,
            password=db_password,
            database=db_name
        )

    def insert_detection_result_to_database(self, sign_type, sign_count, additional_info):
        """Insert one detection row using parameterized SQL (safe against injection)."""
        with self.get_db_connection() as connection:
            with connection.cursor() as cursor:
                current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                sql = "INSERT INTO detection_results (sign_type, sign_count, detection_time, additional_info) VALUES (%s, %s, %s, %s)"
                cursor.execute(sql, (sign_type, sign_count, current_time, additional_info))
                connection.commit()

    @torch.no_grad()
    def run(self,
            max_det=1000,  # maximum detections per image
            device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
            view_img=True,  # show results
            name='exp',  # save results to project/name
            exist_ok=False,  # False auto-increments the run dir; True overwrites it
            save_txt=False,  # save results to *.txt
            save_conf=False,  # save confidences in --save-txt labels
            update=False,  # update all models
            nosave=False,  # do not save images/videos
            prune_model=True
            ):
        """Main detection loop: load the model, read frames from the source,
        run inference + NMS, annotate, and emit results via Qt signals.

        imgsz/half/save_crop/visualize/... are module-level globals set by the
        UI thread; they are read (and some mutated) here via `global`.
        Any exception is reported through send_msg instead of crashing the thread.
        """
        global half
        global imgsz
        try:
            # Initialize device; half precision is only supported on CUDA.
            device = select_device(device)
            print("half:", half)
            half &= device.type != 'cpu'

            # Load the FP32 model and count its parameters.
            model = attempt_load(self.weights, map_location=device)
            num_params = 0
            for param in model.parameters():
                num_params += param.numel()
            stride = int(model.stride.max())  # model stride
            print("imgsz:", imgsz)
            imgsz = check_img_size(imgsz, s=stride)  # round size to a stride multiple
            names = model.module.names if hasattr(model, 'module') else model.names  # class names
            if half:
                model.half()  # to FP16

            # Dataloader: webcam/stream sources vs. image/video files.
            if self.source.isnumeric() or self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')):
                view_img = check_imshow()
                cudnn.benchmark = True  # speeds up constant-size inference
                dataset = LoadWebcam(self.source, img_size=imgsz, stride=stride)
            else:
                dataset = LoadImages(self.source, img_size=imgsz, stride=stride)

            # Warm-up pass on GPU.
            if device.type != 'cpu':
                model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
            count = 0
            jump_count = 0  # reserved for frame-skipping (currently unused)
            start_time = time.time()
            dataset = iter(dataset)

            global save_crop
            print("save_crop", save_crop)
            global visualize
            global augment
            print("augment", augment)

            global agnostic_nms
            global classes
            print("agnostic_nms:", agnostic_nms)
            print("classes:", classes)
            global hide_labels
            print("hide_labels:", hide_labels)
            global hide_conf
            print("hide_conf:", hide_conf)
            global line_thickness
            print("line_thickness:", line_thickness)

            while True:
                # Manual stop requested by the UI.
                if self.jump_out:
                    # BUGFIX: vid_cap is only set after the first frame and is
                    # None for image sources; the original unconditionally
                    # called self.vid_cap.release() and could raise here.
                    if getattr(self, 'vid_cap', None):
                        self.vid_cap.release()
                    self.send_percent.emit(0)
                    self.send_msg.emit('Stop')
                    if hasattr(self, 'out'):
                        self.out.release()
                    break

                # Hot-swap the model when the UI selected different weights.
                if self.current_weight != self.weights:
                    model = attempt_load(self.weights, map_location=device)  # load FP32 model
                    num_params = 0
                    for param in model.parameters():
                        num_params += param.numel()
                    stride = int(model.stride.max())  # model stride
                    imgsz = check_img_size(imgsz, s=stride)  # check image size
                    names = model.module.names if hasattr(model, 'module') else model.names
                    if half:
                        model.half()  # to FP16
                    # Warm-up the swapped model.
                    if device.type != 'cpu':
                        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
                    self.current_weight = self.weights

                # Pause switch; everything below is the per-frame pipeline.
                if self.is_continue:
                    path, img, im0s, self.vid_cap = next(dataset)
                    count += 1
                    # Refresh the FPS readout every 30 frames.
                    if count % 30 == 0 and count >= 30:
                        fps = int(30 / (time.time() - start_time))
                        self.send_fps.emit('fps:' + str(fps))
                        start_time = time.time()
                    if self.vid_cap:
                        percent = int(count / self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT) * self.percent_length)
                        self.send_percent.emit(percent)
                    else:
                        percent = self.percent_length  # single image: report complete

                    statistic_dic = {name: 0 for name in names}
                    img = torch.from_numpy(img).to(device)
                    img = img.half() if half else img.float()  # uint8 to fp16/32
                    img /= 255.0  # 0 - 255 to 0.0 - 1.0
                    if img.ndimension() == 3:
                        img = img.unsqueeze(0)

                    # Inference, optionally dumping feature-map visualizations.
                    global project
                    print("project:", project)
                    if visualize:
                        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
                        save_dir.mkdir(parents=True, exist_ok=True)  # make dir
                        vis_path = increment_path(save_dir / Path(path).stem, mkdir=True)
                        pred = model(img, augment=augment, visualize=vis_path)[0]
                    else:
                        pred = model(img, augment=augment)[0]

                    # Apply NMS
                    pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes, agnostic_nms, max_det=max_det)
                    # Process detections
                    for i, det in enumerate(pred):  # detections per image
                        im0 = im0s.copy()
                        annotator = Annotator(im0, line_width=line_thickness, example=str(names))
                        if len(det):
                            # Rescale boxes from img_size to im0 size
                            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                            # Write results
                            for *xyxy, conf, cls in reversed(det):
                                c = int(cls)  # integer class
                                statistic_dic[names[c]] += 1
                                label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                                annotator.box_label(xyxy, label, color=colors(c, True))
                                # Optionally log each detection to MySQL.
                                if databases:
                                    print("该检测值已录入数据库")
                                    sign_type = names[c]
                                    sign_count = statistic_dic[names[c]]
                                    additional_info = f"Confidence: {conf:.2f}"
                                    self.insert_detection_result_to_database(sign_type, sign_count, additional_info)

                                im0 = im0.copy() if save_crop else im0  # for save_crop
                                if save_crop:
                                    file_path = 'result'
                                    save_dir = increment_path(Path(project) / name,
                                                              exist_ok=True)  # increment run
                                    save_dir.mkdir(parents=True, exist_ok=True)  # make dir
                                    save_one_box(xyxy, im0, file=save_dir / 'crops' / names[c] / f'{file_path}.jpg',
                                                 BGR=True)
                                    print("crop已保存")

                    # Throttle the frame emission rate if enabled.
                    if self.rate_check:
                        time.sleep(1 / self.rate)
                    im0 = annotator.result()
                    self.send_img.emit(im0)
                    self.send_raw.emit(im0s if isinstance(im0s, np.ndarray) else im0s[0])
                    self.send_statistic.emit(statistic_dic)

                    # Auto-save: single images as .jpg, video/webcam as .mp4.
                    if self.save_fold:
                        os.makedirs(self.save_fold, exist_ok=True)
                        if self.vid_cap is None:
                            save_path = os.path.join(self.save_fold,
                                                     time.strftime('%Y_%m_%d_%H_%M_%S',
                                                                   time.localtime()) + '.jpg')
                            cv2.imwrite(save_path, im0)
                        else:
                            if count == 1:  # initialize the writer on the first frame
                                # Record at the source's native frame rate (fallback 25).
                                ori_fps = int(self.vid_cap.get(cv2.CAP_PROP_FPS))
                                if ori_fps == 0:
                                    ori_fps = 25
                                width, height = im0.shape[1], im0.shape[0]
                                save_path = os.path.join(self.save_fold, time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()) + '.mp4')
                                self.out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), ori_fps,
                                                           (width, height))
                            self.out.write(im0)
                    if percent == self.percent_length:
                        # Source exhausted: reset the progress bar and finish.
                        self.send_percent.emit(0)
                        self.send_msg.emit('finished')
                        if hasattr(self, 'out'):
                            self.out.release()
                        break

        except Exception as e:
            # Report any failure to the UI rather than killing the thread.
            self.send_msg.emit('%s' % e)

#插入弹窗数据库
class AddRecordDialog(QtWidgets.QDialog):
    """Modal dialog for entering a new detection record (type, count, extra info)."""

    def __init__(self, parent=None):
        super(AddRecordDialog, self).__init__(parent)

        # One line-edit per record column; kept as attributes so callers can
        # read them back via get_record_data().
        self.sign_type_line_edit = QtWidgets.QLineEdit()
        self.sign_count_line_edit = QtWidgets.QLineEdit()
        self.additional_info_line_edit = QtWidgets.QLineEdit()

        # Standard OK/Cancel buttons wired to the dialog result.
        buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)

        # Build the captioned form rows, then append the button row.
        form = QtWidgets.QFormLayout()
        for caption, widget in (("Sign Type:", self.sign_type_line_edit),
                                ("Sign Count:", self.sign_count_line_edit),
                                ("Additional Info:", self.additional_info_line_edit)):
            form.addRow(caption, widget)
        form.addWidget(buttons)
        self.setLayout(form)

    def get_record_data(self):
        """Return the entered values as a (sign_type, sign_count, additional_info) tuple of strings."""
        return (self.sign_type_line_edit.text(),
                self.sign_count_line_edit.text(),
                self.additional_info_line_edit.text())
#总体数据库
class PymysqlTableModel(QAbstractTableModel):
    """Read-only Qt table model over rows fetched from MySQL.

    `data` is a sequence of equal-length rows (e.g. cursor.fetchall());
    `headers` is one caption per column.
    """

    def __init__(self, data, headers):
        super().__init__()
        self._all_data = list(data)  # unfiltered rows, used to re-apply filters
        self._data = list(data)      # rows currently presented to the view
        self._headers = headers

    def rowCount(self, parent=None):
        return len(self._data)

    def columnCount(self, parent=None):
        # BUGFIX: the original indexed self._data[0] unconditionally, raising
        # IndexError whenever the query returned no rows.
        return len(self._data[0]) if self._data else len(self._headers)

    def data(self, index, role=Qt.DisplayRole):
        if role == Qt.DisplayRole:
            return self._data[index.row()][index.column()]
        return None

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role == Qt.DisplayRole and orientation == Qt.Horizontal:
            return self._headers[section]
        return None

    def filter_data(self, filter_function):
        """Show only the rows for which filter_function(row) is truthy.

        BUGFIX: the original stored the filtered rows into _filtered_data,
        which nothing else read, so filtering never changed the view. The
        filter now replaces the displayed rows (always starting from the
        full, unfiltered data set).
        """
        self._filtered_data = list(filter(filter_function, self._all_data))
        self._data = self._filtered_data
        self.layoutChanged.emit()

#图像增强线程
import os
import cv2
import random
from typing import List, Tuple
import numpy as np
from typing import Optional

class ImageAugmentor(QObject):
    """Applies a configurable set of random augmentations to every image in a folder.

    The ten operations are indexed 0-9 in self.operations; process_images()
    applies operation i to an image with probability probabilities[i].
    Progress is reported through the progress_updated signal.
    """
    progress_updated = pyqtSignal(int)  # emits the number of images handled so far

    def __init__(self):
        super().__init__()
        # Index order matters: apply_operation() special-cases 8 (mixup) and
        # 9 (mosaic), which require a second image.
        self.operations = [
            self.flip, self.random_crop, self.random_scale, self.random_rotate,
            self.random_brightness_contrast_saturation_hue, self.random_erase,
            self.random_noise, self.cutout, self.mixup, self.mosaic
        ]
        self.input_folder = None  # set by process_images(); read by mixup/mosaic

    def process_images(self, input_folder: str, output_folder: str, probabilities: List[float]) -> Tuple[int, int, int]:
        """Augment every image in input_folder and save results to output_folder.

        Returns (success_count, failure_count, total_image_count).
        """
        self.input_folder = input_folder
        success_count = 0
        failure_count = 0
        image_count = sum(1 for file in os.listdir(input_folder) if file.lower().endswith(('.png', '.jpg', '.jpeg')))

        for index, file in enumerate(os.listdir(input_folder)):
            filepath = os.path.join(input_folder, file)
            if os.path.isfile(filepath) and file.lower().endswith(('.png', '.jpg', '.jpeg')):
                # imdecode over np.fromfile instead of cv2.imread so paths
                # with non-ASCII characters load correctly.
                image = cv2.imdecode(np.fromfile(filepath, dtype=np.uint8), cv2.IMREAD_COLOR)

                if image is None:
                    print(f"Error loading image: {filepath}")
                    failure_count += 1
                    continue

                for i, probability in enumerate(probabilities):
                    if random.random() < probability:
                        if i in {8, 9}:  # Mixup and Mosaic need a partner image
                            extra_image = self.load_random_image(input_folder)
                            image = self.apply_operation(image, i, extra_image)
                        else:
                            image = self.apply_operation(image, i)

                output_path = os.path.join(output_folder, file)

                # Encode to JPEG and write via tofile (non-ASCII path safe).
                encoded_image = cv2.imencode('.jpg', image)[1]
                if encoded_image is not None:
                    encoded_image.tofile(output_path)
                    success_count += 1
                else:
                    failure_count += 1
            self.progress_updated.emit(success_count + failure_count)

        return success_count, failure_count, image_count

    def load_random_image(self, input_folder: str) -> np.ndarray:
        """Load one randomly chosen image from input_folder (BGR ndarray)."""
        files = [file for file in os.listdir(input_folder) if file.lower().endswith(('.png', '.jpg', '.jpeg'))]
        random_file = random.choice(files)
        filepath = os.path.join(input_folder, random_file)
        return cv2.imdecode(np.fromfile(filepath, dtype=np.uint8), cv2.IMREAD_COLOR)

    def apply_operation(self, image: np.ndarray, operation_index: int,
                        extra_image: Optional[np.ndarray] = None) -> np.ndarray:
        """Dispatch one augmentation by index; 8/9 consume the extra image."""
        if operation_index == 8:  # Mixup
            if extra_image is not None:
                return self.mixup(image, extra_image)
            return image
        elif operation_index == 9:  # Mosaic
            if extra_image is not None:
                return self.mosaic(image, extra_image)
            return image
        else:
            return self.operations[operation_index](image)

    # 1. Random horizontal/vertical flip.
    def flip(self, image):
        mode = random.choice(['horizontal', 'vertical'])
        print("flip启用")
        return cv2.flip(image, 1 if mode == 'horizontal' else 0)

    # 2. Random crop keeping 50%-100% of each dimension.
    def random_crop(self, image):
        height, width, _ = image.shape
        crop_ratio = random.uniform(0.5, 1.0)
        new_height, new_width = int(height * crop_ratio), int(width * crop_ratio)

        y = random.randint(0, height - new_height)
        x = random.randint(0, width - new_width)
        print("random_crop启用")
        return image[y:y + new_height, x:x + new_width]

    # 3. Random uniform scale (0.5x-2x).
    def random_scale(self, image):
        scale_factor = random.uniform(0.5, 2.0)
        print("random_scale启用")
        return cv2.resize(image, None, fx=scale_factor, fy=scale_factor)

    # 4. Random rotation about the image centre.
    def random_rotate(self, image):
        angle = random.randint(-180, 180)
        height, width = image.shape[:2]
        matrix = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1)
        print("random_rotate启用")
        return cv2.warpAffine(image, matrix, (width, height))

    # 5. Random brightness / contrast / saturation / hue jitter.
    def random_brightness_contrast_saturation_hue(self, image):
        # BUGFIX: the original computed the contrast adjustment on the BGR
        # image but discarded it (the return value was built from the
        # pre-contrast HSV copy). Contrast is now applied first, then the
        # HSV-based adjustments operate on the contrast-adjusted image.
        contrast = random.uniform(0.5, 1.5)
        image = np.clip((image.astype(np.float32) - 128) * contrast + 128, 0, 255).astype(np.uint8)

        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        brightness = random.uniform(0.5, 1.5)
        hsv[:, :, 2] = np.clip(hsv[:, :, 2] * brightness, 0, 255)

        saturation = random.uniform(0.5, 1.5)
        hsv[:, :, 1] = np.clip(hsv[:, :, 1] * saturation, 0, 255)

        hue_shift = random.uniform(-30, 30)
        hsv[:, :, 0] = np.clip(hsv[:, :, 0] + hue_shift, 0, 180)
        print("random_brightness_contrast_saturation_hue启用")
        return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # 6. Random erase: zero out a square patch.
    def random_erase(self, image):
        height, width, _ = image.shape
        max_size = min(height, width) // 2
        # ROBUSTNESS: the original crashed with randint(10, <10) on images
        # smaller than 20px in either dimension; such images are skipped.
        if max_size < 10:
            print("random_erase启用")
            return image
        erase_size = random.randint(10, max_size)

        y = random.randint(0, height - erase_size)
        x = random.randint(0, width - erase_size)

        image[y:y + erase_size, x:x + erase_size, :] = 0
        print("random_erase启用")
        return image

    # 7. Additive Gaussian noise.
    def random_noise(self, image):
        row, col, ch = image.shape
        mean = 0
        sigma = random.uniform(10, 50)
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        gauss = gauss.reshape(row, col, ch)
        noisy = image + gauss
        print("random_noise启用")
        return np.clip(noisy, 0, 255).astype(np.uint8)

    # 8. Mixup: blend this image with a partner image.
    def mixup(self, image, extra_image: Optional[np.ndarray] = None):
        # BUGFIX: apply_operation() calls mixup(image, extra_image), but the
        # original signature accepted only one image and raised TypeError
        # whenever the mixup operation fired. A partner image is now accepted;
        # when omitted, one is drawn at random from the input folder.
        if extra_image is None:
            image_list = [f for f in os.listdir(self.input_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
            random_image_path = os.path.join(self.input_folder, random.choice(image_list))
            extra_image = cv2.imread(random_image_path)

        height, width, _ = image.shape
        mixup_image = cv2.resize(extra_image, (width, height))

        alpha = random.uniform(0, 1)
        print("mixup启用")
        return cv2.addWeighted(image, alpha, mixup_image, 1 - alpha, 0)

    # 9. CutOut: zero out a square patch (same mechanics as random_erase).
    def cutout(self, image):
        height, width, _ = image.shape
        max_size = min(height, width) // 2
        # ROBUSTNESS: same tiny-image guard as random_erase.
        if max_size < 10:
            print("cutout启用")
            return image
        cutout_size = random.randint(10, max_size)

        y = random.randint(0, height - cutout_size)
        x = random.randint(0, width - cutout_size)

        image[y:y + cutout_size, x:x + cutout_size, :] = 0
        print("cutout启用")
        return image

    # 10. Mosaic: 2x2 grid of this image plus three partner images.
    def mosaic(self, image: np.ndarray, extra_image: np.ndarray) -> np.ndarray:
        image_list = [os.path.join(self.input_folder, f) for f in os.listdir(self.input_folder) if
                      f.lower().endswith(('.png', '.jpg', '.jpeg'))]
        random_images = [cv2.imread(random.choice(image_list)) for _ in range(3)]
        # BUGFIX: the original accepted extra_image and ignored it; it is now
        # used as one of the three partner tiles when provided.
        if extra_image is not None:
            random_images[0] = extra_image

        height, width, _ = image.shape
        height = height - height % 2  # force even dimensions for the 2x2 grid
        width = width - width % 2

        # Resize the base image and the three partner tiles.
        image = cv2.resize(image, (width, height))
        for i in range(3):
            random_images[i] = cv2.resize(random_images[i], (width // 2, height // 2))

        mosaic_image = np.zeros((height, width, 3), dtype=np.uint8)

        mosaic_image[:height // 2, :width // 2] = image[:height // 2, :width // 2]
        mosaic_image[height // 2:, :width // 2] = random_images[0][:height // 2, :width // 2]
        mosaic_image[:height // 2, width // 2:] = random_images[1][:height // 2, :width // 2]
        mosaic_image[height // 2:, width // 2:] = random_images[2][:height // 2, :width // 2]
        print("mosaic启用")
        return mosaic_image


#主UI线程
class MainWindow(QMainWindow, Ui_mainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.m_flag = False

        # style 1: window can be stretched
        # self.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowStaysOnTopHint)
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
        # self.setWindowFlags(Qt.CustomizeWindowHint | Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)

        # style 2: window can not be stretched
        # self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint
        #                     | Qt.WindowSystemMenuHint | Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint)
        # self.setWindowOpacity(0.85)  # Transparency of window

        # 自定义标题栏按钮
        self.minButton.clicked.connect(self.showMinimized)
        self.maxButton.clicked.connect(self.max_or_restore)
        # show Maximized window
        # self.maxButton.animateClick(10)
        self.closeButton.clicked.connect(self.close)

        # 定时清空自定义状态栏上的文字
        self.qtimer = QTimer(self)
        self.qtimer.setSingleShot(True)
        self.qtimer.timeout.connect(lambda: self.statistic_label.clear())

        # 自动搜索模型
        self.comboBox.clear()
        self.pt_list = os.listdir('./pt')
        self.pt_list = [file for file in self.pt_list if file.endswith('.pt')]
        self.pt_list.sort(key=lambda x: os.path.getsize('./pt/'+x))
        self.comboBox.clear()
        self.comboBox.addItems(self.pt_list)
        self.qtimer_search = QTimer(self)
        self.qtimer_search.timeout.connect(lambda: self.search_pt())
        self.qtimer_search.start(2000)

        #  yolov5线程
        self.det_thread = DetThread()
        self.model_type = self.comboBox.currentText()
        self.det_thread.weights = "./pt/%s" % self.model_type # 权重
        self.det_thread.source = '0'     # 默认打开本机摄像头,无需保存到配置文件
        self.det_thread.percent_length = self.progressBar.maximum()
        self.det_thread.send_raw.connect(lambda x: self.show_image(x, self.raw_video))
        self.det_thread.send_img.connect(lambda x: self.show_image(x, self.out_video))
        self.det_thread.send_statistic.connect(self.show_statistic)
        self.det_thread.send_msg.connect(lambda x: self.show_msg(x))
        self.det_thread.send_percent.connect(lambda x: self.progressBar.setValue(x))
        self.det_thread.send_fps.connect(lambda x: self.fps_label.setText(x))


        self.fileButton.clicked.connect(self.open_file)
        self.cameraButton.clicked.connect(self.chose_cam)
        self.rtspButton.clicked.connect(self.chose_rtsp)

        self.runButton.clicked.connect(self.run_or_continue)
        self.stopButton.clicked.connect(self.stop)

        self.comboBox.currentTextChanged.connect(self.change_model)
        self.confSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'confSpinBox'))
        self.confSlider.valueChanged.connect(lambda x: self.change_val(x, 'confSlider'))
        self.iouSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'iouSpinBox'))
        self.iouSlider.valueChanged.connect(lambda x: self.change_val(x, 'iouSlider'))
        self.rateSpinBox.valueChanged.connect(lambda x: self.change_val(x, 'rateSpinBox'))
        self.rateSlider.valueChanged.connect(lambda x: self.change_val(x, 'rateSlider'))

        self.checkBox.clicked.connect(self.checkrate)
        self.saveCheckBox.clicked.connect(self.is_save)
        self.load_setting()

# 在您的窗口类的 __init__ 方法中,连接按钮的 clicked 信号到相应的槽函数
        self.pushButton.clicked.connect(lambda: self.open_stackedWidget(2))
        self.pushButton_4.clicked.connect(lambda: self.open_stackedWidget(0))
        self.refreshButton_2.clicked.connect(lambda: self.open_stackedWidget(1))
        self.pushButton_2.clicked.connect(lambda: self.open_stackedWidget(3))
        self.pushButton_5.clicked.connect(lambda: self.open_stackedWidget(4))
#数据库大模块
        self.tableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.table_model = self.fetch_data_from_database()
        if self.table_model:
            self.tableView.setModel(self.table_model)
        # Assuming the button is named refreshButton
        self.refreshButton.clicked.connect(self.fetch_data_from_database)
# Connect the delete button to the on_delete_button_clicked slotgraphicsView
        self.deleteButton_2.clicked.connect(self.on_delete_button_clicked)
        self.insertButton_2.clicked.connect(self.on_add_button_clicked)
#排序
        # print("开始排序111!!!")
        self.sortcomboBox_3.currentIndexChanged.connect(self.on_sort_changed)
        self.sortcomboBox_2.currentIndexChanged.connect(self.on_sort_changed)
#过滤
        self.filterButton_3.clicked.connect(self.apply_filter)

#数据分析
        # Add the following lines in the __init__ method
        self.figure, self.ax = plt.subplots()
        self.canvas = FigureCanvas(self.figure)
        self.plotLayout = QVBoxLayout(self.graphicsView)
        self.plotLayout.addWidget(self.canvas)

        self.refreshButton_3.clicked.connect(self.on_analyze_button_clicked)

#高参数大模块
        self.visualize_checkbox1.stateChanged.connect(self.on_visualize_checkbox_stateChanged)
        self.SavedatabaseCheckBox.stateChanged.connect(self.on_databases_checkbox_stateChanged)
        self.agnostic_nmscheckbox.stateChanged.connect(self.on_agnostic_nmscheckbox_checkbox_stateChanged)
        self.halfcheckbox_2.stateChanged.connect(self.on_halfcheckbox_2_nmscheckbox_checkbox_stateChanged)
        self.save_cropcheckbox_2.stateChanged.connect(self.on_save_crop_nmscheckbox_checkbox_stateChanged)
        self.augmentedcheckbox_3.stateChanged.connect(self.on_augment_nmscheckbox_checkbox_stateChanged)
        self.hide_labelscheckbox_4.stateChanged.connect(self.on_hide_labelscheckbox_checkbox_checkbox_stateChanged)
        self.hide_confscheckbox_6.stateChanged.connect(self.on_hide_confcheckbox_checkbox_checkbox_stateChanged)

        self.imgsz_spinbox12.valueChanged.connect(self.on_imgsz_spinbox_valueChanged)
        self.line_thickness_spinbox12_2.valueChanged.connect(self.on_line_thickness_spinbox_valueChanged)
        self.lineEdit.textChanged.connect(self.update_classes)
        self.browse_result_folder_button.clicked.connect(self.on_browse_result_folder_button_clicked)





#批处理图像增强大模块
        # 创建一个 ImageAugmentor 实例
        self.image_augmentor = ImageAugmentor()

        # 将 UI 控件连接到相关方法
        self.inputFolderButton.clicked.connect(self.select_input_folder)
        self.outputFolderButton.clicked.connect(self.select_output_folder)
        self.startProcessingButton.clicked.connect(self.start_processing)

        self.init_progress_bar()
        # 在MainWindow类的__init__方法中为每个复选框添加事件处理程序
        for checkbox in [self.enhanceMethod1Checkbox, self.enhanceMethod2Checkbox, self.enhanceMethod3Checkbox,
                         self.enhanceMethod4Checkbox, self.enhanceMethod5Checkbox, self.enhanceMethod6Checkbox,
                         self.enhanceMethod7Checkbox, self.enhanceMethod8Checkbox, self.enhanceMethod9Checkbox,
                         self.enhanceMethod10Checkbox]:
            checkbox.clicked.connect(self.checkbox_clicked)

        self.helpButton.clicked.connect(self.show_help)
        # 高级参数设置
        self.advancedSettingsButton.clicked.connect(self.open_advanced_settings)
        # 设置默认值
        self.enhanceMethod2Parameter = QLineEdit("0.8, 1.0")  # 示例:设置增强方法2的默认参数值




























# 单个图像处理大模块
        # 加载缩放
        self.loadImageButton.clicked.connect(self.load_image1)
        self.scaleSlider.valueChanged.connect(self.scale_image)
        # # 翻转
        # self.comboBox_3.currentTextChanged.connect(self.on_comboBox_3_currentIndexChanged)
        # 旋转
        self.rotationSlider.valueChanged.connect(self.rotate_image)

# 正常显示和预览
        self.imageEnhanceButton.clicked.connect(lambda: self.apply_image_enhance(preview=False))
        self.previewEnhanceButton.clicked.connect(lambda: self.apply_image_enhance(preview=True))

        self.imageFilterButton.clicked.connect(lambda: self.apply_image_filter(preview=False))
        self.previewFilterButton.clicked.connect(lambda: self.apply_image_filter(preview=True))

        self.morphologyButton.clicked.connect(lambda: self.apply_morphology(preview=False))
        self.previewmorphologyButton.clicked.connect(lambda: self.apply_morphology(preview=True))

        self.histogramButton.clicked.connect(lambda: self.apply_histogram(preview=False))
        self.previewhistogramButton.clicked.connect(lambda: self.apply_histogram(preview=True))

        #打开一个文件夹选择上一个下一个
        self.image_list = []
        self.current_image_index = -1
        self.save_folder = ""
        #文件按钮
        self.openFolderButton.clicked.connect(self.open_folder)
        self.saveFolderButton.clicked.connect(self.select_save_folder)
        self.nextImageButton.clicked.connect(self.next_image)
        self.previousImageButton.clicked.connect(self.previous_image)
        self.saveButton.clicked.connect(self.save_image)


# 自定义参数
        self.slider_adjust_contrast.valueChanged.connect(self.slider_adjustment)
        self.apply_custom_adjustments_imageEnhance.clicked.connect(self.apply_custom_adjustments_with_value)

#这里只提供图像增强的滑条和设置具体数值的按钮,其他的自行按照这种模式添加
    def slider_adjustment(self, value):
        """Apply the enhancement selected in imageEnhanceComboBox as the
        slider moves; the raw slider value is divided by 10 to get a float.

        value: raw integer position of slider_adjust_contrast.
        """
        # Map the combo-box label to the handler that applies it.
        dispatch = {
            '对比度增强': lambda v: self.adjust_contrast(alpha=v, preview=False),
            '饱和度调整': lambda v: self.adjust_saturation(value=v, preview=False),
            '调整色相': lambda v: self.adjust_hue(value=v, preview=False),
            '调整亮度': lambda v: self.adjust_brightness1(value=v, preview=False),
            'Gamma 校正': lambda v: self.adjust_gamma(gamma=v, preview=False),
            '二值化': lambda v: self.binarize_image(threshold=v, preview=False),
        }
        handler = dispatch.get(self.imageEnhanceComboBox.currentText())
        if handler is not None:
            handler(value / 10.0)

    def apply_custom_adjustments_with_value(self):
        """Prompt for an integer parameter (0-100) and apply the enhancement
        currently selected in imageEnhanceComboBox with that value."""
        operation = self.imageEnhanceComboBox.currentText()
        value, ok = QInputDialog.getInt(self, "输入参数", f"设置{operation}的值:", min=0, max=100)
        if not ok:
            # User cancelled the dialog; do nothing.
            return

        # Dispatch table mirrors slider_adjustment but uses the raw integer.
        handlers = {
            '对比度增强': lambda v: self.adjust_contrast(alpha=v, preview=False),
            '饱和度调整': lambda v: self.adjust_saturation(value=v, preview=False),
            '调整色相': lambda v: self.adjust_hue(value=v, preview=False),
            '调整亮度': lambda v: self.adjust_brightness1(value=v, preview=False),
            'Gamma 校正': lambda v: self.adjust_gamma(gamma=v, preview=False),
            '二值化': lambda v: self.binarize_image(threshold=v, preview=False),
        }
        action = handlers.get(operation)
        if action is not None:
            action(value)

   # 二值化
    def binarize_image(self, threshold=128, preview=True):
        """Threshold the loaded image to black/white and display the result.

        threshold: grayscale cutoff (0-255) passed to cv2.threshold.
        preview:   True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            source = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            if source is None:
                QMessageBox.warning(self, "提示", "无法处理此图片")
                return

            gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
            _, binary = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
            pixmap = self.cv2_image_to_pixmap(binary)

            # Fit the result to whichever label will display it.
            target = self.previewLabel if preview else self.imageLabel_2
            scaled = pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                   Qt.SmoothTransformation)
            target.setPixmap(scaled)
        except Exception as e:
            print(f"Error in binarize_image: {e}")




    def adjust_gamma(self, gamma=1.0, preview=True):
        """Apply gamma correction to the loaded image and display the result.

        gamma:   exponent of the power-law mapping (>1 darkens, <1 brightens).
        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            if original_image is None:
                QMessageBox.warning(self, "提示", "无法处理此图片")
                return

            # Bug fix: the previous np.power(image, gamma) operated on raw
            # 0-255 uint8 values and produced an un-normalized float array,
            # which is not valid 8-bit image data. Standard gamma correction
            # normalizes to [0, 1], raises to `gamma`, then rescales; a
            # 256-entry LUT does this once per intensity instead of per pixel.
            lut = ((np.arange(256) / 255.0) ** gamma * 255.0).astype(np.uint8)
            result_image = cv2.LUT(original_image, lut)

            pixmap = self.cv2_image_to_pixmap(result_image)

            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in adjust_gamma: {e}")


    #调整亮度
    def adjust_brightness1(self, value=1.5, preview=True):
        """Shift the brightness (HSV value channel) of the loaded image.

        value:   amount added to the V channel of every pixel.
        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            bgr = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            if bgr is None:
                QMessageBox.warning(self, "提示", "无法处理此图片")
                return

            # Work in HSV so only brightness (channel 2) is affected.
            hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
            offset = np.ones(hsv.shape[:2], dtype=np.float32) * value
            hsv[..., 2] = cv2.add(hsv[..., 2], offset, dtype=cv2.CV_32F)
            hsv[..., 2] = cv2.convertScaleAbs(hsv[..., 2])
            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            pixmap = self.cv2_image_to_pixmap(result)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in adjust_brightness1: {e}")


        # ...
#调整色相
    def adjust_hue(self, value=1.5, preview=True):
        """Shift the hue (HSV channel 0) of the loaded image by `value`.

        value:   amount added to the H channel of every pixel.
        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            bgr = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            if bgr is None:
                QMessageBox.warning(self, "提示", "无法处理此图片")
                return

            # Only the hue plane (channel 0) is modified.
            hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
            offset = np.ones(hsv.shape[:2], dtype=np.float32) * value
            hsv[..., 0] = cv2.add(hsv[..., 0], offset, dtype=cv2.CV_32F)
            hsv[..., 0] = cv2.convertScaleAbs(hsv[..., 0])
            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            pixmap = self.cv2_image_to_pixmap(result)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in adjust_hue: {e}")


    def adjust_saturation(self, value=1.5, preview=True):
        """Scale the saturation (HSV channel 1) of the loaded image.

        value:   multiplier applied to the S channel of every pixel.
        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            bgr = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            if bgr is None:
                QMessageBox.warning(self, "提示", "无法处理此图片")
                return

            # Convert to HSV, scale only the saturation plane, convert back.
            hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
            gain = np.ones(hsv.shape[:2], dtype=np.float32) * value
            hsv[..., 1] = cv2.multiply(hsv[..., 1], gain, dtype=cv2.CV_32F)
            hsv[..., 1] = cv2.convertScaleAbs(hsv[..., 1])
            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            pixmap = self.cv2_image_to_pixmap(result)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in adjust_saturation: {e}")




    def adjust_contrast(self, alpha=1.5, preview=True):
        """Multiply every pixel by `alpha` to increase/decrease contrast.

        alpha:   per-pixel gain (>1 raises contrast, <1 lowers it).
        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            # Multiply in float64 to avoid premature clipping, then saturate
            # back to 8-bit with convertScaleAbs.
            source = self.pixmap_to_cv2_image(self.imageLabel.pixmap()).astype(np.float64)
            gain = np.ones(source.shape, dtype=np.float64) * alpha
            boosted = cv2.convertScaleAbs(cv2.multiply(source, gain))

            pixmap = self.cv2_image_to_pixmap(boosted)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in adjust_contrast: {e}")


    def adjust_brightness(self, beta=50, preview=True):
        """Brighten the loaded image by adding `beta` with saturating add.

        beta:    constant added to every channel of every pixel.
        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            source = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            # cv2.add saturates at 255 instead of wrapping like numpy '+'.
            brightened = cv2.add(source, np.full(source.shape, beta, dtype=np.uint8))

            pixmap = self.cv2_image_to_pixmap(brightened)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in adjust_brightness: {e}")

    def sharpen_image(self, preview=True):
        """Sharpen the loaded image with a 3x3 Laplacian-style kernel.

        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            source = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            # Center weight 9 with -1 neighbors boosts edges while keeping
            # overall brightness (weights sum to 1).
            kernel = np.array([[-1, -1, -1],
                               [-1,  9, -1],
                               [-1, -1, -1]])
            sharpened = cv2.filter2D(source, -1, kernel)

            pixmap = self.cv2_image_to_pixmap(sharpened)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in sharpen_image: {e}")

    def smooth_image(self, preview=True):
        """Blur the loaded image with a 7x7 Gaussian kernel.

        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            source = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            blurred = cv2.GaussianBlur(source, (7, 7), 0)

            pixmap = self.cv2_image_to_pixmap(blurred)
            target = self.previewLabel if preview else self.imageLabel_2
            target.setPixmap(pixmap.scaled(target.size(), Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation))
        except Exception as e:
            print(f"Error in smooth_image: {e}")

    def flip_image(self, orientation='水平翻转', preview=True):
        """Flip the loaded image and display the result.

        orientation: '水平翻转' (horizontal), '垂直翻转' (vertical) or
                     '水平垂直翻转' (both); anything else is ignored.
        preview:     True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            # Map the label to cv2.flip's axis code (1=H, 0=V, -1=both).
            flip_codes = {'水平翻转': 1, '垂直翻转': 0, '水平垂直翻转': -1}
            if orientation not in flip_codes:
                # Bug fix: an unknown orientation previously fell through to
                # an UnboundLocalError on flipped_image (masked by the except
                # handler); bail out explicitly instead.
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            flipped_image = cv2.flip(original_image, flip_codes[orientation])

            pixmap = self.cv2_image_to_pixmap(flipped_image)
            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in flip_image: {e}")
    def histogram_equalization(self, preview=True):
        """Equalize the grayscale histogram of the loaded image and show it.

        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
            equalized_image = cv2.equalizeHist(gray_image)
            pixmap = self.cv2_image_to_pixmap(equalized_image)
            # Fix: removed a dead scrollArea-sized scaling whose result was
            # immediately overwritten by the label-sized scaling below.
            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in histogram_equalization: {e}")

    def pixmap_to_cv2_image(self, pixmap):
        """Convert a QPixmap into a 3-channel BGR OpenCV array.

        Returns None when the pixmap has no pixel data or conversion fails.
        """
        try:
            qimage = pixmap.toImage()
            if not qimage.constBits():
                # Empty/null image: nothing to convert.
                return None
            qimage = qimage.convertToFormat(QImage.Format_ARGB32)
            raw = qimage.constBits().asarray(qimage.byteCount())
            rgba = np.frombuffer(raw, dtype=np.uint8).reshape(
                (qimage.height(), qimage.width(), 4))
            return cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)
        except Exception as e:
            print(f"Error in pixmap_to_cv2_image: {e}")

    def cv2_image_to_pixmap(self, image):
        """Convert an OpenCV ndarray (BGR 3-channel or 8-bit grayscale) to a QPixmap.

        Returns None (implicitly) if conversion fails.
        """
        try:
            # Ensure rows are contiguous before handing the buffer to QImage.
            image = np.ascontiguousarray(image)
            height, width = image.shape[:2]
            if image.ndim == 2:
                # Bug fix: single-channel frames (produced by binarize_image,
                # histogram_equalization and morphology_operation) were
                # previously mis-read as RGB888; use the grayscale format.
                qimage = QImage(image.data, width, height, image.strides[0],
                                QImage.Format_Grayscale8)
            else:
                # Bug fix: pass the row stride explicitly — QImage otherwise
                # assumes 32-bit-aligned rows and shears images whose width
                # is not a multiple of 4.
                qimage = QImage(image.data, width, height, image.strides[0],
                                QImage.Format_RGB888).rgbSwapped()
            return QPixmap.fromImage(qimage)
        except Exception as e:
            print(f"Error in cv2_image_to_pixmap: {e}")

    def morphology_operation(self, operation, preview=True):
        """Binarize the loaded image and apply a morphological operation.

        operation: one of 'dilate', 'erode', 'opening', 'closing'.
        preview:   True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
            _, binary_image = cv2.threshold(gray_image, 128, 255, cv2.THRESH_BINARY)

            # 5x5 structuring element shared by all operations.
            kernel = np.ones((5, 5), np.uint8)

            if operation == 'dilate':
                result_image = cv2.dilate(binary_image, kernel, iterations=1)
            elif operation == 'erode':
                result_image = cv2.erode(binary_image, kernel, iterations=1)
            elif operation == 'opening':
                result_image = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
            elif operation == 'closing':
                result_image = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
            else:
                # Bug fix: an unrecognized operation previously raised an
                # UnboundLocalError on result_image (masked by the except
                # handler); report and bail out explicitly.
                print(f"Unknown morphology operation: {operation}")
                return

            pixmap = self.cv2_image_to_pixmap(result_image)
            # Fix: removed a dead scrollArea-sized scaling whose result was
            # immediately overwritten below.
            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in morphology_operation: {e}")
    def mean_filter(self, preview=True):
        """Apply a 5x5 box (mean) blur to the loaded image and display it.

        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            mean_filtered_image = cv2.blur(original_image, (5, 5))
            pixmap = self.cv2_image_to_pixmap(mean_filtered_image)
            # Fix: removed a dead scrollArea-sized scaling whose result was
            # immediately overwritten below.
            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in mean_filter: {e}")

    def gaussian_filter(self, preview=True):
        """Apply a 5x5 Gaussian blur to the loaded image and display it.

        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            gaussian_filtered_image = cv2.GaussianBlur(original_image, (5, 5), 0)
            pixmap = self.cv2_image_to_pixmap(gaussian_filtered_image)
            # Fix: removed a dead scrollArea-sized scaling whose result was
            # immediately overwritten below.
            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in gaussian_filter: {e}")

    def median_filter(self, preview=True):
        """Apply a 5-pixel median blur to the loaded image and display it.

        preview: True renders into previewLabel, False into imageLabel_2.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            original_image = self.pixmap_to_cv2_image(self.imageLabel.pixmap())
            median_filtered_image = cv2.medianBlur(original_image, 5)
            pixmap = self.cv2_image_to_pixmap(median_filtered_image)
            # Fix: removed a dead scrollArea-sized scaling whose result was
            # immediately overwritten below.
            if preview:
                label_size = self.previewLabel.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.previewLabel.setPixmap(scaled_pixmap)
            else:
                label_size = self.imageLabel_2.size()
                scaled_pixmap = pixmap.scaled(label_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.imageLabel_2.setPixmap(scaled_pixmap)
        except Exception as e:
            print(f"Error in median_filter: {e}")

    def load_image1(self, file_name):
        """Slot for loadImageButton: ask the user for an image file and show it.

        file_name: placeholder overwritten immediately — the clicked signal
        passes a bool here; the real path comes from the file dialog.
        """
        options = QFileDialog.Options() | QFileDialog.ReadOnly
        file_name, _ = QFileDialog.getOpenFileName(
            self, "Load Image", "",
            "Images (*.png *.xpm *.jpg *.bmp);;All Files (*)", options=options)

        if not file_name:
            # Dialog cancelled.
            return

        pixmap = QPixmap(file_name)
        self.imageLabel.setPixmap(pixmap.scaled(
            self.imageLabel.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        self.imageLabel.setScaledContents(False)
        self.imageLabel.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)

        # Restart folder navigation from this single picked file.
        self.image_list = [file_name]
        self.current_image_index = 0

    def load_image(self, file_name):
        """Display `file_name` in imageLabel scaled to fit; no-op if empty."""
        if not file_name:
            return
        pixmap = QPixmap(file_name)
        scaled = pixmap.scaled(self.imageLabel.size(), Qt.KeepAspectRatio,
                               Qt.SmoothTransformation)
        self.imageLabel.setPixmap(scaled)
        self.imageLabel.setScaledContents(False)
        self.imageLabel.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)

    def open_folder(self):
        """Let the user pick a folder, list its images, and show the first one.

        Populates self.image_list (sorted for a stable browsing order) and
        resets self.current_image_index.
        """
        folder_name = QFileDialog.getExistingDirectory(self, "打开文件夹")
        if not folder_name:
            return
        extensions = {'.png', '.jpg', '.bmp', '.xpm'}
        # Fix: sort the listing — os.listdir order is arbitrary, which made
        # next/previous navigation jump unpredictably.
        self.image_list = sorted(
            os.path.join(folder_name, f) for f in os.listdir(folder_name)
            if os.path.splitext(f)[-1].lower() in extensions)
        # Fix: a folder with no images previously raised IndexError here.
        if not self.image_list:
            QMessageBox.warning(self, "提示", "该文件夹中没有可用的图片")
            return
        self.current_image_index = 0
        self.load_image(self.image_list[0])

    def select_save_folder(self):
        # Remember the directory the user picks; save_image writes into it.
        # The dialog returns "" on cancel, which save_image treats as unset.
        self.save_folder = QFileDialog.getExistingDirectory(self, "选择保存文件夹")

    def next_image(self):
        """Advance to the next image in the opened folder, wrapping around."""
        if not self.image_list:
            QMessageBox.warning(self, "提示", "请先打开一个文件夹")
            return
        total = len(self.image_list)
        self.current_image_index = (self.current_image_index + 1) % total
        self.load_image(self.image_list[self.current_image_index])

    def previous_image(self):
        """Step back to the previous image in the opened folder, wrapping around."""
        if not self.image_list:
            QMessageBox.warning(self, "提示", "请先打开一个文件夹")
            return
        total = len(self.image_list)
        self.current_image_index = (self.current_image_index - 1) % total
        self.load_image(self.image_list[self.current_image_index])

    def save_image(self):
        """Save the processed pixmap (imageLabel_2) into the chosen folder.

        The output file name is the current source file name prefixed with
        'processed_'. Warns and returns early if there is no processed image,
        no destination folder, or no current source image.
        """
        if not self.imageLabel_2.pixmap():
            QMessageBox.warning(self, "提示", "没有处理后的图片可供保存")
            return
        if not self.save_folder:
            QMessageBox.warning(self, "提示", "请先选择保存文件夹")
            return
        if not self.image_list or self.current_image_index >= len(self.image_list):
            QMessageBox.warning(self, "提示", "当前没有图像可供保存")
            return

        base_name = os.path.basename(self.image_list[self.current_image_index])
        save_path = os.path.join(self.save_folder, "processed_" + base_name)
        self.imageLabel_2.pixmap().save(save_path)
        QMessageBox.information(self, "提示", f"图片已成功保存到:{save_path}")




    def apply_image_enhance(self, preview):
        """Dispatch the enhancement chosen in imageEnhanceComboBox.

        Any label not in the table falls through to flip_image, which
        interprets the text as a flip orientation.
        """
        operation = self.imageEnhanceComboBox.currentText()
        handlers = {
            '对比度增强': self.adjust_contrast,
            '亮度增强': self.adjust_brightness,
            '锐化': self.sharpen_image,
            '平滑': self.smooth_image,
        }
        handler = handlers.get(operation)
        if handler is not None:
            handler(preview=preview)
        else:
            self.flip_image(operation, preview=preview)


    def apply_image_filter(self, preview):
        """Dispatch the filter chosen in imageFilterComboBox; unknown labels are ignored."""
        filters = {
            '均值滤波': self.mean_filter,
            '高斯滤波': self.gaussian_filter,
            '中值滤波': self.median_filter,
        }
        selected = filters.get(self.imageFilterComboBox.currentText())
        if selected is not None:
            selected(preview=preview)

    def apply_morphology(self, preview):
        """Translate the morphologyComboBox label into morphology_operation's
        op name and run it; unknown labels are ignored."""
        op_names = {
            '膨胀': 'dilate',
            '腐蚀': 'erode',
            '开运算': 'opening',
            '闭运算': 'closing',
        }
        op = op_names.get(self.morphologyComboBox.currentText())
        if op is not None:
            self.morphology_operation(op, preview=preview)

    def apply_histogram(self, preview):
        """Run the histogram operation chosen in histogramComboBox (only
        equalization is supported); unknown labels are ignored."""
        if self.histogramComboBox.currentText() == '直方图均衡化':
            self.histogram_equalization(preview=preview)

    # def on_comboBox_3_currentIndexChanged(self):
    #     orientation = self.comboBox_3.currentText()
    #     self.flip_image(orientation)
    # def flip_image(self, orientation):
    #     if not self.imageLabel.pixmap():
    #         QMessageBox.warning(self, "提示", "请先加载一张图片")
    #         return
    #
    #     original_pixmap = self.imageLabel.pixmap()
    #
    #     if orientation == '水平翻转':
    #         flipped_pixmap = original_pixmap.transformed(QTransform().scale(-1, 1), mode=Qt.SmoothTransformation)
    #     elif orientation == '垂直翻转':
    #         flipped_pixmap = original_pixmap.transformed(QTransform().scale(1, -1), mode=Qt.SmoothTransformation)
    #     elif orientation == '无翻转':
    #         pass
    #
    #     self.imageLabel_2.setPixmap(
    #         flipped_pixmap.scaled(self.scrollArea.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
    #     self.imageLabel_2.setScaledContents(False)
    #     self.imageLabel_2.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
    def scale_image(self):
        """Rescale the image shown in imageLabel according to scaleSlider.

        Bug fix: the previous implementation scaled imageLabel's *current*
        pixmap and wrote the result back onto the same label, so every
        slider move rescaled the already-scaled image — quality degraded
        and the displayed size drifted from the requested ratio.  The base
        pixmap is now cached and every scale is computed from it; the cache
        is refreshed whenever the label's pixmap was replaced by anything
        other than this method (e.g. a newly loaded image).
        """
        try:
            # Slider is inverted: (200 - value) / 100 turns e.g. 50 into 1.5x.
            scale_value = (200 - self.scaleSlider.value()) / 100.0
            current = self.imageLabel.pixmap()
            if not current:
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return
            # If the label holds something we did not put there, treat it as
            # a fresh base image for subsequent scaling.
            if current.cacheKey() != getattr(self, '_scale_result_key', None):
                self._scale_base_pixmap = current
            base = self._scale_base_pixmap
            # Compute the target size from the *base* image, never from an
            # already-scaled result.
            new_size = base.size() * scale_value
            scaled_pixmap = base.scaled(new_size, Qt.KeepAspectRatio)
            self.imageLabel.setPixmap(scaled_pixmap)
            # Remember our own output so the next call can distinguish it
            # from an externally loaded image.
            self._scale_result_key = scaled_pixmap.cacheKey()
        except Exception as e:
            print(f"Error in scale_image: {e}")

    def rotate_image(self):
        """Rotate the source image by the rotationSlider angle and preview it.

        Reads the pixmap from imageLabel, rotates it by the slider value
        (degrees), and shows the result fitted into the scroll area on
        imageLabel_2.  The source label itself is left untouched.

        Fix: removed the unused local `original_size` from the original.
        """
        try:
            if not self.imageLabel.pixmap():
                QMessageBox.warning(self, "提示", "请先加载一张图片")
                return

            angle = self.rotationSlider.value()
            source = self.imageLabel.pixmap()
            rotated = source.transformed(QTransform().rotate(angle),
                                         mode=Qt.SmoothTransformation)

            # Fit the preview into the scroll area, keeping aspect ratio.
            self.imageLabel_2.setPixmap(
                rotated.scaled(self.scrollArea.size(), Qt.KeepAspectRatio,
                               Qt.SmoothTransformation))
            self.imageLabel_2.setScaledContents(False)
            self.imageLabel_2.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        except Exception as e:
            print(f"Error in rotate_image: {e}")

























    def show_help(self):
        """Pop up a styled QMessageBox documenting the augmentation options."""
        body_html = (
            "<h3>图像增强操作详细信息:</h3>"
            "<ul>"
            "<li><b>翻转</b>:水平或垂直翻转图像。</li>"
            "<li><b>随机裁剪</b>:从图像中随机裁剪一部分。</li>"
            "<li><b>随机缩放</b>:对图像进行随机缩放。</li>"
            "<li><b>随机旋转</b>:在一定范围内随机旋转图像。</li>"
            "<li><b>随机亮度/对比度/饱和度/色调</b>:调整图像的亮度、对比度、饱和度和色调。</li>"
            "<li><b>随机擦除</b>:在图像上随机擦除一个矩形区域。</li>"
            "<li><b>随机噪声</b>:在图像上添加随机噪声。</li>"
            "<li><b>剪切</b>:在图像上随机剪切一个矩形区域。</li>"
            "<li><b>Mixup</b>:将两个图像按一定比例混合在一起。</li>"
            "<li><b>Mosaic</b>:将四个图像组合成一个马赛克图像。</li>"
            "</ul>"
            "<p><b>注意:</b>Mixup 和 Mosaic 操作不能与其他操作同时使用。</p>"
            "<h3>建议的图像增强组合:</h3>"
            "<ol>"
            "<li>翻转 + 随机裁剪 + 随机缩放 + 随机旋转</li>"
            "<li>随机亮度/对比度/饱和度/色调 + 随机擦除 + 随机噪声 + 剪切</li>"
            "<li>使用 Mixup 或 Mosaic 作为单独操作。</li>"
            "</ol>"
            "<p><a href='https://www.bilibili.com/video/BV1h84y1G72P/?spm_id_from=333.337.search-card.all.click'>查看视频教程</a></p>"
        )

        # Dark translucent box with the app's blue-green gradient buttons.
        qss = """
            QMessageBox {
                background-color: rgba(0, 0, 0, 50%);
                color: white;
                font-size: 14px;
                font-weight: bold;
                border-radius: 10px;
            }

            QMessageBox QLabel {
                color: white;
                font-size: 14px;
            }

            QPushButton {
                background-color: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 rgb(3, 85, 160), stop:1 rgb(13, 206, 197));
                color: white;
                font-size: 14px;
                font-weight: bold;
                border-radius: 5px;
                padding: 5px;
                min-width: 100px;
            }

            QPushButton:hover {
                background-color: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 rgb(13, 206, 197), stop:1 rgb(3, 85, 160));
            }

            QPushButton:pressed {
                background-color: rgb(20, 120, 100);
            }
        """

        box = QMessageBox(self)
        box.setIcon(QMessageBox.Information)
        box.setWindowTitle("帮助")
        box.setText(body_html)
        box.setStyleSheet(qss)
        box.exec_()

    # 添加新的 checkbox_clicked 方法
    def checkbox_clicked(self):
        """Warn when Mixup/Mosaic is combined with any other augmentation.

        Mixup (index 8) and Mosaic (index 9) are mutually exclusive with
        every other method; only a warning is shown — nothing is unchecked
        automatically.
        """
        boxes = [self.enhanceMethod1Checkbox, self.enhanceMethod2Checkbox,
                 self.enhanceMethod3Checkbox, self.enhanceMethod4Checkbox,
                 self.enhanceMethod5Checkbox, self.enhanceMethod6Checkbox,
                 self.enhanceMethod7Checkbox, self.enhanceMethod8Checkbox,
                 self.enhanceMethod9Checkbox, self.enhanceMethod10Checkbox]
        selected = {i for i, box in enumerate(boxes) if box.isChecked()}
        exclusive = {8, 9}  # indexes of Mixup and Mosaic
        if selected & exclusive and len(selected) > 1:
            QMessageBox.warning(self, "警告", "Mixup 或 Mosaic 操作不能与其他操作同时使用,请取消选择不兼容的操作。")


    def init_progress_bar(self):
        # Connect the augmentor's progress signal and zero the progress bar.
        self.image_augmentor.progress_updated.connect(self.update_progress)
        self.progressBar_2.setValue(0)  # progressBar_2 is the QProgressBar created in Qt Designer

    @pyqtSlot(int)
    def update_progress(self, value):
        """Advance progressBar_2 by `value`; the signal delivers increments."""
        current = self.progressBar_2.value()
        self.progressBar_2.setValue(current + value)






    # 在这里添加 select_input_folder、select_output_folder 和start_processing 槽函数的实现。
    def select_input_folder(self):
        """Ask the user for an input directory and display it on the label."""
        chosen = QFileDialog.getExistingDirectory()
        self.inputFolderLabel.setText(chosen)

    def select_output_folder(self):
        """Ask the user for an output directory and display it on the label."""
        chosen = QFileDialog.getExistingDirectory()
        self.outputFolderLabel.setText(chosen)

    def start_processing(self):
        """Run batch augmentation over the selected input folder.

        Reads one probability from enhanceProbabilityInput and applies it to
        every checked enhancement method (0 for unchecked ones), processes
        all images via the augmentor, then reports success/failure counts.

        Fixes: a non-numeric probability no longer raises an uncaught
        ValueError; missing folders are reported instead of being passed on.
        """
        input_folder = self.inputFolderLabel.text()
        output_folder = self.outputFolderLabel.text()
        if not input_folder or not output_folder:
            QMessageBox.warning(self, "提示", "请先选择输入和输出文件夹")
            return

        try:
            probability = float(self.enhanceProbabilityInput.text())
        except ValueError:
            QMessageBox.warning(self, "提示", "请输入有效的增强概率(数字)")
            return

        checkboxes = [self.enhanceMethod1Checkbox, self.enhanceMethod2Checkbox,
                      self.enhanceMethod3Checkbox, self.enhanceMethod4Checkbox,
                      self.enhanceMethod5Checkbox, self.enhanceMethod6Checkbox,
                      self.enhanceMethod7Checkbox, self.enhanceMethod8Checkbox,
                      self.enhanceMethod9Checkbox, self.enhanceMethod10Checkbox]
        probabilities = [probability if box.isChecked() else 0 for box in checkboxes]

        success_count, failure_count, image_count = self.image_augmentor.process_images(
            input_folder, output_folder, probabilities)

        # NOTE(review): image_count is only known after processing finishes,
        # so the bar cannot show accurate progress during the run itself.
        self.progressBar_2.setMaximum(image_count)

        QMessageBox.information(self, "处理完成", f"成功处理 {success_count} 个图像,失败 {failure_count} 个")

        self.progressBar_2.setValue(0)

    def open_advanced_settings(self):
        """Show a modal dialog for editing advanced augmentation parameters."""
        settings_dialog = QDialog(self)
        settings_dialog.setWindowTitle("高级参数设置")

        form = QVBoxLayout()

        # One QLineEdit per tunable enhancement parameter, pre-filled with
        # the current value, preceded by a hint describing the format.
        method2_edit = QLineEdit(self.enhanceMethod2Parameter.text())
        method2_hint = QLabel("scale_range (tuple): (min, max), e.g., (0.8, 1.0)")
        form.addWidget(method2_hint)
        form.addWidget(method2_edit)

        # ...more parameter editors and hint labels can be added here...

        button_row = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        button_row.accepted.connect(settings_dialog.accept)
        button_row.rejected.connect(settings_dialog.reject)
        form.addWidget(button_row)

        settings_dialog.setLayout(form)

        # Persist the edited values only when the user confirms.
        if settings_dialog.exec() == QDialog.Accepted:
            self.enhanceMethod2Parameter.setText(method2_edit.text())
            # ...update further parameter values here...
            # ...在此处更新更多增强方法参数值...










#检测线程的参数
    def on_visualize_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `visualize`."""
        global visualize
        checked = (state == 2)  # 2 == Qt.Checked
        visualize = checked
        print(f"Visualize is now: {visualize}")
    def on_databases_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `databases`."""
        global databases
        checked = (state == 2)  # 2 == Qt.Checked
        databases = checked
        print(f"databases is now: {databases}")
    def on_agnostic_nmscheckbox_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `agnostic_nms`."""
        global agnostic_nms
        checked = (state == 2)  # 2 == Qt.Checked
        agnostic_nms = checked
        print(f"agnostic_nmscheckbox is now: {agnostic_nms}")
    def on_halfcheckbox_2_nmscheckbox_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `half`.

        Fix: the log line previously said "agnostic_nmscheckbox is now"
        (copy-paste error) while actually reporting `half`.
        """
        global half
        half = state == 2  # 2 == Qt.Checked
        print(f"half is now: {half}")
    def on_save_crop_nmscheckbox_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `save_crop`."""
        global save_crop
        checked = (state == 2)  # 2 == Qt.Checked
        save_crop = checked
        print(f"on_save_crop_nmscheckbox is now: {save_crop}")
    def on_augment_nmscheckbox_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `augment`.

        Fix: the log line previously printed "augmentedcheckbox_3 is now",
        which does not match the variable being reported.
        """
        global augment
        augment = state == 2  # 2 == Qt.Checked
        print(f"augment is now: {augment}")
    def on_imgsz_spinbox_valueChanged(self, value):
        # Spinbox handler: store the new value in the global `imgsz`
        # (inference image size used by the detection thread).
        global imgsz
        imgsz = value
        print(f"imgsz is now: {imgsz}")
    def on_line_thickness_spinbox_valueChanged(self, value):
        # Spinbox handler: store the new value in the global `line_thickness`
        # (bounding-box line width used when drawing detections).
        global line_thickness
        line_thickness = value
        print(f"line_thickness is now: {line_thickness}")
    def update_classes(self, text):
        """Update the global class-id filter from whitespace-separated ids.

        A blank string clears the filter (classes = None).

        Fix: any non-numeric token previously raised an uncaught ValueError
        from a GUI callback; invalid input is now reported and the current
        filter left unchanged.
        """
        global classes
        stripped = text.strip()
        if not stripped:
            classes = None
        else:
            try:
                classes = [int(token) for token in stripped.split()]
            except ValueError:
                # Keep the previous filter when the input cannot be parsed.
                print(f"Invalid classes input ignored: {text!r}")
                return
        print(f"Classes are now: {classes}")
    def on_hide_labelscheckbox_checkbox_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `hide_labels`."""
        global hide_labels
        checked = (state == 2)  # 2 == Qt.Checked
        hide_labels = checked
        print(f"hide_labels is now: {hide_labels}")
    def on_hide_confcheckbox_checkbox_checkbox_stateChanged(self, state):
        """Checkbox handler: mirror the checked state into the global `hide_conf`."""
        global hide_conf
        checked = (state == 2)  # 2 == Qt.Checked
        hide_conf = checked
        print(f"hide_conf is now: {hide_conf}")
    def on_browse_result_folder_button_clicked(self):
        """Let the user pick the results folder and store it in global `project`.

        The button's clicked signal is temporarily disconnected so that a
        re-entrant click cannot open a second dialog.

        Fix: reconnection now happens in a `finally` block — previously an
        exception raised between disconnect() and connect() would have left
        the button permanently disconnected.
        """
        global project
        button = self.sender()
        button.clicked.disconnect()
        try:
            folder_path = QFileDialog.getExistingDirectory(
                self, "Select Result Folder", str(Path.home()))
            if folder_path:
                project = Path(folder_path)
                self.result_folder_lineEdit.setText(folder_path)
        finally:
            # Always restore the signal connection.
            button.clicked.connect(self.on_browse_result_folder_button_clicked)

    def get_db_connection(self, host=DB_HOST, user=DB_USER, password=DB_PASSWORD, database=DB_NAME):
        """Open a pymysql connection to the detection-results database.

        Fix: the keyword arguments were accepted but silently ignored — the
        body always connected with the module-level DB_* constants.  The
        parameters are now honoured, with those constants kept as defaults
        so existing callers are unaffected.
        """
        return pymysql.connect(
            host=host,
            user=user,
            password=password,
            database=database
        )

    def draw_bar_chart(self, data):
        """Render detection counts per sign type as a bar chart on self.ax.

        `data` rows are indexed as: row[1] -> sign type, row[2] -> count.
        """
        sns.set(style='darkgrid', palette='pastel')

        labels = []
        counts = []
        for record in data:
            labels.append(record[1])
            counts.append(record[2])

        # Redraw from a clean axis.
        self.ax.clear()
        self.ax.bar(labels, counts)

        self.ax.set_xlabel('Sign Types')
        self.ax.set_ylabel('Detection Counts')
        self.ax.set_title('Detection Counts by Sign Type')

        self.canvas.draw()

    def draw_line_chart(self, data):
        """Plot detection counts over time as a line chart on self.ax.

        `data` rows are indexed as: row[2] -> count, row[3] -> detection time.
        """
        sns.set(style='darkgrid', palette='pastel')

        times = [record[3] for record in data]
        counts = [record[2] for record in data]

        # Redraw from a clean axis.
        self.ax.clear()
        self.ax.plot(times, counts)

        self.ax.set_xlabel('Detection Time')
        self.ax.set_ylabel('Detection Counts')
        self.ax.set_title('Detection Counts by Time')

        self.canvas.draw()

    def draw_pie_chart(self, data, threshold=5):
        """Render detection counts per sign type as a pie chart on self.ax.

        Sign types whose count falls below `threshold` are merged into a
        single 'Other' slice to keep the chart readable.

        `data` rows are indexed as: row[1] -> sign type, row[2] -> count.
        """
        sns.set(style='darkgrid', palette='pastel')

        kept_types = []
        kept_counts = []
        other_total = 0
        for record in data:
            sign_type, count = record[1], record[2]
            if count >= threshold:
                kept_types.append(sign_type)
                kept_counts.append(count)
            else:
                other_total += count
        if other_total > 0:
            kept_types.append('Other')
            kept_counts.append(other_total)

        # Redraw from a clean axis.
        self.ax.clear()

        # Slightly separate every slice for readability.
        explode = [0.05] * len(kept_types)
        self.ax.pie(kept_counts, labels=kept_types, explode=explode,
                    autopct='%1.1f%%', shadow=True, startangle=90)

        self.ax.set_title('Detection Counts by Sign Type')

        self.canvas.draw()

    def draw_box_plot(self, data):
        sns.set(style='darkgrid', palette='pastel')

        # Extract sign_type and additional_info (confidence) from data
      
Download .txt
gitextract_f02wz10a/

├── .github/
│   └── workflows/
│       ├── jekyll-gh-pages.yml
│       └── main.yml
├── .idea/
│   ├── Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.iml
│   ├── inspectionProfiles/
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── vcs.xml
│   └── workspace.xml
├── MouseLabel.py
├── README.md
├── apprcc.qrc
├── apprcc_rc.py
├── config/
│   ├── fold.json
│   ├── ip.json
│   └── setting.json
├── data/
│   ├── doc/
│   │   ├── LICENSE
│   │   ├── README_Parameter adjustment.md
│   │   └── README_cn.md
│   ├── regn_mysql.sql
│   ├── run/
│   │   └── exp52/
│   │       ├── hyp.yaml
│   │       └── opt.yaml
│   └── scripts/
│       ├── download_weights.sh
│       ├── get_coco.sh
│       └── get_coco128.sh
├── detect.py
├── dialog/
│   ├── rtsp_dialog.py
│   ├── rtsp_dialog.ui
│   └── rtsp_win.py
├── hubconf.py
├── login_lj.py
├── main.py
├── main_win/
│   ├── login.py
│   ├── login.ui
│   ├── win.py
│   └── win.ui
├── models/
│   ├── __init__.py
│   ├── common.py
│   ├── experimental.py
│   ├── hub/
│   │   ├── anchors.yaml
│   │   ├── yolov3-spp.yaml
│   │   ├── yolov3-tiny.yaml
│   │   ├── yolov3.yaml
│   │   ├── yolov5-bifpn.yaml
│   │   ├── yolov5-fpn.yaml
│   │   ├── yolov5-p2.yaml
│   │   ├── yolov5-p34.yaml
│   │   ├── yolov5-p6.yaml
│   │   ├── yolov5-p7.yaml
│   │   ├── yolov5-panet.yaml
│   │   ├── yolov5l6.yaml
│   │   ├── yolov5m6.yaml
│   │   ├── yolov5n6.yaml
│   │   ├── yolov5s-ghost.yaml
│   │   ├── yolov5s-transformer.yaml
│   │   ├── yolov5s6.yaml
│   │   └── yolov5x6.yaml
│   ├── tf.py
│   └── yolo.py
├── pt/
│   ├── best.engine
│   ├── best.onnx
│   ├── best.pt
│   └── yolov5s.pt
├── rc_apprcc.py
├── requirements.txt
├── setup-database.bat
└── utils/
    ├── CustomMessageBox.py
    ├── __init__.py
    ├── activations.py
    ├── augmentations.py
    ├── autoanchor.py
    ├── autobatch.py
    ├── aws/
    │   ├── __init__.py
    │   ├── mime.sh
    │   ├── resume.py
    │   └── userdata.sh
    ├── benchmarks.py
    ├── cal_fps.py
    ├── callbacks.py
    ├── capnums.py
    ├── datasets.py
    ├── downloads.py
    ├── flask_rest_api/
    │   ├── README.md
    │   ├── example_request.py
    │   └── restapi.py
    ├── general.py
    ├── google_app_engine/
    │   ├── Dockerfile
    │   ├── additional_requirements.txt
    │   └── app.yaml
    ├── google_utils.py
    ├── loggers/
    │   ├── __init__.py
    │   └── wandb/
    │       ├── README.md
    │       ├── __init__.py
    │       ├── log_dataset.py
    │       ├── sweep.py
    │       ├── sweep.yaml
    │       └── wandb_utils.py
    ├── loss.py
    ├── metrics.py
    ├── plots.py
    ├── torch_utils.py
    ├── tt100k_to_voc-main/
    │   ├── 1.py
    │   ├── 1_build_voc_dir.py
    │   ├── 2_json2xml.py
    │   ├── 3_delete_jpg_and_xml.py
    │   ├── 4_spilt_data.py
    │   ├── 5_label.py
    │   ├── Not_TT45_list_train.txt
    │   ├── Not_TT45_list_val.txt
    │   ├── README.md
    │   ├── TT100K_VOC_classes.json
    │   ├── __init__.py
    │   ├── annotations_all.json
    │   └── 新建 Internet 快捷方式.url
    └── wandb_logging/
        ├── __init__.py
        ├── log_dataset.py
        ├── sweep.py
        ├── sweep.yaml
        └── wandb_utils.py
Download .txt
SYMBOL INDEX (634 symbols across 43 files)

FILE: MouseLabel.py
  class LabelMouse (line 5) | class LabelMouse(QLabel):
    method mouseDoubleClickEvent (line 9) | def mouseDoubleClickEvent(self, event):
    method mouseMoveEvent (line 12) | def mouseMoveEvent(self):
  class Label_click_Mouse (line 20) | class Label_click_Mouse(QLabel):
    method mousePressEvent (line 24) | def mousePressEvent(self, event):

FILE: data/regn_mysql.sql
  type `detection_results` (line 23) | CREATE TABLE `detection_results` (

FILE: detect.py
  function run (line 33) | def run(weights='yolov5s.pt',  # model.pt path(s)
  function parse_opt (line 195) | def parse_opt():
  function main (line 225) | def main(opt):

FILE: dialog/rtsp_dialog.py
  class Ui_Form (line 14) | class Ui_Form(object):
    method setupUi (line 15) | def setupUi(self, Form):
    method retranslateUi (line 85) | def retranslateUi(self, Form):

FILE: dialog/rtsp_win.py
  class Window (line 6) | class Window(QWidget, Ui_Form):
    method __init__ (line 7) | def __init__(self):

FILE: hubconf.py
  function _create (line 11) | def _create(name, pretrained=True, channels=3, classes=80, autoshape=Tru...
  function custom (line 65) | def custom(path='path/to/model.pt', autoshape=True, verbose=True, device...
  function yolov5s (line 70) | def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, ver...
  function yolov5m (line 75) | def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, ver...
  function yolov5l (line 80) | def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, ver...
  function yolov5x (line 85) | def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, ver...
  function yolov5s6 (line 90) | def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, ve...
  function yolov5m6 (line 95) | def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, ve...
  function yolov5l6 (line 100) | def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, ve...
  function yolov5x6 (line 105) | def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, ve...

FILE: login_lj.py
  class Login (line 7) | class Login(QtWidgets.QDialog, Ui_Dialog):
    method __init__ (line 8) | def __init__(self):
    method login (line 19) | def login(self):
    method save_credentials (line 36) | def save_credentials(self):
    method load_credentials (line 45) | def load_credentials(self):

FILE: main.py
  class DetThread (line 67) | class DetThread(QThread):
    method __init__ (line 83) | def __init__(self):
    method get_db_connection (line 99) | def get_db_connection(self, db_host=DB_HOST, db_user=DB_USER, db_passw...
    method insert_detection_result_to_database (line 106) | def insert_detection_result_to_database(self, sign_type, sign_count, a...
    method run (line 116) | def run(self,
  class AddRecordDialog (line 367) | class AddRecordDialog(QtWidgets.QDialog):
    method __init__ (line 368) | def __init__(self, parent=None):
    method get_record_data (line 387) | def get_record_data(self):
  class PymysqlTableModel (line 394) | class PymysqlTableModel(QAbstractTableModel):
    method __init__ (line 395) | def __init__(self, data, headers):
    method rowCount (line 400) | def rowCount(self, parent=None):
    method columnCount (line 403) | def columnCount(self, parent=None):
    method data (line 406) | def data(self, index, role=Qt.DisplayRole):
    method headerData (line 411) | def headerData(self, section, orientation, role=Qt.DisplayRole):
    method filter_data (line 415) | def filter_data(self, filter_function):
  class ImageAugmentor (line 427) | class ImageAugmentor(QObject):
    method __init__ (line 429) | def __init__(self):
    method process_images (line 440) | def process_images(self, input_folder: str, output_folder: str, probab...
    method load_random_image (line 480) | def load_random_image(self, input_folder: str) -> np.ndarray:
    method apply_operation (line 486) | def apply_operation(self, image: np.ndarray, operation_index: int,
    method flip (line 502) | def flip(self, image):
    method random_crop (line 508) | def random_crop(self, image):
    method random_scale (line 519) | def random_scale(self, image):
    method random_rotate (line 525) | def random_rotate(self, image):
    method random_brightness_contrast_saturation_hue (line 533) | def random_brightness_contrast_saturation_hue(self, image):
    method random_erase (line 551) | def random_erase(self, image):
    method random_noise (line 563) | def random_noise(self, image):
    method mixup (line 574) | def mixup(self, image):
    method cutout (line 588) | def cutout(self, image):
    method mosaic (line 600) | def mosaic(self, image: np.ndarray, extra_image: np.ndarray) -> np.nda...
  class MainWindow (line 626) | class MainWindow(QMainWindow, Ui_mainWindow):
    method __init__ (line 627) | def __init__(self, parent=None):
    method slider_adjustment (line 838) | def slider_adjustment(self, value):
    method apply_custom_adjustments_with_value (line 854) | def apply_custom_adjustments_with_value(self):
    method binarize_image (line 873) | def binarize_image(self, threshold=128, preview=True):
    method adjust_gamma (line 908) | def adjust_gamma(self, gamma=1.0, preview=True):
    method adjust_brightness1 (line 939) | def adjust_brightness1(self, value=1.5, preview=True):
    method adjust_hue (line 978) | def adjust_hue(self, value=1.5, preview=True):
    method adjust_saturation (line 1015) | def adjust_saturation(self, value=1.5, preview=True):
    method adjust_contrast (line 1060) | def adjust_contrast(self, alpha=1.5, preview=True):
    method adjust_brightness (line 1089) | def adjust_brightness(self, beta=50, preview=True):
    method sharpen_image (line 1112) | def sharpen_image(self, preview=True):
    method smooth_image (line 1135) | def smooth_image(self, preview=True):
    method flip_image (line 1157) | def flip_image(self,  orientation='水平翻转',preview=True):
    method histogram_equalization (line 1188) | def histogram_equalization(self, preview=True):
    method pixmap_to_cv2_image (line 1211) | def pixmap_to_cv2_image(self, pixmap):
    method cv2_image_to_pixmap (line 1224) | def cv2_image_to_pixmap(self, image):
    method morphology_operation (line 1232) | def morphology_operation(self, operation, preview=True):
    method mean_filter (line 1266) | def mean_filter(self, preview=True):
    method gaussian_filter (line 1288) | def gaussian_filter(self, preview=True):
    method median_filter (line 1310) | def median_filter(self, preview=True):
    method load_image1 (line 1332) | def load_image1(self, file_name):
    method load_image (line 1350) | def load_image(self, file_name):
    method open_folder (line 1358) | def open_folder(self):
    method select_save_folder (line 1366) | def select_save_folder(self):
    method next_image (line 1369) | def next_image(self):
    method previous_image (line 1377) | def previous_image(self):
    method save_image (line 1385) | def save_image(self):
    method apply_image_enhance (line 1406) | def apply_image_enhance(self, preview):
    method apply_image_filter (line 1423) | def apply_image_filter(self,preview):
    method apply_morphology (line 1433) | def apply_morphology(self,preview):
    method apply_histogram (line 1445) | def apply_histogram(self,preview):
    method scale_image (line 1472) | def scale_image(self):
    method rotate_image (line 1492) | def rotate_image(self):
    method show_help (line 1536) | def show_help(self):
    method checkbox_clicked (line 1606) | def checkbox_clicked(self):
    method init_progress_bar (line 1621) | def init_progress_bar(self):
    method update_progress (line 1626) | def update_progress(self, value):
    method select_input_folder (line 1635) | def select_input_folder(self):
    method select_output_folder (line 1639) | def select_output_folder(self):
    method start_processing (line 1643) | def start_processing(self):
    method open_advanced_settings (line 1663) | def open_advanced_settings(self):
    method on_visualize_checkbox_stateChanged (line 1701) | def on_visualize_checkbox_stateChanged(self, state):
    method on_databases_checkbox_stateChanged (line 1705) | def on_databases_checkbox_stateChanged(self, state):
    method on_agnostic_nmscheckbox_checkbox_stateChanged (line 1709) | def on_agnostic_nmscheckbox_checkbox_stateChanged(self, state):
    method on_halfcheckbox_2_nmscheckbox_checkbox_stateChanged (line 1713) | def on_halfcheckbox_2_nmscheckbox_checkbox_stateChanged(self, state):
    method on_save_crop_nmscheckbox_checkbox_stateChanged (line 1717) | def on_save_crop_nmscheckbox_checkbox_stateChanged(self, state):
    method on_augment_nmscheckbox_checkbox_stateChanged (line 1721) | def on_augment_nmscheckbox_checkbox_stateChanged(self, state):
    method on_imgsz_spinbox_valueChanged (line 1725) | def on_imgsz_spinbox_valueChanged(self, value):
    method on_line_thickness_spinbox_valueChanged (line 1729) | def on_line_thickness_spinbox_valueChanged(self, value):
    method update_classes (line 1733) | def update_classes(self, text):
    method on_hide_labelscheckbox_checkbox_checkbox_stateChanged (line 1740) | def on_hide_labelscheckbox_checkbox_checkbox_stateChanged(self, state):
    method on_hide_confcheckbox_checkbox_checkbox_stateChanged (line 1744) | def on_hide_confcheckbox_checkbox_checkbox_stateChanged(self, state):
    method on_browse_result_folder_button_clicked (line 1748) | def on_browse_result_folder_button_clicked(self):
    method get_db_connection (line 1764) | def get_db_connection( self, host=DB_HOST, user=DB_USER, password=DB_P...
    method draw_bar_chart (line 1772) | def draw_bar_chart(self, data):
    method draw_line_chart (line 1793) | def draw_line_chart(self, data):
    method draw_pie_chart (line 1814) | def draw_pie_chart(self, data, threshold=5):
    method draw_box_plot (line 1849) | def draw_box_plot(self, data):
    method on_analyze_button_clicked (line 1870) | def on_analyze_button_clicked(self):
    method open_stackedWidget (line 1884) | def open_stackedWidget(self, index):
    method apply_filter (line 1887) | def apply_filter(self):
    method filter_data_in_database (line 1892) | def filter_data_in_database(self, user_input):
    method on_sort_changed (line 1908) | def on_sort_changed(self):
    method sort_data_in_database (line 1923) | def sort_data_in_database(self, sort_column, qt_sort_order):
    method on_add_button_clicked (line 1946) | def on_add_button_clicked(self):
    method insert_data_to_database (line 1955) | def insert_data_to_database(self, sign_type, sign_count, additional_in...
    method on_delete_button_clicked (line 1966) | def on_delete_button_clicked(self):
    method fetch_data_from_database (line 1991) | def fetch_data_from_database(self):
    method refresh_data (line 2009) | def refresh_data(self):
    method search_pt (line 2024) | def search_pt(self):
    method is_save (line 2041) | def is_save(self):
    method checkrate (line 2049) | def checkrate(self):
    method chose_rtsp (line 2055) | def chose_rtsp(self):
    method load_rtsp (line 2071) | def load_rtsp(self, ip):
    method chose_cam (line 2086) | def chose_cam(self):
    method load_setting (line 2129) | def load_setting(self):
    method change_val (line 2168) | def change_val(self, x, flag):
    method statistic_msg (line 2187) | def statistic_msg(self, msg):
    method show_msg (line 2191) | def show_msg(self, msg):
    method change_model (line 2197) | def change_model(self, x):
    method open_file (line 2202) | def open_file(self):
    method max_or_restore (line 2222) | def max_or_restore(self):
    method run_or_continue (line 2229) | def run_or_continue(self):
    method stop (line 2246) | def stop(self):
    method mousePressEvent (line 2250) | def mousePressEvent(self, event):
    method mouseMoveEvent (line 2257) | def mouseMoveEvent(self, QMouseEvent):
    method mouseReleaseEvent (line 2261) | def mouseReleaseEvent(self, QMouseEvent):
    method show_image (line 2265) | def show_image(img_src, label):
    method show_statistic (line 2295) | def show_statistic(self, statistic_dic):
    method closeEvent (line 2306) | def closeEvent(self, event):

FILE: main_win/login.py
  class Ui_Dialog (line 6) | class Ui_Dialog(object):
    method setupUi (line 7) | def setupUi(self, Dialog):
    method retranslateUi (line 101) | def retranslateUi(self, Dialog):

FILE: main_win/win.py
  class Ui_mainWindow (line 11) | class Ui_mainWindow(object):
    method setupUi (line 12) | def setupUi(self, mainWindow):
    method retranslateUi (line 3994) | def retranslateUi(self, mainWindow):

FILE: models/common.py
  function autopad (line 31) | def autopad(k, p=None):  # kernel, padding
  class Conv (line 38) | class Conv(nn.Module):
    method __init__ (line 40) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 46) | def forward(self, x):
    method forward_fuse (line 49) | def forward_fuse(self, x):
  class DWConv (line 53) | class DWConv(Conv):
    method __init__ (line 55) | def __init__(self, c1, c2, k=1, s=1, act=True):  # ch_in, ch_out, kern...
  class TransformerLayer (line 59) | class TransformerLayer(nn.Module):
    method __init__ (line 61) | def __init__(self, c, num_heads):
    method forward (line 70) | def forward(self, x):
  class TransformerBlock (line 76) | class TransformerBlock(nn.Module):
    method __init__ (line 78) | def __init__(self, c1, c2, num_heads, num_layers):
    method forward (line 87) | def forward(self, x):
  class Bottleneck (line 95) | class Bottleneck(nn.Module):
    method __init__ (line 97) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 104) | def forward(self, x):
  class BottleneckCSP (line 108) | class BottleneckCSP(nn.Module):
    method __init__ (line 110) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 121) | def forward(self, x):
  class C3 (line 127) | class C3(nn.Module):
    method __init__ (line 129) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 138) | def forward(self, x):
  class C3TR (line 142) | class C3TR(C3):
    method __init__ (line 144) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class C3SPP (line 150) | class C3SPP(C3):
    method __init__ (line 152) | def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
  class C3Ghost (line 158) | class C3Ghost(C3):
    method __init__ (line 160) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class SPP (line 166) | class SPP(nn.Module):
    method __init__ (line 168) | def __init__(self, c1, c2, k=(5, 9, 13)):
    method forward (line 175) | def forward(self, x):
  class SPPF (line 182) | class SPPF(nn.Module):
    method __init__ (line 184) | def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
    method forward (line 191) | def forward(self, x):
  class Focus (line 200) | class Focus(nn.Module):
    method __init__ (line 202) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 207) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  class GhostConv (line 212) | class GhostConv(nn.Module):
    method __init__ (line 214) | def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out,...
    method forward (line 220) | def forward(self, x):
  class GhostBottleneck (line 225) | class GhostBottleneck(nn.Module):
    method __init__ (line 227) | def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
    method forward (line 236) | def forward(self, x):
  class Contract (line 240) | class Contract(nn.Module):
    method __init__ (line 242) | def __init__(self, gain=2):
    method forward (line 246) | def forward(self, x):
  class Expand (line 254) | class Expand(nn.Module):
    method __init__ (line 256) | def __init__(self, gain=2):
    method forward (line 260) | def forward(self, x):
  class Concat (line 268) | class Concat(nn.Module):
    method __init__ (line 270) | def __init__(self, dimension=1):
    method forward (line 274) | def forward(self, x):
  class DetectMultiBackend (line 278) | class DetectMultiBackend(nn.Module):
    method __init__ (line 280) | def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=...
    method forward (line 398) | def forward(self, im, augment=False, visualize=False, val=False):
    method warmup (line 458) | def warmup(self, imgsz=(1, 3, 640, 640), half=False):
    method model_type (line 466) | def model_type(p='path/to/model.pt'):
  class AutoShape (line 478) | class AutoShape(nn.Module):
    method __init__ (line 488) | def __init__(self, model):
    method _apply (line 496) | def _apply(self, fn):
    method forward (line 508) | def forward(self, imgs, size=640, augment=False, profile=False):
  class Detections (line 566) | class Detections:
    method __init__ (line 568) | def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, ...
    method display (line 585) | def display(self, pprint=False, show=False, save=False, crop=False, re...
    method print (line 624) | def print(self):
    method show (line 629) | def show(self):
    method save (line 632) | def save(self, save_dir='runs/detect/exp'):
    method crop (line 636) | def crop(self, save=True, save_dir='runs/detect/exp'):
    method render (line 640) | def render(self):
    method pandas (line 644) | def pandas(self):
    method tolist (line 654) | def tolist(self):
    method __len__ (line 663) | def __len__(self):
  class Classify (line 667) | class Classify(nn.Module):
    method __init__ (line 669) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, k...
    method forward (line 675) | def forward(self, x):

FILE: models/experimental.py
  class CrossConv (line 15) | class CrossConv(nn.Module):
    method __init__ (line 17) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
    method forward (line 25) | def forward(self, x):
  class Sum (line 29) | class Sum(nn.Module):
    method __init__ (line 31) | def __init__(self, n, weight=False):  # n: number of inputs
    method forward (line 38) | def forward(self, x):
  class MixConv2d (line 50) | class MixConv2d(nn.Module):
    method __init__ (line 52) | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch...
    method forward (line 71) | def forward(self, x):
  class Ensemble (line 75) | class Ensemble(nn.ModuleList):
    method __init__ (line 77) | def __init__(self):
    method forward (line 80) | def forward(self, x, augment=False, profile=False, visualize=False):
  function attempt_load (line 90) | def attempt_load(weights, map_location=None, inplace=True, fuse=True):

FILE: models/tf.py
  class TFBN (line 37) | class TFBN(keras.layers.Layer):
    method __init__ (line 39) | def __init__(self, w=None):
    method call (line 48) | def call(self, inputs):
  class TFPad (line 52) | class TFPad(keras.layers.Layer):
    method __init__ (line 53) | def __init__(self, pad):
    method call (line 57) | def call(self, inputs):
  class TFConv (line 61) | class TFConv(keras.layers.Layer):
    method __init__ (line 63) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 88) | def call(self, inputs):
  class TFFocus (line 92) | class TFFocus(keras.layers.Layer):
    method __init__ (line 94) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 99) | def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
  class TFBottleneck (line 107) | class TFBottleneck(keras.layers.Layer):
    method __init__ (line 109) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_i...
    method call (line 116) | def call(self, inputs):
  class TFConv2d (line 120) | class TFConv2d(keras.layers.Layer):
    method __init__ (line 122) | def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
    method call (line 130) | def call(self, inputs):
  class TFBottleneckCSP (line 134) | class TFBottleneckCSP(keras.layers.Layer):
    method __init__ (line 136) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 148) | def call(self, inputs):
  class TFC3 (line 154) | class TFC3(keras.layers.Layer):
    method __init__ (line 156) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 165) | def call(self, inputs):
  class TFSPP (line 169) | class TFSPP(keras.layers.Layer):
    method __init__ (line 171) | def __init__(self, c1, c2, k=(5, 9, 13), w=None):
    method call (line 178) | def call(self, inputs):
  class TFSPPF (line 183) | class TFSPPF(keras.layers.Layer):
    method __init__ (line 185) | def __init__(self, c1, c2, k=5, w=None):
    method call (line 192) | def call(self, inputs):
  class TFDetect (line 199) | class TFDetect(keras.layers.Layer):
    method __init__ (line 200) | def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None)...
    method call (line 218) | def call(self, inputs):
    method _make_grid (line 240) | def _make_grid(nx=20, ny=20):
  class TFUpsample (line 247) | class TFUpsample(keras.layers.Layer):
    method __init__ (line 248) | def __init__(self, size, scale_factor, mode, w=None):  # warning: all ...
    method call (line 257) | def call(self, inputs):
  class TFConcat (line 261) | class TFConcat(keras.layers.Layer):
    method __init__ (line 262) | def __init__(self, dimension=1, w=None):
    method call (line 267) | def call(self, inputs):
  function parse_model (line 271) | def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
  class TFModel (line 323) | class TFModel:
    method __init__ (line 324) | def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgs...
    method predict (line 340) | def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_c...
    method _xywh2xyxy (line 374) | def _xywh2xyxy(xywh):
  class AgnosticNMS (line 380) | class AgnosticNMS(keras.layers.Layer):
    method call (line 382) | def call(self, input, topk_all, iou_thres, conf_thres):
    method _nms (line 389) | def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnosti...
  function representative_dataset_gen (line 411) | def representative_dataset_gen(dataset, ncalib=100):
  function run (line 422) | def run(weights=ROOT / 'yolov5s.pt',  # weights path
  function parse_opt (line 446) | def parse_opt():
  function main (line 458) | def main(opt):

FILE: models/yolo.py
  class Detect (line 33) | class Detect(nn.Module):
    method __init__ (line 37) | def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detecti...
    method forward (line 49) | def forward(self, x):
    method _make_grid (line 72) | def _make_grid(self, nx=20, ny=20, i=0):
  class Model (line 84) | class Model(nn.Module):
    method __init__ (line 85) | def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  ...
    method forward (line 123) | def forward(self, x, augment=False, profile=False, visualize=False):
    method _forward_augment (line 128) | def _forward_augment(self, x):
    method _forward_once (line 142) | def _forward_once(self, x, profile=False, visualize=False):
    method _descale_pred (line 155) | def _descale_pred(self, p, flips, scale, img_size):
    method _clip_augmented (line 172) | def _clip_augmented(self, y):
    method _profile_one_layer (line 183) | def _profile_one_layer(self, m, x, dt):
    method _initialize_biases (line 196) | def _initialize_biases(self, cf=None):  # initialize biases into Detec...
    method _print_biases (line 206) | def _print_biases(self):
    method fuse (line 218) | def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
    method info (line 228) | def info(self, verbose=False, img_size=640):  # print model information
    method _apply (line 231) | def _apply(self, fn):
  function parse_model (line 243) | def parse_model(d, ch):  # model_dict, input_channels(3)

FILE: utils/CustomMessageBox.py
  class MessageBox (line 7) | class MessageBox(QMessageBox):
    method __init__ (line 8) | def __init__(self, *args, title='提示', count=1, time=1000, auto=False, ...
    method doCountDown (line 34) | def doCountDown(self):

FILE: utils/__init__.py
  function notebook_init (line 7) | def notebook_init(verbose=True):

FILE: utils/activations.py
  class SiLU (line 12) | class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
    method forward (line 14) | def forward(x):
  class Hardswish (line 18) | class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    method forward (line 20) | def forward(x):
  class Mish (line 26) | class Mish(nn.Module):
    method forward (line 28) | def forward(x):
  class MemoryEfficientMish (line 32) | class MemoryEfficientMish(nn.Module):
    class F (line 33) | class F(torch.autograd.Function):
      method forward (line 35) | def forward(ctx, x):
      method backward (line 40) | def backward(ctx, grad_output):
    method forward (line 46) | def forward(self, x):
  class FReLU (line 51) | class FReLU(nn.Module):
    method __init__ (line 52) | def __init__(self, c1, k=3):  # ch_in, kernel
    method forward (line 57) | def forward(self, x):
  class AconC (line 62) | class AconC(nn.Module):
    method __init__ (line 68) | def __init__(self, c1):
    method forward (line 74) | def forward(self, x):
  class MetaAconC (line 79) | class MetaAconC(nn.Module):
    method __init__ (line 85) | def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
    method forward (line 95) | def forward(self, x):

FILE: utils/augmentations.py
  class Albumentations (line 16) | class Albumentations:
    method __init__ (line 18) | def __init__(self):
    method __call__ (line 40) | def __call__(self, im, labels, p=1.0):
  function augment_hsv (line 47) | def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
  function hist_equalize (line 63) | def hist_equalize(im, clahe=True, bgr=False):
  function replicate (line 74) | def replicate(im, labels):
  function letterbox (line 91) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
  function random_perspective (line 124) | def random_perspective(im, targets=(), segments=(), degrees=10, translat...
  function copy_paste (line 213) | def copy_paste(im, labels, segments, p=0.5):
  function cutout (line 237) | def cutout(im, labels, p=0.5):
  function mixup (line 264) | def mixup(im, labels, im2, labels2):
  function box_candidates (line 272) | def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1...

FILE: utils/autoanchor.py
  function check_anchor_order (line 18) | def check_anchor_order(m):
  function check_anchors (line 28) | def check_anchors(dataset, model, thr=4.0, imgsz=640):
  function kmean_anchors (line 65) | def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=...

FILE: utils/autobatch.py
  function check_train_batch_size (line 16) | def check_train_batch_size(model, imgsz=640):
  function autobatch (line 22) | def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):

FILE: utils/benchmarks.py
  function run (line 46) | def run(weights=ROOT / 'yolov5s.pt',  # weights path
  function parse_opt (line 75) | def parse_opt():
  function main (line 86) | def main(opt):

FILE: utils/callbacks.py
  class Callbacks (line 7) | class Callbacks:
    method __init__ (line 12) | def __init__(self):
    method register_action (line 40) | def register_action(self, hook, name='', callback=None):
    method get_registered_actions (line 53) | def get_registered_actions(self, hook=None):
    method run (line 65) | def run(self, hook, *args, **kwargs):

FILE: utils/capnums.py
  class Camera (line 4) | class Camera:
    method __init__ (line 5) | def __init__(self, cam_preset_num=5):
    method get_cam_num (line 8) | def get_cam_num(self):

FILE: utils/datasets.py
  function get_hash (line 45) | def get_hash(paths):
  function exif_size (line 53) | def exif_size(img):
  function exif_transpose (line 68) | def exif_transpose(image):
  function create_dataloader (line 94) | def create_dataloader(path, imgsz, batch_size, stride, single_cls=False,...
  class InfiniteDataLoader (line 125) | class InfiniteDataLoader(dataloader.DataLoader):
    method __init__ (line 131) | def __init__(self, *args, **kwargs):
    method __len__ (line 136) | def __len__(self):
    method __iter__ (line 139) | def __iter__(self):
  class _RepeatSampler (line 144) | class _RepeatSampler:
    method __init__ (line 151) | def __init__(self, sampler):
    method __iter__ (line 154) | def __iter__(self):
  class LoadImages (line 159) | class LoadImages:
    method __init__ (line 161) | def __init__(self, path, img_size=640, stride=32, auto=True):
    method __iter__ (line 190) | def __iter__(self):
    method __next__ (line 194) | def __next__(self):
    method new_video (line 232) | def new_video(self, path):
    method __len__ (line 237) | def __len__(self):
  class LoadWebcam (line 241) | class LoadWebcam:  # for inference
    method __init__ (line 243) | def __init__(self, pipe='0', img_size=640, stride=32):
    method __iter__ (line 250) | def __iter__(self):
    method __next__ (line 254) | def __next__(self):
    method __len__ (line 279) | def __len__(self):
  class LoadStreams (line 283) | class LoadStreams:
    method __init__ (line 285) | def __init__(self, sources='streams.txt', img_size=640, stride=32, aut...
    method update (line 328) | def update(self, i, cap, stream):
    method __iter__ (line 345) | def __iter__(self):
    method __next__ (line 349) | def __next__(self):
    method __len__ (line 368) | def __len__(self):
  function img2label_paths (line 372) | def img2label_paths(img_paths):
  class LoadImagesAndLabels (line 378) | class LoadImagesAndLabels(Dataset):
    method __init__ (line 382) | def __init__(self, path, img_size=640, batch_size=16, augment=False, h...
    method cache_labels (line 509) | def cache_labels(self, path=Path('./labels.cache'), prefix=''):
    method __len__ (line 545) | def __len__(self):
    method __getitem__ (line 554) | def __getitem__(self, index):
    method load_image (line 627) | def load_image(self, i):
    method load_mosaic (line 648) | def load_mosaic(self, index):
    method load_mosaic9 (line 704) | def load_mosaic9(self, index):
    method collate_fn (line 779) | def collate_fn(batch):
    method collate_fn4 (line 786) | def collate_fn4(batch):
  function create_folder (line 813) | def create_folder(path='./new'):
  function flatten_recursive (line 820) | def flatten_recursive(path=DATASETS_DIR / 'coco128'):
  function extract_boxes (line 828) | def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.datasets...
  function autosplit (line 862) | def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0...
  function verify_image_label (line 886) | def verify_image_label(args):
  function dataset_stats (line 938) | def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False...

FILE: utils/downloads.py
  function gsutil_getsize (line 18) | def gsutil_getsize(url=''):
  function safe_download (line 24) | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
  function attempt_download (line 43) | def attempt_download(file, repo='ultralytics/yolov5'):  # from utils.dow...
  function gdrive_download (line 83) | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zi...
  function get_token (line 118) | def get_token(cookie="./cookie"):

FILE: utils/flask_rest_api/restapi.py
  function predict (line 17) | def predict():

FILE: utils/general.py
  function is_kaggle (line 50) | def is_kaggle():
  function is_writeable (line 60) | def is_writeable(dir, test=False):
  function set_logging (line 75) | def set_logging(name=None, verbose=VERBOSE):
  function user_config_dir (line 88) | def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
  class Profile (line 104) | class Profile(contextlib.ContextDecorator):
    method __enter__ (line 106) | def __enter__(self):
    method __exit__ (line 109) | def __exit__(self, type, value, traceback):
  class Timeout (line 113) | class Timeout(contextlib.ContextDecorator):
    method __init__ (line 115) | def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors...
    method _timeout_handler (line 120) | def _timeout_handler(self, signum, frame):
    method __enter__ (line 123) | def __enter__(self):
    method __exit__ (line 127) | def __exit__(self, exc_type, exc_val, exc_tb):
  class WorkingDirectory (line 133) | class WorkingDirectory(contextlib.ContextDecorator):
    method __init__ (line 135) | def __init__(self, new_dir):
    method __enter__ (line 139) | def __enter__(self):
    method __exit__ (line 142) | def __exit__(self, exc_type, exc_val, exc_tb):
  function try_except (line 146) | def try_except(func):
  function methods (line 157) | def methods(instance):
  function print_args (line 162) | def print_args(name, opt):
  function init_seeds (line 167) | def init_seeds(seed=0):
  function intersect_dicts (line 177) | def intersect_dicts(da, db, exclude=()):
  function get_latest_run (line 182) | def get_latest_run(search_dir='.'):
  function is_docker (line 188) | def is_docker():
  function is_colab (line 193) | def is_colab():
  function is_pip (line 202) | def is_pip():
  function is_ascii (line 207) | def is_ascii(s=''):
  function is_chinese (line 213) | def is_chinese(s='人工智能'):
  function emojis (line 218) | def emojis(str=''):
  function file_size (line 223) | def file_size(path):
  function check_online (line 234) | def check_online():
  function check_git_status (line 246) | def check_git_status():
  function check_python (line 265) | def check_python(minimum='3.6.2'):
  function check_version (line 270) | def check_version(current='0.0.0', minimum='0.0.0', name='version ', pin...
  function check_requirements (line 283) | def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(...
  function check_img_size (line 319) | def check_img_size(imgsz, s=32, floor=0):
  function check_imshow (line 330) | def check_imshow():
  function check_suffix (line 345) | def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
  function check_yaml (line 356) | def check_yaml(file, suffix=('.yaml', '.yml')):
  function check_file (line 361) | def check_file(file, suffix=''):
  function check_font (line 386) | def check_font(font=FONT):
  function check_dataset (line 395) | def check_dataset(data, autodownload=True):
  function url2file (line 450) | def url2file(url):
  function download (line 457) | def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
  function make_divisible (line 491) | def make_divisible(x, divisor):
  function clean_str (line 498) | def clean_str(s):
  function one_cycle (line 503) | def one_cycle(y1=0.0, y2=1.0, steps=100):
  function colorstr (line 508) | def colorstr(*input):
  function labels_to_class_weights (line 533) | def labels_to_class_weights(labels, nc=80):
  function labels_to_image_weights (line 552) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  function coco80_to_coco91_class (line 560) | def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index...
  function xyxy2xywh (line 572) | def xyxy2xywh(x):
  function xywh2xyxy (line 582) | def xywh2xyxy(x):
  function xywhn2xyxy (line 592) | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
  function xyxy2xywhn (line 602) | def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
  function xyn2xy (line 614) | def xyn2xy(x, w=640, h=640, padw=0, padh=0):
  function segment2box (line 622) | def segment2box(segment, width=640, height=640):
  function segments2boxes (line 630) | def segments2boxes(segments):
  function resample_segments (line 639) | def resample_segments(segments, n=1000):
  function scale_coords (line 648) | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
  function clip_coords (line 664) | def clip_coords(boxes, shape):
  function non_max_suppression (line 676) | def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, cla...
  function strip_optimizer (line 770) | def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; ...
  function print_mutation (line 786) | def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evol...
  function apply_classifier (line 828) | def apply_classifier(x, model, img, im0):
  function increment_path (line 864) | def increment_path(path, exist_ok=False, sep='', mkdir=False):

FILE: utils/google_utils.py
  function gsutil_getsize (line 14) | def gsutil_getsize(url=''):
  function safe_download (line 20) | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
  function attempt_download (line 39) | def attempt_download(file, repo='ultralytics/yolov5'):  # from utils.goo...
  function gdrive_download (line 76) | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zi...
  function get_token (line 111) | def get_token(cookie="./cookie"):

FILE: utils/loggers/__init__.py
  class Loggers (line 37) | class Loggers():
    method __init__ (line 39) | def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, lo...
    method on_pretrain_routine_end (line 77) | def on_pretrain_routine_end(self):
    method on_train_batch_end (line 83) | def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, s...
    method on_train_epoch_end (line 98) | def on_train_epoch_end(self, epoch):
    method on_val_image_end (line 103) | def on_val_image_end(self, pred, predn, path, names, im):
    method on_val_end (line 108) | def on_val_end(self):
    method on_fit_epoch_end (line 114) | def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
    method on_model_save (line 136) | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
    method on_train_end (line 142) | def on_train_end(self, last, best, plots, epoch, results):
    method on_params_update (line 164) | def on_params_update(self, params):

FILE: utils/loggers/wandb/log_dataset.py
  function create_dataset_artifact (line 10) | def create_dataset_artifact(opt):

FILE: utils/loggers/wandb/sweep.py
  function sweep (line 17) | def sweep():

FILE: utils/loggers/wandb/wandb_utils.py
  function remove_prefix (line 32) | def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
  function check_wandb_config_file (line 36) | def check_wandb_config_file(data_config_file):
  function check_wandb_dataset (line 43) | def check_wandb_dataset(data_file):
  function get_run_info (line 59) | def get_run_info(run_path):
  function check_wandb_resume (line 68) | def check_wandb_resume(opt):
  function process_wandb_config_ddp_mode (line 82) | def process_wandb_config_ddp_mode(opt):
  class WandbLogger (line 106) | class WandbLogger():
    method __init__ (line 120) | def __init__(self, opt, run_id=None, job_type='Training'):
    method check_and_upload_dataset (line 192) | def check_and_upload_dataset(self, opt):
    method setup_training (line 210) | def setup_training(self, opt):
    method download_dataset_artifact (line 262) | def download_dataset_artifact(self, path, alias):
    method download_model_artifact (line 282) | def download_model_artifact(self, opt):
    method log_model (line 300) | def log_model(self, path, opt, epoch, fitness_score, best_model=False):
    method log_dataset_artifact (line 324) | def log_dataset_artifact(self, data_file, single_cls, project, overwri...
    method map_val_table_path (line 381) | def map_val_table_path(self):
    method create_dataset_table (line 391) | def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_...
    method log_training_progress (line 433) | def log_training_progress(self, predn, path, names):
    method val_one_image (line 474) | def val_one_image(self, pred, predn, path, names, im):
    method log (line 496) | def log(self, log_dict):
    method end_epoch (line 507) | def end_epoch(self, best_result=False):
    method finish_run (line 539) | def finish_run(self):
  function all_logging_disabled (line 551) | def all_logging_disabled(highest_level=logging.CRITICAL):

FILE: utils/loss.py
  function smooth_BCE (line 13) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class BCEBlurWithLogitsLoss (line 18) | class BCEBlurWithLogitsLoss(nn.Module):
    method __init__ (line 20) | def __init__(self, alpha=0.05):
    method forward (line 25) | def forward(self, pred, true):
  class FocalLoss (line 35) | class FocalLoss(nn.Module):
    method __init__ (line 37) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 45) | def forward(self, pred, true):
  class QFocalLoss (line 65) | class QFocalLoss(nn.Module):
    method __init__ (line 67) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 75) | def forward(self, pred, true):
  class ComputeLoss (line 91) | class ComputeLoss:
    method __init__ (line 93) | def __init__(self, model, autobalance=False):
    method __call__ (line 117) | def __call__(self, p, targets):  # predictions, targets, model
    method build_targets (line 169) | def build_targets(self, p, targets):

FILE: utils/metrics.py
  function fitness (line 15) | def fitness(x):
  function ap_per_class (line 21) | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='....
  function compute_ap (line 89) | def compute_ap(recall, precision):
  class ConfusionMatrix (line 117) | class ConfusionMatrix:
    method __init__ (line 119) | def __init__(self, nc, conf=0.25, iou_thres=0.45):
    method process_batch (line 125) | def process_batch(self, detections, labels):
    method matrix (line 165) | def matrix(self):
    method tp_fp (line 168) | def tp_fp(self):
    method plot (line 174) | def plot(self, normalize=True, save_dir='', names=()):
    method print (line 197) | def print(self):
  function bbox_iou (line 202) | def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=Fal...
  function box_iou (line 244) | def box_iou(box1, box2):
  function bbox_ioa (line 269) | def bbox_ioa(box1, box2, eps=1E-7):
  function wh_iou (line 293) | def wh_iou(wh1, wh2):
  function plot_pr_curve (line 303) | def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
  function plot_mc_curve (line 324) | def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Con...

FILE: utils/plots.py
  class Colors (line 30) | class Colors:
    method __init__ (line 32) | def __init__(self):
    method __call__ (line 39) | def __call__(self, i, bgr=False):
    method hex2rgb (line 44) | def hex2rgb(h):  # rgb order (PIL)
  function check_pil_font (line 51) | def check_pil_font(font=FONT, size=10):
  class Annotator (line 65) | class Annotator:
    method __init__ (line 70) | def __init__(self, im, line_width=None, font_size=None, font='Arial.tt...
    method box_label (line 82) | def box_label(self, box, label='', color=(128, 128, 128), txt_color=(2...
    method rectangle (line 107) | def rectangle(self, xy, fill=None, outline=None, width=1):
    method text (line 111) | def text(self, xy, text, txt_color=(255, 255, 255)):
    method result (line 116) | def result(self):
  function feature_visualization (line 121) | def feature_visualization(x, module_type, stage, n=32, save_dir=Path('ru...
  function hist2d (line 149) | def hist2d(x, y, n=100):
  function butter_lowpass_filtfilt (line 158) | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
  function output_to_target (line 171) | def output_to_target(output):
  function plot_images (line 180) | def plot_images(images, targets, paths=None, fname='images.jpg', names=N...
  function plot_lr_scheduler (line 241) | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
  function plot_val_txt (line 258) | def plot_val_txt():  # from utils.plots import *; plot_val()
  function plot_targets_txt (line 275) | def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
  function plot_val_study (line 288) | def plot_val_study(file='', dir='', x=None):  # from utils.plots import ...
  function plot_labels (line 327) | def plot_labels(labels, names=(), save_dir=Path('')):
  function plot_evolve (line 374) | def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots im...
  function plot_results (line 401) | def plot_results(file='path/to/results.csv', dir=''):
  function profile_idetection (line 427) | def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
  function save_one_box (line 458) | def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=F...

FILE: utils/torch_utils.py
  function torch_distributed_zero_first (line 34) | def torch_distributed_zero_first(local_rank: int):
  function date_modified (line 45) | def date_modified(path=__file__):
  function git_describe (line 51) | def git_describe(path=Path(__file__).parent):  # path must be a directory
  function device_count (line 60) | def device_count():
  function select_device (line 70) | def select_device(device='', batch_size=0, newline=True):
  function time_sync (line 101) | def time_sync():
  function profile (line 108) | def profile(input, ops, n=10, device=None):
  function is_parallel (line 160) | def is_parallel(model):
  function de_parallel (line 165) | def de_parallel(model):
  function initialize_weights (line 170) | def initialize_weights(model):
  function find_modules (line 182) | def find_modules(model, mclass=nn.Conv2d):
  function sparsity (line 187) | def sparsity(model):
  function prune (line 196) | def prune(model, amount=0.3):
  function fuse_conv_and_bn (line 207) | def fuse_conv_and_bn(conv, bn):
  function model_info (line 230) | def model_info(model, verbose=False, img_size=640):
  function scale_img (line 254) | def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,...
  function copy_attr (line 267) | def copy_attr(a, b, include=(), exclude=()):
  class EarlyStopping (line 276) | class EarlyStopping:
    method __init__ (line 278) | def __init__(self, patience=30):
    method __call__ (line 284) | def __call__(self, epoch, fitness):
  class ModelEMA (line 299) | class ModelEMA:
    method __init__ (line 305) | def __init__(self, model, decay=0.9999, updates=0):
    method update (line 315) | def update(self, model):
    method update_attr (line 327) | def update_attr(self, model, include=(), exclude=('process_group', 're...

FILE: utils/tt100k_to_voc-main/1_build_voc_dir.py
  function make_voc_dir (line 5) | def make_voc_dir():

FILE: utils/tt100k_to_voc-main/2_json2xml.py
  function edit_xml (line 6) | def edit_xml(objects, id, dir):
  function getDirId (line 59) | def  getDirId(dir):  # get the  id list  of id.png
  function is_tt45 (line 71) | def is_tt45(objects):

FILE: utils/tt100k_to_voc-main/3_delete_jpg_and_xml.py
  function delete_train_jpg (line 5) | def delete_train_jpg(train_txt):
  function delete_test_jpg (line 15) | def delete_test_jpg(test_txt):
  function delete_train_xml (line 24) | def delete_train_xml(train_txt):
  function delete_test_xml (line 34) | def delete_test_xml(test_txt):

FILE: utils/tt100k_to_voc-main/5_label.py
  function convert (line 65) | def convert(size, box):
  function convert_annotation (line 81) | def convert_annotation(image_name):

FILE: utils/wandb_logging/log_dataset.py
  function create_dataset_artifact (line 10) | def create_dataset_artifact(opt):

FILE: utils/wandb_logging/sweep.py
  function sweep (line 14) | def sweep():

FILE: utils/wandb_logging/wandb_utils.py
  function remove_prefix (line 26) | def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
  function check_wandb_config_file (line 30) | def check_wandb_config_file(data_config_file):
  function get_run_info (line 37) | def get_run_info(run_path):
  function check_wandb_resume (line 46) | def check_wandb_resume(opt):
  function process_wandb_config_ddp_mode (line 60) | def process_wandb_config_ddp_mode(opt):
  class WandbLogger (line 84) | class WandbLogger():
    method __init__ (line 98) | def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
    method check_and_upload_dataset (line 145) | def check_and_upload_dataset(self, opt):
    method setup_training (line 155) | def setup_training(self, opt, data_dict):
    method download_dataset_artifact (line 192) | def download_dataset_artifact(self, path, alias):
    method download_model_artifact (line 201) | def download_model_artifact(self, opt):
    method log_model (line 213) | def log_model(self, path, opt, epoch, fitness_score, best_model=False):
    method log_dataset_artifact (line 227) | def log_dataset_artifact(self, data_file, single_cls, project, overwri...
    method map_val_table_path (line 258) | def map_val_table_path(self):
    method create_dataset_table (line 264) | def create_dataset_table(self, dataset, class_to_id, name='dataset'):
    method log_training_progress (line 295) | def log_training_progress(self, predn, path, names):
    method val_one_image (line 317) | def val_one_image(self, pred, predn, path, names, im):
    method log (line 332) | def log(self, log_dict):
    method end_epoch (line 337) | def end_epoch(self, best_result=False):
    method finish_run (line 354) | def finish_run(self):
  function all_logging_disabled (line 363) | def all_logging_disabled(highest_level=logging.CRITICAL):
Condensed preview — 118 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (5,021K chars).
[
  {
    "path": ".github/workflows/jekyll-gh-pages.yml",
    "chars": 1409,
    "preview": "# Sample workflow for building and deploying a Jekyll site to GitHub Pages\nname: Deploy Jekyll with GitHub Pages depende"
  },
  {
    "path": ".github/workflows/main.yml",
    "chars": 420,
    "preview": "name: Sync with Gitee\n\non:\n  schedule:\n    - cron: '0 0 * * *' # 每天午夜(UTC时间)运行\n\njobs:\n  sync:\n    runs-on: ubuntu-latest"
  },
  {
    "path": ".idea/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI.iml",
    "chars": 474,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n  <component name=\"NewModuleRootManager"
  },
  {
    "path": ".idea/inspectionProfiles/profiles_settings.xml",
    "chars": 174,
    "preview": "<component name=\"InspectionProjectProfileManager\">\n  <settings>\n    <option name=\"USE_PROJECT_PROFILE\" value=\"false\" />\n"
  },
  {
    "path": ".idea/misc.xml",
    "chars": 276,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"Black\">\n    <option name=\"sdkName\" value"
  },
  {
    "path": ".idea/modules.xml",
    "chars": 334,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectModuleManager\">\n    <modules>\n   "
  },
  {
    "path": ".idea/vcs.xml",
    "chars": 167,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping dire"
  },
  {
    "path": ".idea/workspace.xml",
    "chars": 10416,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"AutoImportSettings\">\n    <option name=\"a"
  },
  {
    "path": "MouseLabel.py",
    "chars": 502,
    "preview": "from PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtCore import pyqtSignal\n\n\nclass LabelMouse(QLabel):\n    double_clicked = "
  },
  {
    "path": "README.md",
    "chars": 6295,
    "preview": "<h1 align=\"center\">Road Sign Recognition Project Based on YOLOv5 (YOLOv5 GUI)</h1>\n\n<p align=\"center\">\n  <a href=\"README"
  },
  {
    "path": "apprcc.qrc",
    "chars": 2499,
    "preview": "<RCC>\n  <qresource prefix=\"img\">\n    <file>icon/yjtp-modified.png</file>\n    <file>icon/yjtp.png</file>\n    <file>C:/Use"
  },
  {
    "path": "config/fold.json",
    "chars": 42,
    "preview": "{\r\n  \"open_fold\": \"D:/Videos/OBS_Video\"\r\n}"
  },
  {
    "path": "config/ip.json",
    "chars": 53,
    "preview": "{\n  \"ip\": \"rtsp://admin:admin888@192.168.1.67:555\"\n}\n"
  },
  {
    "path": "config/setting.json",
    "chars": 84,
    "preview": "{\r\n  \"iou\": 0.41,\r\n  \"conf\": 0.46,\r\n  \"rate\": 1,\r\n  \"check\": 0,\r\n  \"savecheck\": 0\r\n}"
  },
  {
    "path": "data/doc/LICENSE",
    "chars": 35126,
    "preview": "GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation,"
  },
  {
    "path": "data/doc/README_Parameter adjustment.md",
    "chars": 30,
    "preview": "由于一些原因,后期再上传,同时包括(readme详细操作)\n"
  },
  {
    "path": "data/doc/README_cn.md",
    "chars": 2871,
    "preview": "<h1 align=\"center\">基于YOLOv5的道路标志识别项目(yolov5界面GUI)</h1>\n<p align=\"center\">\n  <a href=\"../../README.md\">English</a> |\n  <a"
  },
  {
    "path": "data/regn_mysql.sql",
    "chars": 26860,
    "preview": "/*\r\nSQLyog Community v13.2.0 (64 bit)\r\nMySQL - 8.0.32 : Database - traffic_sign_recognition\r\n***************************"
  },
  {
    "path": "data/run/exp52/hyp.yaml",
    "chars": 374,
    "preview": "lr0: 0.001\nlrf: 0.01\nmomentum: 0.937\nweight_decay: 0.0005\nwarmup_epochs: 3.0\nwarmup_momentum: 0.8\nwarmup_bias_lr: 0.1\nbo"
  },
  {
    "path": "data/run/exp52/opt.yaml",
    "chars": 631,
    "preview": "weights: runs\\train\\exp51\\weights\\best.pt\ncfg: ''\ndata: E:\\Desktop\\yolov5-6.0\\data\\1.yaml\nhyp: data\\hyps\\hyp.scratch-low"
  },
  {
    "path": "data/scripts/download_weights.sh",
    "chars": 494,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Download latest models from https://github.com/ultralytics/yolo"
  },
  {
    "path": "data/scripts/get_coco.sh",
    "chars": 877,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Download COCO 2017 dataset http://cocodataset.org\n# Example usa"
  },
  {
    "path": "data/scripts/get_coco128.sh",
    "chars": 592,
    "preview": "#!/bin/bash\n# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Download COCO128 dataset https://www.kaggle.com/ultralytics/coc"
  },
  {
    "path": "detect.py",
    "chars": 11347,
    "preview": "\"\"\"Run inference with a YOLOv5 model on images, videos, directories, streams\n\nUsage:\n    $ python path/to/detect.py --so"
  },
  {
    "path": "dialog/rtsp_dialog.py",
    "chars": 3613,
    "preview": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'rtsp_dialog.ui'\r\n#\r\n# Created by: PyQt5"
  },
  {
    "path": "dialog/rtsp_dialog.ui",
    "chars": 3622,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<ui version=\"4.0\">\r\n <class>Form</class>\r\n <widget class=\"QWidget\" name=\"Form\">\r"
  },
  {
    "path": "dialog/rtsp_win.py",
    "chars": 352,
    "preview": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom dialog.rtsp_dialog import Ui_Form\n\n\nclass Window(QWidg"
  },
  {
    "path": "hubconf.py",
    "chars": 5658,
    "preview": "\"\"\"YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/\n\nUsage:\n    import torch\n    model = torch.hub."
  },
  {
    "path": "login_lj.py",
    "chars": 1976,
    "preview": "from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\nfrom main_win.login import Ui_Dia"
  },
  {
    "path": "main.py",
    "chars": 97756,
    "preview": "import sys\r\nimport json\r\nimport numpy as np\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nimport os\r\nimport time\r"
  },
  {
    "path": "main_win/login.py",
    "chars": 6632,
    "preview": "# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_Dialog(object):\r\n    def s"
  },
  {
    "path": "main_win/login.ui",
    "chars": 6288,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<ui version=\"4.0\">\r\n <class>Dialog</class>\r\n <widget class=\"QDialog\" name=\"Dialo"
  },
  {
    "path": "main_win/win.py",
    "chars": 164843,
    "preview": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'win.ui'\r\n#\r\n# Created by: PyQt5 UI code"
  },
  {
    "path": "main_win/win.ui",
    "chars": 184067,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<ui version=\"4.0\">\r\n <class>mainWindow</class>\r\n <widget class=\"QMainWindow\" nam"
  },
  {
    "path": "models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "models/common.py",
    "chars": 33217,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCommon modules\n\"\"\"\n\nimport json\nimport math\nimport platform\nimport warnin"
  },
  {
    "path": "models/experimental.py",
    "chars": 4581,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nExperimental modules\n\"\"\"\nimport math\n\nimport numpy as np\nimport torch\nimp"
  },
  {
    "path": "models/hub/anchors.yaml",
    "chars": 3332,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Default anchors for COCO data\n\n\n# P5 --------------------------------------"
  },
  {
    "path": "models/hub/yolov3-spp.yaml",
    "chars": 1564,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov3-tiny.yaml",
    "chars": 1229,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov3.yaml",
    "chars": 1555,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5-bifpn.yaml",
    "chars": 1420,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5-fpn.yaml",
    "chars": 1211,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5-p2.yaml",
    "chars": 1684,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5-p34.yaml",
    "chars": 1346,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth"
  },
  {
    "path": "models/hub/yolov5-p6.yaml",
    "chars": 1738,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5-p7.yaml",
    "chars": 2119,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5-panet.yaml",
    "chars": 1404,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5l6.yaml",
    "chars": 1817,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth "
  },
  {
    "path": "models/hub/yolov5m6.yaml",
    "chars": 1819,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.67  # model depth"
  },
  {
    "path": "models/hub/yolov5n6.yaml",
    "chars": 1819,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth"
  },
  {
    "path": "models/hub/yolov5s-ghost.yaml",
    "chars": 1480,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth"
  },
  {
    "path": "models/hub/yolov5s-transformer.yaml",
    "chars": 1438,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth"
  },
  {
    "path": "models/hub/yolov5s6.yaml",
    "chars": 1819,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth"
  },
  {
    "path": "models/hub/yolov5x6.yaml",
    "chars": 1819,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.33  # model depth"
  },
  {
    "path": "models/tf.py",
    "chars": 20646,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nTensorFlow, Keras and TFLite versions of YOLOv5\nAuthored by https://githu"
  },
  {
    "path": "models/yolo.py",
    "chars": 14954,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nYOLO-specific modules\n\nUsage:\n    $ python path/to/models/yolo.py --cfg y"
  },
  {
    "path": "requirements.txt",
    "chars": 721,
    "preview": "# pip install -r requirements.txt\n\n# base ----------------------------------------\nmatplotlib>=3.2.2\nnumpy>=1.18.5\nopenc"
  },
  {
    "path": "setup-database.bat",
    "chars": 690,
    "preview": "@echo off\necho Creating the database...\n\nREM Set path to the SQL file\nset SQL_FILE_PATH=data/regn_mysql.sql\n\nREM Databas"
  },
  {
    "path": "utils/CustomMessageBox.py",
    "chars": 1712,
    "preview": "from PyQt5.QtCore import QTimer\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtGui import QPixmap, QIcon\n\n\n# 单按钮对话"
  },
  {
    "path": "utils/__init__.py",
    "chars": 1098,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nutils/initialization\n\"\"\"\n\n\ndef notebook_init(verbose=True):\n    # Check s"
  },
  {
    "path": "utils/activations.py",
    "chars": 3774,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nActivation functions\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch"
  },
  {
    "path": "utils/augmentations.py",
    "chars": 11730,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nImage augmentation functions\n\"\"\"\n\nimport math\nimport random\n\nimport cv2\ni"
  },
  {
    "path": "utils/autoanchor.py",
    "chars": 7309,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nAutoAnchor utils\n\"\"\"\n\nimport random\n\nimport numpy as np\nimport torch\nimpo"
  },
  {
    "path": "utils/autobatch.py",
    "chars": 2175,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nAuto-batch utils\n\"\"\"\n\nfrom copy import deepcopy\n\nimport numpy as np\nimpor"
  },
  {
    "path": "utils/aws/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "utils/aws/mime.sh",
    "chars": 780,
    "preview": "# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/\n#"
  },
  {
    "path": "utils/aws/resume.py",
    "chars": 1198,
    "preview": "# Resume all interrupted trainings in yolov5/ dir including DDP trainings\n# Usage: $ python utils/aws/resume.py\n\nimport "
  },
  {
    "path": "utils/aws/userdata.sh",
    "chars": 1247,
    "preview": "#!/bin/bash\n# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n# This "
  },
  {
    "path": "utils/benchmarks.py",
    "chars": 3807,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nRun YOLOv5 benchmarks on all supported export formats\n\nFormat            "
  },
  {
    "path": "utils/cal_fps.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "utils/callbacks.py",
    "chars": 2462,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCallback utils\n\"\"\"\n\n\nclass Callbacks:\n    \"\"\"\"\n    Handles all registered"
  },
  {
    "path": "utils/capnums.py",
    "chars": 643,
    "preview": "import cv2\n\n\nclass Camera:\n    def __init__(self, cam_preset_num=5):\n        self.cam_preset_num = cam_preset_num\n\n    d"
  },
  {
    "path": "utils/datasets.py",
    "chars": 45903,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nDataloaders and dataset utils\n\"\"\"\n\nimport glob\nimport hashlib\nimport json"
  },
  {
    "path": "utils/downloads.py",
    "chars": 6289,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nDownload utils\n\"\"\"\n\nimport os\nimport platform\nimport subprocess\nimport ti"
  },
  {
    "path": "utils/flask_rest_api/README.md",
    "chars": 1710,
    "preview": "# Flask REST API\n\n[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/w"
  },
  {
    "path": "utils/flask_rest_api/example_request.py",
    "chars": 299,
    "preview": "\"\"\"Perform test request\"\"\"\nimport pprint\n\nimport requests\n\nDETECTION_URL = \"http://localhost:5000/v1/object-detection/yo"
  },
  {
    "path": "utils/flask_rest_api/restapi.py",
    "chars": 1078,
    "preview": "\"\"\"\nRun a rest API exposing the yolov5s object detection model\n\"\"\"\nimport argparse\nimport io\n\nimport torch\nfrom flask im"
  },
  {
    "path": "utils/general.py",
    "chars": 36457,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nGeneral utils\n\"\"\"\n\nimport contextlib\nimport glob\nimport logging\nimport ma"
  },
  {
    "path": "utils/google_app_engine/Dockerfile",
    "chars": 821,
    "preview": "FROM gcr.io/google-appengine/python\n\n# Create a virtualenv for dependencies. This isolates these packages from\n# system-"
  },
  {
    "path": "utils/google_app_engine/additional_requirements.txt",
    "chars": 105,
    "preview": "# add these requirements in your app on top of the existing ones\npip==21.1\nFlask==1.0.2\ngunicorn==19.9.0\n"
  },
  {
    "path": "utils/google_app_engine/app.yaml",
    "chars": 174,
    "preview": "runtime: custom\nenv: flex\n\nservice: yolov5app\n\nliveness_check:\n  initial_delay_sec: 600\n\nmanual_scaling:\n  instances: 1\n"
  },
  {
    "path": "utils/google_utils.py",
    "chars": 5962,
    "preview": "# Google utils: https://cloud.google.com/storage/docs/reference/libraries\n\nimport os\nimport platform\nimport subprocess\ni"
  },
  {
    "path": "utils/loggers/__init__.py",
    "chars": 7618,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nLogging utils\n\"\"\"\n\nimport os\nimport warnings\nfrom threading import Thread"
  },
  {
    "path": "utils/loggers/wandb/README.md",
    "chars": 10815,
    "preview": "📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021.\n* [About Weights &"
  },
  {
    "path": "utils/loggers/wandb/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "utils/loggers/wandb/log_dataset.py",
    "chars": 1032,
    "preview": "import argparse\n\nfrom wandb_utils import WandbLogger\n\nfrom utils.general import LOGGER\n\nWANDB_ARTIFACT_PREFIX = 'wandb-a"
  },
  {
    "path": "utils/loggers/wandb/sweep.py",
    "chars": 1142,
    "preview": "import sys\nfrom pathlib import Path\n\nimport wandb\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[3]  # YOLOv5 root"
  },
  {
    "path": "utils/loggers/wandb/sweep.yaml",
    "chars": 2463,
    "preview": "# Hyperparameters for training\n# To set range-\n# Provide min and max values as:\n#      parameter:\n#\n#         min: scala"
  },
  {
    "path": "utils/loggers/wandb/wandb_utils.py",
    "chars": 27147,
    "preview": "\"\"\"Utilities and tools for tracking runs with Weights & Biases.\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom contextlib "
  },
  {
    "path": "utils/loss.py",
    "chars": 9616,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nLoss functions\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.metric"
  },
  {
    "path": "utils/metrics.py",
    "chars": 14057,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nModel validation metrics\n\"\"\"\n\nimport math\nimport warnings\nfrom pathlib im"
  },
  {
    "path": "utils/plots.py",
    "chars": 20523,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nPlotting utils\n\"\"\"\n\nimport math\nimport os\nfrom copy import copy\nfrom path"
  },
  {
    "path": "utils/torch_utils.py",
    "chars": 13850,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nPyTorch utils\n\"\"\"\n\nimport datetime\nimport math\nimport os\nimport platform\n"
  },
  {
    "path": "utils/tt100k_to_voc-main/1.py",
    "chars": 289,
    "preview": "# @Time : 2023-02-18 21:15\n# @Author : AItrainee\n# @File : 1.py\n#计算E:\\Desktop\\tt100k_to_voc-main\\xmlLabel1\\train下的xml文件的"
  },
  {
    "path": "utils/tt100k_to_voc-main/1_build_voc_dir.py",
    "chars": 356,
    "preview": "import os\n\n# 建立相关文件夹\n# build voc2007 folder structure\ndef make_voc_dir():\n    root_dir = os.getcwd()\n    os.makedirs(roo"
  },
  {
    "path": "utils/tt100k_to_voc-main/2_json2xml.py",
    "chars": 4147,
    "preview": "import os\nimport json\nfrom lxml import etree as ET\nfrom xml.dom import minidom\n#找出训练集和测试集中的不在45类的标注图片的id\ndef edit_xml(ob"
  },
  {
    "path": "utils/tt100k_to_voc-main/3_delete_jpg_and_xml.py",
    "chars": 1475,
    "preview": "import os\nimport glob\n\n# 删除txt中id对应XML和图片\ndef delete_train_jpg(train_txt):\n    root_dir = os.getcwd()\n    for line in op"
  },
  {
    "path": "utils/tt100k_to_voc-main/4_spilt_data.py",
    "chars": 803,
    "preview": "import os\nimport random\n\n\nfiles_path = r\"E:\\Desktop\\tt100k_to_voc-main\\xmlLabel1\\test\"\nif not os.path.exists(files_path)"
  },
  {
    "path": "utils/tt100k_to_voc-main/5_label.py",
    "chars": 2346,
    "preview": "import xml.etree.ElementTree as ET\n\nimport pickle\nimport os\nfrom os import listdir, getcwd\nfrom os.path import join\nimpo"
  },
  {
    "path": "utils/tt100k_to_voc-main/Not_TT45_list_train.txt",
    "chars": 8716,
    "preview": "95843\n10823\n28337\n16027\n78355\n57562\n4224\n66435\n91093\n20447\n7520\n87281\n19858\n18830\n69123\n38804\n44829\n49574\n61204\n4388\n600"
  },
  {
    "path": "utils/tt100k_to_voc-main/Not_TT45_list_val.txt",
    "chars": 4124,
    "preview": "40317\n81417\n53149\n86340\n49324\n5568\n97776\n72200\n28988\n73250\n79688\n3299\n50756\n41940\n89453\n91061\n4348\n93521\n61238\n84963\n230"
  },
  {
    "path": "utils/tt100k_to_voc-main/README.md",
    "chars": 1,
    "preview": "\n"
  },
  {
    "path": "utils/tt100k_to_voc-main/TT100K_VOC_classes.json",
    "chars": 632,
    "preview": "{\n    \"i2\":1,\n    \"i4\":2,\n    \"i5\":3,\n    \"il100\":4,\n    \"il60\":5,\n    \"il80\":6,\n    \"io\":7,\n    \"ip\":8,\n    \"p10\":9,\n  "
  },
  {
    "path": "utils/tt100k_to_voc-main/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "utils/tt100k_to_voc-main/annotations_all.json",
    "chars": 3482498,
    "preview": "{\"types\": [\"pl80\", \"w9\", \"p6\", \"ph4.2\", \"i8\", \"w14\", \"w33\", \"pa13\", \"im\", \"w58\", \"pl90\", \"il70\", \"p5\", \"pm55\", \"pl60\", \""
  },
  {
    "path": "utils/tt100k_to_voc-main/新建 Internet 快捷方式.url",
    "chars": 143,
    "preview": "[{000214A0-0000-0000-C000-000000000046}]\nProp3=19,11\n[InternetShortcut]\nIDList=\nURL=https://blog.csdn.net/Hankerchen/art"
  },
  {
    "path": "utils/wandb_logging/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "utils/wandb_logging/log_dataset.py",
    "chars": 870,
    "preview": "import argparse\n\nimport yaml\n\nfrom wandb_utils import WandbLogger\n\nWANDB_ARTIFACT_PREFIX = 'wandb-artifact://'\n\n\ndef cre"
  },
  {
    "path": "utils/wandb_logging/sweep.py",
    "chars": 877,
    "preview": "import sys\nfrom pathlib import Path\nimport wandb\n\nFILE = Path(__file__).absolute()\nsys.path.append(FILE.parents[2].as_po"
  },
  {
    "path": "utils/wandb_logging/sweep.yaml",
    "chars": 2488,
    "preview": "# Hyperparameters for training\n# To set range- \n# Provide min and max values as:\n#      parameter:\n#         \n#         "
  },
  {
    "path": "utils/wandb_logging/wandb_utils.py",
    "chars": 19498,
    "preview": "\"\"\"Utilities and tools for tracking runs with Weights & Biases.\"\"\"\nimport logging\nimport os\nimport sys\nfrom contextlib i"
  }
]

// ... and 6 more files (download for full content)

About this extraction

This page contains the full source code of the Ai-trainee/Traffic-Sign-Recognition-PyQt5-YOLOv5-GUI GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 118 files (95.5 MB), approximately 1.1M tokens, and a symbol index with 634 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!