Repository: yeyupiaoling/AudioClassification-Pytorch Branch: master Commit: c63d284a8895 Files: 37 Total size: 127.9 KB Directory structure: gitextract_qa6r0r9g/ ├── .gitignore ├── LICENSE ├── README.md ├── README_en.md ├── configs/ │ ├── augmentation.yml │ ├── cam++.yml │ ├── ecapa_tdnn.yml │ ├── eres2net.yml │ ├── panns.yml │ ├── res2net.yml │ ├── resnet_se.yml │ └── tdnn.yml ├── create_data.py ├── eval.py ├── extract_features.py ├── infer.py ├── infer_record.py ├── macls/ │ ├── __init__.py │ ├── data_utils/ │ │ ├── __init__.py │ │ ├── collate_fn.py │ │ ├── featurizer.py │ │ └── reader.py │ ├── metric/ │ │ ├── __init__.py │ │ └── metrics.py │ ├── optimizer/ │ │ ├── __init__.py │ │ └── scheduler.py │ ├── predict.py │ ├── trainer.py │ └── utils/ │ ├── __init__.py │ ├── checkpoint.py │ ├── record.py │ └── utils.py ├── record_audio.py ├── requirements.txt ├── setup.py ├── tools/ │ └── download_language_data.sh └── train.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ __pycache__/ .idea/ build/ dist/ macls.egg-info/ dataset/ log/ output/ models/ pretrained_models/ feature_models/ temp/ test*.py ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ 简体中文 | [English](./README_en.md) # 基于Pytorch实现的声音分类系统 ![python version](https://img.shields.io/badge/python-3.8+-orange.svg) ![GitHub forks](https://img.shields.io/github/forks/yeyupiaoling/AudioClassification-Pytorch) ![GitHub Repo stars](https://img.shields.io/github/stars/yeyupiaoling/AudioClassification-Pytorch) ![GitHub](https://img.shields.io/github/license/yeyupiaoling/AudioClassification-Pytorch) ![支持系统](https://img.shields.io/badge/支持系统-Win/Linux/MAC-9cf) # 前言 本项目是基于Pytorch的声音分类项目,旨在实现对各种环境声音、动物叫声和语种的识别。项目提供了多种声音分类模型,如EcapaTdnn、PANNS、ResNetSE、CAMPPlus和ERes2Net,以支持不同的应用场景。此外,项目还提供了常用的Urbansound8K数据集测试报告和一些方言数据集的下载和使用例子。用户可以根据自己的需求选择适合的模型和数据集,以实现更准确的声音分类。项目的应用场景广泛,可以用于室外的环境监测、野生动物保护、语音识别等领域。同时,项目也鼓励用户探索更多的使用场景,以推动声音分类技术的发展和应用。 **欢迎大家扫码入知识星球或者QQ群讨论,知识星球里面提供项目的模型文件和博主其他相关项目的模型文件,也包括其他一些资源。**
知识星球 QQ群
# 目录 - [前言](#前言) - [项目特性](#项目特性) - [模型测试表](#模型测试表) - [安装环境](#安装环境) - [创建数据](#创建数据) - [修改预处理方法(可选)](#修改预处理方法可选) - [提取特征(可选)](#提取特征可选) - [训练模型](#训练模型) - [评估模型](#评估模型) - [预测](#预测) - [其他功能](#其他功能) # 使用准备 - Anaconda 3 - Python 3.11 - Pytorch 2.0.1 - Windows 11 or Ubuntu 22.04 # 项目特性 1. 支持模型:EcapaTdnn、PANNS、TDNN、Res2Net、ResNetSE、CAMPPlus、ERes2Net 2. 支持池化层:AttentiveStatsPool(ASP)、SelfAttentivePooling(SAP)、TemporalStatisticsPooling(TSP)、TemporalAveragePooling(TAP) 4. 支持预处理方法:MelSpectrogram、Spectrogram、MFCC、Fbank、Wav2vec2.0、WavLM **模型论文:** - EcapaTdnn:[ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification](https://arxiv.org/abs/2005.07143v3) - PANNS:[PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition](https://arxiv.org/abs/1912.10211v5) - TDNN:[Prediction of speech intelligibility with DNN-based performance measures](https://arxiv.org/abs/2203.09148) - Res2Net:[Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169) - ResNetSE:[Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507) - CAMPPlus:[CAM++: A Fast and Efficient Network for Speaker Verification Using Context-Aware Masking](https://arxiv.org/abs/2303.00332v3) - ERes2Net:[An Enhanced Res2Net with Local and Global Feature Fusion for Speaker Verification](https://arxiv.org/abs/2305.12838v1) # 模型测试表 | 模型 | Params(M) | 预处理方法 | 数据集 | 类别数量 | 准确率 | 获取模型 | |:------------:|:---------:|:-----:|:------------:|:----:|:-------:|:--------:| | ResNetSE | 7.8 | Flank | UrbanSound8K | 10 | 0.96233 | 加入知识星球获取 | | ERes2NetV2 | 5.4 | Flank | UrbanSound8K | 10 | 0.95662 | 加入知识星球获取 | | CAMPPlus | 7.1 | Flank | UrbanSound8K | 10 | 0.95454 | 加入知识星球获取 | | EcapaTdnn | 6.4 | Flank | UrbanSound8K | 10 | 0.95227 | 加入知识星球获取 | | ERes2Net | 6.6 | Flank | UrbanSound8K | 10 | 0.94292 | 加入知识星球获取 | | TDNN | 2.6 | Flank | UrbanSound8K | 10 | 0.93977 | 加入知识星球获取 | | PANNS(CNN10) | 5.2 | Flank | UrbanSound8K | 10 | 0.92954 | 加入知识星球获取 | 
| Res2Net | 5.0 | Flank | UrbanSound8K | 10 | 0.92580 | 加入知识星球获取 | **说明:** 1. 使用的测试集为从数据集中每10条音频取一条,共874条。 ## 安装环境 - 首先安装的是Pytorch的GPU版本,如果已经安装过了,请跳过。 ```shell conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=11.8 -c pytorch -c nvidia ``` - 安装macls库。 使用pip安装,命令如下: ```shell python -m pip install macls -U -i https://pypi.tuna.tsinghua.edu.cn/simple ``` **建议源码安装**,源码安装能保证使用最新代码。 ```shell git clone https://github.com/yeyupiaoling/AudioClassification-Pytorch.git cd AudioClassification-Pytorch/ pip install . ``` ## 创建数据 生成数据列表,用于下一步的读取需要,`audio_path`为音频文件路径,用户需要提前把音频数据集存放在`dataset/audio`目录下,每个文件夹存放一个类别的音频数据,每条音频数据长度在3秒以上,如 `dataset/audio/鸟叫声/······`。`audio`是数据列表存放的位置,生成的数据类别的格式为 `音频路径\t音频对应的类别标签`,音频路径和标签用制表符 `\t`分开。读者也可以根据自己存放数据的方式修改以下函数。 以Urbansound8K为例,Urbansound8K是目前应用较为广泛的用于自动城市环境声分类研究的公共数据集,包含10个分类:空调声、汽车鸣笛声、儿童玩耍声、狗叫声、钻孔声、引擎空转声、枪声、手提钻、警笛声和街道音乐声。数据集下载地址:[UrbanSound8K.tar.gz](https://aistudio.baidu.com/aistudio/datasetdetail/36625)。以下是针对Urbansound8K生成数据列表的函数。如果读者想使用该数据集,请下载并解压到 `dataset`目录下,把生成数据列表代码改为以下代码。 执行`create_data.py`即可生成数据列表,里面提供了生成多种数据集列表方式,具体看代码。 ```shell python create_data.py ``` 生成的列表是长这样的,前面是音频的路径,后面是该音频对应的标签,从0开始,路径和标签之间用`\t`隔开。 ```shell dataset/UrbanSound8K/audio/fold2/104817-4-0-2.wav 4 dataset/UrbanSound8K/audio/fold9/105029-7-2-5.wav 7 dataset/UrbanSound8K/audio/fold3/107228-5-0-0.wav 5 dataset/UrbanSound8K/audio/fold4/109711-3-2-4.wav 3 ``` # 修改预处理方法(可选) 配置文件中默认使用的是Fbank预处理方法,如果要使用其他预处理方法,可以修改配置文件中的安装下面方式修改,具体的值可以根据自己情况修改。如果不清楚如何设置参数,可以直接删除该部分,直接使用默认值。 ```yaml # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 ``` # 
提取特征(可选) 在训练过程中,首先是要读取音频数据,然后提取特征,最后再进行训练。其中读取音频数据、提取特征也是比较消耗时间的,所以我们可以选择提前提取好取特征,训练模型的是就可以直接加载提取好的特征,这样训练速度会更快。这个提取特征是可选择,如果没有提取好的特征,训练模型的时候就会从读取音频数据,然后提取特征开始。提取特征步骤如下: 1. 执行`extract_features.py`,提取特征,特征会保存在`dataset/features`目录下,并生成新的数据列表`train_list_features.txt`和`test_list_features.txt`。 ```shell python extract_features.py --configs=configs/cam++.yml --save_dir=dataset/features ``` 2. 修改配置文件,将`dataset_conf.train_list`和`dataset_conf.test_list`修改为`train_list_features.txt`和`test_list_features.txt`。 ## 训练模型 接着就可以开始训练模型了,创建 `train.py`。配置文件里面的参数一般不需要修改,但是这几个是需要根据自己实际的数据集进行调整的,首先最重要的就是分类大小`dataset_conf.num_class`,这个每个数据集的分类大小可能不一样,根据自己的实际情况设定。然后是`dataset_conf.batch_size`,如果是显存不够的话,可以减小这个参数。 ```shell # 单卡训练 CUDA_VISIBLE_DEVICES=0 python train.py # 多卡训练 CUDA_VISIBLE_DEVICES=0,1 torchrun --standalone --nnodes=1 --nproc_per_node=2 train.py ``` 训练输出日志: ``` [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:14 - ----------- 额外配置参数 ----------- [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - configs: configs/ecapa_tdnn.yml [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - local_rank: 0 [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - pretrained_model: None [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - resume_model: None [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - save_model_path: models/ [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - use_gpu: True [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:17 - ------------------------------------------------ [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:19 - ----------- 配置文件参数 ----------- [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:22 - dataset_conf: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - aug_conf: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - noise_aug_prob: 0.2 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - noise_dir: dataset/noise [2023-08-07 22:54:22.202166 
INFO ] utils:print_arguments:27 - speed_perturb: True [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - volume_aug_prob: 0.2 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - volume_perturb: False [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - dataLoader: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - batch_size: 64 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - num_workers: 4 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - do_vad: False [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - eval_conf: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - batch_size: 1 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - max_duration: 20 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - label_list_path: dataset/label_list.txt [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - max_duration: 3 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - min_duration: 0.5 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - sample_rate: 16000 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - spec_aug_args: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - freq_mask_width: [0, 8] [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - time_mask_width: [0, 10] [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - target_dB: -20 [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - test_list: dataset/test_list.txt [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - train_list: dataset/train_list.txt [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - use_dB_normalization: True [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - use_spec_aug: True [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:22 - model_conf: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - num_class: 10 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - 
pooling_type: ASP [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:22 - optimizer_conf: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - learning_rate: 0.001 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - optimizer: Adam [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - scheduler: WarmupCosineSchedulerLR [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:25 - scheduler_args: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:27 - max_lr: 0.001 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:27 - min_lr: 1e-05 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:27 - warmup_epoch: 5 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - weight_decay: 1e-06 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:22 - preprocess_conf: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - feature_method: Fbank [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:25 - method_args: [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:27 - num_mel_bins: 80 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:27 - sample_frequency: 16000 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:22 - train_conf: [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:29 - log_interval: 10 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:29 - max_epoch: 30 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:31 - use_model: EcapaTdnn [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:32 - ------------------------------------------------ [2023-08-07 22:54:22.213166 WARNING] trainer:__init__:67 - Windows系统不支持多线程读取数据,已自动关闭! 
========================================================================================== Layer (type:depth-idx) Output Shape Param # ========================================================================================== EcapaTdnn [1, 10] -- ├─Conv1dReluBn: 1-1 [1, 512, 98] -- │ └─Conv1d: 2-1 [1, 512, 98] 204,800 │ └─BatchNorm1d: 2-2 [1, 512, 98] 1,024 ├─Sequential: 1-2 [1, 512, 98] -- │ └─Conv1dReluBn: 2-3 [1, 512, 98] -- │ │ └─Conv1d: 3-1 [1, 512, 98] 262,144 │ │ └─BatchNorm1d: 3-2 [1, 512, 98] 1,024 │ └─Res2Conv1dReluBn: 2-4 [1, 512, 98] -- │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) ··································· │ │ └─ModuleList: 3-56 -- (recursive) │ │ └─ModuleList: 3-55 -- (recursive) │ │ └─ModuleList: 3-56 -- (recursive) │ │ └─ModuleList: 3-55 -- (recursive) │ │ └─ModuleList: 3-56 -- (recursive) │ └─Conv1dReluBn: 2-13 [1, 512, 98] -- │ │ └─Conv1d: 3-57 [1, 512, 98] 262,144 │ │ └─BatchNorm1d: 3-58 [1, 512, 98] 1,024 │ └─SE_Connect: 2-14 [1, 512, 98] -- │ │ └─Linear: 3-59 [1, 256] 131,328 │ │ └─Linear: 3-60 [1, 512] 131,584 ├─Conv1d: 1-5 [1, 1536, 98] 2,360,832 ├─AttentiveStatsPool: 1-6 [1, 3072] -- │ └─Conv1d: 2-15 [1, 128, 98] 196,736 │ └─Conv1d: 2-16 [1, 1536, 98] 198,144 ├─BatchNorm1d: 1-7 [1, 3072] 6,144 ├─Linear: 1-8 [1, 192] 590,016 ├─BatchNorm1d: 1-9 [1, 192] 384 ├─Linear: 1-10 [1, 10] 1,930 ========================================================================================== Total params: 6,188,490 Trainable params: 6,188,490 Non-trainable params: 0 Total mult-adds (M): 470.96 ========================================================================================== Input size (MB): 0.03 Forward/backward 
pass size (MB): 10.28 Params size (MB): 24.75 Estimated Total Size (MB): 35.07 ========================================================================================== [2023-08-07 22:54:26.726095 INFO ] trainer:train:344 - 训练数据:8644 [2023-08-07 22:54:30.092504 INFO ] trainer:__train_epoch:296 - Train epoch: [1/30], batch: [0/4], loss: 2.57033, accuracy: 0.06250, learning rate: 0.00001000, speed: 19.02 data/sec, eta: 0:06:43 ``` **训练可视化:** 项目的根目录执行下面命令,并网页访问`http://localhost:8040/`,如果是服务器,需要修改`localhost`为服务器的IP地址。 ```shell visualdl --logdir=log --host=0.0.0.0 ``` 打开的网页如下:
混淆矩阵
# 评估模型 执行下面命令进行评估。 ```shell python eval.py --configs=configs/cam++.yml ``` 评估输出如下: ```shell [2024-02-03 15:13:25.469242 INFO ] trainer:evaluate:461 - 成功加载模型:models/CAMPPlus_Fbank/best_model/model.pth 100%|██████████████████████████████| 150/150 [00:00<00:00, 1281.96it/s] 评估消耗时间:1s,loss:0.61840,accuracy:0.87333 ``` 评估会输出准确率,还保存了混淆矩阵图片,保存路径`output/images/`,如下。
混淆矩阵
注意:如果类别标签是中文的,需要安装字体才能正常显示,一般情况下Windows无需安装,Ubuntu需要安装。如果Windows确实是缺少字体,只需要到[字体文件](https://github.com/tracyone/program_font)这里下载`.ttf`格式的文件,复制到`C:\Windows\Fonts`即可。Ubuntu系统操作如下。 1. 安装字体 ```shell git clone https://github.com/tracyone/program_font && cd program_font && ./install.sh ``` 2. 执行下面Python代码 ```python import matplotlib import shutil import os path = matplotlib.matplotlib_fname() path = path.replace('matplotlibrc', 'fonts/ttf/') print(path) shutil.copy('/usr/share/fonts/MyFonts/simhei.ttf', path) user_dir = os.path.expanduser('~') shutil.rmtree(f'{user_dir}/.cache/matplotlib', ignore_errors=True) ``` # 预测 在训练结束之后,我们得到了一个模型参数文件,我们使用这个模型预测音频。 ```shell python infer.py --audio_path=dataset/UrbanSound8K/audio/fold5/156634-5-2-5.wav ``` # 其他功能 - 为了方便读取录制数据和制作数据集,这里提供了录音程序`record_audio.py`,这个用于录制音频,录制的音频采样率为16000,单通道,16bit。 ```shell python record_audio.py ``` - `infer_record.py`这个程序是用来不断进行录音识别,我们可以大致理解为这个程序在实时录音识别。通过这个应用我们可以做一些比较有趣的事情,比如把麦克风放在小鸟经常来的地方,通过实时录音识别,一旦识别到有鸟叫的声音,如果你的数据集足够强大,有每种鸟叫的声音数据集,这样你还能准确识别是哪种鸟叫。如果识别到目标鸟类,就启动程序,例如拍照等等。 ```shell python infer_record.py --record_seconds=3 ``` ## 打赏作者

打赏一块钱支持一下作者

打赏作者
# 参考资料 1. https://github.com/PaddlePaddle/PaddleSpeech 2. https://github.com/yeyupiaoling/PaddlePaddle-MobileFaceNets 3. https://github.com/yeyupiaoling/PPASR 4. https://github.com/alibaba-damo-academy/3D-Speaker ================================================ FILE: README_en.md ================================================ [简体中文](./README.md) | English # Sound classification system implemented in Pytorch ![python version](https://img.shields.io/badge/python-3.8+-orange.svg) ![GitHub forks](https://img.shields.io/github/forks/yeyupiaoling/AudioClassification-Pytorch) ![GitHub Repo stars](https://img.shields.io/github/stars/yeyupiaoling/AudioClassification-Pytorch) ![GitHub](https://img.shields.io/github/license/yeyupiaoling/AudioClassification-Pytorch) ![支持系统](https://img.shields.io/badge/支持系统-Win/Linux/MAC-9cf) **Disclaimer, this document was obtained through machine translation, please check the original document [here](./README.md).** # Introduction This project is a sound classification project based on Pytorch, aiming to realize the recognition of various environmental sounds, animal calls and languages. Several sound classification models such as EcapaTdnn, PANNS, ResNetSE, CAMPPlus, and ERes2Net are provided to support different application scenarios. In addition, the project also provides the commonly used Urbansound8K dataset test report and some dialect datasets download and use examples. Users can choose suitable models and datasets according to their needs to achieve more accurate sound classification. The project has a wide range of application scenarios, and can be used in outdoor environmental monitoring, wildlife protection, speech recognition and other fields. At the same time, the project also encourages users to explore more usage scenarios to promote the development and application of sound classification technology. # Environment - Anaconda 3 - Python 3.11 - Pytorch 2.0.1 - Windows 11 or Ubuntu 22.04 # Project Features 1. 
Supporting models: EcapaTdnn、PANNS、TDNN、Res2Net、ResNetSE、CAMPPlus、ERes2Net 2. Supporting pooling: AttentiveStatsPool(ASP)、SelfAttentivePooling(SAP)、TemporalStatisticsPooling(TSP)、TemporalAveragePooling(TAP) 3. Support preprocessing methods: MelSpectrogram、Spectrogram、MFCC、Fbank、Wav2vec2.0、WavLM **Model Paper:** - EcapaTdnn:[ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification](https://arxiv.org/abs/2005.07143v3) - PANNS:[PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition](https://arxiv.org/abs/1912.10211v5) - TDNN:[Prediction of speech intelligibility with DNN-based performance measures](https://arxiv.org/abs/2203.09148) - Res2Net:[Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169) - ResNetSE:[Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507) - CAMPPlus:[CAM++: A Fast and Efficient Network for Speaker Verification Using Context-Aware Masking](https://arxiv.org/abs/2303.00332v3) - ERes2Net:[An Enhanced Res2Net with Local and Global Feature Fusion for Speaker Verification](https://arxiv.org/abs/2305.12838v1) # Model Test | Model | Params(M) | Preprocessing method | Dataset | Number Class | Accuracy | |:------------:|:---------:|:--------------------:|:------------:|:------------:|:--------:| | ResNetSE | 7.8 | Flank | UrbanSound8K | 10 | 0.96233 | | ERes2NetV2 | 5.4 | Flank | UrbanSound8K | 10 | 0.95662 | | CAMPPlus | 7.1 | Flank | UrbanSound8K | 10 | 0.95454 | | EcapaTdnn | 6.4 | Flank | UrbanSound8K | 10 | 0.95227 | | ERes2Net | 6.6 | Flank | UrbanSound8K | 10 | 0.94292 | | TDNN | 2.6 | Flank | UrbanSound8K | 10 | 0.93977 | | PANNS(CNN10) | 5.2 | Flank | UrbanSound8K | 10 | 0.92954 | | Res2Net | 5.0 | Flank | UrbanSound8K | 10 | 0.92580 | ## Installation Environment - The GPU version of Pytorch will be installed first, please skip it if you already have it installed. 
```shell conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=11.8 -c pytorch -c nvidia ``` - Install macls. Install it using pip with the following command: ```shell python -m pip install macls -U -i https://pypi.tuna.tsinghua.edu.cn/simple ``` **Source installation is recommended**, which ensures that the latest code is used. ```shell git clone https://github.com/yeyupiaoling/AudioClassification_Pytorch.git cd AudioClassification_Pytorch/ python setup.py install ``` ## Preparing Data The `audio_path` is the audio file path. The user needs to store the audio dataset in the `dataset/audio` directory in advance. Each folder stores a category of audio data, and the length of each audio data is more than 3 seconds. For example, `dataset/audio/ bird song /······`. `audio` is where the data list is stored, and the format of the generated data category is`audio_path\tcategory_label_audio`, and the audio path and label are separated by a TAB character `\t`. You can also modify the following functions depending on how you store your data: Taking Urbansound8K as an example, it is a widely used public dataset for automatic urban environmental sound classification research. Urbansound8K contains 10 categories: air condition sound, car whistle sound, children playing sound, dog bark, drilling sound, engine idling sound, gun sound, jackdrill, siren sound, and street music sound. Data set download address: [UrbanSound8K](https://zenodo.org/record/1203745/files/UrbanSound8K.tar.gz). Here is the function to generate a list of data for Urbansound8K. If you want to use this dataset, please download and unzip it into the `dataset` directory and change the code to generate the list of data as follows. `create_data.py` can be used to generate a list of data sets. There are many ways to generate a list of data sets. 
```shell python create_data.py ``` The resulting list looks like this, with the path to the audio followed by the tag for that audio, starting at 0, and separated by `\t`. ```shell dataset/UrbanSound8K/audio/fold2/104817-4-0-2.wav 4 dataset/UrbanSound8K/audio/fold9/105029-7-2-5.wav 7 dataset/UrbanSound8K/audio/fold3/107228-5-0-0.wav 5 dataset/UrbanSound8K/audio/fold4/109711-3-2-4.wav 3 ``` # Change preprocessing methods By default, the Fbank preprocessing method is used in the configuration file. If you want to use other preprocessing methods, you can modify the following configuration section in the configuration file, and the specific value can be modified according to your own situation. If it's not clear how to set the parameters, you can remove that section and just use the default values. ```yaml # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 ``` ## Training Now we can train the model. We will create `train.py`. The parameters in the configuration file generally do not need to be modified, but these few need to be adjusted according to your actual dataset. The first and most important is the class size `dataset_conf.num_class`, which may be different for each dataset. Then there is `dataset_conf.batch_size`, which can be reduced if memory is insufficient. 
```shell # Single GPU training CUDA_VISIBLE_DEVICES=0 python train.py # Multi GPU training CUDA_VISIBLE_DEVICES=0,1 torchrun --standalone --nnodes=1 --nproc_per_node=2 train.py ``` Train log: ``` [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:14 - ----------- 额外配置参数 ----------- [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - configs: configs/ecapa_tdnn.yml [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - local_rank: 0 [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - pretrained_model: None [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - resume_model: None [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - save_model_path: models/ [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:16 - use_gpu: True [2023-08-07 22:54:22.148973 INFO ] utils:print_arguments:17 - ------------------------------------------------ [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:19 - ----------- 配置文件参数 ----------- [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:22 - dataset_conf: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - aug_conf: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - noise_aug_prob: 0.2 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - noise_dir: dataset/noise [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - speed_perturb: True [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - volume_aug_prob: 0.2 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - volume_perturb: False [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - dataLoader: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - batch_size: 64 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - num_workers: 4 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - do_vad: False [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - eval_conf: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - 
batch_size: 1 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - max_duration: 20 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - label_list_path: dataset/label_list.txt [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - max_duration: 3 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - min_duration: 0.5 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:29 - sample_rate: 16000 [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:25 - spec_aug_args: [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - freq_mask_width: [0, 8] [2023-08-07 22:54:22.202166 INFO ] utils:print_arguments:27 - time_mask_width: [0, 10] [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - target_dB: -20 [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - test_list: dataset/test_list.txt [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - train_list: dataset/train_list.txt [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - use_dB_normalization: True [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:29 - use_spec_aug: True [2023-08-07 22:54:22.203167 INFO ] utils:print_arguments:22 - model_conf: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - num_class: 10 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - pooling_type: ASP [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:22 - optimizer_conf: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - learning_rate: 0.001 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - optimizer: Adam [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - scheduler: WarmupCosineSchedulerLR [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:25 - scheduler_args: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:27 - max_lr: 0.001 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:27 - min_lr: 1e-05 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:27 - warmup_epoch: 5 
[2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - weight_decay: 1e-06 [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:22 - preprocess_conf: [2023-08-07 22:54:22.207167 INFO ] utils:print_arguments:29 - feature_method: Fbank [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:25 - method_args: [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:27 - num_mel_bins: 80 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:27 - sample_frequency: 16000 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:22 - train_conf: [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:29 - log_interval: 10 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:29 - max_epoch: 30 [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:31 - use_model: EcapaTdnn [2023-08-07 22:54:22.208167 INFO ] utils:print_arguments:32 - ------------------------------------------------ [2023-08-07 22:54:22.213166 WARNING] trainer:__init__:67 - Windows系统不支持多线程读取数据,已自动关闭! ========================================================================================== Layer (type:depth-idx) Output Shape Param # ========================================================================================== EcapaTdnn [1, 10] -- ├─Conv1dReluBn: 1-1 [1, 512, 98] -- │ └─Conv1d: 2-1 [1, 512, 98] 204,800 │ └─BatchNorm1d: 2-2 [1, 512, 98] 1,024 ├─Sequential: 1-2 [1, 512, 98] -- │ └─Conv1dReluBn: 2-3 [1, 512, 98] -- │ │ └─Conv1d: 3-1 [1, 512, 98] 262,144 │ │ └─BatchNorm1d: 3-2 [1, 512, 98] 1,024 │ └─Res2Conv1dReluBn: 2-4 [1, 512, 98] -- │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) │ │ └─ModuleList: 3-15 -- (recursive) │ │ └─ModuleList: 3-16 -- (recursive) ··································· │ │ └─ModuleList: 3-56 -- 
(recursive) │ │ └─ModuleList: 3-55 -- (recursive) │ │ └─ModuleList: 3-56 -- (recursive) │ │ └─ModuleList: 3-55 -- (recursive) │ │ └─ModuleList: 3-56 -- (recursive) │ └─Conv1dReluBn: 2-13 [1, 512, 98] -- │ │ └─Conv1d: 3-57 [1, 512, 98] 262,144 │ │ └─BatchNorm1d: 3-58 [1, 512, 98] 1,024 │ └─SE_Connect: 2-14 [1, 512, 98] -- │ │ └─Linear: 3-59 [1, 256] 131,328 │ │ └─Linear: 3-60 [1, 512] 131,584 ├─Conv1d: 1-5 [1, 1536, 98] 2,360,832 ├─AttentiveStatsPool: 1-6 [1, 3072] -- │ └─Conv1d: 2-15 [1, 128, 98] 196,736 │ └─Conv1d: 2-16 [1, 1536, 98] 198,144 ├─BatchNorm1d: 1-7 [1, 3072] 6,144 ├─Linear: 1-8 [1, 192] 590,016 ├─BatchNorm1d: 1-9 [1, 192] 384 ├─Linear: 1-10 [1, 10] 1,930 ========================================================================================== Total params: 6,188,490 Trainable params: 6,188,490 Non-trainable params: 0 Total mult-adds (M): 470.96 ========================================================================================== Input size (MB): 0.03 Forward/backward pass size (MB): 10.28 Params size (MB): 24.75 Estimated Total Size (MB): 35.07 ========================================================================================== [2023-08-07 22:54:26.726095 INFO ] trainer:train:344 - 训练数据:8644 [2023-08-07 22:54:30.092504 INFO ] trainer:__train_epoch:296 - Train epoch: [1/30], batch: [0/4], loss: 2.57033, accuracy: 0.06250, learning rate: 0.00001000, speed: 19.02 data/sec, eta: 0:06:43 ``` # Eval At the end of each training round, we can perform an evaluation, which will output the accuracy. We also save the mixture matrix image, and save the path `output/images/` as follows. ![混合矩阵](docs/images/image1.png) # Inference At the end of the training, we are given a model parameter file, and we use this model to predict the audio. 
```shell python infer.py --audio_path=dataset/UrbanSound8K/audio/fold5/156634-5-2-5.wav ``` # Other Functions - In order to read the recorded data and make a dataset easily, we provide the recording program `record_audio.py`, which is used to record audio with a sample rate of 16,000, single channel, 16bit. ```shell python record_audio.py ``` - `infer_record.py`: This program is used to continuously perform recording recognition, and we can roughly understand this program as recording recognition in real time. This should allow us to do some interesting things: for example, put a microphone in a place where birds often come, and recognize their calls by recording in real time. Once a bird call is recognized — and if your dataset is rich enough, with recordings of every bird's call — then you can identify exactly which bird is calling. If the target bird is identified, follow-up actions can be triggered, such as taking photos. ```shell python infer_record.py --record_seconds=3 ``` # Reference 1. https://github.com/PaddlePaddle/PaddleSpeech 2. https://github.com/yeyupiaoling/PaddlePaddle-MobileFaceNets 3. https://github.com/yeyupiaoling/PPASR 4. 
https://github.com/alibaba-damo-academy/3D-Speaker ================================================ FILE: configs/augmentation.yml ================================================ # 语速增强 speed: # 增强概率 prob: 1.0 # 音量增强 volume: # 增强概率 prob: 0.0 # 最小增益 min_gain_dBFS: -15 # 最大增益 max_gain_dBFS: 15 # 噪声增强 noise: # 增强概率 prob: 0.5 # 噪声增强的噪声文件夹 noise_dir: 'dataset/noise' # 针对噪声的最小音量增益 min_snr_dB: 10 # 针对噪声的最大音量增益 max_snr_dB: 50 # 混响增强 reverb: # 增强概率 prob: 0.5 # 混响增强的混响文件夹 reverb_dir: 'dataset/reverb' # Spec增强 spec_aug: # 增强概率 prob: 0.5 # 频域掩蔽的比例 freq_mask_ratio: 0.1 # 频域掩蔽次数 n_freq_masks: 1 # 频域掩蔽的比例 time_mask_ratio: 0.05 # 频域掩蔽次数 n_time_masks: 1 # 最大时间扭曲 max_time_warp: 0 ================================================ FILE: configs/cam++.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 64 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 8 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型 model: 'CAMPPlus' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 weight_decay: !!float 1e-5 # 
学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 log_interval: 10 ================================================ FILE: configs/ecapa_tdnn.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 128 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 16 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型 model: 'EcapaTdnn' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 weight_decay: !!float 1e-5 # 学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 
log_interval: 10 ================================================ FILE: configs/eres2net.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 32 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 4 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型,支持ERes2Net、ERes2NetV2 model: 'ERes2Net' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 weight_decay: !!float 1e-5 # 学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 log_interval: 10 ================================================ FILE: configs/panns.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 
target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 64 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 8 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型,支持PANNS_CNN6、PANNS_CNN10、PANNS_CNN14 model: 'PANNS_CNN10' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 weight_decay: !!float 1e-5 # 学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 log_interval: 10 ================================================ FILE: configs/res2net.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 32 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 4 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 
'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型 model: 'Res2Net' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 weight_decay: !!float 1e-5 # 学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 log_interval: 10 ================================================ FILE: configs/resnet_se.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 32 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 4 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 
当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型 model: 'ResNetSE' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 weight_decay: !!float 1e-5 # 学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 log_interval: 10 ================================================ FILE: configs/tdnn.yml ================================================ # 数据集参数 dataset_conf: dataset: # 过滤最短的音频长度 min_duration: 0.4 # 最长的音频长度,大于这个长度会裁剪掉 max_duration: 3 # 音频的采样率 sample_rate: 16000 # 是否对音频进行音量归一化 use_dB_normalization: True # 对音频进行音量归一化的音量分贝值 target_dB: -20 dataLoader: # 训练的批量大小 batch_size: 64 # 是否丢弃最后一个样本 drop_last: True # 读取数据的线程数量 num_workers: 8 # 评估的数据要特殊处理 eval_conf: # 评估的批量大小 batch_size: 8 # 最长的音频长度 max_duration: 20 # 训练数据的数据列表路径 train_list: 'dataset/train_list.txt' # 测试数据的数据列表路径 test_list: 'dataset/test_list.txt' # 标签列表 label_list_path: 'dataset/label_list.txt' # 数据预处理参数 preprocess_conf: # 是否使用HF上的Wav2Vec2类似模型提取音频特征 use_hf_model: False # 音频预处理方法,也可以叫特征提取方法 # 当use_hf_model为False时,支持:MelSpectrogram、Spectrogram、MFCC、Fbank # 当use_hf_model为True时,指定的是HuggingFace的模型或者本地路径,比如facebook/w2v-bert-2.0或者./feature_models/w2v-bert-2.0 feature_method: 'Fbank' # 当use_hf_model为False时,设置API参数,更参数查看对应API,不清楚的可以直接删除该部分,直接使用默认值。 # 当use_hf_model为True时,可以设置参数use_gpu,指定是否使用GPU提取特征 method_args: sample_frequency: 16000 num_mel_bins: 80 model_conf: # 所使用的模型 model: 'TDNN' # 模型参数 model_args: # 分类大小,如果为null,自动通过标签列表获取 num_class: null optimizer_conf: # 优化方法 optimizer: 'Adam' # 优化方法参数 optimizer_args: lr: 0.001 
weight_decay: !!float 1e-5 # 学习率衰减函数,支持Pytorch支持的和项目提供的WarmupCosineSchedulerLR scheduler: 'WarmupCosineSchedulerLR' # 学习率衰减函数参数 scheduler_args: min_lr: !!float 1e-5 max_lr: 0.001 warmup_epoch: 5 train_conf: # 是否开启自动混合精度 enable_amp: False # 是否使用Pytorch2.0的编译器 use_compile: False # CrossEntropyLoss类的label_smoothing参数 label_smoothing: 0.0 # 训练的轮数 max_epoch: 60 log_interval: 10 ================================================ FILE: create_data.py ================================================ import os # 生成数据列表 def get_data_list(audio_path, list_path): sound_sum = 0 audios = os.listdir(audio_path) os.makedirs(list_path, exist_ok=True) f_train = open(os.path.join(list_path, 'train_list.txt'), 'w', encoding='utf-8') f_test = open(os.path.join(list_path, 'test_list.txt'), 'w', encoding='utf-8') f_label = open(os.path.join(list_path, 'label_list.txt'), 'w', encoding='utf-8') for i in range(len(audios)): f_label.write(f'{audios[i]}\n') sounds = os.listdir(os.path.join(audio_path, audios[i])) for sound in sounds: sound_path = os.path.join(audio_path, audios[i], sound).replace('\\', '/') if sound_sum % 10 == 0: f_test.write(f'{sound_path}\t{i}\n') else: f_train.write(f'{sound_path}\t{i}\n') sound_sum += 1 print(f"Audio:{i + 1}/{len(audios)}") f_label.close() f_test.close() f_train.close() # 下载数据方式,执行:./tools/download_3dspeaker_data.sh # 生成生成方言数据列表 def get_language_identification_data_list(audio_path, list_path): labels_dict = {0: 'Standard Mandarin', 3: 'Southwestern Mandarin', 6: 'Central Plains Mandarin', 4: 'JiangHuai Mandarin', 2: 'Wu dialect', 8: 'Gan dialect', 9: 'Jin dialect', 11: 'LiaoJiao Mandarin', 12: 'JiLu Mandarin', 10: 'Min dialect', 7: 'Yue dialect', 5: 'Hakka dialect', 1: 'Xiang dialect', 13: 'Northern Mandarin'} with open(os.path.join(list_path, 'train_list.txt'), 'w', encoding='utf-8') as f: train_dir = os.path.join(audio_path, 'train') for root, dirs, files in os.walk(train_dir): for file in files: if not file.endswith('.wav'): continue label = 
int(file.split('_')[-1].replace('.wav', '')[-2:]) file = os.path.join(root, file) f.write(f'{file}\t{label}\n') with open(os.path.join(list_path, 'test_list.txt'), 'w', encoding='utf-8') as f: test_dir = os.path.join(audio_path, 'test') for root, dirs, files in os.walk(test_dir): for file in files: if not file.endswith('.wav'): continue label = int(file.split('_')[-1].replace('.wav', '')[-2:]) file = os.path.join(root, file) f.write(f'{file}\t{label}\n') with open(os.path.join(list_path, 'label_list.txt'), 'w', encoding='utf-8') as f: for i in range(len(labels_dict)): f.write(f'{labels_dict[i]}\n') # 创建UrbanSound8K数据列表 def create_UrbanSound8K_list(audio_path, metadata_path, list_path): sound_sum = 0 f_train = open(os.path.join(list_path, 'train_list.txt'), 'w', encoding='utf-8') f_test = open(os.path.join(list_path, 'test_list.txt'), 'w', encoding='utf-8') f_label = open(os.path.join(list_path, 'label_list.txt'), 'w', encoding='utf-8') with open(metadata_path) as f: lines = f.readlines() labels = {} for i, line in enumerate(lines): if i == 0:continue data = line.replace('\n', '').split(',') class_id = int(data[6]) if class_id not in labels.keys(): labels[class_id] = data[-1] sound_path = os.path.join(audio_path, f'fold{data[5]}', data[0]).replace('\\', '/') if sound_sum % 10 == 0: f_test.write(f'{sound_path}\t{data[6]}\n') else: f_train.write(f'{sound_path}\t{data[6]}\n') sound_sum += 1 for i in range(len(labels)): f_label.write(f'{labels[i]}\n') f_label.close() f_test.close() f_train.close() if __name__ == '__main__': # get_data_list('dataset/audio', 'dataset') # 生成生成方言数据列表 # get_language_identification_data_list(audio_path='dataset/language', # list_path='dataset/') # 创建UrbanSound8K数据列表 create_UrbanSound8K_list(audio_path='dataset/UrbanSound8K/audio', metadata_path='dataset/UrbanSound8K/metadata/UrbanSound8K.csv', list_path='dataset') ================================================ FILE: eval.py ================================================ import argparse 
import argparse
import functools
import time

from macls.trainer import MAClsTrainer
from macls.utils.utils import add_arguments, print_arguments

# Command-line arguments for standalone model evaluation.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('configs',          str,  'configs/cam++.yml',                 "配置文件")
add_arg("use_gpu",          bool, True,                                "是否使用GPU评估模型")
add_arg('save_matrix_path', str,  'output/images/',                    "保存混合矩阵的路径")
add_arg('resume_model',     str,  'models/CAMPPlus_Fbank/best_model/', "模型的路径")
add_arg('overwrites',       str,  None, '覆盖配置文件中的参数,比如"train_conf.max_epoch=100",多个用逗号隔开')
args = parser.parse_args()
print_arguments(args=args)

# The trainer object also hosts the evaluation loop.
evaluator = MAClsTrainer(configs=args.configs, use_gpu=args.use_gpu, overwrites=args.overwrites)

# Time the evaluation run end to end.
tic = time.time()
loss, accuracy = evaluator.evaluate(resume_model=args.resume_model,
                                    save_matrix_path=args.save_matrix_path)
toc = time.time()
print('评估消耗时间:{}s,loss:{:.5f},accuracy:{:.5f}'.format(int(toc - tic), loss, accuracy))
import argparse
import functools

from macls.predict import MAClsPredictor
from macls.utils.utils import add_arguments, print_arguments

# Command-line arguments for single-file inference.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('configs',    str,  'configs/cam++.yml',                               '配置文件')
add_arg('use_gpu',    bool, True,                                              '是否使用GPU预测')
add_arg('audio_path', str,  'dataset/UrbanSound8K/audio/fold5/156634-5-2-5.wav', '音频路径')
add_arg('model_path', str,  'models/CAMPPlus_Fbank/best_model/',               '导出的预测模型文件路径')
args = parser.parse_args()
print_arguments(args=args)

# Build the predictor once, then classify the requested file.
classifier = MAClsPredictor(configs=args.configs,
                            model_path=args.model_path,
                            use_gpu=args.use_gpu)
label, score = classifier.predict(audio_data=args.audio_path)
print(f'音频:{args.audio_path} 的预测结果标签为:{label},得分:{score}')
# 对一个batch的数据处理
def collate_fn(batch):
    """Pad a batch of (feature, label) samples to the longest time length.

    :param batch: list of (Tensor[time, freq], label) pairs
    :return: (features [B, T_max, F] float32, labels [B] int64, input_lens [B] int64)
    """
    # Only the maximum time length is needed — a full sort of the batch
    # (the previous approach) is unnecessary work.
    max_freq_length = max(sample[0].size(0) for sample in batch)
    freq_size = batch[0][0].size(1)
    batch_size = len(batch)
    # Zero tensor of the maximal size; shorter samples stay zero-padded.
    features = torch.zeros((batch_size, max_freq_length, freq_size), dtype=torch.float32)
    input_lens, labels = [], []
    for idx, (tensor, label) in enumerate(batch):
        seq_length = tensor.size(0)
        # Copy each sample into the zero tensor, implementing the padding.
        features[idx, :seq_length, :] = tensor[:, :]
        labels.append(label)
        input_lens.append(seq_length)
    labels = torch.tensor(labels, dtype=torch.int64)
    input_lens = torch.tensor(input_lens, dtype=torch.int64)
    return features, labels, input_lens
use_hf_model=False, method_args={}): super().__init__() self._method_args = method_args self._feature_method = feature_method self.use_hf_model = use_hf_model if self.use_hf_model: from transformers import AutoModel, AutoFeatureExtractor # 判断是否使用GPU提取特征 use_gpu = torch.cuda.is_available() and method_args.get('use_gpu', True) self.device = torch.device("cuda") if use_gpu else torch.device("cpu") # 加载Wav2Vec2类似模型 self.processor = AutoFeatureExtractor.from_pretrained(feature_method) self.feature_model = AutoModel.from_pretrained(feature_method).to(self.device) logger.info(f'使用模型【{feature_method}】提取特征,使用【{self.device}】设备提取') # 获取模型的输出通道数 inputs = self.processor(np.ones(16000 * 1, dtype=np.float32), sampling_rate=16000, return_tensors="pt").to(self.device) with torch.no_grad(): outputs = self.feature_model(**inputs) self.output_channels = outputs.extract_features.shape[2] else: if feature_method == 'MelSpectrogram': self.feat_fun = MelSpectrogram(**method_args) elif feature_method == 'Spectrogram': self.feat_fun = Spectrogram(**method_args) elif feature_method == 'MFCC': self.feat_fun = MFCC(**method_args) elif feature_method == 'Fbank': self.feat_fun = KaldiFbank(**method_args) else: raise Exception(f'预处理方法 {self._feature_method} 不存在!') logger.info(f'使用【{feature_method}】提取特征') def forward(self, waveforms, input_lens_ratio=None): """从AudioSegment中提取音频特征 :param waveforms: Audio segment to extract features from. :type waveforms: AudioSegment :param input_lens_ratio: input length ratio :type input_lens_ratio: tensor :return: Spectrogram audio feature in 2darray. 
:rtype: ndarray """ if len(waveforms.shape) == 1: waveforms = waveforms.unsqueeze(0) if self.use_hf_model: # 使用HF上的Wav2Vec2类似模型提取音频特征 if isinstance(waveforms, torch.Tensor): waveforms = waveforms.numpy() inputs = self.processor(waveforms, sampling_rate=16000, return_tensors="pt").to(self.device) with torch.no_grad(): outputs = self.feature_model(**inputs) feature = outputs.extract_features.cpu().detach() else: # 使用普通方法提取音频特征 feature = self.feat_fun(waveforms) feature = feature.transpose(2, 1) # 归一化 feature = feature - feature.mean(1, keepdim=True) if input_lens_ratio is not None: # 对掩码比例进行扩展 input_lens = (input_lens_ratio * feature.shape[1]) mask_lens = torch.round(input_lens).long() mask_lens = mask_lens.unsqueeze(1) # 生成掩码张量 idxs = torch.arange(feature.shape[1], device=feature.device).repeat(feature.shape[0], 1) mask = idxs < mask_lens mask = mask.unsqueeze(-1) # 对特征进行掩码操作 feature = torch.where(mask, feature, torch.zeros_like(feature)) return feature @property def feature_dim(self): """返回特征大小 :return: 特征大小 :rtype: int """ if self.use_hf_model: return self.output_channels if self._feature_method == 'MelSpectrogram': return self._method_args.get('n_mels', 128) elif self._feature_method == 'Spectrogram': return self._method_args.get('n_fft', 400) // 2 + 1 elif self._feature_method == 'MFCC': return self._method_args.get('n_mfcc', 40) elif self._feature_method == 'Fbank': return self._method_args.get('num_mel_bins', 23) else: raise Exception('没有{}预处理方法'.format(self._feature_method)) class KaldiFbank(nn.Module): def __init__(self, **kwargs): super(KaldiFbank, self).__init__() self.kwargs = kwargs def forward(self, waveforms): """ :param waveforms: [Batch, Length] :return: [Batch, Feature, Length] """ log_fbanks = [] for waveform in waveforms: if len(waveform.shape) == 1: waveform = waveform.unsqueeze(0) log_fbank = Kaldi.fbank(waveform, **self.kwargs) log_fbank = log_fbank.transpose(0, 1) log_fbanks.append(log_fbank) log_fbank = torch.stack(log_fbanks) return 
log_fbank ================================================ FILE: macls/data_utils/reader.py ================================================ import random import numpy as np import torch from torch.utils.data import Dataset from tqdm import tqdm from yeaudio.audio import AudioSegment from yeaudio.augmentation import SpeedPerturbAugmentor, VolumePerturbAugmentor, NoisePerturbAugmentor, \ ReverbPerturbAugmentor, SpecAugmentor from macls.data_utils.featurizer import AudioFeaturizer class MAClsDataset(Dataset): def __init__(self, data_list_path, audio_featurizer: AudioFeaturizer, max_duration=3, min_duration=0.5, mode='train', sample_rate=16000, aug_conf=None, use_dB_normalization=True, target_dB=-20): """音频数据加载器 Args: data_list_path: 包含音频路径和标签的数据列表文件的路径 audio_featurizer: 声纹特征提取器 max_duration: 最长的音频长度,大于这个长度会裁剪掉 min_duration: 过滤最短的音频长度 aug_conf: 用于指定音频增强的配置 mode: 数据集模式。在训练模式下,数据集可能会进行一些数据增强的预处理 sample_rate: 采样率 use_dB_normalization: 是否对音频进行音量归一化 target_dB: 音量归一化的大小 """ super(MAClsDataset, self).__init__() assert mode in ['train', 'eval', 'extract_feature'] self.data_list_path = data_list_path self.max_duration = max_duration self.min_duration = min_duration self.mode = mode self._target_sample_rate = sample_rate self._use_dB_normalization = use_dB_normalization self._target_dB = target_dB self.speed_augment = None self.volume_augment = None self.noise_augment = None self.reverb_augment = None self.spec_augment = None # 获取特征器 self.audio_featurizer = audio_featurizer # 获取特征裁剪的大小 self.max_feature_len = self.get_crop_feature_len() # 获取数据列表 with open(self.data_list_path, 'r', encoding='utf-8') as f: self.lines = f.readlines() if mode == 'train' and aug_conf is not None: # 获取数据增强器 self.get_augmentor(aug_conf) # 评估模式下,数据列表需要排序 if self.mode == 'eval': self.sort_list() def __getitem__(self, idx): # 分割数据文件路径和标签 data_path, label = self.lines[idx].replace('\n', '').split('\t') # 如果后缀名为.npy的文件,那么直接读取 if data_path.endswith('.npy'): feature = np.load(data_path) if feature.shape[0] > 
self.max_feature_len: crop_start = random.randint(0, feature.shape[0] - self.max_feature_len) if self.mode == 'train' else 0 feature = feature[crop_start:crop_start + self.max_feature_len, :] feature = torch.tensor(feature, dtype=torch.float32) else: audio_path, label = self.lines[idx].strip().split('\t') # 读取音频 audio_segment = AudioSegment.from_file(audio_path) # 数据太短不利于训练 if self.mode == 'train' or self.mode == 'extract_feature': if audio_segment.duration < self.min_duration: return self.__getitem__(idx + 1 if idx < len(self.lines) - 1 else 0) # 音频增强 if self.mode == 'train': audio_segment = self.augment_audio(audio_segment) # 重采样 if audio_segment.sample_rate != self._target_sample_rate: audio_segment.resample(self._target_sample_rate) # 音量归一化 if self._use_dB_normalization: audio_segment.normalize(target_db=self._target_dB) # 裁剪需要的数据 if audio_segment.duration > self.max_duration: audio_segment.crop(duration=self.max_duration, mode=self.mode) samples = torch.tensor(audio_segment.samples, dtype=torch.float32) feature = self.audio_featurizer(samples) feature = feature.squeeze(0) if self.mode == 'train' and self.spec_augment is not None: feature = self.spec_augment(feature.cpu().numpy()) feature = torch.tensor(feature, dtype=torch.float32) label = torch.tensor(int(label), dtype=torch.int64) return feature, label def __len__(self): return len(self.lines) # 获取特征裁剪的大小,对应max_duration音频提取特征后的长度 def get_crop_feature_len(self): samples = torch.randn((1, self.max_duration * self._target_sample_rate)) feature = self.audio_featurizer(samples).squeeze(0) freq_len = feature.size(0) return freq_len # 数据列表需要排序 def sort_list(self): lengths = [] for line in tqdm(self.lines, desc=f"对列表[{self.data_list_path}]进行长度排序"): # 分割数据文件路径和标签 data_path, _ = line.split('\t') if data_path.endswith('.npy'): feature = np.load(data_path) length = feature.shape[0] lengths.append(length) else: # 读取音频 audio_segment = AudioSegment.from_file(data_path) length = audio_segment.duration lengths.append(length) 
# 计算准确率
def accuracy(output, label):
    """Compute classification accuracy for a batch.

    :param output: raw model logits, shape [batch, num_classes]
    :param label: ground-truth class ids, shape [batch]
    :return: fraction of correct predictions (numpy float)
    """
    # argmax is invariant under softmax (softmax is monotone per row), so
    # computing the probabilities first — as the code previously did — is
    # redundant work that cannot change the predicted class.
    preds = output.data.cpu().numpy()
    preds = np.argmax(preds, axis=1)
    truth = label.data.cpu().numpy()
    return np.mean((preds == truth).astype(int))
class WarmupCosineSchedulerLR:
    """Learning-rate schedule: linear warmup followed by cosine decay.

    The LR ramps linearly from ``min_lr`` to ``max_lr`` over ``warmup_epoch``
    epochs, then follows a cosine curve back down to ``min_lr`` at
    ``fix_epoch`` epochs, and stays at ``min_lr`` afterwards. All epochs are
    converted to step counts via ``step_per_epoch``.
    """

    def __init__(
            self,
            optimizer,
            min_lr,
            max_lr,
            warmup_epoch,
            fix_epoch,
            step_per_epoch
    ):
        assert min_lr <= max_lr
        self.optimizer = optimizer
        self.min_lr = min_lr
        self.max_lr = max_lr
        # Convert epoch boundaries into absolute step indices.
        self.warmup_step = warmup_epoch * step_per_epoch
        self.fix_step = fix_epoch * step_per_epoch
        self.current_step = 0.0

    def set_lr(self, ):
        """Write the LR for the current step into every param group."""
        new_lr = self.clr(self.current_step)
        for group in self.optimizer.param_groups:
            group['lr'] = new_lr
        return new_lr

    def step(self, step=None):
        """Apply the LR for the current (or given) step, then advance by one."""
        if step is not None:
            self.current_step = step
        new_lr = self.set_lr()
        self.current_step += 1
        return new_lr

    def clr(self, step):
        """Return the LR value for an absolute step index."""
        if step < self.warmup_step:
            # Linear warmup from min_lr to max_lr.
            return self.min_lr + (self.max_lr - self.min_lr) * (step / self.warmup_step)
        if self.warmup_step <= step < self.fix_step:
            # Cosine decay between the end of warmup and the fix point.
            progress = (step - self.warmup_step) / (self.fix_step - self.warmup_step)
            return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + math.cos(math.pi * progress))
        # Constant floor afterwards.
        return self.min_lr

    def get_last_lr(self) -> List[float]:
        """Return the LR for the current step, scheduler-API style."""
        return [self.clr(self.current_step)]
overwrite in overwrites: keys, value = overwrite.strip().split("=") attrs = keys.split('.') current_level = self.configs for attr in attrs[:-1]: current_level = getattr(current_level, attr) before_value = getattr(current_level, attrs[-1]) setattr(current_level, attrs[-1], convert_string_based_on_type(before_value, value)) # 打印配置信息 print_arguments(configs=self.configs) # 获取特征器 self._audio_featurizer = AudioFeaturizer(feature_method=self.configs.preprocess_conf.feature_method, use_hf_model=self.configs.preprocess_conf.get('use_hf_model', False), method_args=self.configs.preprocess_conf.get('method_args', {})) # 获取分类标签 with open(self.configs.dataset_conf.label_list_path, 'r', encoding='utf-8') as f: lines = f.readlines() self.class_labels = [l.replace('\n', '') for l in lines] # 自动获取列表数量 if self.configs.model_conf.model_args.get('num_class', None) is None: self.configs.model_conf.model_args.num_class = len(self.class_labels) # 获取模型 self.predictor = build_model(input_size=self._audio_featurizer.feature_dim, configs=self.configs) self.predictor.to(self.device) # 加载模型 if os.path.isdir(model_path): model_path = os.path.join(model_path, 'model.pth') assert os.path.exists(model_path), f"{model_path} 模型不存在!" 
if torch.cuda.is_available() and use_gpu: model_state_dict = torch.load(model_path, weights_only=False) else: model_state_dict = torch.load(model_path, weights_only=False, map_location='cpu') self.predictor.load_state_dict(model_state_dict) logger.info(f"成功加载模型参数:{model_path}") self.predictor.eval() def _load_audio(self, audio_data, sample_rate=16000): """加载音频 :param audio_data: 需要识别的数据,支持文件路径,文件对象,字节,numpy。如果是字节的话,必须是完整的字节文件 :param sample_rate: 如果传入的事numpy数据,需要指定采样率 :return: 识别的文本结果和解码的得分数 """ # 加载音频文件,并进行预处理 if isinstance(audio_data, str): audio_segment = AudioSegment.from_file(audio_data) elif isinstance(audio_data, BufferedReader): audio_segment = AudioSegment.from_file(audio_data) elif isinstance(audio_data, np.ndarray): audio_segment = AudioSegment.from_ndarray(audio_data, sample_rate) elif isinstance(audio_data, bytes): audio_segment = AudioSegment.from_bytes(audio_data) else: raise Exception(f'不支持该数据类型,当前数据类型为:{type(audio_data)}') # 重采样 if audio_segment.sample_rate != self.configs.dataset_conf.dataset.sample_rate: audio_segment.resample(self.configs.dataset_conf.dataset.sample_rate) # decibel normalization if self.configs.dataset_conf.dataset.use_dB_normalization: audio_segment.normalize(target_db=self.configs.dataset_conf.dataset.target_dB) assert audio_segment.duration >= self.configs.dataset_conf.dataset.min_duration, \ f'音频太短,最小应该为{self.configs.dataset_conf.dataset.min_duration}s,当前音频为{audio_segment.duration}s' return audio_segment # 预测一个音频的特征 def predict(self, audio_data, sample_rate=16000): """预测一个音频 :param audio_data: 需要识别的数据,支持文件路径,文件对象,字节,numpy。如果是字节的话,必须是完整并带格式的字节文件 :param sample_rate: 如果传入的事numpy数据,需要指定采样率 :return: 结果标签和对应的得分 """ # 加载音频文件,并进行预处理 input_data = self._load_audio(audio_data=audio_data, sample_rate=sample_rate) input_data = torch.tensor(input_data.samples, dtype=torch.float32).unsqueeze(0) audio_feature = self._audio_featurizer(input_data).to(self.device) # 执行预测 output = self.predictor(audio_feature) result = 
torch.nn.functional.softmax(output, dim=-1)[0] result = result.data.cpu().numpy() # 最大概率的label lab = np.argsort(result)[-1] score = result[lab] return self.class_labels[lab], round(float(score), 5) def predict_batch(self, audios_data: List, sample_rate=16000): """预测一批音频的特征 :param audios_data: 需要识别的数据,支持文件路径,文件对象,字节,numpy。如果是字节的话,必须是完整并带格式的字节文件 :param sample_rate: 如果传入的事numpy数据,需要指定采样率 :return: 结果标签和对应的得分 """ audios_data1 = [] for audio_data in audios_data: # 加载音频文件,并进行预处理 input_data = self._load_audio(audio_data=audio_data, sample_rate=sample_rate) audios_data1.append(input_data.samples) # 找出音频长度最长的 batch = sorted(audios_data1, key=lambda a: a.shape[0], reverse=True) max_audio_length = batch[0].shape[0] batch_size = len(batch) # 以最大的长度创建0张量 inputs = np.zeros((batch_size, max_audio_length), dtype=np.float32) input_lens_ratio = [] for x in range(batch_size): tensor = audios_data1[x] seq_length = tensor.shape[0] # 将数据插入都0张量中,实现了padding inputs[x, :seq_length] = tensor[:] input_lens_ratio.append(seq_length / max_audio_length) inputs = torch.tensor(inputs, dtype=torch.float32) input_lens_ratio = torch.tensor(input_lens_ratio, dtype=torch.float32) audio_feature = self._audio_featurizer(inputs, input_lens_ratio).to(self.device) # 执行预测 output = self.predictor(audio_feature) results = torch.nn.functional.softmax(output, dim=-1) results = results.data.cpu().numpy() labels, scores = [], [] for result in results: lab = np.argsort(result)[-1] score = result[lab] labels.append(self.class_labels[lab]) scores.append(round(float(score), 5)) return labels, scores ================================================ FILE: macls/trainer.py ================================================ import os import platform import sys import time import uuid from datetime import timedelta import numpy as np import torch import torch.distributed as dist import yaml from sklearn.metrics import confusion_matrix from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distributed 
import DistributedSampler from torchinfo import summary from tqdm import tqdm from loguru import logger from visualdl import LogWriter from macls.data_utils.collate_fn import collate_fn from macls.data_utils.featurizer import AudioFeaturizer from macls.data_utils.reader import MAClsDataset from macls.metric.metrics import accuracy from macls.models import build_model from macls.optimizer import build_optimizer, build_lr_scheduler from macls.utils.checkpoint import load_pretrained, load_checkpoint, save_checkpoint from macls.utils.utils import dict_to_object, plot_confusion_matrix, print_arguments, convert_string_based_on_type class MAClsTrainer(object): def __init__(self, configs, use_gpu=True, data_augment_configs=None, num_class=None, overwrites=None, log_level="info"): """声音分类训练工具类 :param configs: 配置文件路径,或者模型名称,如果是模型名称则会使用默认的配置文件 :param use_gpu: 是否使用GPU训练模型 :param data_augment_configs: 数据增强配置字典或者其文件路径 :param num_class: 分类大小,对应配置文件中的model_conf.model_args.num_class :param overwrites: 覆盖配置文件中的参数,比如"train_conf.max_epoch=100",多个用逗号隔开 :param log_level: 打印的日志等级,可选值有:"debug", "info", "warning", "error" """ if use_gpu: assert (torch.cuda.is_available()), 'GPU不可用' self.device = torch.device("cuda") else: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' self.device = torch.device("cpu") self.use_gpu = use_gpu self.log_level = log_level.upper() logger.remove() logger.add(sink=sys.stdout, level=self.log_level) # 读取配置文件 if isinstance(configs, str): # 获取当前程序绝对路径 absolute_path = os.path.dirname(__file__) # 获取默认配置文件路径 config_path = os.path.join(absolute_path, f"configs/{configs}.yml") configs = config_path if os.path.exists(config_path) else configs with open(configs, 'r', encoding='utf-8') as f: configs = yaml.load(f.read(), Loader=yaml.FullLoader) self.configs = dict_to_object(configs) if num_class is not None: self.configs.model_conf.model_args.num_class = num_class # 覆盖配置文件中的参数 if overwrites: overwrites = overwrites.split(",") for overwrite in overwrites: keys, value = 
    def __setup_dataloader(self, is_train=False):
        """Build the feature extractor and the train/test data loaders.

        :param is_train: whether to also build the training loader
        """
        # Feature extractor shared by the training and evaluation datasets.
        self.audio_featurizer = AudioFeaturizer(feature_method=self.configs.preprocess_conf.feature_method,
                                                use_hf_model=self.configs.preprocess_conf.get('use_hf_model', False),
                                                method_args=self.configs.preprocess_conf.get('method_args', {}))
        dataset_args = self.configs.dataset_conf.get('dataset', {})
        data_loader_args = self.configs.dataset_conf.get('dataLoader', {})
        if is_train:
            self.train_dataset = MAClsDataset(data_list_path=self.configs.dataset_conf.train_list,
                                              audio_featurizer=self.audio_featurizer,
                                              aug_conf=self.data_augment_configs,
                                              mode='train',
                                              **dataset_args)
            # Default single-process sampler.
            train_sampler = RandomSampler(self.train_dataset)
            if torch.cuda.device_count() > 1:
                # Switch to a distributed sampler when more than one GPU is visible.
                train_sampler = DistributedSampler(dataset=self.train_dataset)
            self.train_loader = DataLoader(dataset=self.train_dataset,
                                           collate_fn=collate_fn,
                                           sampler=train_sampler,
                                           **data_loader_args)
        # Build the evaluation loader.
        # NOTE(review): dataset_args/data_loader_args are shared config objects
        # mutated in place here — eval settings deliberately override the
        # training ones AFTER the train loader has been constructed.
        data_loader_args.drop_last = False
        dataset_args.max_duration = self.configs.dataset_conf.eval_conf.max_duration
        data_loader_args.batch_size = self.configs.dataset_conf.eval_conf.batch_size
        self.test_dataset = MAClsDataset(data_list_path=self.configs.dataset_conf.test_list,
                                         audio_featurizer=self.audio_featurizer,
                                         mode='eval',
                                         **dataset_args)
        self.test_loader = DataLoader(dataset=self.test_dataset,
                                      collate_fn=collate_fn,
                                      shuffle=False,
                                      **data_loader_args)
'_features.txt') with open(save_data_list, 'w', encoding='utf-8') as f: for features, labels, input_lens in tqdm(test_loader): for i in range(len(features)): feature, label, input_len = features[i], labels[i], input_lens[i] feature = feature.numpy()[:input_len] label = int(label) save_path = os.path.join(save_dir, str(label), f'{str(uuid.uuid4())}.npy').replace('\\', '/') os.makedirs(os.path.dirname(save_path), exist_ok=True) np.save(save_path, feature) f.write(f'{save_path}\t{label}\n') logger.info(f'{data_list}列表中的数据已提取特征完成,新列表为:{save_data_list}') def __setup_model(self, input_size, is_train=False): """ 获取模型 :param input_size: 模型输入特征大小 :param is_train: 是否获取训练模型 """ # 自动获取列表数量 if self.configs.model_conf.model_args.get('num_class', None) is None: self.configs.model_conf.model_args.num_class = len(self.class_labels) # 获取模型 self.model = build_model(input_size=input_size, configs=self.configs) self.model.to(self.device) if self.log_level == "DEBUG" or self.log_level == "INFO": # 打印模型信息,98是长度,这个取决于输入的音频长度 summary(self.model, input_size=(1, 98, input_size)) # 使用Pytorch2.0的编译器 if self.configs.train_conf.use_compile and torch.__version__ >= "2" and platform.system().lower() == 'windows': self.model = torch.compile(self.model, mode="reduce-overhead") # print(self.model) # 获取损失函数 label_smoothing = self.configs.train_conf.get('label_smoothing', 0.0) self.loss = torch.nn.CrossEntropyLoss(label_smoothing=label_smoothing) if is_train: if self.configs.train_conf.enable_amp: self.amp_scaler = torch.GradScaler(init_scale=1024) # 获取优化方法 self.optimizer = build_optimizer(params=self.model.parameters(), configs=self.configs) # 学习率衰减函数 self.scheduler = build_lr_scheduler(optimizer=self.optimizer, step_per_epoch=len(self.train_loader), configs=self.configs) def __train_epoch(self, epoch_id, local_rank, writer, nranks=0): """训练一个epoch :param epoch_id: 当前epoch :param local_rank: 当前显卡id :param writer: VisualDL对象 :param nranks: 所使用显卡的数量 """ train_times, accuracies, loss_sum = [], [], [] 
start = time.time()
        for batch_id, (features, label, input_len) in enumerate(self.train_loader):
            if self.stop_train:
                break
            # Move the batch onto the right device (per-rank GPU when distributed).
            if nranks > 1:
                features = features.to(local_rank)
                label = label.to(local_rank).long()
            else:
                features = features.to(self.device)
                label = label.to(self.device).long()
            # Forward pass, optionally under automatic mixed precision.
            with torch.autocast('cuda', enabled=self.configs.train_conf.enable_amp):
                output = self.model(features)
            # Compute the loss.
            los = self.loss(output, label)
            # Backward pass; with AMP the loss is scaled first.
            if self.configs.train_conf.enable_amp:
                scaled = self.amp_scaler.scale(los)
                scaled.backward()
            else:
                los.backward()
            # Optimizer step (unscale + step + update through the scaler with AMP).
            if self.configs.train_conf.enable_amp:
                self.amp_scaler.unscale_(self.optimizer)
                self.amp_scaler.step(self.optimizer)
                self.amp_scaler.update()
            else:
                self.optimizer.step()
            self.optimizer.zero_grad()
            # Track accuracy/loss/timing for the periodic log line below.
            acc = accuracy(output, label)
            accuracies.append(acc)
            loss_sum.append(los.data.cpu().numpy())
            train_times.append((time.time() - start) * 1000)
            self.train_step += 1
            # Only rank 0 logs when training on multiple GPUs.
            if batch_id % self.configs.train_conf.log_interval == 0 and local_rank == 0:
                batch_id = batch_id + 1
                # Samples processed per second.
                train_speed = self.configs.dataset_conf.dataLoader.batch_size / (
                        sum(train_times) / len(train_times) / 1000)
                # Estimated time remaining for the whole run.
                self.train_eta_sec = (sum(train_times) / len(train_times)) * (self.max_step - self.train_step) / 1000
                eta_str = str(timedelta(seconds=int(self.train_eta_sec)))
                self.train_loss = sum(loss_sum) / len(loss_sum)
                self.train_acc = sum(accuracies) / len(accuracies)
                logger.info(f'Train epoch: [{epoch_id}/{self.configs.train_conf.max_epoch}], '
                            f'batch: [{batch_id}/{len(self.train_loader)}], '
                            f'loss: {self.train_loss:.5f}, accuracy: {self.train_acc:.5f}, '
                            f'learning rate: {self.scheduler.get_last_lr()[0]:>.8f}, '
                            f'speed: {train_speed:.2f} data/sec, eta: {eta_str}')
                writer.add_scalar('Train/Loss', self.train_loss, self.train_log_step)
                writer.add_scalar('Train/Accuracy', self.train_acc, self.train_log_step)
                # Record the learning rate.
                writer.add_scalar('Train/lr',
self.scheduler.get_last_lr()[0], self.train_log_step)
                train_times, accuracies, loss_sum = [], [], []
                self.train_log_step += 1
            start = time.time()
            # Scheduler is stepped per batch (it was built with step_per_epoch).
            self.scheduler.step()

    def train(self,
              save_model_path='models/',
              log_dir='log/',
              max_epoch=None,
              resume_model=None,
              pretrained_model=None):
        """Train the model.

        :param save_model_path: directory checkpoints are saved under
        :param log_dir: directory for VisualDL log files
        :param max_epoch: maximum number of epochs; overrides train_conf.max_epoch when given
        :param resume_model: checkpoint to resume from, or None
        :param pretrained_model: pretrained model path, or None
        """
        # Number of GPUs available for training.
        nranks = torch.cuda.device_count()
        local_rank = 0
        writer = None
        # NOTE(review): this runs before local_rank is read from the environment,
        # so every rank opens a LogWriter — confirm that is intended.
        if local_rank == 0:
            writer = LogWriter(logdir=log_dir)
        if nranks > 1 and self.use_gpu:
            # Initialize the NCCL process group for distributed training.
            dist.init_process_group(backend='nccl')
            local_rank = int(os.environ["LOCAL_RANK"])
        # Build data loaders and the model.
        self.__setup_dataloader(is_train=True)
        self.__setup_model(input_size=self.audio_featurizer.feature_dim, is_train=True)
        # Optionally load pretrained weights.
        self.model = load_pretrained(model=self.model, pretrained_model=pretrained_model, use_gpu=self.use_gpu)
        # Optionally restore model/optimizer/scaler/scheduler state from a checkpoint.
        self.model, self.optimizer, self.amp_scaler, self.scheduler, last_epoch, best_acc = \
            load_checkpoint(configs=self.configs, model=self.model, optimizer=self.optimizer,
                            amp_scaler=self.amp_scaler, scheduler=self.scheduler,
                            step_epoch=len(self.train_loader),
                            save_model_path=save_model_path, resume_model=resume_model)
        # Wrap the model for multi-GPU training.
        if nranks > 1 and self.use_gpu:
            self.model.to(local_rank)
            self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[local_rank])
        logger.info('训练数据:{}'.format(len(self.train_dataset)))
        self.train_loss, self.train_acc = None, None
        self.eval_loss, self.eval_acc = None, None
        self.test_log_step, self.train_log_step = 0, 0
        if local_rank == 0:
            writer.add_scalar('Train/lr', self.scheduler.get_last_lr()[0], last_epoch)
        if max_epoch is not None:
            self.configs.train_conf.max_epoch = max_epoch
        # Total number of optimizer steps over the whole run (for ETA reporting).
        self.max_step = len(self.train_loader) * self.configs.train_conf.max_epoch
        self.train_step = max(last_epoch, 0) * len(self.train_loader)
        # Start training.
        for epoch_id in range(last_epoch, self.configs.train_conf.max_epoch):
            if self.stop_train:
                break
            epoch_id += 1
            start_epoch = time.time()
            # Train one epoch.
            self.__train_epoch(epoch_id=epoch_id, local_rank=local_rank, writer=writer, nranks=nranks)
            # Only rank 0 evaluates and saves checkpoints.
            if local_rank == 0:
                if self.stop_eval:
                    continue
                logger.info('=' * 70)
                self.eval_loss, self.eval_acc = self.evaluate()
                logger.info('Test epoch: {}, time/epoch: {}, loss: {:.5f}, accuracy: {:.5f}'.format(
                    epoch_id, str(timedelta(seconds=(time.time() - start_epoch))), self.eval_loss, self.eval_acc))
                logger.info('=' * 70)
                writer.add_scalar('Test/Accuracy', self.eval_acc, self.test_log_step)
                writer.add_scalar('Test/Loss', self.eval_loss, self.test_log_step)
                self.test_log_step += 1
                self.model.train()
                # Save the best model so far.
                if self.eval_acc >= best_acc:
                    best_acc = self.eval_acc
                    save_checkpoint(configs=self.configs, model=self.model, optimizer=self.optimizer,
                                    amp_scaler=self.amp_scaler, save_model_path=save_model_path,
                                    epoch_id=epoch_id, accuracy=self.eval_acc, best_model=True)
                # Save the per-epoch checkpoint.
                save_checkpoint(configs=self.configs, model=self.model, optimizer=self.optimizer,
                                amp_scaler=self.amp_scaler, save_model_path=save_model_path,
                                epoch_id=epoch_id, accuracy=self.eval_acc)

    def evaluate(self, resume_model=None, save_matrix_path=None):
        """Evaluate the model on the test set.

        :param resume_model: checkpoint to load before evaluating, or None
        :param save_matrix_path: directory to save the confusion-matrix image to, or None
        :return: (loss, accuracy)
        """
        if self.test_loader is None:
            self.__setup_dataloader()
        if self.model is None:
            self.__setup_model(input_size=self.audio_featurizer.feature_dim)
        if resume_model is not None:
            if os.path.isdir(resume_model):
                resume_model = os.path.join(resume_model, 'model.pth')
            assert os.path.exists(resume_model), f"{resume_model} 模型不存在!"
model_state_dict = torch.load(resume_model, weights_only=False)
            self.model.load_state_dict(model_state_dict)
            logger.info(f'成功加载模型:{resume_model}')
        self.model.eval()
        # Unwrap DDP so evaluation runs on the underlying module.
        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
            eval_model = self.model.module
        else:
            eval_model = self.model
        accuracies, losses, preds, labels = [], [], [], []
        with torch.no_grad():
            for batch_id, (features, label, input_lens) in enumerate(tqdm(self.test_loader, desc='执行评估')):
                if self.stop_eval:
                    break
                features = features.to(self.device)
                label = label.to(self.device).long()
                output = eval_model(features)
                los = self.loss(output, label)
                # Batch accuracy.
                acc = accuracy(output, label)
                accuracies.append(acc)
                # Predicted labels.
                label = label.data.cpu().numpy()
                output = output.data.cpu().numpy()
                pred = np.argmax(output, axis=1)
                preds.extend(pred.tolist())
                # Ground-truth labels.
                labels.extend(label.tolist())
                losses.append(los.data.cpu().numpy())
        # -1 signals that evaluation was interrupted before any batch finished.
        loss = float(sum(losses) / len(losses)) if len(losses) > 0 else -1
        acc = float(sum(accuracies) / len(accuracies)) if len(accuracies) > 0 else -1
        # Optionally save a confusion-matrix plot.
        if save_matrix_path is not None:
            try:
                cm = confusion_matrix(labels, preds)
                plot_confusion_matrix(cm=cm, save_path=os.path.join(save_matrix_path, f'{int(time.time())}.png'),
                                      class_labels=self.class_labels)
            except Exception as e:
                logger.error(f'保存混淆矩阵失败:{e}')
        self.model.train()
        return loss, acc

    def export(self, save_model_path='models/', resume_model='models/EcapaTdnn_Fbank/best_model/'):
        """Export a TorchScript inference model.

        :param save_model_path: directory the exported model is saved under
        :param resume_model: checkpoint to convert
        :return:
        """
        self.__setup_model(input_size=self.audio_featurizer.feature_dim)
        # Load the trained weights.
        if os.path.isdir(resume_model):
            resume_model = os.path.join(resume_model, 'model.pth')
        assert os.path.exists(resume_model), f"{resume_model} 模型不存在!"
model_state_dict = torch.load(resume_model)
        self.model.load_state_dict(model_state_dict)
        logger.info('成功恢复模型参数和优化方法参数:{}'.format(resume_model))
        self.model.eval()
        # Build the scripted inference model via the model's own export().
        infer_model = self.model.export()
        infer_model_path = os.path.join(save_model_path,
                                        f'{self.configs.use_model}_{self.configs.preprocess_conf.feature_method}',
                                        'inference.pth')
        os.makedirs(os.path.dirname(infer_model_path), exist_ok=True)
        torch.jit.save(infer_model, infer_model_path)
        logger.info("预测模型已保存:{}".format(infer_model_path))


================================================
FILE: macls/utils/__init__.py
================================================



================================================
FILE: macls/utils/checkpoint.py
================================================
import json
import os
import shutil

import torch
from loguru import logger

from macls import __version__


def load_pretrained(model, pretrained_model, use_gpu=True):
    """Load pretrained weights into *model*.

    :param model: the model to load into
    :param pretrained_model: path to the pretrained checkpoint
    :param use_gpu: whether the model runs on GPU
    :return: the model with the loaded weights
    """
    # Nothing to do when no pretrained model is given.
    if pretrained_model is None:
        return model
    if os.path.isdir(pretrained_model):
        pretrained_model = os.path.join(pretrained_model, 'model.pth')
    assert os.path.exists(pretrained_model), f"{pretrained_model} 模型不存在!"
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_dict = model.module.state_dict()
    else:
        model_dict = model.state_dict()
    if torch.cuda.is_available() and use_gpu:
        model_state_dict = torch.load(pretrained_model, weights_only=False)
    else:
        # Force CPU tensors when no GPU is available/used.
        model_state_dict = torch.load(pretrained_model, weights_only=False, map_location='cpu')
    # Drop checkpoint tensors whose shape does not match the current model.
    for name, weight in model_dict.items():
        if name in model_state_dict.keys():
            if list(weight.shape) != list(model_state_dict[name].shape):
                logger.warning(f'{name} not used, shape {list(model_state_dict[name].shape)} '
                               f'unmatched with {list(weight.shape)} in model.')
                model_state_dict.pop(name, None)
    # Load the (possibly filtered) weights non-strictly and report the diff.
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        missing_keys, unexpected_keys = model.module.load_state_dict(model_state_dict, strict=False)
    else:
        missing_keys, unexpected_keys = model.load_state_dict(model_state_dict, strict=False)
    if len(unexpected_keys) > 0:
        logger.warning('Unexpected key(s) in state_dict: {}. '
                       .format(', '.join('"{}"'.format(k) for k in unexpected_keys)))
    if len(missing_keys) > 0:
        logger.warning('Missing key(s) in state_dict: {}. '
                       .format(', '.join('"{}"'.format(k) for k in missing_keys)))
    logger.info('成功加载预训练模型:{}'.format(pretrained_model))
    return model


def load_checkpoint(configs, model, optimizer, amp_scaler, scheduler, step_epoch, save_model_path, resume_model):
    """Restore training state from a checkpoint.

    :param configs: configuration object
    :param model: model to restore
    :param optimizer: optimizer to restore
    :param amp_scaler: AMP GradScaler to restore (may be None)
    :param scheduler: LR scheduler to fast-forward to the resumed step
    :param step_epoch: number of scheduler steps per epoch
    :param save_model_path: directory checkpoints are saved under
    :param resume_model: explicit checkpoint path to resume from, or None for auto-resume
    """
    last_epoch1 = 0
    accuracy1 = 0.

    def load_model(model_path):
        # Inner helper: load everything from one checkpoint directory.
        assert os.path.exists(os.path.join(model_path, 'model.pth')), "模型参数文件不存在!"
        assert os.path.exists(os.path.join(model_path, 'optimizer.pth')), "优化方法参数文件不存在!"
state_dict = torch.load(os.path.join(model_path, 'model.pth'), weights_only=False) if isinstance(model, torch.nn.parallel.DistributedDataParallel): model.module.load_state_dict(state_dict) else: model.load_state_dict(state_dict) optimizer.load_state_dict(torch.load(os.path.join(model_path, 'optimizer.pth'), weights_only=False)) # 自动混合精度参数 if amp_scaler is not None and os.path.exists(os.path.join(model_path, 'scaler.pth')): amp_scaler.load_state_dict(torch.load(os.path.join(model_path, 'scaler.pth')), weights_only=False) with open(os.path.join(model_path, 'model.state'), 'r', encoding='utf-8') as f: json_data = json.load(f) last_epoch = json_data['last_epoch'] accuracy = json_data['accuracy'] logger.info('成功恢复模型参数和优化方法参数:{}'.format(model_path)) optimizer.step() [scheduler.step() for _ in range(last_epoch * step_epoch)] return last_epoch, accuracy # 获取最后一个保存的模型 save_feature_method = configs.preprocess_conf.feature_method if configs.preprocess_conf.get('use_hf_model', False): save_feature_method = save_feature_method[:-1] if save_feature_method[-1] == '/' else save_feature_method save_feature_method = os.path.basename(save_feature_method) last_model_dir = os.path.join(save_model_path, f'{configs.model_conf.model}_{save_feature_method}', 'last_model') if resume_model is not None or (os.path.exists(os.path.join(last_model_dir, 'model.pth')) and os.path.exists(os.path.join(last_model_dir, 'optimizer.pth'))): if resume_model is not None: last_epoch1, accuracy1 = load_model(resume_model) else: try: # 自动获取最新保存的模型 last_epoch1, accuracy1 = load_model(last_model_dir) except Exception as e: logger.warning(f'尝试自动恢复最新模型失败,错误信息:{e}') return model, optimizer, amp_scaler, scheduler, last_epoch1, accuracy1 # 保存模型 def save_checkpoint(configs, model, optimizer, amp_scaler, save_model_path, epoch_id, accuracy=0., best_model=False): """保存模型 :param configs: 配置信息 :param model: 使用的模型 :param optimizer: 使用的优化方法 :param amp_scaler: 使用的自动混合精度 :param save_model_path: 模型保存路径 :param epoch_id: 
current epoch
    :param accuracy: accuracy at this epoch
    :param best_model: whether this checkpoint is the best model so far
    """
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    # Directory the checkpoint is written to.
    save_feature_method = configs.preprocess_conf.feature_method
    if configs.preprocess_conf.get('use_hf_model', False):
        save_feature_method = save_feature_method[:-1] if save_feature_method[-1] == '/' else save_feature_method
        save_feature_method = os.path.basename(save_feature_method)
    if best_model:
        model_path = os.path.join(save_model_path,
                                  f'{configs.model_conf.model}_{save_feature_method}',
                                  'best_model')
    else:
        model_path = os.path.join(save_model_path,
                                  f'{configs.model_conf.model}_{save_feature_method}',
                                  'epoch_{}'.format(epoch_id))
    os.makedirs(model_path, exist_ok=True)
    # Save model and optimizer parameters.
    torch.save(optimizer.state_dict(), os.path.join(model_path, 'optimizer.pth'))
    torch.save(state_dict, os.path.join(model_path, 'model.pth'))
    # AMP scaler state, when mixed precision is in use.
    if amp_scaler is not None:
        torch.save(amp_scaler.state_dict(), os.path.join(model_path, 'scaler.pth'))
    with open(os.path.join(model_path, 'model.state'), 'w', encoding='utf-8') as f:
        data = {"last_epoch": epoch_id, "accuracy": accuracy, "version": __version__,
                "model": configs.model_conf.model, "feature_method": save_feature_method}
        f.write(json.dumps(data, indent=4, ensure_ascii=False))
    if not best_model:
        # Mirror this checkpoint into last_model/ so training can auto-resume.
        last_model_path = os.path.join(save_model_path,
                                       f'{configs.model_conf.model}_{save_feature_method}',
                                       'last_model')
        shutil.rmtree(last_model_path, ignore_errors=True)
        shutil.copytree(model_path, last_model_path)
        # Delete checkpoints older than three epochs.
        old_model_path = os.path.join(save_model_path,
                                      f'{configs.model_conf.model}_{save_feature_method}',
                                      'epoch_{}'.format(epoch_id - 3))
        if os.path.exists(old_model_path):
            shutil.rmtree(old_model_path)
    logger.info('已保存模型:{}'.format(model_path))


================================================
FILE: macls/utils/record.py
================================================
import os

import soundcard
import soundfile


class
RecordAudio:
    def __init__(self, channels=1, sample_rate=16000):
        # Recording parameters.
        self.channels = channels
        self.sample_rate = sample_rate
        # Default system microphone.
        self.default_mic = soundcard.default_microphone()

    def record(self, record_seconds=3, save_path=None):
        """Record audio from the default microphone.

        :param record_seconds: recording duration in seconds, default 3
        :param save_path: optional .wav path to save the recording to
        :return: the recorded audio as a numpy array
        """
        print("开始录音......")
        num_frames = int(record_seconds * self.sample_rate)
        data = self.default_mic.record(samplerate=self.sample_rate, numframes=num_frames, channels=self.channels)
        # Drop the channel axis for mono recordings.
        audio_data = data.squeeze()
        print("录音已结束!")
        if save_path is not None:
            os.makedirs(os.path.dirname(save_path), exist_ok=True)
            soundfile.write(save_path, data=data, samplerate=self.sample_rate)
        return audio_data


================================================
FILE: macls/utils/utils.py
================================================
import distutils.util
import os

import matplotlib.pyplot as plt
import numpy as np
from loguru import logger


def print_arguments(args=None, configs=None, title=None):
    """Log command-line arguments and/or a (nested) config dict."""
    if args:
        logger.info("----------- 额外配置参数 -----------")
        for arg, value in sorted(vars(args).items()):
            logger.info("%s: %s" % (arg, value))
        logger.info("------------------------------------------------")
    if configs:
        title = title if title else "配置文件参数"
        logger.info(f"----------- {title} -----------")
        # Two levels of nesting are printed with increasing indentation.
        for arg, value in sorted(configs.items()):
            if isinstance(value, dict):
                logger.info(f"{arg}:")
                for a, v in sorted(value.items()):
                    if isinstance(v, dict):
                        logger.info(f"\t{a}:")
                        for a1, v1 in sorted(v.items()):
                            logger.info("\t\t%s: %s" % (a1, v1))
                    else:
                        logger.info("\t%s: %s" % (a, v))
            else:
                logger.info("%s: %s" % (arg, value))
        logger.info("------------------------------------------------")


def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Register one CLI argument; bool values are parsed with strtobool."""
    type = distutils.util.strtobool if type == bool else type
    argparser.add_argument("--" + argname,
                           default=default,
                           type=type,
                           help=help + ' 默认: %(default)s.',
                           **kwargs)


class Dict(dict):
    # Dict subclass that allows attribute-style access to its items.
    __setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__


def dict_to_object(dict_obj):
    # Recursively convert a plain dict into attribute-accessible Dict objects.
    if not isinstance(dict_obj, dict):
        return dict_obj
    inst = Dict()
    for k, v in dict_obj.items():
        inst[k] = dict_to_object(v)
    return inst


def plot_confusion_matrix(cm, save_path, class_labels, show=False):
    """Plot a confusion matrix and save it as a PNG image.

    @param cm: confusion matrix, a 2-D array of predicted vs. actual counts
    @param save_path: file path the image is saved to
    @param class_labels: list of class names
    @param show: whether to also display the figure in a window
    """
    # Switch to a CJK-capable font when the labels contain non-ASCII text.
    s = ''.join(class_labels)
    is_ascii = all(ord(c) < 128 for c in s)
    if not is_ascii:
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(12, 8), dpi=100)
    np.set_printoptions(precision=2)
    # Write each cell's column-normalized value into the plot.
    ind_array = np.arange(len(class_labels))
    x, y = np.meshgrid(ind_array, ind_array)
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        # 1e-6 guards against division by zero for empty columns.
        c = cm[y_val][x_val] / (np.sum(cm[:, x_val]) + 1e-6)
        # Skip cells whose value is negligible.
        if c < 1e-4:
            continue
        plt.text(x_val, y_val, "%0.2f" % (c,), color='red', fontsize=15, va='center', ha='center')
    m = np.sum(cm, axis=0) + 1e-6
    plt.imshow(cm / m, interpolation='nearest', cmap=plt.cm.binary)
    plt.title('Confusion Matrix' if is_ascii else '混合矩阵')
    plt.colorbar()
    # Class labels on both axes.
    xlocations = np.array(range(len(class_labels)))
    plt.xticks(xlocations, class_labels, rotation=90)
    plt.yticks(xlocations, class_labels)
    plt.ylabel('Actual label' if is_ascii else '实际标签')
    plt.xlabel('Predict label' if is_ascii else '预测标签')
    # Minor ticks between the cells improve readability.
    tick_marks = np.array(range(len(class_labels))) + 0.5
    plt.gca().set_xticks(tick_marks, minor=True)
    plt.gca().set_yticks(tick_marks, minor=True)
    plt.gca().xaxis.set_ticks_position('none')
    plt.gca().yaxis.set_ticks_position('none')
    plt.grid(True, which='minor', linestyle='-')
    plt.gcf().subplots_adjust(bottom=0.15)
    # Save the image.
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    plt.savefig(save_path, format='png')
    if show:
        # Display the figure.
        plt.show()


# Convert b to the type of a.
def convert_string_based_on_type(a, b):
    if
isinstance(a, int):
        try:
            b = int(b)
        except ValueError:
            logger.error("无法将字符串转换为整数")
    elif isinstance(a, float):
        try:
            b = float(b)
        except ValueError:
            logger.error("无法将字符串转换为浮点数")
    elif isinstance(a, str):
        return b
    elif isinstance(a, bool):
        b = b.lower() == 'true'
    else:
        try:
            # NOTE(review): eval() on an override string is dangerous if the value
            # can come from an untrusted source — confirm callers only pass
            # operator-supplied CLI config overrides here.
            b = eval(b)
        except Exception as e:
            logger.exception("无法将字符串转换为其他类型,将忽略该参数类型转换")
    return b


================================================
FILE: record_audio.py
================================================
import time

from macls.utils.record import RecordAudio

# Interactive script: ask for a duration, record from the microphone, save a wav.
s = input('请输入你计划录音多少秒:')
record_seconds = int(s)
save_path = "dataset/save_audio/%s.wav" % str(int(time.time()*1000))
record_audio = RecordAudio()
input(f"按下回车键开机录音,录音{record_seconds}秒中:")
record_audio.record(record_seconds=record_seconds, save_path=save_path)
print('文件保存在:%s' % save_path)


================================================
FILE: requirements.txt
================================================
numpy>=1.19.2
scipy>=1.6.3
librosa>=0.9.1
soundfile>=0.12.1
soundcard>=0.4.2
resampy>=0.2.2
numba>=0.53.0
pydub~=0.25.1
matplotlib>=3.5.2
pillow>=10.3.0
tqdm>=4.66.3
visualdl==2.5.3
pyyaml>=5.4.1
scikit-learn>=1.0.2
torchinfo>=1.7.2
loguru>=0.7.2
yeaudio>=0.0.7


================================================
FILE: setup.py
================================================
import shutil

from setuptools import setup, find_packages

import macls

VERSION = macls.__version__

# Copy the config files into the package directory so they ship with the wheel.
shutil.rmtree('./macls/configs/', ignore_errors=True)
shutil.copytree('./configs/', './macls/configs/')


def readme():
    # Long description for PyPI.
    with open('README.md', encoding='utf-8') as f:
        content = f.read()
    return content


def parse_requirements():
    # Install requirements straight from requirements.txt.
    with open('./requirements.txt', encoding="utf-8") as f:
        requirements = f.readlines()
    return requirements


if __name__ == "__main__":
    setup(
        name='macls',
        packages=find_packages(),
        package_data={'': ['configs/*']},
        author='yeyupiaoling',
        version=VERSION,
        install_requires=parse_requirements(),
        description='Audio Classification toolkit on
Pytorch',
        long_description=readme(),
        long_description_content_type='text/markdown',
        url='https://github.com/yeyupiaoling/AudioClassification-Pytorch',
        download_url='https://github.com/yeyupiaoling/AudioClassification-Pytorch.git',
        keywords=['audio', 'pytorch'],
        classifiers=[
            'Intended Audience :: Developers',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Natural Language :: Chinese (Simplified)',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Topic :: Utilities'
        ],
        license='Apache License 2.0',
        ext_modules=[])
    # Clean up the copied configs after packaging.
    shutil.rmtree('./macls/configs/', ignore_errors=True)


================================================
FILE: tools/download_language_data.sh
================================================
#!/bin/bash
# Download and unpack the 3D-Speaker language-identification train/test sets.

download_dir=dataset/language

[ ! -d ${download_dir} ] && mkdir -p ${download_dir}

if [ ! -f ${download_dir}/test.tar.gz ]; then
  echo "准备下载测试集"
  wget --no-check-certificate https://speech-lab-share-data.oss-cn-shanghai.aliyuncs.com/3D-Speaker/test.tar.gz -P ${download_dir}
  md5=$(md5sum ${download_dir}/test.tar.gz | awk '{print $1}')
  [ $md5 != "45972606dd10d3f7c1c31f27acdfbed7" ] && echo "Wrong md5sum of 3dspeaker test.tar.gz" && exit 1
fi

if [ ! -f ${download_dir}/train.tar.gz ]; then
  echo "准备下载训练集"
  wget --no-check-certificate https://speech-lab-share-data.oss-cn-shanghai.aliyuncs.com/3D-Speaker/train.tar.gz -P ${download_dir}
  md5=$(md5sum ${download_dir}/train.tar.gz | awk '{print $1}')
  [ $md5 != "c2cea55fd22a2b867d295fb35a2d3340" ] && echo "Wrong md5sum of 3dspeaker train.tar.gz" && exit 1
fi

echo "下载完成!"
echo "准备解压"
# BUGFIX: the archives were extracted into ${rawdata_dir}, a variable that is
# never defined (it expands empty, so tar targeted "/"). Extract into the
# download directory instead.
tar -zxvf ${download_dir}/train.tar.gz -C ${download_dir}/
tar -xzvf ${download_dir}/test.tar.gz -C ${download_dir}/
echo "解压完成!"
================================================
FILE: train.py
================================================
import argparse
import functools

from macls.trainer import MAClsTrainer
from macls.utils.utils import add_arguments, print_arguments

# CLI entry point for training an audio-classification model.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('configs', str, 'configs/cam++.yml', '配置文件')
add_arg('data_augment_configs', str, 'configs/augmentation.yml', '数据增强配置文件')
add_arg("local_rank", int, 0, '多卡训练需要的参数')
add_arg("use_gpu", bool, True, '是否使用GPU训练')
add_arg('save_model_path', str, 'models/', '模型保存的路径')
add_arg('log_dir', str, 'log/', '保存VisualDL日志文件的路径')
add_arg('resume_model', str, None, '恢复训练,当为None则不使用预训练模型')
add_arg('pretrained_model', str, None, '预训练模型的路径,当为None则不使用预训练模型')
add_arg('overwrites', str, None, '覆盖配置文件中的参数,比如"train_conf.max_epoch=100",多个用逗号隔开')
args = parser.parse_args()
print_arguments(args=args)

# Build the trainer from the parsed arguments.
trainer = MAClsTrainer(configs=args.configs,
                       use_gpu=args.use_gpu,
                       data_augment_configs=args.data_augment_configs,
                       overwrites=args.overwrites)

# Run training.
trainer.train(save_model_path=args.save_model_path,
              log_dir=args.log_dir,
              resume_model=args.resume_model,
              pretrained_model=args.pretrained_model)