Repository: KakaruHayate/ColorSplitter Branch: main Commit: 6104d4a170e3 Files: 38 Total size: 16.4 MB Directory structure: gitextract_5gn6sueo/ ├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── README_CN.md ├── clean_csv.py ├── kick.py ├── load_npy.py ├── modules/ │ ├── cluster.py │ ├── model/ │ │ ├── emotion_encoder.py │ │ └── voice_encoder.py │ ├── utils.py │ └── visualizations.py ├── move_files.py ├── pretrain/ │ ├── encoder_1570000.bak │ └── wav2vec2-large-robust-12-ft-emotion-msp-dim/ │ ├── LICENSE │ ├── config.json │ ├── preprocessor_config.json │ └── vocab.json ├── requirements.txt ├── splitter.py └── viewer/ ├── .gitignore ├── README.md ├── bun.lockb ├── eslint.config.js ├── index.html ├── package.json ├── postcss.config.js ├── src/ │ ├── App.tsx │ ├── ScatterPlot.tsx │ ├── index.css │ ├── main.tsx │ └── vite-env.d.ts ├── tailwind.config.js ├── tsconfig.app.json ├── tsconfig.json ├── tsconfig.node.json └── vite.config.ts ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ # Auto detect text files and perform LF normalization * text=auto ================================================ FILE: .gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. # https://pdm.fming.dev/#use-with-ide .pdm.toml # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ .DS_Store /input/* /output/* /pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/*.bin ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2023 KakaruHayate Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # ColorSplitter ![result](IMG/20240102162212.png) [中文文档](README_CN.md) [webui](https://github.com/KakaruHayate/ColorSplitter/tree/main/viewer) A command-line tool for separating vocal timbres # Introduction ColorSplitter is a command-line tool for classifying the vocal timbre styles of single-speaker data in the pre-processing stage of vocal data. For scenarios that do not require style classification, using this tool to filter data can also reduce the problem of unstable timbre performance of the model. **Please note** that this project is based on Speaker Verification technology, and it is not clear whether the timbre changes of singing are completely related to the voiceprint differences, just for fun :) The research in this field is still scarce, hoping to inspire more ideas. Thanks to the community user: 洛泠羽 # New version features Implemented automatic optimization of clustering results, no longer need users to judge the optimal clustering results themselves. `splitter.py` deleted the `--nmax` parameter, added `--nmin` (minimum number of timbre types, invalid when cluster parameter is 2) `--cluster` (clustering method, 1:SpectralCluster, 2:UmapHdbscan), `--mer_cosine` to merge clusters that are too similar. **New version tips** 1. 
Run `splitter.py` directly with the default parameters, specifying only the speaker. 2. If the result has only one cluster, look at the distribution map, set `--nmin` to a number you think is reasonable, and run `splitter.py` again. 3. In practice, the optimal value of `--nmin` may be smaller than you expect. 4. The new clustering algorithm is fast, so running it several times is recommended. 5. Emotion classification is now implemented and can be enabled with `--encoder emotion`. To use it, go to https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim/tree/main and download `pytorch_model.bin`, then place it in the `pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim` directory. 6. You can also use `--encoder mix` to filter audio that matches both features at the same time; this can help you pick prompt (reference) audio for `GPT SoVITS` or `Bert-VITS2.3`. # Progress - [x] **Correctly trained weights** - [x] Clustering algorithm optimization - [ ] ~SSL~ - [x] emotional encoder - [x] embed mix # Environment Configuration Tested under `python3.8`. First install [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/), then install the dependencies with the following command ``` pip install -r requirements.txt ``` Tips: If you only use the timbre encoder, the CPU version of pytorch is enough; in other cases the GPU version is recommended. # How to Use **1. Move your prepared Diffsinger dataset into the `.\input` folder and run the following command** ``` python splitter.py --spk <speaker_name> --nmin <N_min_num> ``` Enter the speaker name after `--spk` and the minimum number of timbre types after `--nmin` (minimum 1, maximum 14, default 1). Tips: This project does not need to read the Diffsinger annotation file (transcriptions.csv), so it works as long as the file structure looks like this ``` - input - - raw - wavs - audio1.wav - audio2.wav - ... ``` The wav files should ideally already be split. **2. After you pick the result you consider optimal, run the following command to sort the wav files of the dataset into clusters** ``` python move_files.py --spk <speaker_name> ``` The classified results will be saved in `.\output\\`. After that, you still need to manually merge clusters that are too small to meet the training requirements.
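A minimal end-to-end run might look like this, where `myspeaker` is a placeholder for your speaker folder under `.\input` and the default timbre encoder is used; if you choose a non-default `--encoder`, pass the same value to `move_files.py` so that it reads the matching `clustered_files(...)` csv
```
python splitter.py --spk myspeaker --nmin 1
python move_files.py --spk myspeaker
```
**3.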
(Optional) Move `clean_csv.py` to the same level as `transcriptions.csv` and run it, you can delete the wav file entries that are not included in the `wavs` folder** # Based on Project [Resemblyzer](https://github.com/resemble-ai/Resemblyzer/) [3D-Speaker](https://github.com/alibaba-damo-academy/3D-Speaker/) [wav2vec2-large-robust-12-ft-emotion-msp-dim](https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim) [GTSinger](https://github.com/AaronZ345/GTSinger) ================================================ FILE: README_CN.md ================================================ # ColorSplitter ![result](IMG/20240102162212.png) [webui](https://github.com/KakaruHayate/ColorSplitter/tree/main/viewer) 一个用于分离歌声音色的命令行工具 # 介绍 ColorSplitter是一个为了在歌声数据的处理前期,对单说话人数据的音色风格进行分类的命令行工具 对于不需要进行风格分类的场合,使用本工具进行数据筛选,也可以减轻模型的音色表现不稳定问题 **请注意**,本项目基于说话人确认(Speaker Verification)技术,目前并不确定唱歌的音色变化是与声纹差异完全相关,just for fun:) 目前该领域研究仍然匮乏,抛砖引玉 感谢社区用户:洛泠羽 # 新版本特性 实装了聚类结果自动优化,不再需要用户自己判断聚类最优结果 `splitter.py`删除了`--nmax`参数,添加了`--nmin`(最小音色类型数量,cluster参数为2时无效)`--cluster`(聚类方式,1:SpectralCluster, 2:UmapHdbscan),`--mer_cosine`合并过于相似的簇 **新版本使用技巧** 1.默认参数直接指定说话人运行`splitter.py` 2.如果结果只有一个簇,观察分布图,将`--nmin`设为你认为合理的数量,再次运行`splitter.py` 3.实际测试下`--nmin`的最优值可能比想象的要小 4.新的聚类算法速度较快,建议多次尝试 5.新版本已支持情绪编码器的使用,可以通过`--encoder emotion`调用。使用时前往 https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim/tree/main 下载 `pytorch_model.bin` 放置在 `pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim` 目录下 6.你也可以用`--encoder mix`筛选同时符合两个特征相似的音频,这个功能可以帮助你筛选`GPT SoVITS`和`BertVITS2.3`的参考音频 # 进展 - [x] **正确训练的权重** - [x] 聚类算法优化 - [ ] ~SSL~ - [x] emotional encoder - [x] embed mix # 环境配置 `python3.8`下使用正常,请先安装[Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) 之后使用以下命令安装环境依赖 ``` pip install -r requirements.txt ``` 注意:如果你只是用音色编码器则只需要安装CPU版本的pytorch,其他情况下建议使用GPU版本 # 如何使用 **1.将你制作好的Diffsinger数据集移动到`.\input`文件夹下,运行以下命令** ``` python splitter.py --spk --nmin <'N'_min_num> ``` 其中`--spk`后输入说话人名称,`--nmin`后输入最小音色类型数量(最小1最大14默认1) tips:本项目并不需要读取Diffsinger数据集的标注文件(transcriptions.csv),所以保证只要文件结构如下所示就可以正常工作 ``` - input - - raw - wavs - audio1.wav - audio2.wav - ... 
``` 其中wav文件最好已经进行过切分 **2.选定你认为的最优结果后,运行以下命令将数据集中的wav文件分类** ``` python move_files.py --spk ``` 分类后结果将保存到`.\output\\`中 在那之后还需要人工对过小的簇进行归并,以达到训练的需求 **3.(可选)将`clean_csv.py`移动到与`transcriptions.csv`同级后运行,可以删除`wavs`文件夹中没有包含的wav文件条目** # 基于项目 [Resemblyzer](https://github.com/resemble-ai/Resemblyzer/) [3D-Speaker](https://github.com/alibaba-damo-academy/3D-Speaker/) [wav2vec2-large-robust-12-ft-emotion-msp-dim](https://huggingface.co/audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim) [GTSinger](https://github.com/AaronZ345/GTSinger) ================================================ FILE: clean_csv.py ================================================ import os import csv wav_files = set(f[:-4] for f in os.listdir('wavs') if f.endswith('.wav')) with open('transcriptions.csv', 'r') as f: reader = csv.reader(f) header = next(reader) rows = [row for row in reader if row[0] in wav_files] with open('transcriptions.csv', 'w', newline='') as f: writer = csv.writer(f) writer.writerow(header) writer.writerows(rows) ================================================ FILE: kick.py ================================================ import os import shutil import pandas as pd import argparse parser = argparse.ArgumentParser() parser.add_argument('--spk', type=str, help='Speaker name') parser.add_argument('--clust', type=int, help='Cluster value') parser.add_argument('--encoder', type=str, default='timbre', help='encoder type') args = parser.parse_args() Speaker_name = args.spk #Speaker name clust_value = args.clust # Cluster value encoder_name = args.encoder data = pd.read_csv(os.path.join('output', Speaker_name, f'clustered_files({encoder_name}).csv')) for index, row in data.iterrows(): file_path = row['filename'] clust = row['clust'] if clust == clust_value: clust_dir = os.path.join('input', f'{Speaker_name}_{clust_value}') if not os.path.exists(clust_dir): os.makedirs(clust_dir) shutil.move(file_path, clust_dir) ================================================ FILE: load_npy.py ================================================ import pandas as pd from pathlib import Path import numpy as np import matplotlib.pyplot as plt from modules.visualizations import plot_projections, process_json_file from modules.cluster import CommonClustering import argparse import os parser = argparse.ArgumentParser() parser.add_argument('--path', type=str, help='path to the .npy file') parser.add_argument('--reducer', type=int, default=2, help='1:tSNE, 2:Umap') parser.add_argument('--json', type=str, default=None, help='path to the .json file') args = parser.parse_args() if args.reducer == 1: cluster_name = 'spectral' elif args.reducer == 2: cluster_name = 'umap_hdbscan' else: raise ValueError('reducer type error') npy_path = args.path embeds = np.load(npy_path) if args.json == None: token_names = np.arange(embeds.shape[0]) else: token_names = process_json_file(args.json) labels = np.ones_like(token_names) output_dir = f'output/npy_result' if not os.path.exists(output_dir): os.makedirs(output_dir) df = pd.DataFrame({ 'token': [f'{i}' for i in range(embeds.shape[0])], 'clust': labels }) df.to_csv(f'{output_dir}/clustered_files({os.path.basename(npy_path)}).csv', index=False) plot_projections(embeds, labels, title="Embedding projections", cluster_name=cluster_name, labels=token_names) plt.savefig(f'{output_dir}/embedding_projections({os.path.basename(npy_path)}).png', dpi=600) plt.show() ================================================ FILE: modules/cluster.py ================================================ # Copyright 3D-Speaker 
(https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved. # Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import numpy as np import scipy import sklearn from sklearn.cluster._kmeans import k_means from sklearn.metrics.pairwise import cosine_similarity try: import umap, hdbscan except ImportError: raise ImportError( "Package \"umap\" or \"hdbscan\" not found. \ Please install them first by \"pip install umap-learn hdbscan\"." ) class SpectralCluster: """A spectral clustering method using unnormalized Laplacian of affinity matrix. This implementation is adapted from https://github.com/speechbrain/speechbrain. """ def __init__(self, min_num_spks=1, max_num_spks=14, pval=0.02, min_pnum=6, oracle_num=None): self.min_num_spks = min_num_spks self.max_num_spks = max_num_spks self.min_pnum = min_pnum self.pval = pval self.k = oracle_num def __call__(self, X, pval=None, oracle_num=None): # Similarity matrix computation sim_mat = self.get_sim_mat(X) # Refining similarity matrix with pval prunned_sim_mat = self.p_pruning(sim_mat, pval) # Symmetrization sym_prund_sim_mat = 0.5 * (prunned_sim_mat + prunned_sim_mat.T) # Laplacian calculation laplacian = self.get_laplacian(sym_prund_sim_mat) # Get Spectral Embeddings emb, num_of_spk = self.get_spec_embs(laplacian, oracle_num) # Perform clustering labels = self.cluster_embs(emb, num_of_spk) return labels def get_sim_mat(self, X): # Cosine similarities M = cosine_similarity(X, X) return M def p_pruning(self, A, pval=None): if pval is None: pval = self.pval n_elems = int((1 - pval) * A.shape[0]) n_elems = min(n_elems, A.shape[0]-self.min_pnum) # For each row in a affinity matrix for i in range(A.shape[0]): low_indexes = np.argsort(A[i, :]) low_indexes = low_indexes[0:n_elems] # Replace smaller similarity values by 0s A[i, low_indexes] = 0 return A def get_laplacian(self, M): M[np.diag_indices(M.shape[0])] = 0 D = np.sum(np.abs(M), axis=1) D = np.diag(D) L = D - M return L def get_spec_embs(self, L, k_oracle=None): if k_oracle is None: k_oracle = self.k lambdas, eig_vecs = scipy.linalg.eigh(L) if k_oracle is not None: num_of_spk = k_oracle else: lambda_gap_list = self.getEigenGaps( lambdas[self.min_num_spks - 1:self.max_num_spks + 1]) num_of_spk = np.argmax(lambda_gap_list) + self.min_num_spks emb = eig_vecs[:, :num_of_spk] return emb, num_of_spk def cluster_embs(self, emb, k): # k-means _, labels, _ = k_means(emb, k, n_init='auto') return labels def getEigenGaps(self, eig_vals): eig_vals_gap_list = [] for i in range(len(eig_vals) - 1): gap = float(eig_vals[i + 1]) - float(eig_vals[i]) eig_vals_gap_list.append(gap) return eig_vals_gap_list class UmapHdbscan: """ Reference: - Siqi Zheng, Hongbin Suo. Reformulating Speaker Diarization as Community Detection With Emphasis On Topological Structure. ICASSP2022 """ def __init__(self, n_neighbors=20, n_components=60, min_samples=20, min_cluster_size=10, metric='euclidean'): self.n_neighbors = n_neighbors self.n_components = n_components self.min_samples = min_samples self.min_cluster_size = min_cluster_size self.metric = metric def __call__(self, X): umap_X = umap.UMAP( n_neighbors=self.n_neighbors, min_dist=0.0, n_components=min(self.n_components, X.shape[0]-2), metric=self.metric, ).fit_transform(X) labels = hdbscan.HDBSCAN(min_samples=self.min_samples, min_cluster_size=self.min_cluster_size).fit_predict(umap_X) return labels class CommonClustering: """Perfom clustering for input embeddings and output the labels. 
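With fewer than `cluster_line` input embeddings, all samples are assigned to a single cluster. Otherwise the selected backend (SpectralCluster or UmapHdbscan) is run, clusters smaller than `min_cluster_size` are reassigned to the closest major cluster by cosine similarity, and, when `mer_cos` is set, cluster pairs whose centroid cosine similarity exceeds that threshold are merged.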
""" def __init__(self, cluster_type, cluster_line=10, mer_cos=None, min_cluster_size=4, **kwargs): self.cluster_type = cluster_type self.cluster_line = cluster_line self.min_cluster_size = min_cluster_size self.mer_cos = mer_cos if self.cluster_type == 'spectral': self.cluster = SpectralCluster(**kwargs) elif self.cluster_type == 'umap_hdbscan': kwargs['min_cluster_size'] = min_cluster_size self.cluster = UmapHdbscan(**kwargs) else: raise ValueError( '%s is not currently supported.' % self.cluster_type ) def __call__(self, X): # clustering and return the labels assert len(X.shape) == 2, 'Shape of input should be [N, C]' if X.shape[0] < self.cluster_line: return np.ones(X.shape[0], dtype=int) # clustering labels = self.cluster(X) # remove extremely minor cluster labels = self.filter_minor_cluster(labels, X, self.min_cluster_size) # merge similar speaker if self.mer_cos is not None: labels = self.merge_by_cos(labels, X, self.mer_cos) return labels def filter_minor_cluster(self, labels, x, min_cluster_size): cset = np.unique(labels) csize = np.array([(labels == i).sum() for i in cset]) minor_idx = np.where(csize < self.min_cluster_size)[0] if len(minor_idx) == 0: return labels minor_cset = cset[minor_idx] major_idx = np.where(csize >= self.min_cluster_size)[0] major_cset = cset[major_idx] major_center = np.stack([x[labels == i].mean(0) \ for i in major_cset]) for i in range(len(labels)): if labels[i] in minor_cset: cos_sim = cosine_similarity(x[i][np.newaxis], major_center) labels[i] = major_cset[cos_sim.argmax()] return labels def merge_by_cos(self, labels, x, cos_thr): # merge the similar speakers by cosine similarity assert cos_thr > 0 and cos_thr <= 1 while True: cset = np.unique(labels) if len(cset) == 1: break centers = np.stack([x[labels == i].mean(0) \ for i in cset]) affinity = cosine_similarity(centers, centers) affinity = np.triu(affinity, 1) idx = np.unravel_index(np.argmax(affinity), affinity.shape) if affinity[idx] < cos_thr: break c1, c2 = cset[np.array(idx)] labels[labels==c2]=c1 return labels ================================================ FILE: modules/model/emotion_encoder.py ================================================ import torch import torch.nn as nn from transformers import Wav2Vec2Processor from transformers.models.wav2vec2.modeling_wav2vec2 import ( Wav2Vec2Model, Wav2Vec2PreTrainedModel, ) import os import librosa import numpy as np class RegressionHead(nn.Module): r"""Classification head.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.final_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x class EmotionModel(Wav2Vec2PreTrainedModel): r"""Speech emotion classifier.""" def __init__(self, config): super().__init__(config) self.config = config self.wav2vec2 = Wav2Vec2Model(config) self.classifier = RegressionHead(config) self.init_weights() def forward( self, input_values, ): outputs = self.wav2vec2(input_values) hidden_states = outputs[0] hidden_states = torch.mean(hidden_states, dim=1) logits = self.classifier(hidden_states) return hidden_states, logits # load model from hub device = 'cuda' if torch.cuda.is_available() else "cpu" model_path = './pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim' processor = Wav2Vec2Processor.from_pretrained(model_path) model = 
EmotionModel.from_pretrained(model_path).to(device) def process_func( x: np.ndarray, sampling_rate: int, embeddings: bool = False, ) -> np.ndarray: r"""Predict emotions or extract embeddings from raw audio signal.""" # run through processor to normalize signal # always returns a batch, so we just get the first entry # then we put it on the device y = processor(x, sampling_rate=sampling_rate) y = y['input_values'][0] y = y.reshape(1, -1) y = torch.from_numpy(y).to(device) # run through model with torch.no_grad(): y = model(y)[0 if embeddings else 1] # convert to numpy y = y.detach().cpu().numpy() return y def extract_wav(path): wav, sr = librosa.load(path, sr = 16000) emb = process_func(np.expand_dims(wav, 0), sr, embeddings=True) return emb ================================================ FILE: modules/model/voice_encoder.py ================================================ from resemblyzer.hparams import * from resemblyzer import audio from pathlib import Path from typing import Union, List from torch import nn from time import perf_counter as timer import numpy as np import torch class VoiceEncoder(nn.Module): def __init__(self, device: Union[str, torch.device]="cpu", verbose=True, weights_fpath: Union[Path, str]=None): """ If None, defaults to cuda if it is available on your machine, otherwise the model will run on cpu. Outputs are always returned on the cpu, as numpy arrays. :param weights_fpath: path to ".pt" file path. If None, defaults to built-in "pretrained.pt" model """ super().__init__() # Define the network self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) self.linear = nn.Linear(model_hidden_size, model_embedding_size) self.relu = nn.ReLU() # Get the target device if device is None: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") elif isinstance(device, str): device = torch.device(device) self.device = device # Load the pretrained model'speaker weights if weights_fpath is None: weights_fpath = Path(__file__).resolve().parent.joinpath("pretrained.pt") else: weights_fpath = Path(weights_fpath) if not weights_fpath.exists(): raise Exception("Couldn't find the voice encoder pretrained model at %s." % weights_fpath) start = timer() checkpoint = torch.load(weights_fpath, map_location="cpu") self.load_state_dict(checkpoint["model_state"], strict=False) self.to(device) if verbose: print("Loaded the voice encoder model on %s in %.2f seconds." % (device.type, timer() - start)) def forward(self, mels: torch.FloatTensor): """ Computes the embeddings of a batch of utterance spectrograms. :param mels: a batch of mel spectrograms of same duration as a float32 tensor of shape (batch_size, n_frames, n_channels) :return: the embeddings as a float 32 tensor of shape (batch_size, embedding_size). Embeddings are positive and L2-normed, thus they lay in the range [0, 1]. """ # Pass the input through the LSTM layers and retrieve the final hidden state of the last # layer. Apply a cutoff to 0 for negative values and L2 normalize the embeddings. _, (hidden, _) = self.lstm(mels) embeds_raw = self.relu(self.linear(hidden[-1])) return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) @staticmethod def compute_partial_slices(n_samples: int, rate, min_coverage): """ Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain partial utterances of each. Both the waveform and the mel spectrogram slices are returned, so as to make each partial utterance waveform correspond to its spectrogram. 
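For example, with the usual resemblyzer hparams (sampling_rate=16000, mel_window_step=10, partials_n_frames=160) and rate=1.3 (the default used by embed_utterance below), samples_per_frame = 16000 * 10 // 1000 = 160 and frame_step = round((16000 / 1.3) / 160) = 77, so consecutive 1.6 s partial windows start roughly 0.77 s apart.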
The returned ranges may be indexing further than the length of the waveform. It is recommended that you pad the waveform with zeros up to wav_slices[-1].stop. :param n_samples: the number of samples in the waveform :param rate: how many partial utterances should occur per second. Partial utterances must cover the span of the entire utterance, thus the rate should not be lower than the inverse of the duration of a partial utterance. By default, partial utterances are 1.6s long and the minimum rate is thus 0.625. :param min_coverage: when reaching the last partial utterance, it may or may not have enough frames. If at least of are present, then the last partial utterance will be considered by zero-padding the audio. Otherwise, it will be discarded. If there aren't enough frames for one partial utterance, this parameter is ignored so that the function always returns at least one slice. :return: the waveform slices and mel spectrogram slices as lists of array slices. Index respectively the waveform and the mel spectrogram with these slices to obtain the partial utterances. """ assert 0 < min_coverage <= 1 # Compute how many frames separate two partial utterances samples_per_frame = int((sampling_rate * mel_window_step / 1000)) n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) frame_step = int(np.round((sampling_rate / rate) / samples_per_frame)) assert 0 < frame_step, "The rate is too high" assert frame_step <= partials_n_frames, "The rate is too low, it should be %f at least" % \ (sampling_rate / (samples_per_frame * partials_n_frames)) # Compute the slices wav_slices, mel_slices = [], [] steps = max(1, n_frames - partials_n_frames + frame_step + 1) for i in range(0, steps, frame_step): mel_range = np.array([i, i + partials_n_frames]) wav_range = mel_range * samples_per_frame mel_slices.append(slice(*mel_range)) wav_slices.append(slice(*wav_range)) # Evaluate whether extra padding is warranted or not last_wav_range = wav_slices[-1] coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) if coverage < min_coverage and len(mel_slices) > 1: mel_slices = mel_slices[:-1] wav_slices = wav_slices[:-1] return wav_slices, mel_slices def embed_utterance(self, wav: np.ndarray, return_partials=False, rate=1.3, min_coverage=0.75): """ Computes an embedding for a single utterance. The utterance is divided in partial utterances and an embedding is computed for each. The complete utterance embedding is the L2-normed average embedding of the partial utterances. TODO: independent batched version of this function :param wav: a preprocessed utterance waveform as a numpy array of float32 :param return_partials: if True, the partial embeddings will also be returned along with the wav slices corresponding to each partial utterance. :param rate: how many partial utterances should occur per second. Partial utterances must cover the span of the entire utterance, thus the rate should not be lower than the inverse of the duration of a partial utterance. By default, partial utterances are 1.6s long and the minimum rate is thus 0.625. :param min_coverage: when reaching the last partial utterance, it may or may not have enough frames. If at least of are present, then the last partial utterance will be considered by zero-padding the audio. Otherwise, it will be discarded. If there aren't enough frames for one partial utterance, this parameter is ignored so that the function always returns at least one slice. 
:return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If is True, the partial utterances as a numpy array of float32 of shape (n_partials, model_embedding_size) and the wav partials as a list of slices will also be returned. """ # Compute where to split the utterance into partials and pad the waveform with zeros if # the partial utterances cover a larger range. wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage) max_wave_length = wav_slices[-1].stop if max_wave_length >= len(wav): wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") # Split the utterance into partials and forward them through the model mel = audio.wav_to_mel_spectrogram(wav) mels = np.array([mel[s] for s in mel_slices]) with torch.no_grad(): mels = torch.from_numpy(mels).to(self.device) partial_embeds = self(mels).cpu().numpy() # Compute the utterance embedding from the partial embeddings raw_embed = np.mean(partial_embeds, axis=0) embed = raw_embed / np.linalg.norm(raw_embed, 2) if return_partials: return embed, partial_embeds, wav_slices return embed def embed_speaker(self, wavs: List[np.ndarray], **kwargs): """ Compute the embedding of a collection of wavs (presumably from the same speaker) by averaging their embedding and L2-normalizing it. :param wavs: list of wavs a numpy arrays of float32. :param kwargs: extra arguments to embed_utterance() :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). """ raw_embed = np.mean([self.embed_utterance(wav, return_partials=False, **kwargs) \ for wav in wavs], axis=0) return raw_embed / np.linalg.norm(raw_embed, 2) ================================================ FILE: modules/utils.py ================================================ from resemblyzer import preprocess_wav from modules.model.voice_encoder import VoiceEncoder from tqdm import tqdm import numpy as np import pickle import os import importlib class GetEmbeds: """ Used to obtain embedding vectors for audio. Directly input wav. """ def __init__(self, encoder_type, Speaker_name): self.encoder_type = encoder_type self.Speaker_name = Speaker_name if self.encoder_type == 'timbre': self.encoder = VoiceEncoder(weights_fpath="pretrain/encoder_1570000.bak") elif self.encoder_type == 'emotion': self.emotion_module = importlib.import_module('modules.model.emotion_encoder') elif self.encoder_type == 'mix': self.encoder = VoiceEncoder(weights_fpath="pretrain/encoder_1570000.bak") self.emotion_module = importlib.import_module('modules.model.emotion_encoder') else: raise ValueError( '%s is not currently supported.' 
% self.encoder_type ) def __call__(self, wav_fpaths): if self.encoder_type == 'timbre': embeds = self.timbre_encoder(wav_fpaths) if self.encoder_type == 'emotion': embeds = self.emotion_encoder(wav_fpaths) if self.encoder_type == 'mix': embeds = self.mix_encoder(wav_fpaths) return embeds def timbre_encoder(self, wav_fpaths): features_path = os.path.join("input", self.Speaker_name, "features(timbre).pkl") # Check if features already exist if os.path.exists(features_path): with open(features_path, 'rb') as f: embeds = pickle.load(f) else: wavs = [preprocess_wav(wav_fpath) for wav_fpath in \ tqdm(wav_fpaths, f"Preprocessing wavs ({len(wav_fpaths)} utterances)")] embeds = np.array(list(map(self.encoder.embed_utterance, wavs))) with open(features_path, 'wb') as f: pickle.dump(embeds, f) return embeds def emotion_encoder(self, wav_fpaths): features_path = os.path.join("input", self.Speaker_name, "features(emotion).pkl") # Check if features already exist if os.path.exists(features_path): with open(features_path, 'rb') as f: embeds = pickle.load(f) else: embeds = [self.emotion_module.extract_wav(wav_fpath) for wav_fpath in \ tqdm(wav_fpaths, f"Preprocessing wavs ({len(wav_fpaths)} utterances)")] embeds = np.concatenate(embeds,axis=0) with open(features_path, 'wb') as f: pickle.dump(embeds, f) return embeds def mix_encoder(self, wav_fpaths): features_path = os.path.join("input", self.Speaker_name, "features(mix).pkl") # Check if features already exist if os.path.exists(features_path): with open(features_path, 'rb') as f: embeds = pickle.load(f) else: timber_embeds = self.timbre_encoder(wav_fpaths) emotion_embeds = self.emotion_encoder(wav_fpaths) embeds = np.concatenate((timber_embeds, emotion_embeds), axis=1) with open(features_path, 'wb') as f: pickle.dump(embeds, f) return embeds ================================================ FILE: modules/visualizations.py ================================================ from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.animation import FuncAnimation from resemblyzer import sampling_rate from matplotlib import cm from time import sleep, perf_counter as timer from umap import UMAP from sys import stderr import matplotlib.pyplot as plt import numpy as np from sklearn.manifold import TSNE import json _default_colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] _my_colors = np.array([ [0, 127, 70], [255, 0, 0], [255, 217, 38], [0, 135, 255], [165, 0, 165], [255, 167, 255], [97, 142, 151], [0, 255, 255], [255, 96, 38], [142, 76, 0], [33, 0, 127], [0, 0, 0], [183, 183, 183], [76, 255, 0], ], dtype=float) / 255 def generate_colors(n): cm = plt.get_cmap('gist_rainbow') colors = [cm(1.*i/n) for i in range(n)] return colors def play_wav(wav, blocking=True): try: import sounddevice as sd # Small bug with sounddevice.play: the audio is cut 0.5 second too early. 
We pad it to # make up for that wav = np.concatenate((wav, np.zeros(sampling_rate // 2))) sd.play(wav, sampling_rate, blocking=blocking) except Exception as e: print("Failed to play audio: %s" % repr(e)) def plot_similarity_matrix(matrix, labels_a=None, labels_b=None, ax: plt.Axes=None, title=""): if ax is None: _, ax = plt.subplots() fig = plt.gcf() img = ax.matshow(matrix, extent=(-0.5, matrix.shape[0] - 0.5, -0.5, matrix.shape[1] - 0.5)) ax.xaxis.set_ticks_position("bottom") if labels_a is not None: ax.set_xticks(range(len(labels_a))) ax.set_xticklabels(labels_a, rotation=90) if labels_b is not None: ax.set_yticks(range(len(labels_b))) ax.set_yticklabels(labels_b[::-1]) # Upper origin -> reverse y axis ax.set_title(title) cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.15) fig.colorbar(img, cax=cax, ticks=np.linspace(0.4, 1, 7)) img.set_clim(0.4, 1) img.set_cmap("inferno") return ax def plot_histograms(all_samples, ax=None, names=None, title=""): """ Plots (possibly) overlapping histograms and their median """ if ax is None: _, ax = plt.subplots() for samples, color, name in zip(all_samples, _default_colors, names): ax.hist(samples, density=True, color=color + "80", label=name) ax.legend() ax.set_xlim(0.35, 1) ax.set_yticks([]) ax.set_title(title) ylim = ax.get_ylim() ax.set_ylim(*ylim) # Yeah, I know for samples, color in zip(all_samples, _default_colors): median = np.median(samples) ax.vlines(median, *ylim, color, "dashed") ax.text(median, ylim[1] * 0.15, "median", rotation=270, color=color) return ax def plot_projections(embeds, speakers, ax=None, colors=None, markers=None, legend=True, title="", cluster_name="", labels=None, **kwargs): if ax is None: _, ax = plt.subplots(figsize=(6, 6)) if cluster_name == 'spectral': reducer = TSNE(init='pca', **kwargs) if cluster_name == 'umap_hdbscan': reducer = UMAP(**kwargs) # Compute the 2D projections. You could also project to another number of dimensions (e.g. # for a 3D plot) or use a different different dimensionality reduction like PCA or TSNE. 
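    # Note: `reducer` was chosen above from `cluster_name` (TSNE for 'spectral',
    # UMAP for 'umap_hdbscan'); fit_transform maps the [N, C] embeddings to
    # [N, 2] points used for the scatter plot below.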
projs = reducer.fit_transform(embeds) # Draw the projections speakers = np.array(speakers) colors = generate_colors(len(np.unique(speakers))) colors = colors or _my_colors for i, speaker in enumerate(np.unique(speakers)): speaker_projs = projs[speakers == speaker] marker = "o" if markers is None else markers[i] label = speaker if legend else None ax.scatter(*speaker_projs.T, s=60, c=[colors[i]], marker=marker, label=label, edgecolors='k') if labels is not None: for j, (proj_x, proj_y) in enumerate(speaker_projs): label_index = np.where(speakers == speaker)[0][j] ax.text(proj_x, proj_y, str(labels[label_index]), fontsize=8, ha='right') center = speaker_projs.mean(axis=0) ax.scatter(*center, s=200, c=[colors[i]], marker="X", edgecolors='k') if legend: ax.legend(title="Speakers", ncol=2) ax.set_title(title) #ax.set_xticks([]) #ax.set_yticks([]) ax.grid(True) ax.set_aspect("equal") return projs def interactive_diarization(similarity_dict, wav, wav_splits, x_crop=5, show_time=False): fig, ax = plt.subplots() lines = [ax.plot([], [], label=name)[0] for name in similarity_dict.keys()] text = ax.text(0, 0, "", fontsize=10) def init(): ax.set_ylim(0.4, 1) ax.set_ylabel("Similarity") if show_time: ax.set_xlabel("Time (seconds)") else: ax.set_xticks([]) ax.set_title("Diarization") ax.legend(loc="lower right") return lines + [text] times = [((s.start + s.stop) / 2) / sampling_rate for s in wav_splits] rate = 1 / (times[1] - times[0]) crop_range = int(np.round(x_crop * rate)) ticks = np.arange(0, len(wav_splits), rate) ref_time = timer() def update(i): # Crop plot crop = (max(i - crop_range // 2, 0), i + crop_range // 2) ax.set_xlim(i - crop_range // 2, crop[1]) if show_time: crop_ticks = ticks[(crop[0] <= ticks) * (ticks <= crop[1])] ax.set_xticks(crop_ticks) ax.set_xticklabels(np.round(crop_ticks / rate).astype(np.int)) # Plot the prediction similarities = [s[i] for s in similarity_dict.values()] best = np.argmax(similarities) name, similarity = list(similarity_dict.keys())[best], similarities[best] if similarity > 0.75: message = "Speaker: %s (confident)" % name color = _default_colors[best] elif similarity > 0.65: message = "Speaker: %s (uncertain)" % name color = _default_colors[best] else: message = "Unknown/No speaker" color = "black" text.set_text(message) text.set_c(color) text.set_position((i, 0.96)) # Plot data for line, (name, similarities) in zip(lines, similarity_dict.items()): line.set_data(range(crop[0], i + 1), similarities[crop[0]:i + 1]) # Block to synchronize with the audio (interval is not reliable) current_time = timer() - ref_time if current_time < times[i]: sleep(times[i] - current_time) elif current_time - 0.2 > times[i]: print("Animation is delayed further than 200ms!", file=stderr) return lines + [text] ani = FuncAnimation(fig, update, frames=len(wav_splits), init_func=init, blit=not show_time, repeat=False, interval=1) play_wav(wav, blocking=False) plt.show() def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)): if ax is None: _, ax = plt.subplots() if shape is None: height = int(np.sqrt(len(embed))) shape = (height, -1) embed = embed.reshape(shape) cmap = cm.get_cmap() mappable = ax.imshow(embed, cmap=cmap) cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04) cbar.set_clim(*color_range) ax.set_xticks([]), ax.set_yticks([]) ax.set_title(title) def process_json_file(file_path): with open(file_path, 'r', encoding='utf-8') as file: data = json.load(file) merged_data = {} for key, value in data.items(): if value not in merged_data: 
merged_data[value] = [] merged_data[value].append(key) result_str = [""] for keys in merged_data.values(): result_str.append(','.join(keys)) return result_str ================================================ FILE: move_files.py ================================================ import os import shutil import pandas as pd import argparse import os parser = argparse.ArgumentParser() parser.add_argument('--spk', type=str, help='Speaker name') parser.add_argument('--encoder', type=str, default='timbre', help='encoder type') args = parser.parse_args() Speaker_name = args.spk #Speaker name encoder_name = args.encoder data = pd.read_csv(os.path.join('output', Speaker_name, f'clustered_files({encoder_name}).csv')) for index, row in data.iterrows(): file_path = row['filename'] clust = row['clust'] clust_dir = os.path.join('output', Speaker_name, str(clust)) if not os.path.exists(clust_dir): os.makedirs(clust_dir) shutil.copy(file_path, clust_dir) ================================================ FILE: pretrain/encoder_1570000.bak ================================================ [File too large to display: 16.3 MB] ================================================ FILE: pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/LICENSE ================================================ Attribution-NonCommercial-ShareAlike 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. 
Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. BY-NC-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. i. 
Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. k. NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. l. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. m. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. n. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and b. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. Additional offer from the Licensor -- Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply. c. No downstream restrictions. 
You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. b. ShareAlike. In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 1. The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 3. 
You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. 
For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org. 
================================================ FILE: pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/config.json ================================================
{ "_name_or_path": "torch", "activation_dropout": 0.1, "adapter_kernel_size": 3, "adapter_stride": 2, "add_adapter": false, "apply_spec_augment": true, "architectures": [ "Wav2Vec2ForSpeechClassification" ], "attention_dropout": 0.1, "bos_token_id": 1, "classifier_proj_size": 256, "codevector_dim": 768, "contrastive_logits_temperature": 0.1, "conv_bias": true, "conv_dim": [ 512, 512, 512, 512, 512, 512, 512 ], "conv_kernel": [ 10, 3, 3, 3, 3, 2, 2 ], "conv_stride": [ 5, 2, 2, 2, 2, 2, 2 ], "ctc_loss_reduction": "sum", "ctc_zero_infinity": false, "diversity_loss_weight": 0.1, "do_stable_layer_norm": true, "eos_token_id": 2, "feat_extract_activation": "gelu", "feat_extract_dropout": 0.0, "feat_extract_norm": "layer", "feat_proj_dropout": 0.1, "feat_quantizer_dropout": 0.0, "final_dropout": 0.1, "finetuning_task": "wav2vec2_reg", "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout": 0.1, "hidden_dropout_prob": 0.1, "hidden_size": 1024, "id2label": { "0": "arousal", "1": "dominance", "2": "valence" }, "initializer_range": 0.02, "intermediate_size": 4096, "label2id": { "arousal": 0, "dominance": 1, "valence": 2 }, "layer_norm_eps": 1e-05, "layerdrop": 0.1, "mask_feature_length": 10, "mask_feature_min_masks": 0, "mask_feature_prob": 0.0, "mask_time_length": 10, "mask_time_min_masks": 2, "mask_time_prob": 0.05, "model_type": "wav2vec2", "num_adapter_layers": 3, "num_attention_heads": 16, "num_codevector_groups": 2, "num_codevectors_per_group": 320, "num_conv_pos_embedding_groups": 16, "num_conv_pos_embeddings": 128, "num_feat_extract_layers": 7, "num_hidden_layers": 12, "num_negatives": 100, "output_hidden_size": 1024, "pad_token_id": 0, "pooling_mode": "mean", "problem_type": "regression", "proj_codevector_dim": 768, "tdnn_dilation": [ 1, 2, 3, 1, 1 ], "tdnn_dim": [ 512, 512, 512, 512, 1500 ], "tdnn_kernel": [ 5, 3, 3, 1, 1 ], "torch_dtype": "float32", "transformers_version": "4.17.0.dev0", "use_weighted_layer_sum": false, "vocab_size": null, "xvector_output_dim": 512 }
================================================ FILE: pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/preprocessor_config.json ================================================
{ "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": true, "sampling_rate": 16000 }
================================================ FILE: pretrain/wav2vec2-large-robust-12-ft-emotion-msp-dim/vocab.json ================================================
{}
================================================ FILE: requirements.txt ================================================
torch
matplotlib>=3.0.0
numpy>=1.20.3
pandas
Resemblyzer
scikit_learn
sounddevice
tqdm
umap_learn
hdbscan
transformers
================================================ FILE: splitter.py ================================================
import pandas as pd
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from modules.utils import GetEmbeds
from modules.visualizations import plot_projections
from modules.cluster import CommonClustering
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--spk", type=str, help="Speaker name")
parser.add_argument("--nmin", type=int, default=1, help="minimum number of clusters")
parser.add_argument(
    "--cluster", type=int, default=1, help="1:SpectralCluster, 2:UmapHdbscan"
)
parser.add_argument("--mer_cosine", type=str, default=None, help="merge similar embeds")
parser.add_argument("--encoder", type=str, default="timbre", help="encoder type")
args = parser.parse_args()

Speaker_name = args.spk  # Speaker name
Nmin = args.nmin  # minimum number of clusters
merge_cos = args.mer_cosine  # merge clusters that are too similar
encoder_name = args.encoder

# Collect the speaker's wav files and compute one embedding per file.
data_dir = os.path.join("input", Speaker_name, "raw", "wavs")
wav_fpaths = list(Path(data_dir).glob("*.wav"))
encoder = GetEmbeds(encoder_type=encoder_name, Speaker_name=Speaker_name)
embeds = encoder(wav_fpaths)

# Cluster, plot and export the results; repeat with a new Nmin until the user is satisfied.
while True:
    if args.cluster == 1:
        cluster_name = "spectral"
        Cluster = CommonClustering(
            cluster_type=cluster_name, mer_cos=merge_cos, min_num_spks=Nmin
        )
    elif args.cluster == 2:
        cluster_name = "umap_hdbscan"
        Cluster = CommonClustering(mer_cos=merge_cos, cluster_type=cluster_name)
    else:
        raise ValueError("cluster type error")
    labels = Cluster(embeds)

    output_dir = f"output/{Speaker_name}"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    df = pd.DataFrame(
        {"filename": [str(fpath) for fpath in wav_fpaths], "clust": labels}
    )
    proj = plot_projections(
        embeds, labels, title="Embedding projections", cluster_name=cluster_name
    )
    df["x"] = proj[:, 0]
    df["y"] = proj[:, 1]
    plt.savefig(f"{output_dir}/embedding_projections({encoder_name}).png", dpi=600)
    plt.show()
    df.to_csv(f"{output_dir}/clustered_files({encoder_name}).csv", index=False)

    user_input = input("Are you satisfied with the results?/是否满意结果?(y/n): ")
    if user_input.lower() == "y":
        break
    else:
        Nmin = int(input("Please enter a new Nmin value/请输入新的Nmin值: "))
================================================ FILE: viewer/.gitignore ================================================
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
================================================ FILE: viewer/README.md ================================================
# Cluster viewer

After running ColorSplitter you will get a CSV file. You can use this viewer to load that CSV file, listen to each point in the scatter plot, and interactively explore the clustering result.

## Prerequisites

- Bun (faster) or npm
- Modern web browser

## Build

1. Navigate to the viewer directory:

```bash
cd viewer
```

2. Install dependencies:

```bash
# Using Bun
bun install

# Or using npm
npm install
```

## Serve

You'll need to run two servers simultaneously, *in the viewer directory*:

1. Web application server

```bash
# Using Bun
bun run dev

# Or using npm
npm run dev
```

2. Audio file server

```bash
# Using Bun
bunx http-server --cors -p 8080

# Or using npm
npx http-server --cors -p 8080
```

## Usage

![screenshot](screenshot.png)

1. Open your browser and navigate to the URL shown by the web application server (typically something like http://localhost:5173)
2. Look for the tab titled "Vite + React + TS"
3. Drag and drop your CSV file (generated by ColorSplitter) into the designated dropping area
4. Explore your clusters and play audio samples by clicking on individual data points
================================================ FILE: viewer/eslint.config.js ================================================
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'

export default tseslint.config(
  { ignores: ['dist'] },
  {
    extends: [js.configs.recommended, ...tseslint.configs.recommended],
    files: ['**/*.{ts,tsx}'],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
    plugins: {
      'react-hooks': reactHooks,
      'react-refresh': reactRefresh,
    },
    rules: {
      ...reactHooks.configs.recommended.rules,
      'react-refresh/only-export-components': [
        'warn',
        { allowConstantExport: true },
      ],
    },
  },
)
================================================ FILE: viewer/index.html ================================================
Vite + React + TS
================================================ FILE: viewer/package.json ================================================ { "name": "viewer", "private": true, "version": "0.0.0", "type": "module", "scripts": { "dev": "vite", "build": "tsc -b && vite build", "lint": "eslint .", "preview": "vite preview" }, "dependencies": { "@tremor/react": "^3.18.7", "autoprefixer": "^10.4.20", "lucide-react": "^0.473.0", "postcss": "^8.5.1", "react": "^18.3.1", "react-dom": "^18.3.1", "recharts": "^2.15.0", "tailwindcss": "^3.4.17" }, "devDependencies": { "@eslint/js": "^9.17.0", "@types/react": "^18.3.18", "@types/react-dom": "^18.3.5", "@vitejs/plugin-react-swc": "^3.5.0", "eslint": "^9.17.0", "eslint-plugin-react-hooks": "^5.0.0", "eslint-plugin-react-refresh": "^0.4.16", "globals": "^15.14.0", "typescript": "~5.6.2", "typescript-eslint": "^8.18.2", "vite": "^6.0.5" } } ================================================ FILE: viewer/postcss.config.js ================================================ export default { plugins: { tailwindcss: {}, autoprefixer: {}, }, } ================================================ FILE: viewer/src/App.tsx ================================================ import AudioVisualizer from "./ScatterPlot" function App() { return ( ) } export default App ================================================ FILE: viewer/src/ScatterPlot.tsx ================================================ import React, { useState, useCallback } from 'react'; import { ScatterChart, Scatter, XAxis, YAxis, CartesianGrid } from 'recharts'; import { Play, Pause } from 'lucide-react'; interface DataPoint { filename: string; clust: number; x: number; y: number; } const AudioVisualizer = () => { const [data, setData] = useState([]); const [selectedFile, setSelectedFile] = useState(null); const [currentAudio, setCurrentAudio] = useState<{ file: string; audio: HTMLAudioElement | null; isPlaying: boolean; progress: number; } | null>(null); const handlePlay = (filename: string) => { if (currentAudio?.file === filename) { // Resume/pause existing audio if (currentAudio.isPlaying) { currentAudio.audio?.pause(); setCurrentAudio(prev => prev ? { ...prev, isPlaying: false } : null); } else { currentAudio.audio?.play(); setCurrentAudio(prev => prev ? { ...prev, isPlaying: true } : null); } } else { // Stop previous audio if any currentAudio?.audio?.pause(); // Create new audio const audio = new Audio(`http://localhost:8080/${filename}`); audio.addEventListener('ended', () => { setCurrentAudio(prev => prev ? { ...prev, isPlaying: false, progress: 0 } : null); }); audio.play(); setCurrentAudio({ file: filename, audio, isPlaying: true, progress: 0 }); } }; const handleDragOver = useCallback((e: React.DragEvent) => { e.preventDefault(); }, []); const handleDrop = useCallback((e: React.DragEvent) => { e.preventDefault(); const file = e.dataTransfer.files[0]; if (file && file.name.endsWith('.csv')) { const reader = new FileReader(); reader.onload = (event) => { const text = event.target?.result as string; const lines = text.split('\n'); const parsedData: DataPoint[] = lines.slice(1) .filter(line => line.trim()) .map(line => { const values = line.split(','); return { filename: values[0], clust: parseInt(values[1], 10), x: parseFloat(values[2]), y: parseFloat(values[3]) }; }); setData(parsedData); }; reader.readAsText(file); } }, []); const handleClick = (point: DataPoint) => { setSelectedFile(point.filename); }; const handleReset = () => { setSelectedFile(null); }; const displayFiles = selectedFile ? 
data.filter(d => d.filename === selectedFile) : data; return (

Drop CSV here

{data.length > 0 && ( {(() => { const maxCluster = Math.max(...data.map(d => d.clust)); const numClusters = maxCluster + 1; // Since clusters start from 0 return Array.from(new Set(data.map(d => d.clust))).map((cluster) => { const hue = (360 / numClusters) * cluster; return ( d.clust === cluster)} fill={`hsl(${hue}deg, 70%, 50%)`} onClick={(point) => { const p = point as unknown as DataPoint; handleClick(p); handlePlay(p.filename); }} cursor="pointer" /> ); }); })()} )}

Files

{displayFiles.map((file, index) => (
{file.filename.split('/').pop()}
))}
); }; export default AudioVisualizer; ================================================ FILE: viewer/src/index.css ================================================ @tailwind base; @tailwind components; @tailwind utilities; ================================================ FILE: viewer/src/main.tsx ================================================ import { StrictMode } from 'react' import { createRoot } from 'react-dom/client' import './index.css' import App from './App.tsx' createRoot(document.getElementById('root')!).render( , ) ================================================ FILE: viewer/src/vite-env.d.ts ================================================ /// ================================================ FILE: viewer/tailwind.config.js ================================================ /** @type {import('tailwindcss').Config} */ export default { content: [ "./index.html", "./src/**/*.{js,ts,jsx,tsx}", ], theme: { extend: {}, }, plugins: [], } ================================================ FILE: viewer/tsconfig.app.json ================================================ { "compilerOptions": { "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", "target": "ES2020", "useDefineForClassFields": true, "lib": ["ES2020", "DOM", "DOM.Iterable"], "module": "ESNext", "skipLibCheck": true, /* Bundler mode */ "moduleResolution": "bundler", "allowImportingTsExtensions": true, "isolatedModules": true, "moduleDetection": "force", "noEmit": true, "jsx": "react-jsx", /* Linting */ "strict": true, "noUnusedLocals": true, "noUnusedParameters": true, "noFallthroughCasesInSwitch": true, "noUncheckedSideEffectImports": true }, "include": ["src"] } ================================================ FILE: viewer/tsconfig.json ================================================ { "files": [], "references": [ { "path": "./tsconfig.app.json" }, { "path": "./tsconfig.node.json" } ] } ================================================ FILE: viewer/tsconfig.node.json ================================================ { "compilerOptions": { "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", "target": "ES2022", "lib": ["ES2023"], "module": "ESNext", "skipLibCheck": true, /* Bundler mode */ "moduleResolution": "bundler", "allowImportingTsExtensions": true, "isolatedModules": true, "moduleDetection": "force", "noEmit": true, /* Linting */ "strict": true, "noUnusedLocals": true, "noUnusedParameters": true, "noFallthroughCasesInSwitch": true, "noUncheckedSideEffectImports": true }, "include": ["vite.config.ts"] } ================================================ FILE: viewer/vite.config.ts ================================================ import { defineConfig } from 'vite' import react from '@vitejs/plugin-react-swc' // https://vite.dev/config/ export default defineConfig({ plugins: [react()], })