Mirror of https://github.com/shivammehta25/Matcha-TTS.git (synced 2026-02-05 02:09:21 +08:00)

Compare commits: 0.0.3 ... dependabot (37 commits)
| SHA1 |
|---|
| 0c0bcaf69f |
| fb7b954de5 |
| 5a52a67cf7 |
| 39cbd85236 |
| 95ec24b599 |
| 13ca33fbe5 |
| 19bea20928 |
| 8268360674 |
| a0bf4e9e9a |
| f1e8efdec2 |
| 4ec245e61e |
| 254a8e05ce |
| 0ed9290c31 |
| f39ee6cf3b |
| 6e71dc8b8f |
| ae2417c175 |
| 6c7a82a516 |
| 009b09a8b2 |
| a18db17330 |
| 263d5c4d4e |
| df896301ca |
| c8d0d60f87 |
| e540794e7e |
| b756809a32 |
| 1ead4303f3 |
| 7a29fef719 |
| 9ace522249 |
| ed6e6bbf6c |
| 51ea36d271 |
| 269609003b |
| 2a81800825 |
| 336dd20d5b |
| 01c99161c4 |
| 2c21a0edac |
| 25767f76a8 |
| 1b204ed42c |
| 2cd057187b |
.pre-commit-config.yaml

@@ -18,7 +18,7 @@ repos:

  # python code formatting
  - repo: https://github.com/psf/black
-   rev: 23.1.0
+   rev: 23.9.1
    hooks:
      - id: black
        args: [--line-length, "120"]

@@ -32,14 +32,14 @@ repos:

  # python upgrading syntax to newer version
  - repo: https://github.com/asottile/pyupgrade
-   rev: v3.3.1
+   rev: v3.14.0
    hooks:
      - id: pyupgrade
        args: [--py38-plus]

  # python check (PEP8), programming errors and code complexity
  - repo: https://github.com/PyCQA/flake8
-   rev: 6.0.0
+   rev: 6.1.0
    hooks:
      - id: flake8
        args:

@@ -54,6 +54,6 @@ repos:

  # pylint
  - repo: https://github.com/pycqa/pylint
-   rev: v2.8.2
+   rev: v3.0.0
    hooks:
      - id: pylint
.pylintrc (82 lines changed)

@@ -82,16 +82,6 @@ disable=missing-docstring,
        no-name-in-module,
        no-member,
        unsubscriptable-object,
-        print-statement,
-        parameter-unpacking,
-        unpacking-in-except,
-        old-raise-syntax,
-        backtick,
-        long-suffix,
-        old-ne-operator,
-        old-octal-literal,
-        import-star-module-level,
-        non-ascii-bytes-literal,
        raw-checker-failed,
        bad-inline-option,
        locally-disabled,

@@ -106,67 +96,6 @@ disable=missing-docstring,
        too-many-arguments,
        too-many-locals,
        too-many-statements,
-        apply-builtin,
-        basestring-builtin,
-        buffer-builtin,
-        cmp-builtin,
-        coerce-builtin,
-        execfile-builtin,
-        file-builtin,
-        long-builtin,
-        raw_input-builtin,
-        reduce-builtin,
-        standarderror-builtin,
-        unicode-builtin,
-        xrange-builtin,
-        coerce-method,
-        delslice-method,
-        getslice-method,
-        setslice-method,
-        no-absolute-import,
-        old-division,
-        dict-iter-method,
-        dict-view-method,
-        next-method-called,
-        metaclass-assignment,
-        indexing-exception,
-        raising-string,
-        reload-builtin,
-        oct-method,
-        hex-method,
-        nonzero-method,
-        cmp-method,
-        input-builtin,
-        round-builtin,
-        intern-builtin,
-        unichr-builtin,
-        map-builtin-not-iterating,
-        zip-builtin-not-iterating,
-        range-builtin-not-iterating,
-        filter-builtin-not-iterating,
-        using-cmp-argument,
-        eq-without-hash,
-        div-method,
-        idiv-method,
-        rdiv-method,
-        exception-message-attribute,
-        invalid-str-codec,
-        sys-max-int,
-        bad-python3-import,
-        deprecated-string-function,
-        deprecated-str-translate-call,
-        deprecated-itertools-function,
-        deprecated-types-field,
-        next-method-defined,
-        dict-items-not-iterating,
-        dict-keys-not-iterating,
-        dict-values-not-iterating,
-        deprecated-operator-function,
-        deprecated-urllib-function,
-        xreadlines-attribute,
-        deprecated-sys-function,
-        exception-escape,
-        comprehension-escape,
        duplicate-code,
        not-callable,
        import-outside-toplevel,

@@ -363,13 +292,6 @@ max-line-length=120

# Maximum number of lines in a module.
max-module-lines=1000

-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
-               dict-separator
-
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

@@ -599,5 +521,5 @@ min-public-methods=2

# Exceptions that will emit a warning when being caught. Defaults to
# "BaseException, Exception".
-overgeneral-exceptions=BaseException,
-                       Exception
+overgeneral-exceptions=builtins.BaseException,
+                       builtins.Exception
README.md (85 lines changed)

@@ -26,17 +26,16 @@ We propose 🍵 Matcha-TTS, a new approach to non-autoregressive neural TTS, tha

- Sounds highly natural
- Is very fast to synthesise from

-Check out our [demo page](https://shivammehta25.github.io/Matcha-TTS) and read [our arXiv preprint](https://arxiv.org/abs/2309.03199) for more details.
+Check out our [demo page](https://shivammehta25.github.io/Matcha-TTS) and read [our ICASSP 2024 paper](https://arxiv.org/abs/2309.03199) for more details.

[Pre-trained models](https://drive.google.com/drive/folders/17C_gYgEHOxI5ZypcfE_k1piKCtyR0isJ?usp=sharing) will be automatically downloaded with the CLI or gradio interface.

-[Try 🍵 Matcha-TTS on HuggingFace 🤗 spaces!](https://huggingface.co/spaces/shivammehta25/Matcha-TTS)
+You can also [try 🍵 Matcha-TTS in your browser on HuggingFace 🤗 spaces](https://huggingface.co/spaces/shivammehta25/Matcha-TTS).

-## Watch the teaser
+## Teaser video

[](https://youtu.be/xmvJkz3bqw0)

## Installation

1. Create an environment (suggested but optional)

@@ -46,7 +45,7 @@ conda create -n matcha-tts python=3.10 -y
conda activate matcha-tts
```

2. Install Matcha TTS using pip or from source

```bash
pip install matcha-tts

@@ -56,6 +55,8 @@ from source

```bash
pip install git+https://github.com/shivammehta25/Matcha-TTS.git
+cd Matcha-TTS
+pip install -e .
```

3. Run CLI / gradio app / jupyter notebook

@@ -187,16 +188,80 @@ python matcha/train.py experiment=ljspeech trainer.devices=[0,1]
matcha-tts --text "<INPUT TEXT>" --checkpoint_path <PATH TO CHECKPOINT>
```

## ONNX support

> Special thanks to [@mush42](https://github.com/mush42) for implementing ONNX export and inference support.

It is possible to export Matcha checkpoints to [ONNX](https://onnx.ai/), and run inference on the exported ONNX graph.

### ONNX export

To export a checkpoint to ONNX, first install ONNX with

```bash
pip install onnx
```

then run the following:

```bash
python3 -m matcha.onnx.export matcha.ckpt model.onnx --n-timesteps 5
```

Optionally, the ONNX exporter accepts **vocoder-name** and **vocoder-checkpoint** arguments. This enables you to embed the vocoder in the exported graph and generate waveforms in a single run (similar to end-to-end TTS systems).
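For example, to embed the `hifigan_univ_v1` vocoder in the exported graph (a sketch based on the exporter flags shown in `matcha/onnx/export.py` later in this diff; the vocoder checkpoint path is a placeholder):

```bash
python3 -m matcha.onnx.export matcha.ckpt model_with_vocoder.onnx --n-timesteps 5 \
    --vocoder-name hifigan_univ_v1 \
    --vocoder-checkpoint-path <PATH TO VOCODER CHECKPOINT>
```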
**Note** that `n_timesteps` is treated as a hyper-parameter rather than a model input. This means you should specify it during export (not during inference). If not specified, `n_timesteps` is set to **5**.

**Important**: for now, torch>=2.1.0 is needed for export since the `scaled_product_attention` operator is not exportable in older versions. Until the final version is released, those who want to export their models must install torch>=2.1.0 manually as a pre-release.
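A sketch of one way to get such a pre-release build, assuming PyTorch's nightly CPU wheel index (check pytorch.org for the command matching your platform and CUDA version):

```bash
pip install --upgrade --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
```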
### ONNX Inference

To run inference on the exported model, first install `onnxruntime` using

```bash
pip install onnxruntime
pip install onnxruntime-gpu  # for GPU inference
```

then use the following:

```bash
python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs
```

You can also control synthesis parameters:

```bash
python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs --temperature 0.4 --speaking_rate 0.9 --spk 0
```

To run inference on **GPU**, make sure to install **onnxruntime-gpu** package, and then pass `--gpu` to the inference command:

```bash
python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs --gpu
```

If you exported only Matcha to ONNX, this will write mel-spectrogram as graphs and `numpy` arrays to the output directory.
If you embedded the vocoder in the exported graph, this will write `.wav` audio files to the output directory.

If you exported only Matcha to ONNX, and you want to run a full TTS pipeline, you can pass a path to a vocoder model in `ONNX` format:

```bash
python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs --vocoder hifigan.small.onnx
```

This will write `.wav` audio files to the output directory.

## Citation information

If you use our code or otherwise find this work useful, please cite our paper:

```text
-@article{mehta2023matcha,
-  title={Matcha-TTS: A fast TTS architecture with conditional flow matching},
+@inproceedings{mehta2024matcha,
+  title={Matcha-{TTS}: A fast {TTS} architecture with conditional flow matching},
  author={Mehta, Shivam and Tu, Ruibo and Beskow, Jonas and Sz{\'e}kely, {\'E}va and Henter, Gustav Eje},
-  journal={arXiv preprint arXiv:2309.03199},
-  year={2023}
+  booktitle={Proc. ICASSP},
+  year={2024}
}
```

@@ -204,7 +269,7 @@ If you use our code or otherwise find this work useful, please cite our paper:

Since this code uses [Lightning-Hydra-Template](https://github.com/ashleve/lightning-hydra-template), you have all the powers that come with it.

-Other source code I would like to acknowledge:
+Other source code we would like to acknowledge:

- [Coqui-TTS](https://github.com/coqui-ai/TTS/tree/dev): For helping me figure out how to make cython binaries pip installable and encouragement
- [Hugging Face Diffusers](https://huggingface.co/): For their awesome diffusers library and its components
configs/data/hi-fi_en-US_female.yaml (new file, 14 lines)

@@ -0,0 +1,14 @@
defaults:
  - ljspeech
  - _self_

# Dataset URL: https://ast-astrec.nict.go.jp/en/release/hi-fi-captain/
_target_: matcha.data.text_mel_datamodule.TextMelDataModule
name: hi-fi_en-US_female
train_filelist_path: data/filelists/hi-fi-captain-en-us-female_train.txt
valid_filelist_path: data/filelists/hi-fi-captain-en-us-female_val.txt
batch_size: 32
cleaners: [english_cleaners_piper]
data_statistics: # Computed for this dataset
  mel_mean: -6.38385
  mel_std: 2.541796
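The `data_statistics` values above were computed for this dataset. If you adapt this config to another dataset, they would need to be recomputed; a usage sketch, assuming the repository's data-statistics helper lives at `matcha/utils/generate_data_statistics.py` and accepts the data config via `-i`:

```bash
python matcha/utils/generate_data_statistics.py -i hi-fi_en-US_female.yaml
```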
configs/experiment/hifi_dataset_piper_phonemizer.yaml (new file, 14 lines)

@@ -0,0 +1,14 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=multispeaker

defaults:
  - override /data: hi-fi_en-US_female.yaml

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hi-fi", "single_speaker", "piper_phonemizer", "en_US", "female"]

run_name: hi-fi_en-US_female_piper_phonemizer
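Training with this new experiment follows the same pattern as the `experiment=ljspeech` example in the README; a usage sketch, assuming the experiment name matches the file name above:

```bash
python matcha/train.py experiment=hifi_dataset_piper_phonemizer
```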
@@ -12,3 +12,4 @@ spk_emb_dim: 64
n_feats: 80
data_statistics: ${data.data_statistics}
out_size: null # Must be divisible by 4
+prior_loss: true
@@ -1 +1 @@
-0.0.3
+0.0.5.1
@@ -29,8 +29,15 @@ args = Namespace(

CURRENTLY_LOADED_MODEL = args.model

-MATCHA_TTS_LOC = lambda x: LOCATION / f"{x}.ckpt"  # noqa: E731
-VOCODER_LOC = lambda x: LOCATION / f"{x}"  # noqa: E731
+def MATCHA_TTS_LOC(x):
+    return LOCATION / f"{x}.ckpt"
+
+
+def VOCODER_LOC(x):
+    return LOCATION / f"{x}"

LOGO_URL = "https://shivammehta25.github.io/Matcha-TTS/images/logo.png"
RADIO_OPTIONS = {
    "Multi Speaker (VCTK)": {
@@ -18,13 +18,13 @@ from matcha.text import sequence_to_text, text_to_sequence
from matcha.utils.utils import assert_model_downloaded, get_user_data_dir, intersperse

MATCHA_URLS = {
-    "matcha_ljspeech": "https://drive.google.com/file/d/1BBzmMU7k3a_WetDfaFblMoN18GqQeHCg/view?usp=drive_link",
-    "matcha_vctk": "https://drive.google.com/file/d/1enuxmfslZciWGAl63WGh2ekVo00FYuQ9/view?usp=drive_link",
+    "matcha_ljspeech": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/matcha_ljspeech.ckpt",
+    "matcha_vctk": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/matcha_vctk.ckpt",
}

VOCODER_URLS = {
-    "hifigan_T2_v1": "https://drive.google.com/file/d/14NENd4equCBLyyCSke114Mv6YR_j_uFs/view?usp=drive_link",
-    "hifigan_univ_v1": "https://drive.google.com/file/d/1qpgI41wNXFcH-iKq1Y42JlBC9j0je8PW/view?usp=drive_link",
+    "hifigan_T2_v1": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/generator_v1",  # Old url: https://drive.google.com/file/d/14NENd4equCBLyyCSke114Mv6YR_j_uFs/view?usp=drive_link
+    "hifigan_univ_v1": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/g_02500000",  # Old url: https://drive.google.com/file/d/1qpgI41wNXFcH-iKq1Y42JlBC9j0je8PW/view?usp=drive_link
}

MULTISPEAKER_MODEL = {

@@ -63,7 +63,7 @@ def get_texts(args):
    if args.text:
        texts = [args.text]
    else:
-        with open(args.file) as f:
+        with open(args.file, encoding="utf-8") as f:
            texts = f.readlines()
    return texts

@@ -140,7 +140,7 @@ def validate_args(args):

    if args.checkpoint_path is None:
        # When using pretrained models
-        if args.model in SINGLESPEAKER_MODEL.keys():
+        if args.model in SINGLESPEAKER_MODEL:
            args = validate_args_for_single_speaker_model(args)

        if args.model in MULTISPEAKER_MODEL:
@@ -81,7 +81,7 @@ class BaseLightningClass(LightningModule, ABC):
            "step",
            float(self.global_step),
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )
@@ -73,16 +73,14 @@ class BASECFM(torch.nn.Module, ABC):
        # Or in future might add like a return_all_steps flag
        sol = []

-        steps = 1
-        while steps <= len(t_span) - 1:
+        for step in range(1, len(t_span)):
            dphi_dt = self.estimator(x, mask, mu, t, spks, cond)

            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
-            if steps < len(t_span) - 1:
-                dt = t_span[steps + 1] - t
-            steps += 1
+            if step < len(t_span) - 1:
+                dt = t_span[step + 1] - t

        return sol[-1]
@@ -34,6 +34,7 @@ class MatchaTTS(BaseLightningClass):  # 🍵
        out_size,
        optimizer=None,
        scheduler=None,
+        prior_loss=True,
    ):
        super().__init__()

@@ -44,6 +45,7 @@ class MatchaTTS(BaseLightningClass):  # 🍵
        self.spk_emb_dim = spk_emb_dim
        self.n_feats = n_feats
        self.out_size = out_size
+        self.prior_loss = prior_loss

        if n_spks > 1:
            self.spk_emb = torch.nn.Embedding(n_spks, spk_emb_dim)

@@ -116,7 +118,7 @@ class MatchaTTS(BaseLightningClass):  # 🍵
        w = torch.exp(logw) * x_mask
        w_ceil = torch.ceil(w) * length_scale
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
-        y_max_length = int(y_lengths.max())
+        y_max_length = y_lengths.max()
        y_max_length_ = fix_len_compatibility(y_max_length)

        # Using obtained durations `w` construct alignment map `attn`

@@ -228,7 +230,10 @@ class MatchaTTS(BaseLightningClass):  # 🍵
        # Compute loss of the decoder
        diff_loss, _ = self.decoder.compute_loss(x1=y, mask=y_mask, mu=mu_y, spks=spks, cond=cond)

-        prior_loss = torch.sum(0.5 * ((y - mu_y) ** 2 + math.log(2 * math.pi)) * y_mask)
-        prior_loss = prior_loss / (torch.sum(y_mask) * self.n_feats)
+        if self.prior_loss:
+            prior_loss = torch.sum(0.5 * ((y - mu_y) ** 2 + math.log(2 * math.pi)) * y_mask)
+            prior_loss = prior_loss / (torch.sum(y_mask) * self.n_feats)
+        else:
+            prior_loss = 0

        return dur_loss, prior_loss, diff_loss
matcha/onnx/__init__.py (new file, empty)

matcha/onnx/export.py (new file, 181 lines)

@@ -0,0 +1,181 @@
import argparse
import random
from pathlib import Path

import numpy as np
import torch
from lightning import LightningModule

from matcha.cli import VOCODER_URLS, load_matcha, load_vocoder

DEFAULT_OPSET = 15

SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False


class MatchaWithVocoder(LightningModule):
    def __init__(self, matcha, vocoder):
        super().__init__()
        self.matcha = matcha
        self.vocoder = vocoder

    def forward(self, x, x_lengths, scales, spks=None):
        mel, mel_lengths = self.matcha(x, x_lengths, scales, spks)
        wavs = self.vocoder(mel).clamp(-1, 1)
        lengths = mel_lengths * 256
        return wavs.squeeze(1), lengths


def get_exportable_module(matcha, vocoder, n_timesteps):
    """
    Return an appropriate `LighteningModule` and output-node names
    based on whether the vocoder is embedded in the final graph
    """

    def onnx_forward_func(x, x_lengths, scales, spks=None):
        """
        Custom forward function for accepting
        scaler parameters as tensors
        """
        # Extract scaler parameters from tensors
        temperature = scales[0]
        length_scale = scales[1]
        output = matcha.synthesise(x, x_lengths, n_timesteps, temperature, spks, length_scale)
        return output["mel"], output["mel_lengths"]

    # Monkey-patch Matcha's forward function
    matcha.forward = onnx_forward_func

    if vocoder is None:
        model, output_names = matcha, ["mel", "mel_lengths"]
    else:
        model = MatchaWithVocoder(matcha, vocoder)
        output_names = ["wav", "wav_lengths"]
    return model, output_names


def get_inputs(is_multi_speaker):
    """
    Create dummy inputs for tracing
    """
    dummy_input_length = 50
    x = torch.randint(low=0, high=20, size=(1, dummy_input_length), dtype=torch.long)
    x_lengths = torch.LongTensor([dummy_input_length])

    # Scales
    temperature = 0.667
    length_scale = 1.0
    scales = torch.Tensor([temperature, length_scale])

    model_inputs = [x, x_lengths, scales]
    input_names = [
        "x",
        "x_lengths",
        "scales",
    ]

    if is_multi_speaker:
        spks = torch.LongTensor([1])
        model_inputs.append(spks)
        input_names.append("spks")

    return tuple(model_inputs), input_names


def main():
    parser = argparse.ArgumentParser(description="Export 🍵 Matcha-TTS to ONNX")

    parser.add_argument(
        "checkpoint_path",
        type=str,
        help="Path to the model checkpoint",
    )
    parser.add_argument("output", type=str, help="Path to output `.onnx` file")
    parser.add_argument(
        "--n-timesteps", type=int, default=5, help="Number of steps to use for reverse diffusion in decoder (default 5)"
    )
    parser.add_argument(
        "--vocoder-name",
        type=str,
        choices=list(VOCODER_URLS.keys()),
        default=None,
        help="Name of the vocoder to embed in the ONNX graph",
    )
    parser.add_argument(
        "--vocoder-checkpoint-path",
        type=str,
        default=None,
        help="Vocoder checkpoint to embed in the ONNX graph for an `e2e` like experience",
    )
    parser.add_argument("--opset", type=int, default=DEFAULT_OPSET, help="ONNX opset version to use (default 15")

    args = parser.parse_args()

    print(f"[🍵] Loading Matcha checkpoint from {args.checkpoint_path}")
    print(f"Setting n_timesteps to {args.n_timesteps}")

    checkpoint_path = Path(args.checkpoint_path)
    matcha = load_matcha(checkpoint_path.stem, checkpoint_path, "cpu")

    if args.vocoder_name or args.vocoder_checkpoint_path:
        assert (
            args.vocoder_name and args.vocoder_checkpoint_path
        ), "Both vocoder_name and vocoder-checkpoint are required when embedding the vocoder in the ONNX graph."
        vocoder, _ = load_vocoder(args.vocoder_name, args.vocoder_checkpoint_path, "cpu")
    else:
        vocoder = None

    is_multi_speaker = matcha.n_spks > 1

    dummy_input, input_names = get_inputs(is_multi_speaker)
    model, output_names = get_exportable_module(matcha, vocoder, args.n_timesteps)

    # Set dynamic shape for inputs/outputs
    dynamic_axes = {
        "x": {0: "batch_size", 1: "time"},
        "x_lengths": {0: "batch_size"},
    }

    if vocoder is None:
        dynamic_axes.update(
            {
                "mel": {0: "batch_size", 2: "time"},
                "mel_lengths": {0: "batch_size"},
            }
        )
    else:
        print("Embedding the vocoder in the ONNX graph")
        dynamic_axes.update(
            {
                "wav": {0: "batch_size", 1: "time"},
                "wav_lengths": {0: "batch_size"},
            }
        )

    if is_multi_speaker:
        dynamic_axes["spks"] = {0: "batch_size"}

    # Create the output directory (if not exists)
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)

    model.to_onnx(
        args.output,
        dummy_input,
        input_names=input_names,
        output_names=output_names,
        dynamic_axes=dynamic_axes,
        opset_version=args.opset,
        export_params=True,
        do_constant_folding=True,
    )
    print(f"[🍵] ONNX model exported to {args.output}")


if __name__ == "__main__":
    main()
matcha/onnx/infer.py (new file, 168 lines)

@@ -0,0 +1,168 @@
import argparse
import os
import warnings
from pathlib import Path
from time import perf_counter

import numpy as np
import onnxruntime as ort
import soundfile as sf
import torch

from matcha.cli import plot_spectrogram_to_numpy, process_text


def validate_args(args):
    assert (
        args.text or args.file
    ), "Either text or file must be provided Matcha-T(ea)TTS need sometext to whisk the waveforms."
    assert args.temperature >= 0, "Sampling temperature cannot be negative"
    assert args.speaking_rate >= 0, "Speaking rate must be greater than 0"
    return args


def write_wavs(model, inputs, output_dir, external_vocoder=None):
    if external_vocoder is None:
        print("The provided model has the vocoder embedded in the graph.\nGenerating waveform directly")
        t0 = perf_counter()
        wavs, wav_lengths = model.run(None, inputs)
        infer_secs = perf_counter() - t0
        mel_infer_secs = vocoder_infer_secs = None
    else:
        print("[🍵] Generating mel using Matcha")
        mel_t0 = perf_counter()
        mels, mel_lengths = model.run(None, inputs)
        mel_infer_secs = perf_counter() - mel_t0
        print("Generating waveform from mel using external vocoder")
        vocoder_inputs = {external_vocoder.get_inputs()[0].name: mels}
        vocoder_t0 = perf_counter()
        wavs = external_vocoder.run(None, vocoder_inputs)[0]
        vocoder_infer_secs = perf_counter() - vocoder_t0
        wavs = wavs.squeeze(1)
        wav_lengths = mel_lengths * 256
        infer_secs = mel_infer_secs + vocoder_infer_secs

    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    for i, (wav, wav_length) in enumerate(zip(wavs, wav_lengths)):
        output_filename = output_dir.joinpath(f"output_{i + 1}.wav")
        audio = wav[:wav_length]
        print(f"Writing audio to {output_filename}")
        sf.write(output_filename, audio, 22050, "PCM_24")

    wav_secs = wav_lengths.sum() / 22050
    print(f"Inference seconds: {infer_secs}")
    print(f"Generated wav seconds: {wav_secs}")
    rtf = infer_secs / wav_secs
    if mel_infer_secs is not None:
        mel_rtf = mel_infer_secs / wav_secs
        print(f"Matcha RTF: {mel_rtf}")
    if vocoder_infer_secs is not None:
        vocoder_rtf = vocoder_infer_secs / wav_secs
        print(f"Vocoder RTF: {vocoder_rtf}")
    print(f"Overall RTF: {rtf}")


def write_mels(model, inputs, output_dir):
    t0 = perf_counter()
    mels, mel_lengths = model.run(None, inputs)
    infer_secs = perf_counter() - t0

    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    for i, mel in enumerate(mels):
        output_stem = output_dir.joinpath(f"output_{i + 1}")
        plot_spectrogram_to_numpy(mel.squeeze(), output_stem.with_suffix(".png"))
        np.save(output_stem.with_suffix(".numpy"), mel)

    wav_secs = (mel_lengths * 256).sum() / 22050
    print(f"Inference seconds: {infer_secs}")
    print(f"Generated wav seconds: {wav_secs}")
    rtf = infer_secs / wav_secs
    print(f"RTF: {rtf}")


def main():
    parser = argparse.ArgumentParser(
        description=" 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching"
    )
    parser.add_argument(
        "model",
        type=str,
        help="ONNX model to use",
    )
    parser.add_argument("--vocoder", type=str, default=None, help="Vocoder to use (defaults to None)")
    parser.add_argument("--text", type=str, default=None, help="Text to synthesize")
    parser.add_argument("--file", type=str, default=None, help="Text file to synthesize")
    parser.add_argument("--spk", type=int, default=None, help="Speaker ID")
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.667,
        help="Variance of the x0 noise (default: 0.667)",
    )
    parser.add_argument(
        "--speaking-rate",
        type=float,
        default=1.0,
        help="change the speaking rate, a higher value means slower speaking rate (default: 1.0)",
    )
    parser.add_argument("--gpu", action="store_true", help="Use CPU for inference (default: use GPU if available)")
    parser.add_argument(
        "--output-dir",
        type=str,
        default=os.getcwd(),
        help="Output folder to save results (default: current dir)",
    )

    args = parser.parse_args()
    args = validate_args(args)

    if args.gpu:
        providers = ["GPUExecutionProvider"]
    else:
        providers = ["CPUExecutionProvider"]
    model = ort.InferenceSession(args.model, providers=providers)

    model_inputs = model.get_inputs()
    model_outputs = list(model.get_outputs())

    if args.text:
        text_lines = args.text.splitlines()
    else:
        with open(args.file, encoding="utf-8") as file:
            text_lines = file.read().splitlines()

    processed_lines = [process_text(0, line, "cpu") for line in text_lines]
    x = [line["x"].squeeze() for line in processed_lines]
    # Pad
    x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True)
    x = x.detach().cpu().numpy()
    x_lengths = np.array([line["x_lengths"].item() for line in processed_lines], dtype=np.int64)
    inputs = {
        "x": x,
        "x_lengths": x_lengths,
        "scales": np.array([args.temperature, args.speaking_rate], dtype=np.float32),
    }
    is_multi_speaker = len(model_inputs) == 4
    if is_multi_speaker:
        if args.spk is None:
            args.spk = 0
            warn = "[!] Speaker ID not provided! Using speaker ID 0"
            warnings.warn(warn, UserWarning)
        inputs["spks"] = np.repeat(args.spk, x.shape[0]).astype(np.int64)

    has_vocoder_embedded = model_outputs[0].name == "wav"
    if has_vocoder_embedded:
        write_wavs(model, inputs, args.output_dir)
    elif args.vocoder:
        external_vocoder = ort.InferenceSession(args.vocoder, providers=providers)
        write_wavs(model, inputs, args.output_dir, external_vocoder=external_vocoder)
    else:
        warn = "[!] A vocoder is not embedded in the graph nor an external vocoder is provided. The mel output will be written as numpy arrays to `*.npy` files in the output directory"
        warnings.warn(warn, UserWarning)
        write_mels(model, inputs, args.output_dir)


if __name__ == "__main__":
    main()
@@ -15,6 +15,7 @@ import logging
import re

import phonemizer
+import piper_phonemize
from unidecode import unidecode

# To avoid excessive logging we set the log level of the phonemizer package to Critical

@@ -103,3 +104,13 @@ def english_cleaners2(text):
    phonemes = global_phonemizer.phonemize([text], strip=True, njobs=1)[0]
    phonemes = collapse_whitespace(phonemes)
    return phonemes
+
+
+def english_cleaners_piper(text):
+    """Pipeline for English text, including abbreviation expansion. + punctuation + stress"""
+    text = convert_to_ascii(text)
+    text = lowercase(text)
+    text = expand_abbreviations(text)
+    phonemes = "".join(piper_phonemize.phonemize_espeak(text=text, voice="en-US")[0])
+    phonemes = collapse_whitespace(phonemes)
+    return phonemes
@@ -7,15 +7,17 @@ import torch
def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
-    x = torch.arange(int(max_length), dtype=length.dtype, device=length.device)
+    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def fix_len_compatibility(length, num_downsamplings_in_unet=2):
-    while True:
-        if length % (2**num_downsamplings_in_unet) == 0:
-            return length
-        length += 1
+    factor = torch.scalar_tensor(2).pow(num_downsamplings_in_unet)
+    length = (length / factor).ceil() * factor
+    if not torch.onnx.is_in_onnx_export():
+        return length.int().item()
+    else:
+        return length


def convert_pad_shape(pad_shape):
@@ -115,7 +115,7 @@ def get_metric_value(metric_dict: Dict[str, Any], metric_name: str) -> float:
        return None

    if metric_name not in metric_dict:
-        raise Exception(
+        raise ValueError(
            f"Metric value not found! <metric_name={metric_name}>\n"
            "Make sure metric name logged in LightningModule is correct!\n"
            "Make sure `optimized_metric` name in `hparams_search` config is correct!"
@@ -205,11 +205,13 @@ def get_user_data_dir(appname="matcha_tts"):
    return final_path


-def assert_model_downloaded(checkpoint_path, url, use_wget=False):
+def assert_model_downloaded(checkpoint_path, url, use_wget=True):
    if Path(checkpoint_path).exists():
        log.debug(f"[+] Model already present at {checkpoint_path}!")
+        print(f"[+] Model already present at {checkpoint_path}!")
        return
    log.info(f"[-] Model not found at {checkpoint_path}! Will download it")
+    print(f"[-] Model not found at {checkpoint_path}! Will download it")
    checkpoint_path = str(checkpoint_path)
    if not use_wget:
        gdown.download(url=url, output=checkpoint_path, quiet=False, fuzzy=True)
@@ -35,10 +35,11 @@ torchaudio
matplotlib
pandas
conformer==0.3.2
-diffusers==0.21.2
+diffusers==0.27.2
notebook
ipywidgets
gradio
gdown
wget
seaborn
+piper_phonemize