Mirror of https://github.com/shivammehta25/Matcha-TTS.git (synced 2026-02-04 17:59:19 +08:00)

Compare commits (27 commits)
Commit SHAs:
bd4d90d932, 108906c603, 354f5dc69f, 8e5f98476e, 7e499df0b2, 0735e653fc, f9843cfca4,
289ef51578, 7a65f83b17, 7275764a48, 863bfbdd8b, 4bc541705a, a3fea22988, d56f40765c,
b0ba920dc1, a220f283e3, 1df73ef43e, 404b045b65, 7cfae6bed4, a83fd29829, c8178bf2cd,
8b1284993a, 0000f93021, c2569a1018, bd058a68f7, 362ba2dce7, 77804265f8
.gitignore (vendored), 1 addition

@@ -161,3 +161,4 @@ generator_v1
 g_02500000
 gradio_cached_examples/
 synth_output/
+/data

@@ -10,7 +10,7 @@
 [](https://hydra.cc/)
 [](https://black.readthedocs.io/en/stable/)
 [](https://pycqa.github.io/isort/)
+[](https://pepy.tech/projects/matcha-tts)
 <p style="text-align: center;">
 <img src="https://shivammehta25.github.io/Matcha-TTS/images/logo.png" height="128"/>
 </p>

@@ -5,8 +5,8 @@ defaults:
 # Dataset URL: https://ast-astrec.nict.go.jp/en/release/hi-fi-captain/
 _target_: matcha.data.text_mel_datamodule.TextMelDataModule
 name: hi-fi_en-US_female
-train_filelist_path: data/filelists/hi-fi-captain-en-us-female_train.txt
-valid_filelist_path: data/filelists/hi-fi-captain-en-us-female_val.txt
+train_filelist_path: data/hi-fi_en-US_female/train.txt
+valid_filelist_path: data/hi-fi_en-US_female/val.txt
 batch_size: 32
 cleaners: [english_cleaners_piper]
 data_statistics: # Computed for this dataset

@@ -1 +1 @@
-0.0.6.0
+0.0.7.2

@@ -114,10 +114,10 @@ def load_matcha(model_name, checkpoint_path, device):
     return model


-def to_waveform(mel, vocoder, denoiser=None):
+def to_waveform(mel, vocoder, denoiser=None, denoiser_strength=0.00025):
     audio = vocoder(mel).clamp(-1, 1)
     if denoiser is not None:
-        audio = denoiser(audio.squeeze(), strength=0.00025).cpu().squeeze()
+        audio = denoiser(audio.squeeze(), strength=denoiser_strength).cpu().squeeze()

     return audio.cpu().squeeze()


@@ -336,7 +336,7 @@ def batched_synthesis(args, device, model, vocoder, denoiser, texts, spk):
             length_scale=args.speaking_rate,
         )

-        output["waveform"] = to_waveform(output["mel"], vocoder, denoiser)
+        output["waveform"] = to_waveform(output["mel"], vocoder, denoiser, args.denoiser_strength)
         t = (dt.datetime.now() - start_t).total_seconds()
         rtf_w = t * 22050 / (output["waveform"].shape[-1])
         print(f"[🍵-Batch: {i}] Matcha-TTS RTF: {output['rtf']:.4f}")

@@ -377,7 +377,7 @@ def unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk):
             spks=spk,
             length_scale=args.speaking_rate,
         )
-        output["waveform"] = to_waveform(output["mel"], vocoder, denoiser)
+        output["waveform"] = to_waveform(output["mel"], vocoder, denoiser, args.denoiser_strength)
         # RTF with HiFiGAN
         t = (dt.datetime.now() - start_t).total_seconds()
         rtf_w = t * 22050 / (output["waveform"].shape[-1])

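A minimal usage sketch of the widened to_waveform signature above. The load_vocoder helper, its return value, the vocoder name, and the checkpoint path are assumptions for illustration, not part of this diff:

import torch

from matcha.cli import load_vocoder, to_waveform  # load_vocoder is assumed to return (vocoder, denoiser)

device = torch.device("cpu")
vocoder, denoiser = load_vocoder("hifigan_univ_v1", "/path/to/generator_v1", device)  # assumed checkpoint path
mel = torch.randn(1, 80, 120, device=device)  # placeholder mel-spectrogram (batch, n_mels, frames)

# denoiser_strength is now a parameter instead of the hard-coded 0.00025
waveform = to_waveform(mel, vocoder, denoiser, denoiser_strength=0.0005)
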
@@ -234,9 +234,9 @@ class TextMelBatchCollate:

     def __call__(self, batch):
         B = len(batch)
-        y_max_length = max([item["y"].shape[-1] for item in batch])
+        y_max_length = max([item["y"].shape[-1] for item in batch])  # pylint: disable=consider-using-generator
         y_max_length = fix_len_compatibility(y_max_length)
-        x_max_length = max([item["x"].shape[-1] for item in batch])
+        x_max_length = max([item["x"].shape[-1] for item in batch])  # pylint: disable=consider-using-generator
         n_feats = batch[0]["y"].shape[-2]

         y = torch.zeros((B, n_feats, y_max_length), dtype=torch.float32)

@@ -4,6 +4,10 @@
 import torch


+class ModeException(Exception):
+    pass
+
+
 class Denoiser(torch.nn.Module):
     """Removes model bias from audio produced with waveglow"""


@@ -20,7 +24,7 @@ class Denoiser(torch.nn.Module):
         elif mode == "normal":
             mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device)
         else:
-            raise Exception(f"Mode {mode} if not supported")
+            raise ModeException(f"Mode {mode} if not supported")

         def stft_fn(audio, n_fft, hop_length, win_length, window):
             spec = torch.stft(

@@ -55,7 +55,7 @@ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin,
     if torch.max(y) > 1.0:
         print("max value is ", torch.max(y))

-    global mel_basis, hann_window  # pylint: disable=global-statement
+    global mel_basis, hann_window  # pylint: disable=global-statement,global-variable-not-assigned
     if fmax not in mel_basis:
         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
         mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)

@@ -1,7 +1,7 @@
 """ from https://github.com/jik876/hifi-gan """

 import torch
-import torch.nn as nn
+import torch.nn as nn  # pylint: disable=consider-using-from-import
 import torch.nn.functional as F
 from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
 from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm

@@ -2,7 +2,7 @@ import math
 from typing import Optional

 import torch
-import torch.nn as nn
+import torch.nn as nn  # pylint: disable=consider-using-from-import
 import torch.nn.functional as F
 from conformer import ConformerBlock
 from diffusers.models.activations import get_activation

@@ -3,10 +3,10 @@
 import math

 import torch
-import torch.nn as nn
+import torch.nn as nn  # pylint: disable=consider-using-from-import
 from einops import rearrange

-import matcha.utils as utils
+import matcha.utils as utils  # pylint: disable=consider-using-from-import
 from matcha.utils.model import sequence_mask

 log = utils.get_pylogger(__name__)

@@ -1,7 +1,7 @@
 from typing import Any, Dict, Optional

 import torch
-import torch.nn as nn
+import torch.nn as nn  # pylint: disable=consider-using-from-import
 from diffusers.models.attention import (
     GEGLU,
     GELU,

@@ -4,7 +4,7 @@ import random

 import torch

-import matcha.utils.monotonic_align as monotonic_align
+import matcha.utils.monotonic_align as monotonic_align  # pylint: disable=consider-using-from-import
 from matcha import utils
 from matcha.models.baselightningmodule import BaseLightningClass
 from matcha.models.components.flow_matching import CFM

@@ -106,6 +106,7 @@ class MatchaTTS(BaseLightningClass): # 🍵
                 # Lengths of mel spectrograms
                 "rtf": float,
                 # Real-time factor
+            }
         """
         # For RTF computation
         t = dt.datetime.now()

@@ -152,7 +153,7 @@ class MatchaTTS(BaseLightningClass): # 🍵
     def forward(self, x, x_lengths, y, y_lengths, spks=None, out_size=None, cond=None, durations=None):
         """
         Computes 3 losses:
-            1. duration loss: loss between predicted token durations and those extracted by Monotinic Alignment Search (MAS).
+            1. duration loss: loss between predicted token durations and those extracted by Monotonic Alignment Search (MAS).
             2. prior loss: loss between mel-spectrogram and encoder outputs.
             3. flow matching loss: loss between mel-spectrogram and decoder outputs.

@@ -7,6 +7,10 @@ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
 _id_to_symbol = {i: s for i, s in enumerate(symbols)}  # pylint: disable=unnecessary-comprehension


+class UnknownCleanerException(Exception):
+    pass
+
+
 def text_to_sequence(text, cleaner_names):
     """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
     Args:

@@ -48,6 +52,6 @@ def _clean_text(text, cleaner_names):
     for name in cleaner_names:
         cleaner = getattr(cleaners, name)
         if not cleaner:
-            raise Exception("Unknown cleaner: %s" % name)
+            raise UnknownCleanerException(f"Unknown cleaner: {name}")
         text = cleaner(text)
     return text

@@ -36,9 +36,12 @@ global_phonemizer = phonemizer.backend.EspeakBackend(
 # Regular expression matching whitespace:
 _whitespace_re = re.compile(r"\s+")

+# Remove brackets
+_brackets_re = re.compile(r"[\[\]\(\)\{\}]")
+
 # List of (regular expression, replacement) pairs for abbreviations:
 _abbreviations = [
-    (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
+    (re.compile(f"\\b{x[0]}\\.", re.IGNORECASE), x[1])
     for x in [
         ("mrs", "misess"),
         ("mr", "mister"),

@@ -72,6 +75,10 @@ def lowercase(text):
     return text.lower()


+def remove_brackets(text):
+    return re.sub(_brackets_re, "", text)
+
+
 def collapse_whitespace(text):
     return re.sub(_whitespace_re, " ", text)


@@ -101,10 +108,26 @@ def english_cleaners2(text):
     text = lowercase(text)
     text = expand_abbreviations(text)
     phonemes = global_phonemizer.phonemize([text], strip=True, njobs=1)[0]
+    # Added in some cases espeak is not removing brackets
+    phonemes = remove_brackets(phonemes)
     phonemes = collapse_whitespace(phonemes)
     return phonemes


+def ipa_simplifier(text):
+    replacements = [
+        ("ɐ", "ə"),
+        ("ˈə", "ə"),
+        ("ʤ", "dʒ"),
+        ("ʧ", "tʃ"),
+        ("ᵻ", "ɪ"),
+    ]
+    for replacement in replacements:
+        text = text.replace(replacement[0], replacement[1])
+    phonemes = collapse_whitespace(text)
+    return phonemes
+
+
 # I am removing this due to incompatibility with several version of python
 # However, if you want to use it, you can uncomment it
 # and install piper-phonemize with the following command:

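For illustration, a small sketch of what the new ipa_simplifier does to a phoneme string; the input below is an invented example and the module path matcha.text.cleaners is assumed:

from matcha.text.cleaners import ipa_simplifier  # assumed module path

raw = "ʤˈəst  ʧˈɛkɪŋ"  # espeak-style IPA with ligature affricates and doubled spaces
print(ipa_simplifier(raw))  # -> "dʒəst tʃˈɛkɪŋ": ligatures expanded, stress mark before ə dropped, whitespace collapsed
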
@@ -48,7 +48,7 @@ def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin,
     if torch.max(y) > 1.0:
         print("max value is ", torch.max(y))

-    global mel_basis, hann_window  # pylint: disable=global-statement
+    global mel_basis, hann_window  # pylint: disable=global-statement,global-variable-not-assigned
     if f"{str(fmax)}_{str(y.device)}" not in mel_basis:
         mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
         mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)

matcha/utils/data/__init__.py (new file, 0 lines)

matcha/utils/data/hificaptain.py (new file, 148 lines)

@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+import argparse
+import os
+import sys
+import tempfile
+from pathlib import Path
+
+import torchaudio
+from torch.hub import download_url_to_file
+from tqdm import tqdm
+
+from matcha.utils.data.utils import _extract_zip
+
+URLS = {
+    "en-US": {
+        "female": "https://ast-astrec.nict.go.jp/release/hi-fi-captain/hfc_en-US_F.zip",
+        "male": "https://ast-astrec.nict.go.jp/release/hi-fi-captain/hfc_en-US_M.zip",
+    },
+    "ja-JP": {
+        "female": "https://ast-astrec.nict.go.jp/release/hi-fi-captain/hfc_ja-JP_F.zip",
+        "male": "https://ast-astrec.nict.go.jp/release/hi-fi-captain/hfc_ja-JP_M.zip",
+    },
+}
+
+INFO_PAGE = "https://ast-astrec.nict.go.jp/en/release/hi-fi-captain/"
+
+# On their website they say "We NICT open-sourced Hi-Fi-CAPTAIN",
+# but they use this very-much-not-open-source licence.
+# Dunno if this is open washing or stupidity.
+LICENCE = "CC BY-NC-SA 4.0"
+
+# I'd normally put the citation here. It's on their website.
+# Boo to non-open-source stuff.
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("-s", "--save-dir", type=str, default=None, help="Place to store the downloaded zip files")
+    parser.add_argument(
+        "-r",
+        "--skip-resampling",
+        action="store_true",
+        default=False,
+        help="Skip resampling the data (from 48 to 22.05)",
+    )
+    parser.add_argument(
+        "-l", "--language", type=str, choices=["en-US", "ja-JP"], default="en-US", help="The language to download"
+    )
+    parser.add_argument(
+        "-g",
+        "--gender",
+        type=str,
+        choices=["male", "female"],
+        default="female",
+        help="The gender of the speaker to download",
+    )
+    parser.add_argument(
+        "-o",
+        "--output_dir",
+        type=str,
+        default="data",
+        help="Place to store the converted data. Top-level only, the subdirectory will be created",
+    )
+
+    return parser.parse_args()
+
+
+def process_text(infile, outpath: Path):
+    outmode = "w"
+    if infile.endswith("dev.txt"):
+        outfile = outpath / "valid.txt"
+    elif infile.endswith("eval.txt"):
+        outfile = outpath / "test.txt"
+    else:
+        outfile = outpath / "train.txt"
+    if outfile.exists():
+        outmode = "a"
+    with (
+        open(infile, encoding="utf-8") as inf,
+        open(outfile, outmode, encoding="utf-8") as of,
+    ):
+        for line in inf.readlines():
+            line = line.strip()
+            fileid, rest = line.split(" ", maxsplit=1)
+            outfile = str(outpath / f"{fileid}.wav")
+            of.write(f"{outfile}|{rest}\n")
+
+
+def process_files(zipfile, outpath, resample=True):
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        for filename in tqdm(_extract_zip(zipfile, tmpdirname)):
+            if not filename.startswith(tmpdirname):
+                filename = os.path.join(tmpdirname, filename)
+            if filename.endswith(".txt"):
+                process_text(filename, outpath)
+            elif filename.endswith(".wav"):
+                filepart = filename.rsplit("/", maxsplit=1)[-1]
+                outfile = str(outpath / filepart)
+                arr, sr = torchaudio.load(filename)
+                if resample:
+                    arr = torchaudio.functional.resample(arr, orig_freq=sr, new_freq=22050)
+                torchaudio.save(outfile, arr, 22050)
+            else:
+                continue
+
+
+def main():
+    args = get_args()
+
+    save_dir = None
+    if args.save_dir:
+        save_dir = Path(args.save_dir)
+        if not save_dir.is_dir():
+            save_dir.mkdir()
+
+    if not args.output_dir:
+        print("output directory not specified, exiting")
+        sys.exit(1)
+
+    URL = URLS[args.language][args.gender]
+    dirname = f"hi-fi_{args.language}_{args.gender}"
+
+    outbasepath = Path(args.output_dir)
+    if not outbasepath.is_dir():
+        outbasepath.mkdir()
+    outpath = outbasepath / dirname
+    if not outpath.is_dir():
+        outpath.mkdir()
+
+    resample = True
+    if args.skip_resampling:
+        resample = False
+
+    if save_dir:
+        zipname = URL.rsplit("/", maxsplit=1)[-1]
+        zipfile = save_dir / zipname
+        if not zipfile.exists():
+            download_url_to_file(URL, zipfile, progress=True)
+        process_files(zipfile, outpath, resample)
+    else:
+        with tempfile.NamedTemporaryFile(suffix=".zip", delete=True) as zf:
+            download_url_to_file(URL, zf.name, progress=True)
+            process_files(zf.name, outpath, resample)
+
+
+if __name__ == "__main__":
+    main()

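For context, a hypothetical programmatic use of the new Hi-Fi-CAPTAIN helpers, equivalent to running the script with its defaults; the zip path below is a placeholder for an archive pre-downloaded from URLS["en-US"]["female"]:

from pathlib import Path

from matcha.utils.data.hificaptain import process_files

zip_path = Path("downloads/hfc_en-US_F.zip")   # placeholder, pre-downloaded archive
out_path = Path("data/hi-fi_en-US_female")     # same directory the updated config hunk above points at
out_path.mkdir(parents=True, exist_ok=True)
process_files(zip_path, out_path, resample=True)  # writes 22.05 kHz wavs plus train/valid/test filelists
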
matcha/utils/data/ljspeech.py (new file, 97 lines)

@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+import argparse
+import random
+import tempfile
+from pathlib import Path
+
+from torch.hub import download_url_to_file
+
+from matcha.utils.data.utils import _extract_tar
+
+URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
+
+INFO_PAGE = "https://keithito.com/LJ-Speech-Dataset/"
+
+LICENCE = "Public domain (LibriVox copyright disclaimer)"
+
+CITATION = """
+@misc{ljspeech17,
+  author       = {Keith Ito and Linda Johnson},
+  title        = {The LJ Speech Dataset},
+  howpublished = {\\url{https://keithito.com/LJ-Speech-Dataset/}},
+  year         = 2017
+}
+"""
+
+
+def decision():
+    return random.random() < 0.98
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("-s", "--save-dir", type=str, default=None, help="Place to store the downloaded zip files")
+    parser.add_argument(
+        "output_dir",
+        type=str,
+        nargs="?",
+        default="data",
+        help="Place to store the converted data (subdirectory LJSpeech-1.1 will be created)",
+    )
+
+    return parser.parse_args()
+
+
+def process_csv(ljpath: Path):
+    if (ljpath / "metadata.csv").exists():
+        basepath = ljpath
+    elif (ljpath / "LJSpeech-1.1" / "metadata.csv").exists():
+        basepath = ljpath / "LJSpeech-1.1"
+    csvpath = basepath / "metadata.csv"
+    wavpath = basepath / "wavs"
+
+    with (
+        open(csvpath, encoding="utf-8") as csvf,
+        open(basepath / "train.txt", "w", encoding="utf-8") as tf,
+        open(basepath / "val.txt", "w", encoding="utf-8") as vf,
+    ):
+        for line in csvf.readlines():
+            line = line.strip()
+            parts = line.split("|")
+            wavfile = str(wavpath / f"{parts[0]}.wav")
+            if decision():
+                tf.write(f"{wavfile}|{parts[1]}\n")
+            else:
+                vf.write(f"{wavfile}|{parts[1]}\n")
+
+
+def main():
+    args = get_args()
+
+    save_dir = None
+    if args.save_dir:
+        save_dir = Path(args.save_dir)
+        if not save_dir.is_dir():
+            save_dir.mkdir()
+
+    outpath = Path(args.output_dir)
+    if not outpath.is_dir():
+        outpath.mkdir()
+
+    if save_dir:
+        tarname = URL.rsplit("/", maxsplit=1)[-1]
+        tarfile = save_dir / tarname
+        if not tarfile.exists():
+            download_url_to_file(URL, str(tarfile), progress=True)
+        _extract_tar(tarfile, outpath)
+        process_csv(outpath)
+    else:
+        with tempfile.NamedTemporaryFile(suffix=".tar.bz2", delete=True) as zf:
+            download_url_to_file(URL, zf.name, progress=True)
+            _extract_tar(zf.name, outpath)
+            process_csv(outpath)
+
+
+if __name__ == "__main__":
+    main()

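Similarly, a hypothetical programmatic use of the LJ Speech helpers; the archive path is a placeholder and is assumed to have been pre-downloaded from URL:

from pathlib import Path

from matcha.utils.data.ljspeech import process_csv
from matcha.utils.data.utils import _extract_tar

out_path = Path("data")
out_path.mkdir(exist_ok=True)
_extract_tar("downloads/LJSpeech-1.1.tar.bz2", str(out_path))  # placeholder archive path
process_csv(out_path)  # writes train.txt and val.txt under data/LJSpeech-1.1, one "wav_path|text" line per clip, roughly a 98/2 split
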
matcha/utils/data/utils.py (new file, 53 lines)

@@ -0,0 +1,53 @@
+# taken from https://github.com/pytorch/audio/blob/main/src/torchaudio/datasets/utils.py
+# Copyright (c) 2017 Facebook Inc. (Soumith Chintala)
+# Licence: BSD 2-Clause
+# pylint: disable=C0123
+
+import logging
+import os
+import tarfile
+import zipfile
+from pathlib import Path
+from typing import Any, List, Optional, Union
+
+_LG = logging.getLogger(__name__)
+
+
+def _extract_tar(from_path: Union[str, Path], to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
+    if type(from_path) is Path:
+        from_path = str(from_path)
+
+    if to_path is None:
+        to_path = os.path.dirname(from_path)
+
+    with tarfile.open(from_path, "r") as tar:
+        files = []
+        for file_ in tar:  # type: Any
+            file_path = os.path.join(to_path, file_.name)
+            if file_.isfile():
+                files.append(file_path)
+                if os.path.exists(file_path):
+                    _LG.info("%s already extracted.", file_path)
+                    if not overwrite:
+                        continue
+            tar.extract(file_, to_path)
+        return files
+
+
+def _extract_zip(from_path: Union[str, Path], to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
+    if type(from_path) is Path:
+        from_path = str(from_path)
+
+    if to_path is None:
+        to_path = os.path.dirname(from_path)
+
+    with zipfile.ZipFile(from_path, "r") as zfile:
+        files = zfile.namelist()
+        for file_ in files:
+            file_path = os.path.join(to_path, file_)
+            if os.path.exists(file_path):
+                _LG.info("%s already extracted.", file_path)
+                if not overwrite:
+                    continue
+            zfile.extract(file_, to_path)
+        return files

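These helpers mirror torchaudio's dataset utilities; a minimal sketch of how _extract_zip is called (the archive path is a placeholder, and this is the same pattern the Hi-Fi-CAPTAIN script above uses):

import tempfile

from matcha.utils.data.utils import _extract_zip

with tempfile.TemporaryDirectory() as tmpdir:
    # Returns the full member list; extraction of members already on disk is skipped unless overwrite=True.
    extracted = _extract_zip("downloads/hfc_en-US_F.zip", tmpdir)
    print(len(extracted), "files extracted")
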
@@ -102,10 +102,8 @@ def main():
     log.info("Dataloader loaded! Now computing stats...")
     params = compute_data_statistics(data_loader, cfg["n_feats"])
     print(params)
-    json.dump(
-        params,
-        open(output_file, "w"),
-    )
+    with open(output_file, "w", encoding="utf-8") as dumpfile:
+        json.dump(params, dumpfile)


 if __name__ == "__main__":

@@ -72,7 +72,7 @@ def print_config_tree(

     # save config tree to file
     if save_to_file:
-        with open(Path(cfg.paths.output_dir, "config_tree.log"), "w") as file:
+        with open(Path(cfg.paths.output_dir, "config_tree.log"), "w", encoding="utf-8") as file:
             rich.print(tree, file=file)


@@ -97,5 +97,5 @@ def enforce_tags(cfg: DictConfig, save_to_file: bool = False) -> None:
         log.info(f"Tags: {cfg.tags}")

     if save_to_file:
-        with open(Path(cfg.paths.output_dir, "tags.log"), "w") as file:
+        with open(Path(cfg.paths.output_dir, "tags.log"), "w", encoding="utf-8") as file:
             rich.print(cfg.tags, file=file)

@@ -35,7 +35,7 @@ torchaudio
 matplotlib
 pandas
 conformer==0.3.2
-diffusers==0.25.0
+diffusers # developed using version ==0.25.0
 notebook
 ipywidgets
 gradio==3.43.2

setup.py (11 lines changed)

@@ -16,9 +16,16 @@ with open("README.md", encoding="utf-8") as readme_file:
     README = readme_file.read()

 cwd = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(cwd, "matcha", "VERSION")) as fin:
+with open(os.path.join(cwd, "matcha", "VERSION"), encoding="utf-8") as fin:
     version = fin.read().strip()

+
+def get_requires():
+    requirements = os.path.join(os.path.dirname(__file__), "requirements.txt")
+    with open(requirements, encoding="utf-8") as reqfile:
+        return [str(r).strip() for r in reqfile]
+
+
 setup(
     name="matcha-tts",
     version=version,

@@ -28,7 +35,7 @@ setup(
     author="Shivam Mehta",
     author_email="shivam.mehta25@gmail.com",
     url="https://shivammehta25.github.io/Matcha-TTS",
-    install_requires=[str(r) for r in open(os.path.join(os.path.dirname(__file__), "requirements.txt"))],
+    install_requires=get_requires(),
     include_dirs=[numpy.get_include()],
     include_package_data=True,
     packages=find_packages(exclude=["tests", "tests/*", "examples", "examples/*"]),