mirror of https://github.com/FunAudioLLM/CosyVoice.git
synced 2026-02-04 17:39:25 +08:00
update with upstream
@@ -22,6 +22,8 @@ git submodule update --init --recursive
``` sh
conda create -n cosyvoice python=3.8
conda activate cosyvoice
# pynini is required by WeTextProcessing, use conda to install it as it can be executed on all platforms.
conda install -y -c conda-forge pynini==2.1.5
pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com

# If you encounter sox compatibility issues
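A quick way to confirm the environment above is usable (a minimal sketch, not part of the README; it only checks that the two text-normalization dependencies from this step import cleanly):

``` python
# Minimal sanity check (assumption: run inside the cosyvoice conda env).
import pynini                                   # installed from conda-forge above
from tn.chinese.normalizer import Normalizer    # shipped by WeTextProcessing

print("pynini and WeTextProcessing are importable")
```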
@@ -50,9 +52,9 @@ git clone https://www.modelscope.cn/iic/CosyVoice-300M-Instruct.git pretrained_m
git clone https://www.modelscope.cn/iic/CosyVoice-ttsfrd.git pretrained_models/CosyVoice-ttsfrd
```

Optionally, you can unzip the `ttsfrd` resource and install the `ttsfrd` package.
Optionally, you can unzip the `ttsfrd` resource and install the `ttsfrd` package for better text normalization performance.

Notice that this step is not necessary. If you do not install the `ttsfrd` package, you need to normalize input text manually.
Notice that this step is not necessary. If you do not install the `ttsfrd` package, we will use WeTextProcessing by default.
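If you want to see which backend will actually be picked up, a small check like the sketch below works (a hypothetical snippet, not part of the repo; it mirrors the fallback logic in the frontend):

``` python
# Hypothetical check: mirrors the frontend's fallback between ttsfrd
# and WeTextProcessing without loading any models.
try:
    import ttsfrd  # only present if the optional wheel was installed
    print("ttsfrd found: it will handle text normalization")
except ImportError:
    from tn.chinese.normalizer import Normalizer as ZhNormalizer  # WeTextProcessing
    print("ttsfrd not found: WeTextProcessing will be used by default")
```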
``` sh
cd pretrained_models/CosyVoice-ttsfrd/
@@ -142,7 +144,7 @@ python3 client.py --port 50000 --mode <sft|zero_shot|cross_lingual|instruct>
You can directly discuss on [Github Issues](https://github.com/FunAudioLLM/CosyVoice/issues).

You can also scan the QR code to join our officla Dingding chat group.
You can also scan the QR code to join our official Dingding chat group.

<img src="./asset/dingding.png" width="250px">
@@ -24,8 +24,10 @@ import inflect
try:
    import ttsfrd
    use_ttsfrd = True
except:
    print("failed to import ttsfrd, please normalize input text manually")
except ImportError:
    print("failed to import ttsfrd, use WeTextProcessing instead")
    from tn.chinese.normalizer import Normalizer as ZhNormalizer
    from tn.english.normalizer import Normalizer as EnNormalizer
    use_ttsfrd = False
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph
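The switch from a bare `except` to `except ImportError` is worth noting: a bare `except` would also swallow unrelated failures raised while importing `ttsfrd`. A tiny illustration (hypothetical, not repo code):

``` python
# With `except ImportError`, only "module not installed" triggers the
# WeTextProcessing fallback; any other error inside ttsfrd still surfaces.
try:
    import ttsfrd
except ImportError:
    ttsfrd = None  # fall back to WeTextProcessing in this case only
```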
@@ -61,6 +63,9 @@ class CosyVoiceFrontEnd:
            self.frd.set_lang_type('pinyin')
            self.frd.enable_pinyin_mix(True)
            self.frd.set_breakmodel_index(1)
        else:
            self.zh_tn_model = ZhNormalizer(remove_erhua=False, full_to_half=False)
            self.en_tn_model = EnNormalizer()

    def _extract_text_token(self, text):
        text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
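For context, the two WeTextProcessing normalizers created in the `else` branch can be used on their own; a minimal sketch with made-up sample sentences:

``` python
# Sketch of the fallback normalizers (sample inputs are illustrative only).
from tn.chinese.normalizer import Normalizer as ZhNormalizer
from tn.english.normalizer import Normalizer as EnNormalizer

zh_tn = ZhNormalizer(remove_erhua=False, full_to_half=False)  # same args as the frontend
en_tn = EnNormalizer()

print(zh_tn.normalize("会议在2024年6月3日 15:30开始"))  # digits/dates verbalized in Chinese
print(en_tn.normalize("The demo costs $3.50."))          # numbers/currency spelled out
```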
@@ -97,6 +102,8 @@ class CosyVoiceFrontEnd:
        if contains_chinese(text):
            if self.use_ttsfrd:
                text = self.frd.get_frd_extra_info(text, 'input')
            else:
                text = self.zh_tn_model.normalize(text)
            text = text.replace("\n", "")
            text = replace_blank(text)
            text = replace_corner_mark(text)
@@ -107,6 +114,7 @@ class CosyVoiceFrontEnd:
                                                token_min_n=60, merge_len=20,
                                                comma_split=False)]
        else:
            text = self.en_tn_model.normalize(text)
            text = spell_out_number(text, self.inflect_parser)
            texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
                                                token_min_n=60, merge_len=20,
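The English branch relies on `contains_chinese` to route the text and on `spell_out_number` (backed by `inflect`) to expand digits before splitting. A small standalone sketch (assumes the repo root is on `PYTHONPATH`):

``` python
# Sketch: route English text away from the Chinese normalizer and spell out digits.
import inflect
from cosyvoice.utils.frontend_utils import contains_chinese, spell_out_number

parser = inflect.engine()
text = "I bought 3 apples and 12 oranges."
if not contains_chinese(text):
    print(spell_out_number(text, parser))  # digits rendered as English words
```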
@@ -22,7 +22,6 @@ from torch.nn.utils.rnn import pad_sequence
import torch.nn.functional as F

torchaudio.set_audio_backend('soundfile')
torchaudio.utils.sox_utils.set_buffer_size(16500)

AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'])
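The line removed in this hunk appears to be the global `torchaudio.set_audio_backend('soundfile')` call; with the pinned torchaudio 2.0.2 a plain load works without it (a minimal sketch, `example.wav` is a placeholder path):

``` python
# Sketch: loading audio no longer requires setting a global backend first.
import torchaudio

waveform, sample_rate = torchaudio.load("example.wav")  # placeholder file
print(waveform.shape, sample_rate)
```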
@@ -74,7 +74,7 @@ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=
        return len(tokenize(_text)) < merge_len

    if lang == "zh":
        pounc = ['。', '？', '！', '；', '：', '.', '?', '!', ';']
        pounc = ['。', '？', '！', '；', '：', '、', '.', '?', '!', ';']
    else:
        pounc = ['.', '?', '!', ';', ':']
    if comma_split:
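The only change here is adding the enumeration comma `、` to the Chinese punctuation set, so enumerations now also become split points. A simplified stand-in for the splitting loop (not the repo's exact code) shows the effect:

``` python
# Simplified illustration of splitting on the extended Chinese punctuation set.
text = "苹果、香蕉、西瓜。"
pounc = ['。', '？', '！', '；', '：', '、', '.', '?', '!', ';']
pieces, st = [], 0
for i, c in enumerate(text):
    if c in pounc:
        pieces.append(text[st:i + 1])
        st = i + 1
print(pieces)  # ['苹果、', '香蕉、', '西瓜。']
```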
@@ -91,6 +91,11 @@ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=
                st = i + 2
            else:
                st = i + 1
    if len(utts) == 0:
        if lang == "zh":
            utts.append(text + '。')
        else:
            utts.append(text + '.')
    final_utts = []
    cur_utt = ""
    for utt in utts:
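The added `if len(utts) == 0` block guarantees at least one utterance when the input has no sentence-final punctuation at all. A hedged usage sketch (run from the repo root; the whitespace tokenizer is a crude stand-in for the real BPE tokenizer):

``` python
# Sketch: text with no sentence-final punctuation no longer yields an empty list.
from cosyvoice.utils.frontend_utils import split_paragraph

def tokenize(s):           # crude stand-in tokenizer, illustration only
    return s.split()

text = "short text without any ending punctuation"
pieces = split_paragraph(text, tokenize, lang="en", token_max_n=80,
                         token_min_n=60, merge_len=20, comma_split=False)
print(pieces)  # e.g. ['short text without any ending punctuation.']
```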
@@ -27,4 +27,5 @@ torch==2.0.1
torchaudio==2.0.2
wget==3.2
fastapi==0.111.0
fastapi-cli==0.0.4
WeTextProcessing==1.0.3