diff --git a/cosyvoice/cli/frontend.py b/cosyvoice/cli/frontend.py
index 32e5539..f10f655 100644
--- a/cosyvoice/cli/frontend.py
+++ b/cosyvoice/cli/frontend.py
@@ -21,12 +21,9 @@ import torchaudio.compliance.kaldi as kaldi
 import torchaudio
 import os
 import inflect
-try:
-    import ttsfrd
-    use_ttsfrd = True
-except:
-    print("failed to import ttsfrd, please normalize input text manually")
-    use_ttsfrd = False
+from tn.chinese.normalizer import Normalizer as ZhNormalizer
+from tn.english.normalizer import Normalizer as EnNormalizer
+
 from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph


@@ -53,14 +50,8 @@ class CosyVoiceFrontEnd:
         self.instruct = instruct
         self.allowed_special = allowed_special
         self.inflect_parser = inflect.engine()
-        self.use_ttsfrd = use_ttsfrd
-        if self.use_ttsfrd:
-            self.frd = ttsfrd.TtsFrontendEngine()
-            ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-            assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, 'failed to initialize ttsfrd resource'
-            self.frd.set_lang_type('pinyin')
-            self.frd.enable_pinyin_mix(True)
-            self.frd.set_breakmodel_index(1)
+        self.zh_tn_model = ZhNormalizer(remove_erhua=False,full_to_half=False)
+        self.en_tn_model = EnNormalizer()

     def _extract_text_token(self, text):
         text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
@@ -95,8 +86,7 @@ class CosyVoiceFrontEnd:
     def text_normalize(self, text, split=True):
         text = text.strip()
         if contains_chinese(text):
-            if self.use_ttsfrd:
-                text = self.frd.get_frd_extra_info(text, 'input')
+            text = self.zh_tn_model.normalize(text)
             text = text.replace("\n", "")
             text = replace_blank(text)
             text = replace_corner_mark(text)
@@ -107,6 +97,7 @@
                                                  token_min_n=60, merge_len=20,
                                                  comma_split=False)]
         else:
+            text = self.en_tn_model.normalize(text)
             text = spell_out_number(text, self.inflect_parser)
             texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
                                                  token_min_n=60, merge_len=20,
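
For context, the `tn.chinese` / `tn.english` normalizers imported above appear to come from the WeTextProcessing text-normalization package. Below is a minimal standalone sketch of the normalization calls this diff switches to; the constructor flags and `normalize()` method are taken from the diff itself, while the sample sentences and the `pip install WeTextProcessing` assumption are illustrative only.

```python
# Minimal sketch of the normalization path introduced by this change.
# Assumes the WeTextProcessing package is installed (pip install WeTextProcessing),
# which provides the tn.chinese / tn.english Normalizer classes used in the diff.
from tn.chinese.normalizer import Normalizer as ZhNormalizer
from tn.english.normalizer import Normalizer as EnNormalizer

# Same constructor arguments as in CosyVoiceFrontEnd.__init__ above.
zh_tn_model = ZhNormalizer(remove_erhua=False, full_to_half=False)
en_tn_model = EnNormalizer()

# Chinese branch: numbers, dates, etc. are expanded into spoken Chinese words.
print(zh_tn_model.normalize("他花了100块钱。"))

# English branch: numbers and abbreviations are expanded into spoken form
# before spell_out_number/split_paragraph run.
print(en_tn_model.normalize("It costs $100."))
```

This mirrors how `text_normalize` now calls `self.zh_tn_model.normalize(text)` for Chinese input and `self.en_tn_model.normalize(text)` for everything else, replacing the optional ttsfrd dependency.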