From a051a09ba4efc042533c0d2f4240c6acf9c42739 Mon Sep 17 00:00:00 2001
From: "lyuxiang.lx"
Date: Tue, 9 Dec 2025 15:41:02 +0000
Subject: [PATCH] remove unnecessary code

---
 README.md                  | 2 +-
 cosyvoice/cli/cosyvoice.py | 7 +------
 example.py                 | 5 ++---
 vllm_example.py            | 2 +-
 4 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 495934a..6b3297f 100644
--- a/README.md
+++ b/README.md
@@ -101,7 +101,7 @@ We strongly recommend that you download our pretrained `CosyVoice2-0.5B` `CosyVo
 ``` python
 # SDK模型下载
 from modelscope import snapshot_download
-snapshot_download('iic/CosyVoice3-0.5B', local_dir='pretrained_models/CosyVoice3-0.5B')
+snapshot_download('FunAudioLLM/Fun-CosyVoice3-0.5B', local_dir='pretrained_models/Fun-CosyVoice3-0.5B')
 snapshot_download('iic/CosyVoice2-0.5B', local_dir='pretrained_models/CosyVoice2-0.5B')
 snapshot_download('iic/CosyVoice-300M', local_dir='pretrained_models/CosyVoice-300M')
 snapshot_download('iic/CosyVoice-300M-SFT', local_dir='pretrained_models/CosyVoice-300M-SFT')
diff --git a/cosyvoice/cli/cosyvoice.py b/cosyvoice/cli/cosyvoice.py
index 6395d60..ce411c2 100644
--- a/cosyvoice/cli/cosyvoice.py
+++ b/cosyvoice/cli/cosyvoice.py
@@ -27,7 +27,6 @@ from cosyvoice.utils.class_utils import get_model_type
 class CosyVoice:
 
     def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False, trt_concurrent=1):
-        self.instruct = True if '-Instruct' in model_dir else False
         self.model_dir = model_dir
         self.fp16 = fp16
         if not os.path.exists(model_dir):
@@ -37,7 +36,7 @@ class CosyVoice:
             raise ValueError('{} not found!'.format(hyper_yaml_path))
         with open(hyper_yaml_path, 'r') as f:
             configs = load_hyperpyyaml(f)
-        assert get_model_type(configs) != CosyVoice2Model, 'do not use {} for CosyVoice initialization!'.format(model_dir)
+        assert get_model_type(configs) == CosyVoiceModel, 'do not use {} for CosyVoice initialization!'.format(model_dir)
         self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                           configs['feat_extractor'],
                                           '{}/campplus.onnx'.format(model_dir),
@@ -116,8 +115,6 @@ class CosyVoice:
 
     def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False, speed=1.0, text_frontend=True):
         assert isinstance(self.model, CosyVoiceModel), 'inference_instruct is only implemented for CosyVoice!'
-        if self.instruct is False:
-            raise ValueError('{} do not support instruct inference'.format(self.model_dir))
         instruct_text = self.frontend.text_normalize(instruct_text, split=False, text_frontend=text_frontend)
         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
@@ -142,7 +139,6 @@ class CosyVoice:
 class CosyVoice2(CosyVoice):
 
     def __init__(self, model_dir, load_jit=False, load_trt=False, load_vllm=False, fp16=False, trt_concurrent=1):
-        self.instruct = True if '-Instruct' in model_dir else False
         self.model_dir = model_dir
         self.fp16 = fp16
         if not os.path.exists(model_dir):
@@ -197,7 +193,6 @@ class CosyVoice2(CosyVoice):
 class CosyVoice3(CosyVoice2):
 
     def __init__(self, model_dir, load_trt=False, load_vllm=False, fp16=False, trt_concurrent=1):
-        self.instruct = True if '-Instruct' in model_dir else False
         self.model_dir = model_dir
         self.fp16 = fp16
         if not os.path.exists(model_dir):
diff --git a/example.py b/example.py
index 164acf6..1307ed4 100644
--- a/example.py
+++ b/example.py
@@ -1,7 +1,6 @@
 import sys
 sys.path.append('third_party/Matcha-TTS')
 from cosyvoice.cli.cosyvoice import AutoModel
-from cosyvoice.utils.file_utils import load_wav
 import torchaudio
 
 
@@ -68,7 +67,7 @@ def cosyvoice2_example():
 
 def cosyvoice3_example():
     """ CosyVoice3 Usage, check https://funaudiollm.github.io/cosyvoice3/ for more details """
-    cosyvoice = AutoModel(model_dir='pretrained_models/CosyVoice3-0.5B')
+    cosyvoice = AutoModel(model_dir='pretrained_models/Fun-CosyVoice3-0.5B')
     # zero_shot usage
     for i, j in enumerate(cosyvoice.inference_zero_shot('八百标兵奔北坡,北坡炮兵并排跑,炮兵怕把标兵碰,标兵怕碰炮兵炮。', 'You are a helpful assistant.<|endofprompt|>希望你以后能够做的比我还好呦。', './asset/zero_shot_prompt.wav', stream=False)):
         torchaudio.save('zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
@@ -77,7 +76,7 @@ def cosyvoice3_example():
     for i, j in enumerate(cosyvoice.inference_cross_lingual('You are a helpful assistant.<|endofprompt|>[breath]因为他们那一辈人[breath]在乡里面住的要习惯一点,[breath]邻居都很活络,[breath]嗯,都很熟悉。[breath]', './asset/zero_shot_prompt.wav', stream=False)):
         torchaudio.save('fine_grained_control_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
 
-    # instruct usage
+    # instruct usage, for supported control, check cosyvoice/utils/common.py#L28
     for i, j in enumerate(cosyvoice.inference_instruct2('好少咯,一般系放嗰啲国庆啊,中秋嗰啲可能会咯。', 'You are a helpful assistant. 请用广东话表达。<|endofprompt|>', './asset/zero_shot_prompt.wav', stream=False)):
         torchaudio.save('instruct_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
     for i, j in enumerate(cosyvoice.inference_instruct2('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', 'You are a helpful assistant. 请用尽可能快地语速说一句话。<|endofprompt|>', './asset/zero_shot_prompt.wav', stream=False)):
diff --git a/vllm_example.py b/vllm_example.py
index 5fbfe7d..3e4dd20 100644
--- a/vllm_example.py
+++ b/vllm_example.py
@@ -21,7 +21,7 @@ def cosyvoice2_example():
 
 def cosyvoice3_example():
     """ CosyVoice3 vllm usage """
-    cosyvoice = AutoModel(model_dir='pretrained_models/CosyVoice3-0.5B', load_trt=True, load_vllm=True, fp16=False)
+    cosyvoice = AutoModel(model_dir='pretrained_models/Fun-CosyVoice3-0.5B', load_trt=True, load_vllm=True, fp16=False)
     for i in tqdm(range(100)):
         set_all_random_seed(i)
         for _, _ in enumerate(cosyvoice.inference_zero_shot('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', 'You are a helpful assistant.<|endofprompt|>希望你以后能够做的比我还好呦。', './asset/zero_shot_prompt.wav', stream=False)):
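
For reference, a minimal end-to-end sketch of the workflow this patch renames, assuming the model id, directory name, and `AutoModel`/`inference_zero_shot` calls shown in the diff above; the prompt text and audio path are the ones already used in example.py:

``` python
# Minimal sketch (assumption: paths and calls as shown in this patch, not a new API).
import sys
sys.path.append('third_party/Matcha-TTS')

from modelscope import snapshot_download
from cosyvoice.cli.cosyvoice import AutoModel
import torchaudio

# Download the renamed Fun-CosyVoice3-0.5B checkpoint to the new local directory.
snapshot_download('FunAudioLLM/Fun-CosyVoice3-0.5B', local_dir='pretrained_models/Fun-CosyVoice3-0.5B')
cosyvoice = AutoModel(model_dir='pretrained_models/Fun-CosyVoice3-0.5B')

# Zero-shot synthesis: target text, prompt text, prompt audio (taken from example.py above).
for i, j in enumerate(cosyvoice.inference_zero_shot(
        '收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。',
        'You are a helpful assistant.<|endofprompt|>希望你以后能够做的比我还好呦。',
        './asset/zero_shot_prompt.wav', stream=False)):
    torchaudio.save('zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
```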