From a96ae1361608f3dfe54fc6a0ddeea8475e575dad Mon Sep 17 00:00:00 2001
From: "lyuxiang.lx"
Date: Wed, 23 Apr 2025 15:40:59 +0800
Subject: [PATCH] fix instruct2 bug

---
 cosyvoice/cli/cosyvoice.py | 4 ++--
 cosyvoice/cli/frontend.py  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/cosyvoice/cli/cosyvoice.py b/cosyvoice/cli/cosyvoice.py
index fc1ea90..d82f66e 100644
--- a/cosyvoice/cli/cosyvoice.py
+++ b/cosyvoice/cli/cosyvoice.py
@@ -177,10 +177,10 @@ class CosyVoice2(CosyVoice):
     def inference_instruct(self, *args, **kwargs):
         raise NotImplementedError('inference_instruct is not implemented for CosyVoice2!')
 
-    def inference_instruct2(self, tts_text, instruct_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
+    def inference_instruct2(self, tts_text, instruct_text, prompt_speech_16k, zero_shot_spk_id='', stream=False, speed=1.0, text_frontend=True):
         assert isinstance(self.model, CosyVoice2Model), 'inference_instruct2 is only implemented for CosyVoice2!'
         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
-            model_input = self.frontend.frontend_instruct2(i, instruct_text, prompt_speech_16k, self.sample_rate)
+            model_input = self.frontend.frontend_instruct2(i, instruct_text, prompt_speech_16k, self.sample_rate, zero_shot_spk_id)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
             for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
diff --git a/cosyvoice/cli/frontend.py b/cosyvoice/cli/frontend.py
index 99cdb18..36dcd18 100644
--- a/cosyvoice/cli/frontend.py
+++ b/cosyvoice/cli/frontend.py
@@ -196,8 +196,8 @@ class CosyVoiceFrontEnd:
         model_input['prompt_text_len'] = instruct_text_token_len
         return model_input
 
-    def frontend_instruct2(self, tts_text, instruct_text, prompt_speech_16k, resample_rate):
-        model_input = self.frontend_zero_shot(tts_text, instruct_text + '<|endofprompt|>', prompt_speech_16k, resample_rate)
+    def frontend_instruct2(self, tts_text, instruct_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
+        model_input = self.frontend_zero_shot(tts_text, instruct_text + '<|endofprompt|>', prompt_speech_16k, resample_rate, zero_shot_spk_id)
         del model_input['llm_prompt_speech_token']
         del model_input['llm_prompt_speech_token_len']
         return model_input