Mirror of https://github.com/FunAudioLLM/CosyVoice.git, synced 2026-02-05 01:49:25 +08:00
fix bug
@@ -193,13 +193,13 @@ class CosyVoiceFrontEnd:
         model_input = self.frontend_sft(tts_text, spk_id)
         # in instruct mode, we remove spk_embedding in llm due to information leakage
         del model_input['llm_embedding']
-        instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
+        instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text)
         model_input['prompt_text'] = instruct_text_token
         model_input['prompt_text_len'] = instruct_text_token_len
         return model_input
 
     def frontend_instruct2(self, tts_text, instruct_text, prompt_wav, resample_rate, zero_shot_spk_id):
-        model_input = self.frontend_zero_shot(tts_text, instruct_text + '<|endofprompt|>', prompt_wav, resample_rate, zero_shot_spk_id)
+        model_input = self.frontend_zero_shot(tts_text, instruct_text, prompt_wav, resample_rate, zero_shot_spk_id)
         del model_input['llm_prompt_speech_token']
         del model_input['llm_prompt_speech_token_len']
         return model_input
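For context, frontend_instruct is the frontend half of the CosyVoice-300M-Instruct pipeline and is reached through the public inference_instruct API, which normalizes the input text and calls the frontend once per segment. A minimal usage sketch based on the repository README, not on this commit; the model directory and both text strings are placeholders:

    import torchaudio
    from cosyvoice.cli.cosyvoice import CosyVoice

    # Placeholder model directory; assumes CosyVoice-300M-Instruct has been downloaded.
    cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-Instruct')

    # inference_instruct(tts_text, spk_id, instruct_text) normalizes tts_text and
    # calls CosyVoiceFrontEnd.frontend_instruct for each text segment.
    for i, out in enumerate(cosyvoice.inference_instruct(
            'He showed remarkable courage in the face of adversity.',  # tts_text (placeholder)
            '中文男',                                                   # spk_id, a built-in SFT speaker
            'A fiery, passionate rebel leader.',                        # instruct_text (placeholder)
            stream=False)):
        torchaudio.save('instruct_{}.wav'.format(i), out['tts_speech'], cosyvoice.sample_rate)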
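frontend_instruct2 is the CosyVoice2 counterpart, reached through inference_instruct2, which conditions on a prompt waveform instead of a speaker id. A sketch of that call path, again following the README; the last step rests on an assumption: if, after this commit, the frontend no longer appends '<|endofprompt|>' itself, the marker would have to arrive with the instruct text. That is a reading of the removed line above, not a documented contract:

    import torchaudio
    from cosyvoice.cli.cosyvoice import CosyVoice2
    from cosyvoice.utils.file_utils import load_wav

    # Placeholder model directory and prompt audio path.
    cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B')
    prompt_speech_16k = load_wav('./asset/zero_shot_prompt.wav', 16000)

    # Assumption: with the append removed from frontend_instruct2, the caller
    # terminates the instruct text with the special token itself.
    instruct_text = 'Speak in a cheerful, upbeat tone.' + '<|endofprompt|>'
    for i, out in enumerate(cosyvoice.inference_instruct2(
            'This is the sentence to synthesize.', instruct_text,
            prompt_speech_16k, stream=False)):
        torchaudio.save('instruct2_{}.wav'.format(i), out['tts_speech'], cosyvoice.sample_rate)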