From c6c3f27ecc8ec7fdd81102412e82f8475ee159ff Mon Sep 17 00:00:00 2001
From: "lyuxiang.lx"
Date: Thu, 23 Jan 2025 11:27:10 +0800
Subject: [PATCH 1/3] fix typo

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index cbe217a..62bdf1d 100644
--- a/README.md
+++ b/README.md
@@ -151,7 +151,7 @@ def text_generator():
     yield '那份意外的惊喜与深深的祝福'
     yield '让我心中充满了甜蜜的快乐,'
     yield '笑容如花儿般绽放。'
-for i, j in enumerate(cosyvoice.inference_zero_shot(text_generator, '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)):
+for i, j in enumerate(cosyvoice.inference_zero_shot(text_generator(), '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)):
     torchaudio.save('zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
 ```

From 69518b2bded1cae4315554eb67e171c4b7d96a84 Mon Sep 17 00:00:00 2001
From: "huzetao.hzt"
Date: Thu, 23 Jan 2025 19:08:18 +0800
Subject: [PATCH 2/3] fix bistream extra token

---
 cosyvoice/llm/llm.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/cosyvoice/llm/llm.py b/cosyvoice/llm/llm.py
index 78d1f9c..bbd3305 100644
--- a/cosyvoice/llm/llm.py
+++ b/cosyvoice/llm/llm.py
@@ -382,7 +382,10 @@ class Qwen2LM(TransformerLM):
                 if text_cache.size(1) >= self.mix_ratio[0]:
                     lm_input_text = text_cache[:, :self.mix_ratio[0]]
                     logging.info('append {} text token'.format(lm_input_text.size(1)))
-                    lm_input = torch.concat([lm_input, lm_input_text], dim=1)
+                    if len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2:
+                        lm_input = lm_input_text
+                    else:
+                        lm_input = torch.concat([lm_input, lm_input_text], dim=1)
                     text_cache = text_cache[:, self.mix_ratio[0]:]
                 else:
                     logging.info('not enough text token to decode, wait for more')

From f1c214377c04da7576e12265ee8d15852a738a54 Mon Sep 17 00:00:00 2001
From: sd0ric4 <1286518974@qq.com>
Date: Fri, 24 Jan 2025 19:41:14 +0800
Subject: [PATCH 3/3] fix: add POST endpoints to resolve browser error about
 GET request with body

---
 runtime/python/fastapi/server.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/runtime/python/fastapi/server.py b/runtime/python/fastapi/server.py
index 17aed2f..6c308a0 100644
--- a/runtime/python/fastapi/server.py
+++ b/runtime/python/fastapi/server.py
@@ -44,12 +44,14 @@ def generate_data(model_output):
 
 
 @app.get("/inference_sft")
+@app.post("/inference_sft")
 async def inference_sft(tts_text: str = Form(), spk_id: str = Form()):
     model_output = cosyvoice.inference_sft(tts_text, spk_id)
     return StreamingResponse(generate_data(model_output))
 
 
 @app.get("/inference_zero_shot")
+@app.post("/inference_zero_shot")
 async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(), prompt_wav: UploadFile = File()):
     prompt_speech_16k = load_wav(prompt_wav.file, 16000)
     model_output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
@@ -57,6 +59,7 @@ async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(),
 
 
 @app.get("/inference_cross_lingual")
+@app.post("/inference_cross_lingual")
 async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile = File()):
     prompt_speech_16k = load_wav(prompt_wav.file, 16000)
     model_output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
@@ -64,6 +67,7 @@ async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile
 
 
 @app.get("/inference_instruct")
+@app.post("/inference_instruct")
 async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instruct_text: str = Form()):
     model_output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
     return StreamingResponse(generate_data(model_output))
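
Note on PATCH 1/3: inference_zero_shot needs a generator object it can
iterate for streamed text input; the bare name text_generator is the
function object itself, which is not iterable, hence the added parentheses.
A minimal sketch of the distinction in plain Python (the consume helper is
a hypothetical stand-in, not CosyVoice API):

    def text_generator():
        yield 'first chunk, '
        yield 'second chunk.'

    def consume(text):
        # Stand-in for how a streaming TTS call walks its text argument.
        for chunk in text:
            print(chunk)

    consume(text_generator())    # OK: iterates the yielded strings
    try:
        consume(text_generator)  # bug fixed by the patch: not iterable
    except TypeError as e:
        print(e)                 # "'function' object is not iterable"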
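
Note on PATCH 2/3: per the subject line, the branch stops an extra token
from being replayed into the LM. When the last decoded token is the fill
token (speech_token_size + 2), the model has just asked for more text, so
the next step should start from the fresh text-token embeddings alone
instead of concatenating them onto the previous lm_input. A self-contained
sketch of the patched branch with toy tensors (speech_token_size,
mix_ratio, and all shapes are hypothetical stand-ins for the Qwen2LM
fields; the branch logic itself is copied from the diff):

    import torch

    speech_token_size = 6561             # hypothetical vocabulary size
    fill_token = speech_token_size + 2   # sentinel tested in the diff
    mix_ratio = [5, 15]                  # hypothetical text/speech ratio

    out_tokens = [10, 42, fill_token]    # decoding just emitted fill token
    lm_input = torch.zeros(1, 1, 896)    # embedding of last decoded token
    text_cache = torch.zeros(1, 8, 896)  # buffered text-token embeddings

    if text_cache.size(1) >= mix_ratio[0]:
        lm_input_text = text_cache[:, :mix_ratio[0]]
        if len(out_tokens) != 0 and out_tokens[-1] == fill_token:
            # Fill token means "send more text": restart from the text
            # embeddings rather than also replaying the fill token's input.
            lm_input = lm_input_text
        else:
            lm_input = torch.concat([lm_input, lm_input_text], dim=1)
        text_cache = text_cache[:, mix_ratio[0]:]

    print(lm_input.shape)  # torch.Size([1, 5, 896]): text tokens only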
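
Note on PATCH 3/3: browsers (and the fetch() spec) reject a GET request
that carries a body, so form-encoded calls to the GET-only routes failed
from browser clients; registering each handler under POST as well fixes
that. A hedged client sketch using the requests library (the host, port,
speaker id, and output filename are assumptions to adjust for your
deployment; the response streams whatever bytes generate_data yields):

    import requests

    url = 'http://127.0.0.1:50000/inference_sft'  # assumed host and port
    payload = {'tts_text': '你好,欢迎使用语音合成服务。', 'spk_id': '中文女'}

    # POST now works; a browser would refuse to attach this body to a GET.
    with requests.post(url, data=payload, stream=True) as resp:
        resp.raise_for_status()
        with open('sft_output.pcm', 'wb') as f:
            for chunk in resp.iter_content(chunk_size=16000):
                f.write(chunk)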