Merge pull request #940 from c4fun/fastapi-cosyvoice2

Add an inference_instruct2 route to the FastAPI server to support CosyVoice2, and make CosyVoice2 the default model
This commit is contained in:
Xiang Lyu
2025-02-07 16:30:27 +08:00
committed by GitHub

View File

@@ -72,6 +72,14 @@ async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instr
model_output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
return StreamingResponse(generate_data(model_output))
@app.get("/inference_instruct2")
@app.post("/inference_instruct2")
async def inference_instruct2(tts_text: str = Form(), instruct_text: str = Form(), prompt_wav: UploadFile = File()):
    """Synthesize `tts_text` with CosyVoice2 instruct mode, guided by `instruct_text`.

    The uploaded `prompt_wav` is used as the reference speech; it is resampled
    to 16 kHz because that is the sample rate the model frontend expects.
    Audio chunks are streamed back to the client as they are generated.
    """
    reference_speech = load_wav(prompt_wav.file, 16000)
    audio_chunks = cosyvoice.inference_instruct2(tts_text, instruct_text, reference_speech)
    return StreamingResponse(generate_data(audio_chunks))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
@@ -90,4 +98,4 @@ if __name__ == '__main__':
        cosyvoice = CosyVoice2(args.model_dir)
    except Exception:
        raise TypeError('no valid model_type!')
    uvicorn.run(app, host="0.0.0.0", port=args.port)