Merge pull request #56 from iflamed/fastapi

Add FastAPI server to serve TTS and a download script
Xiang Lyu
2024-07-11 15:21:49 +08:00
committed by GitHub
4 changed files with 190 additions and 1 deletion


@@ -152,4 +152,4 @@ You can also scan the QR code to join our official Dingding chat group.
5. We borrowed a lot of code from [WeNet](https://github.com/wenet-e2e/wenet).
## Disclaimer
The content provided above is for academic purposes only and is intended to demonstrate technical capabilities. Some examples are sourced from the internet. If any content infringes on your rights, please contact us to request its removal.


@@ -26,4 +26,6 @@ tensorboard==2.14.0
torch==2.0.1
torchaudio==2.0.2
wget==3.2
fastapi==0.111.0
fastapi-cli==0.0.4
WeTextProcessing==1.0.3


@@ -0,0 +1,78 @@
import argparse
import logging
import requests


def saveResponse(path, response):
    # Open the file in binary write mode
    with open(path, 'wb') as file:
        # Write the binary content of the response to the file
        file.write(response.content)


def main():
    api = args.api_base
    if args.mode == 'sft':
        url = api + "/api/inference/sft"
        payload = {
            'tts': args.tts_text,
            'role': args.spk_id
        }
        response = requests.post(url, data=payload)
        saveResponse(args.tts_wav, response)
    elif args.mode == 'zero_shot':
        url = api + "/api/inference/zero-shot"
        payload = {
            'tts': args.tts_text,
            'prompt': args.prompt_text
        }
        # Open the prompt audio in a context manager so the handle is closed
        with open(args.prompt_wav, 'rb') as wav:
            files = [('audio', ('prompt_audio.wav', wav, 'application/octet-stream'))]
            response = requests.post(url, data=payload, files=files)
        saveResponse(args.tts_wav, response)
    elif args.mode == 'cross_lingual':
        url = api + "/api/inference/cross-lingual"
        payload = {
            'tts': args.tts_text,
        }
        with open(args.prompt_wav, 'rb') as wav:
            files = [('audio', ('prompt_audio.wav', wav, 'application/octet-stream'))]
            response = requests.post(url, data=payload, files=files)
        saveResponse(args.tts_wav, response)
    else:
        url = api + "/api/inference/instruct"
        payload = {
            'tts': args.tts_text,
            'role': args.spk_id,
            'instruct': args.instruct_text
        }
        response = requests.post(url, data=payload)
        saveResponse(args.tts_wav, response)
    logging.info("Response saved to %s", args.tts_wav)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--api_base',
                        type=str,
                        default='http://127.0.0.1:6006')
    parser.add_argument('--mode',
                        default='sft',
                        choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
                        help='request mode')
    parser.add_argument('--tts_text',
                        type=str,
                        default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?')
    parser.add_argument('--spk_id',
                        type=str,
                        default='中文女')
    parser.add_argument('--prompt_text',
                        type=str,
                        default='希望你以后能够做的比我还好呦。')
    parser.add_argument('--prompt_wav',
                        type=str,
                        default='../../zero_shot_prompt.wav')
    parser.add_argument('--instruct_text',
                        type=str,
                        default='Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.')
    parser.add_argument('--tts_wav',
                        type=str,
                        default='demo.wav')
    args = parser.parse_args()
    prompt_sr, target_sr = 16000, 22050
    main()
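
A note on saveResponse: it buffers the whole WAV in memory via response.content. Not part of this PR, but for very long syntheses a streaming variant is a possible alternative; a minimal sketch, assuming the same endpoints and form payloads as above:

import requests

def save_response_streaming(path, url, payload):
    # stream=True defers the body download; iter_content writes it to disk in chunks
    with requests.post(url, data=payload, stream=True) as response:
        response.raise_for_status()
        with open(path, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)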


@@ -0,0 +1,109 @@
# Set inference model
# export MODEL_DIR=pretrained_models/CosyVoice-300M-Instruct
# For development
# fastapi dev --port 6006 fastapi_server.py
# For production deployment
# fastapi run --port 6006 fastapi_server.py
import os
import sys
import io
import time
from fastapi import FastAPI, Response, File, UploadFile, Form
from fastapi.responses import HTMLResponse
from contextlib import asynccontextmanager
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/../..'.format(ROOT_DIR))
sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR))
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav
import numpy as np
import torch
import torchaudio
import logging

logging.getLogger('matplotlib').setLevel(logging.WARNING)


class LaunchFailed(Exception):
    pass


@asynccontextmanager
async def lifespan(app: FastAPI):
    model_dir = os.getenv("MODEL_DIR", "pretrained_models/CosyVoice-300M-SFT")
    if model_dir:
        logging.info("MODEL_DIR is %s", model_dir)
        app.cosyvoice = CosyVoice('../../' + model_dir)
        # sft usage
        logging.info("Available speakers %s", app.cosyvoice.list_avaliable_spks())
    else:
        raise LaunchFailed("The MODEL_DIR environment variable must be set")
    yield


app = FastAPI(lifespan=lifespan)


def buildResponse(output):
    # Serialize the synthesized waveform to an in-memory WAV file
    buffer = io.BytesIO()
    torchaudio.save(buffer, output, 22050, format="wav")
    buffer.seek(0)
    return Response(content=buffer.read(), media_type="audio/wav")


@app.post("/api/inference/sft")
@app.get("/api/inference/sft")
async def sft(tts: str = Form(), role: str = Form()):
    start = time.process_time()
    output = app.cosyvoice.inference_sft(tts, role)
    end = time.process_time()
    logging.info("infer time is %s seconds", end - start)
    return buildResponse(output['tts_speech'])


@app.post("/api/inference/zero-shot")
async def zeroShot(tts: str = Form(), prompt: str = Form(), audio: UploadFile = File()):
    start = time.process_time()
    prompt_speech = load_wav(audio.file, 16000)
    # Round-trip through 16-bit PCM to normalize the uploaded prompt audio
    prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
    prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
    prompt_speech_16k = prompt_speech_16k.float() / (2**15)
    output = app.cosyvoice.inference_zero_shot(tts, prompt, prompt_speech_16k)
    end = time.process_time()
    logging.info("infer time is %s seconds", end - start)
    return buildResponse(output['tts_speech'])


@app.post("/api/inference/cross-lingual")
async def crossLingual(tts: str = Form(), audio: UploadFile = File()):
    start = time.process_time()
    prompt_speech = load_wav(audio.file, 16000)
    # Same 16-bit PCM round-trip as the zero-shot endpoint
    prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
    prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
    prompt_speech_16k = prompt_speech_16k.float() / (2**15)
    output = app.cosyvoice.inference_cross_lingual(tts, prompt_speech_16k)
    end = time.process_time()
    logging.info("infer time is %s seconds", end - start)
    return buildResponse(output['tts_speech'])


@app.post("/api/inference/instruct")
@app.get("/api/inference/instruct")
async def instruct(tts: str = Form(), role: str = Form(), instruct: str = Form()):
    start = time.process_time()
    output = app.cosyvoice.inference_instruct(tts, role, instruct)
    end = time.process_time()
    logging.info("infer time is %s seconds", end - start)
    return buildResponse(output['tts_speech'])


@app.get("/api/roles")
async def roles():
    return {"roles": app.cosyvoice.list_avaliable_spks()}


@app.get("/", response_class=HTMLResponse)
async def root():
    return """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="utf-8">
        <title>API Information</title>
    </head>
    <body>
        First query the supported roles from the Roles API, then submit a role and the text to synthesize to one of the TTS APIs. <a href='./docs'>API documentation</a>
    </body>
    </html>
    """