Mirror of https://github.com/FunAudioLLM/CosyVoice.git, synced 2026-02-04 09:29:25 +08:00

Merge pull request #353 from FunAudioLLM/inference_streaming: onnx and fastapi
@@ -167,7 +167,7 @@ docker build -t cosyvoice:v1.0 .
 docker run -d --runtime=nvidia -p 50000:50000 cosyvoice:v1.0 /bin/bash -c "cd /opt/CosyVoice/CosyVoice/runtime/python/grpc && python3 server.py --port 50000 --max_conc 4 --model_dir iic/CosyVoice-300M && sleep infinity"
 cd grpc && python3 client.py --port 50000 --mode <sft|zero_shot|cross_lingual|instruct>
 # for fastapi usage
-docker run -d --runtime=nvidia -p 50000:50000 cosyvoice:v1.0 /bin/bash -c "cd /opt/CosyVoice/CosyVoice/runtime/python/fastapi && MODEL_DIR=iic/CosyVoice-300M fastapi dev --port 50000 server.py && sleep infinity"
+docker run -d --runtime=nvidia -p 50000:50000 cosyvoice:v1.0 /bin/bash -c "cd /opt/CosyVoice/CosyVoice/runtime/python/fastapi && python3 server.py --port 50000 --model_dir iic/CosyVoice-300M && sleep infinity"
 cd fastapi && python3 client.py --port 50000 --mode <sft|zero_shot|cross_lingual|instruct>
 ```
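For orientation, a minimal sketch of what the updated fastapi client does against these endpoints. This is not part of the commit: the endpoint name and payload keys follow the server.py changes below, and the 22050 Hz output rate is an assumption carried over from the old buildResponse helper.

```python
# Hypothetical standalone example; assumes the server above is reachable on
# localhost:50000 and streams raw 16-bit PCM chunks, as generate_data() does.
import numpy as np
import requests
import torch
import torchaudio

response = requests.get("http://127.0.0.1:50000/inference_sft",
                        data={'tts_text': 'Hello.', 'spk_id': '中文女'},  # placeholder speaker id
                        stream=True)
pcm = b''.join(response.iter_content(chunk_size=16000))
tts_speech = torch.from_numpy(np.frombuffer(pcm, dtype=np.int16).copy()).unsqueeze(0)
torchaudio.save('demo.wav', tts_speech, 22050)  # 22050 Hz is an assumption
```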
@@ -44,7 +44,7 @@ def main():
     torch._C._jit_set_profiling_mode(False)
     torch._C._jit_set_profiling_executor(False)

-    cosyvoice = CosyVoice(args.model_dir, load_jit=False, load_trt=False)
+    cosyvoice = CosyVoice(args.model_dir, load_jit=False, load_onnx=False)

     # 1. export llm text_encoder
     llm_text_encoder = cosyvoice.model.llm.text_encoder.half()
@@ -60,5 +60,12 @@ def main():
     script = torch.jit.optimize_for_inference(script)
     script.save('{}/llm.llm.fp16.zip'.format(args.model_dir))

+    # 3. export flow encoder
+    flow_encoder = cosyvoice.model.flow.encoder
+    script = torch.jit.script(flow_encoder)
+    script = torch.jit.freeze(script)
+    script = torch.jit.optimize_for_inference(script)
+    script.save('{}/flow.encoder.fp32.zip'.format(args.model_dir))
+

 if __name__ == '__main__':
     main()
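The new stage 3 mirrors stages 1 and 2. For reference, a sketch (not part of the commit; the path assumes the script's default model_dir layout) of reloading the resulting artifact, which is what the load_jit change below expects:

```python
import torch

# The scripted flow encoder is saved as a regular TorchScript archive and can
# be reloaded standalone, exactly like the two llm archives above.
flow_encoder = torch.jit.load('pretrained_models/CosyVoice-300M/flow.encoder.fp32.zip')
```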
cosyvoice/bin/export_onnx.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+# Copyright (c) 2024 Antgroup Inc (authors: Zhoubofan, hexisyztem@icloud.com)
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import logging
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+import os
+import sys
+ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append('{}/../..'.format(ROOT_DIR))
+sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR))
+import onnxruntime
+import random
+import torch
+from tqdm import tqdm
+from cosyvoice.cli.cosyvoice import CosyVoice
+
+
+def get_dummy_input(batch_size, seq_len, out_channels, device):
+    x = torch.rand((batch_size, out_channels, seq_len), dtype=torch.float32, device=device)
+    mask = torch.ones((batch_size, 1, seq_len), dtype=torch.float32, device=device)
+    mu = torch.rand((batch_size, out_channels, seq_len), dtype=torch.float32, device=device)
+    t = torch.rand((batch_size), dtype=torch.float32, device=device)
+    spks = torch.rand((batch_size, out_channels), dtype=torch.float32, device=device)
+    cond = torch.rand((batch_size, out_channels, seq_len), dtype=torch.float32, device=device)
+    return x, mask, mu, t, spks, cond
+
+
+def get_args():
+    parser = argparse.ArgumentParser(description='export your model for deployment')
+    parser.add_argument('--model_dir',
+                        type=str,
+                        default='pretrained_models/CosyVoice-300M',
+                        help='local path')
+    args = parser.parse_args()
+    print(args)
+    return args
+
+
+def main():
+    args = get_args()
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(asctime)s %(levelname)s %(message)s')
+
+    cosyvoice = CosyVoice(args.model_dir, load_jit=False, load_onnx=False)
+
+    # 1. export flow decoder estimator
+    estimator = cosyvoice.model.flow.decoder.estimator
+
+    device = cosyvoice.model.device
+    batch_size, seq_len = 1, 256
+    out_channels = cosyvoice.model.flow.decoder.estimator.out_channels
+    x, mask, mu, t, spks, cond = get_dummy_input(batch_size, seq_len, out_channels, device)
+    torch.onnx.export(
+        estimator,
+        (x, mask, mu, t, spks, cond),
+        '{}/flow.decoder.estimator.fp32.onnx'.format(args.model_dir),
+        export_params=True,
+        opset_version=18,
+        do_constant_folding=True,
+        input_names=['x', 'mask', 'mu', 't', 'spks', 'cond'],
+        output_names=['estimator_out'],
+        dynamic_axes={
+            'x': {0: 'batch_size', 2: 'seq_len'},
+            'mask': {0: 'batch_size', 2: 'seq_len'},
+            'mu': {0: 'batch_size', 2: 'seq_len'},
+            'cond': {0: 'batch_size', 2: 'seq_len'},
+            't': {0: 'batch_size'},
+            'spks': {0: 'batch_size'},
+            'estimator_out': {0: 'batch_size', 2: 'seq_len'},
+        }
+    )
+
+    # 2. test computation consistency
+    option = onnxruntime.SessionOptions()
+    option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+    option.intra_op_num_threads = 1
+    providers = ['CUDAExecutionProvider' if torch.cuda.is_available() else 'CPUExecutionProvider']
+    estimator_onnx = onnxruntime.InferenceSession('{}/flow.decoder.estimator.fp32.onnx'.format(args.model_dir), sess_options=option, providers=providers)
+
+    for _ in tqdm(range(10)):
+        x, mask, mu, t, spks, cond = get_dummy_input(random.randint(1, 6), random.randint(16, 512), out_channels, device)
+        output_pytorch = estimator(x, mask, mu, t, spks, cond)
+        ort_inputs = {
+            'x': x.cpu().numpy(),
+            'mask': mask.cpu().numpy(),
+            'mu': mu.cpu().numpy(),
+            't': t.cpu().numpy(),
+            'spks': spks.cpu().numpy(),
+            'cond': cond.cpu().numpy()
+        }
+        output_onnx = estimator_onnx.run(None, ort_inputs)[0]
+        torch.testing.assert_allclose(output_pytorch, torch.from_numpy(output_onnx).to(device), rtol=1e-2, atol=1e-4)
+
+
+if __name__ == "__main__":
+    main()
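As a quick cross-check of what the new script writes, a sketch (not part of the commit) of opening the exported estimator and listing its inputs, which should match the input_names passed to torch.onnx.export:

```python
import onnxruntime

# CPU provider is enough for inspection; the path assumes the script's default model_dir.
session = onnxruntime.InferenceSession(
    'pretrained_models/CosyVoice-300M/flow.decoder.estimator.fp32.onnx',
    providers=['CPUExecutionProvider'])
print([i.name for i in session.get_inputs()])   # ['x', 'mask', 'mu', 't', 'spks', 'cond']
print([i.shape for i in session.get_inputs()])  # dynamic batch_size / seq_len axes
```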
@@ -1,8 +0,0 @@
-# TODO: same logic as export_jit, complete the ONNX export of the estimator in the flow part.
-# Write the TensorRT installation steps here as a hint; if it is not installed, do not run this script, just tell the user to install it first, with no fallback.
-try:
-    import tensorrt
-except ImportError:
-    print('step 1: download\n step 2: unpack and install the whl')
-# In the install command, import the TensorRT root directory via an environment variable, e.g. os.environ['tensorrt_root_dir']/bin/exetrace, then run the export command with subprocess from Python.
-# Later the run command will be added to run.sh: tensorrt_root_dir=xxxx python cosyvoice/bin/export_trt.py --model_dir xxx
@@ -13,6 +13,7 @@
 # limitations under the License.
 import os
 import time
+from tqdm import tqdm
 from hyperpyyaml import load_hyperpyyaml
 from modelscope import snapshot_download
 from cosyvoice.cli.frontend import CosyVoiceFrontEnd
@@ -21,7 +22,7 @@ from cosyvoice.utils.file_utils import logging

 class CosyVoice:

-    def __init__(self, model_dir, load_jit=True):
+    def __init__(self, model_dir, load_jit=True, load_onnx=True):
         instruct = True if '-Instruct' in model_dir else False
         self.model_dir = model_dir
         if not os.path.exists(model_dir):
@@ -41,7 +42,10 @@ class CosyVoice:
                           '{}/hift.pt'.format(model_dir))
         if load_jit:
             self.model.load_jit('{}/llm.text_encoder.fp16.zip'.format(model_dir),
-                                '{}/llm.llm.fp16.zip'.format(model_dir))
+                                '{}/llm.llm.fp16.zip'.format(model_dir),
+                                '{}/flow.encoder.fp32.zip'.format(model_dir))
+        if load_onnx:
+            self.model.load_onnx('{}/flow.decoder.estimator.fp32.onnx'.format(model_dir))
         del configs

     def list_avaliable_spks(self):
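Taken together, a short usage sketch of the two new switches (not from the commit; the model path and speaker id are placeholders):

```python
# load_jit expects the TorchScript files written by export_jit.py; load_onnx
# expects the estimator written by export_onnx.py. Both default to True, so
# either run the export scripts first or pass False to use the plain weights.
from cosyvoice.cli.cosyvoice import CosyVoice

cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT',
                      load_jit=True, load_onnx=True)
for out in cosyvoice.inference_sft('Hello there.', '中文女'):  # placeholder speaker id
    print(out['tts_speech'].shape)
```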
@@ -49,7 +53,7 @@ class CosyVoice:
         return spks

     def inference_sft(self, tts_text, spk_id, stream=False):
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_sft(i, spk_id)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -61,7 +65,7 @@ class CosyVoice:

     def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False):
         prompt_text = self.frontend.text_normalize(prompt_text, split=False)
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -74,7 +78,7 @@ class CosyVoice:
     def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False):
         if self.frontend.instruct is True:
             raise ValueError('{} do not support cross_lingual inference'.format(self.model_dir))
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -88,7 +92,7 @@ class CosyVoice:
         if self.frontend.instruct is False:
             raise ValueError('{} do not support instruct inference'.format(self.model_dir))
         instruct_text = self.frontend.text_normalize(instruct_text, split=False)
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -18,7 +18,7 @@ import time
 from contextlib import nullcontext
 import uuid
 from cosyvoice.utils.common import fade_in_out
-
+import numpy as np

 class CosyVoiceModel:

@@ -60,11 +60,22 @@ class CosyVoiceModel:
         self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
         self.hift.to(self.device).eval()

-    def load_jit(self, llm_text_encoder_model, llm_llm_model):
+    def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
         llm_text_encoder = torch.jit.load(llm_text_encoder_model)
         self.llm.text_encoder = llm_text_encoder
         llm_llm = torch.jit.load(llm_llm_model)
         self.llm.llm = llm_llm
+        flow_encoder = torch.jit.load(flow_encoder_model)
+        self.flow.encoder = flow_encoder
+
+    def load_onnx(self, flow_decoder_estimator_model):
+        import onnxruntime
+        option = onnxruntime.SessionOptions()
+        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+        option.intra_op_num_threads = 1
+        providers = ['CUDAExecutionProvider' if torch.cuda.is_available() else 'CPUExecutionProvider']
+        del self.flow.decoder.estimator
+        self.flow.decoder.estimator = onnxruntime.InferenceSession(flow_decoder_estimator_model, sess_options=option, providers=providers)

     def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
         with self.llm_context:
@@ -169,4 +180,5 @@ class CosyVoiceModel:
             self.llm_end_dict.pop(this_uuid)
             self.mel_overlap_dict.pop(this_uuid)
             self.hift_cache_dict.pop(this_uuid)
-        torch.cuda.synchronize()
+        if torch.cuda.is_available():
+            torch.cuda.synchronize()
@@ -159,7 +159,7 @@ class ConditionalDecoder(nn.Module):
             _type_: _description_
         """

-        t = self.time_embeddings(t)
+        t = self.time_embeddings(t).to(t.dtype)
         t = self.time_mlp(t)

         x = pack([x, mu], "b * t")[0]
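The added cast keeps the sinusoidal time embedding in the dtype of the incoming timestep tensor, which matters once t_span is created with dtype=mu.dtype (see the flow_matching change below). A toy check of the same idiom (not from the commit; torch.sin stands in for the real time_embeddings module):

```python
import torch

t = torch.rand(2, dtype=torch.float16)
emb = torch.sin(t.float().unsqueeze(-1) * torch.arange(4))  # fp32 intermediate, stand-in for time_embeddings(t)
emb = emb.to(t.dtype)  # the added cast: the embedding follows the timestep dtype
assert emb.dtype == torch.float16
```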
@@ -113,7 +113,7 @@ class MaskedDiffWithXvec(torch.nn.Module):
         # concat text and prompt_text
         token_len1, token_len2 = prompt_token.shape[1], token.shape[1]
         token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
-        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(embedding)
+        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
         token = self.input_embedding(torch.clamp(token, min=0)) * mask

         # text encode
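Dropping the intermediate .float() lets .to(embedding) set device and dtype in one step, so the mask follows half-precision embeddings instead of being forced to fp32. A toy check (not from the commit; the literal tensor stands in for make_pad_mask):

```python
import torch

pad = torch.tensor([[False, False, True]])        # stand-in for make_pad_mask(token_len)
embedding = torch.zeros(1, 4, dtype=torch.float16)
mask = (~pad).unsqueeze(-1).to(embedding)         # Tensor.to(tensor) copies dtype and device
assert mask.dtype == torch.float16
```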
@@ -50,7 +50,7 @@ class ConditionalCFM(BASECFM):
             shape: (batch_size, n_feats, mel_timesteps)
         """
         z = torch.randn_like(mu) * temperature
-        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
+        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
         if self.t_scheduler == 'cosine':
             t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
         return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond)
@@ -71,16 +71,17 @@ class ConditionalCFM(BASECFM):
             cond: Not used but kept for future purposes
         """
         t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
+        t = t.unsqueeze(dim=0)

         # I am storing this because I can later plot it by putting a debugger here and saving it to a file
         # Or in future might add like a return_all_steps flag
         sol = []

         for step in range(1, len(t_span)):
-            dphi_dt = self.estimator(x, mask, mu, t, spks, cond)
+            dphi_dt = self.forward_estimator(x, mask, mu, t, spks, cond)
             # Classifier-Free Guidance inference introduced in VoiceBox
             if self.inference_cfg_rate > 0:
-                cfg_dphi_dt = self.estimator(
+                cfg_dphi_dt = self.forward_estimator(
                     x, mask,
                     torch.zeros_like(mu), t,
                     torch.zeros_like(spks) if spks is not None else None,
@@ -96,6 +97,21 @@ class ConditionalCFM(BASECFM):

         return sol[-1]

+    def forward_estimator(self, x, mask, mu, t, spks, cond):
+        if isinstance(self.estimator, torch.nn.Module):
+            return self.estimator.forward(x, mask, mu, t, spks, cond)
+        else:
+            ort_inputs = {
+                'x': x.cpu().numpy(),
+                'mask': mask.cpu().numpy(),
+                'mu': mu.cpu().numpy(),
+                't': t.cpu().numpy(),
+                'spks': spks.cpu().numpy(),
+                'cond': cond.cpu().numpy()
+            }
+            output = self.estimator.run(None, ort_inputs)[0]
+            return torch.tensor(output, dtype=x.dtype, device=x.device)
+
     def compute_loss(self, x1, mask, mu, spks=None, cond=None):
         """Computes diffusion loss
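The forward_estimator dispatch added here is the whole integration point between the Euler solver and the exported ONNX estimator. A condensed sketch of the same pattern (not from the commit; run_estimator is a hypothetical helper):

```python
import torch

def run_estimator(estimator, x, mask, mu, t, spks, cond):
    # torch path: call the module directly
    if isinstance(estimator, torch.nn.Module):
        return estimator(x, mask, mu, t, spks, cond)
    # ORT path: feeds must be CPU numpy arrays, named as in export_onnx.py;
    # note the CPU round-trip on every solver step.
    feeds = {'x': x, 'mask': mask, 'mu': mu, 't': t, 'spks': spks, 'cond': cond}
    out = estimator.run(None, {k: v.cpu().numpy() for k, v in feeds.items()})[0]
    return torch.tensor(out, dtype=x.dtype, device=x.device)
```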
@@ -340,7 +340,7 @@ class HiFTGenerator(nn.Module):
         s = self._f02source(f0)

         # use cache_source to avoid glitch
-        if cache_source.shape[2] == 0:
+        if cache_source.shape[2] != 0:
             s[:, :, :cache_source.shape[2]] = cache_source

         s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
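The flipped condition is the actual fix: the cached source should only overwrite the head of the new excitation when a cache exists, i.e. from the second streaming chunk onward. A toy illustration (not from the commit):

```python
import torch

# First chunk: the cache is empty, so the source stays untouched. Later chunks:
# the first cache_source.shape[2] samples are replaced to keep the excitation
# continuous across chunk boundaries and avoid audible glitches.
s = torch.randn(1, 1, 480)
cache_source = torch.zeros(1, 1, 0)  # empty on the first chunk
if cache_source.shape[2] != 0:       # the corrected guard
    s[:, :, :cache_source.shape[2]] = cache_source
```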
@@ -102,4 +102,10 @@ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
       --deepspeed_config ./conf/ds_stage2.json \
       --deepspeed.save_states model+optimizer
   done
 fi
+
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+  echo "Export your model for inference speedup. Remember copy your llm or flow model to model_dir"
+  python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
+  python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
+fi

The same hunk is applied to a second recipe's run.sh:

@@ -102,4 +102,10 @@ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
       --deepspeed_config ./conf/ds_stage2.json \
       --deepspeed.save_states model+optimizer
   done
 fi
+
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+  echo "Export your model for inference speedup. Remember copy your llm or flow model to model_dir"
+  python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
+  python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
+fi
@@ -15,6 +15,7 @@ matplotlib==3.7.5
 modelscope==1.15.0
 networkx==3.1
 omegaconf==2.3.0
 onnx==1.16.0
+onnxruntime-gpu==1.16.0; sys_platform == 'linux'
 onnxruntime==1.16.0; sys_platform == 'darwin' or sys_platform == 'windows'
 openai-whisper==20231117
@@ -25,6 +26,7 @@ soundfile==0.12.1
 tensorboard==2.14.0
 torch==2.0.1
 torchaudio==2.0.2
+uvicorn==0.30.0
 wget==3.2
 fastapi==0.111.0
 fastapi-cli==0.0.4
@@ -1,56 +1,68 @@
 # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #   http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import argparse
 import logging
 import requests
+import torch
+import torchaudio
+import numpy as np

-def saveResponse(path, response):
-    # open the file in binary write mode
-    with open(path, 'wb') as file:
-        # write the binary content of the response to the file
-        file.write(response.content)

 def main():
-    api = args.api_base
+    url = "http://{}:{}/inference_{}".format(args.host, args.port, args.mode)
     if args.mode == 'sft':
-        url = api + "/api/inference/sft"
-        payload={
-            'tts': args.tts_text,
-            'role': args.spk_id
-        }
-        response = requests.request("POST", url, data=payload)
-        saveResponse(args.tts_wav, response)
-    elif args.mode == 'zero_shot':
-        url = api + "/api/inference/zero-shot"
-        payload={
-            'tts': args.tts_text,
-            'prompt': args.prompt_text
-        }
-        files=[('audio', ('prompt_audio.wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
-        response = requests.request("POST", url, data=payload, files=files)
-        saveResponse(args.tts_wav, response)
-    elif args.mode == 'cross_lingual':
-        url = api + "/api/inference/cross-lingual"
-        payload={
-            'tts': args.tts_text,
-        }
-        files=[('audio', ('prompt_audio.wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
-        response = requests.request("POST", url, data=payload, files=files)
-        saveResponse(args.tts_wav, response)
-    else:
-        url = api + "/api/inference/instruct"
-        payload = {
-            'tts': args.tts_text,
-            'role': args.spk_id,
-            'instruct': args.instruct_text
+        payload = {
+            'tts_text': args.tts_text,
+            'spk_id': args.spk_id
         }
-        response = requests.request("POST", url, data=payload)
-        saveResponse(args.tts_wav, response)
-    logging.info("Response save to {}", args.tts_wav)
+        response = requests.request("GET", url, data=payload, stream=True)
+    elif args.mode == 'zero_shot':
+        payload = {
+            'tts_text': args.tts_text,
+            'prompt_text': args.prompt_text
+        }
+        files = [('prompt_wav', ('prompt_wav', open(args.prompt_wav, 'rb'), 'application/octet-stream'))]
+        response = requests.request("GET", url, data=payload, files=files, stream=True)
+    elif args.mode == 'cross_lingual':
+        payload = {
+            'tts_text': args.tts_text,
+        }
+        files = [('prompt_wav', ('prompt_wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
+        response = requests.request("GET", url, data=payload, files=files, stream=True)
+    else:
+        payload = {
+            'tts_text': args.tts_text,
+            'spk_id': args.spk_id,
+            'instruct_text': args.instruct_text
+        }
+        response = requests.request("GET", url, data=payload, stream=True)
+    tts_audio = b''
+    for r in response.iter_content(chunk_size=16000):
+        tts_audio += r
+    tts_speech = torch.from_numpy(np.array(np.frombuffer(tts_audio, dtype=np.int16))).unsqueeze(dim=0)
+    logging.info('save response to {}'.format(args.tts_wav))
+    torchaudio.save(args.tts_wav, tts_speech, target_sr)
+    logging.info('get response')

 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('--api_base',
+    parser.add_argument('--host',
                         type=str,
-                        default='http://127.0.0.1:6006')
+                        default='0.0.0.0')
+    parser.add_argument('--port',
+                        type=int,
+                        default='50000')
     parser.add_argument('--mode',
                         default='sft',
                         choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
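For the upload modes, a sketch of the request shape the rewritten client sends (not part of the commit; the file name is a placeholder, and the prompt wav is assumed to be 16 kHz mono):

```python
import requests

# zero_shot request mirroring the new client: form fields plus a prompt wav
# uploaded under the 'prompt_wav' field; the response is streamed raw PCM.
payload = {'tts_text': 'Text to synthesize.', 'prompt_text': 'Prompt transcript.'}
files = [('prompt_wav', ('prompt_wav', open('prompt.wav', 'rb'), 'application/octet-stream'))]
response = requests.get('http://127.0.0.1:50000/inference_zero_shot',
                        data=payload, files=files, stream=True)
```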
@@ -1,119 +1,77 @@
-# Set inference model
-# export MODEL_DIR=pretrained_models/CosyVoice-300M-Instruct
-# For development
-# fastapi dev --port 6006 fastapi_server.py
-# For production deployment
-# fastapi run --port 6006 fastapi_server.py
-
 # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #   http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
 import sys
-import io,time
-from fastapi import FastAPI, Response, File, UploadFile, Form
-from fastapi.responses import HTMLResponse
-from fastapi.middleware.cors import CORSMiddleware  # import the CORS middleware module
-from contextlib import asynccontextmanager
+import argparse
+import logging
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+from fastapi import FastAPI, UploadFile, Form, File
+from fastapi.responses import StreamingResponse
+from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import numpy as np
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 sys.path.append('{}/../../..'.format(ROOT_DIR))
 sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
 from cosyvoice.cli.cosyvoice import CosyVoice
 from cosyvoice.utils.file_utils import load_wav
-import numpy as np
-import torch
-import torchaudio
-
-class LaunchFailed(Exception):
-    pass
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    model_dir = os.getenv("MODEL_DIR", "pretrained_models/CosyVoice-300M-SFT")
-    if model_dir:
-        logging.info("MODEL_DIR is {}", model_dir)
-        app.cosyvoice = CosyVoice(model_dir)
-        # sft usage
-        logging.info("Avaliable speakers {}", app.cosyvoice.list_avaliable_spks())
-    else:
-        raise LaunchFailed("MODEL_DIR environment must set")
-    yield

-app = FastAPI(lifespan=lifespan)
-
-# set the domains allowed to access
-origins = ["*"]  # "*" allows all origins; it can be changed to specific allowed IPs.
+app = FastAPI()
+# set cross region allowance
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=origins,  # the allowed origins
+    allow_origins=["*"],
     allow_credentials=True,
-    allow_methods=["*"],  # HTTP methods allowed for cross-origin requests, e.g. GET, POST, PUT.
-    allow_headers=["*"])  # headers allowed for cross-origin requests, e.g. for identifying the request source.
+    allow_methods=["*"],
+    allow_headers=["*"])

-def buildResponse(output):
-    buffer = io.BytesIO()
-    torchaudio.save(buffer, output, 22050, format="wav")
-    buffer.seek(0)
-    return Response(content=buffer.read(-1), media_type="audio/wav")
+def generate_data(model_output):
+    for i in model_output:
+        tts_audio = (i['tts_speech'].numpy() * (2 ** 15)).astype(np.int16).tobytes()
+        yield tts_audio

-@app.post("/api/inference/sft")
-@app.get("/api/inference/sft")
-async def sft(tts: str = Form(), role: str = Form()):
-    start = time.process_time()
-    output = app.cosyvoice.inference_sft(tts, role)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
+@app.get("/inference_sft")
+async def inference_sft(tts_text: str = Form(), spk_id: str = Form()):
+    model_output = cosyvoice.inference_sft(tts_text, spk_id)
+    return StreamingResponse(generate_data(model_output))

-@app.post("/api/inference/zero-shot")
-async def zeroShot(tts: str = Form(), prompt: str = Form(), audio: UploadFile = File()):
-    start = time.process_time()
-    prompt_speech = load_wav(audio.file, 16000)
-    prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
-    prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
-    prompt_speech_16k = prompt_speech_16k.float() / (2**15)
-    output = app.cosyvoice.inference_zero_shot(tts, prompt, prompt_speech_16k)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
+@app.get("/inference_zero_shot")
+async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(), prompt_wav: UploadFile = File()):
+    prompt_speech_16k = load_wav(prompt_wav.file, 16000)
+    model_output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
+    return StreamingResponse(generate_data(model_output))

-@app.post("/api/inference/cross-lingual")
-async def crossLingual(tts: str = Form(), audio: UploadFile = File()):
-    start = time.process_time()
-    prompt_speech = load_wav(audio.file, 16000)
-    prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
-    prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
-    prompt_speech_16k = prompt_speech_16k.float() / (2**15)
-    output = app.cosyvoice.inference_cross_lingual(tts, prompt_speech_16k)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
+@app.get("/inference_cross_lingual")
+async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile = File()):
+    prompt_speech_16k = load_wav(prompt_wav.file, 16000)
+    model_output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
+    return StreamingResponse(generate_data(model_output))

-@app.post("/api/inference/instruct")
-@app.get("/api/inference/instruct")
-async def instruct(tts: str = Form(), role: str = Form(), instruct: str = Form()):
-    start = time.process_time()
-    output = app.cosyvoice.inference_instruct(tts, role, instruct)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
-
-@app.get("/api/roles")
-async def roles():
-    return {"roles": app.cosyvoice.list_avaliable_spks()}
-
-@app.get("/", response_class=HTMLResponse)
-async def root():
-    return """
-<!DOCTYPE html>
-<html lang=zh-cn>
-<head>
-  <meta charset=utf-8>
-  <title>Api information</title>
-</head>
-<body>
-  Get the supported tones from the Roles API first, then enter the tones and textual content in the TTS API for synthesis. <a href='./docs'>Documents of API</a>
-</body>
-</html>
-"""
+@app.get("/inference_instruct")
+async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instruct_text: str = Form()):
+    model_output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
+    return StreamingResponse(generate_data(model_output))
+
+if __name__=='__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--port',
+                        type=int,
+                        default=50000)
+    parser.add_argument('--model_dir',
+                        type=str,
+                        default='iic/CosyVoice-300M',
+                        help='local path or modelscope repo id')
+    args = parser.parse_args()
+    cosyvoice = CosyVoice(args.model_dir)
+    uvicorn.run(app, host="127.0.0.1", port=args.port)
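One deployment caveat worth noting: uvicorn.run binds 127.0.0.1 here, while the README's docker run relies on -p port mapping, which only reaches a server listening on all interfaces. A minimal sketch of the adjusted launch (an observation, not part of the commit):

```python
import uvicorn
from fastapi import FastAPI

app = FastAPI()  # stand-in for the server app above

if __name__ == '__main__':
    # Binding 0.0.0.0 instead of 127.0.0.1 lets Docker's -p 50000:50000 mapping reach the app.
    uvicorn.run(app, host="0.0.0.0", port=50000)
```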