Mirror of https://github.com/FunAudioLLM/CosyVoice.git, synced 2026-02-04 09:29:25 +08:00
fix decoupled mode
@@ -295,11 +295,26 @@ class TritonPythonModel:
         if self.decoupled:
             response_sender = request.get_response_sender()
             request_id = request.request_id()
-            for generated_ids in generated_ids_iter:
-                raise NotImplementedError("Decoupled mode is not implemented")
+            generated_ids = []
+            for generated_id in generated_ids_iter:
+                # convert the numpy array into an int32 tensor
+                generated_id = generated_id.tolist()
+                if len(generated_id) > 0:
+                    assert len(generated_id) == 1, "Generated ID is not a single integer"
+                    generated_ids.append(generated_id[0])
+            generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(torch.int32).to(self.device)
+            prompt_spk_embedding = self._extract_spk_embedding(wav_tensor)
+            audio = self.forward_token2wav(prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding, generated_ids)
+
+            # Prepare response
+            audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio))
+            inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
+            response_sender.send(inference_response)
+            response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
+            self.logger.log_info(f"send tritonserver_response_complete_final to end")
         else:
             generated_ids = next(generated_ids_iter)
-            generated_ids = torch.tensor([generated_ids]).to(self.device)
+            generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(self.device)
             if generated_ids is None or len(generated_ids) == 0:
                 raise pb_utils.TritonModelException("Generated IDs is None or empty")

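Note: the new decoupled branch follows Triton's Python-backend streaming contract: fetch a response sender from the request, push each InferenceResponse through it, then close the stream with a final flag. A minimal sketch of that contract (pb_utils exists only inside a Triton Python-backend model; the "waveform" output name is taken from the diff, the audio payload here is a placeholder):

import numpy as np
import triton_python_backend_utils as pb_utils  # provided by the Triton runtime


class TritonPythonModel:
    def execute(self, requests):
        for request in requests:
            sender = request.get_response_sender()
            # One send() per intermediate result; here a single dummy chunk.
            audio = np.zeros((1, 16000), dtype=np.float32)
            sender.send(pb_utils.InferenceResponse(
                output_tensors=[pb_utils.Tensor("waveform", audio)]))
            # Exactly one final flag per request closes the stream.
            sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
        return None  # decoupled models must not return a response list

Because this branch already sends the final flag, the duplicate send at the end of execute is deleted in the next hunk.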
@@ -311,9 +326,5 @@ class TritonPythonModel:
             inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
             responses.append(inference_response)
-
-        if self.decoupled:
-            response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
-            self.logger.log_info(f"send tritonserver_response_complete_final to end")

         if not self.decoupled:
             return responses
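On the client side, a decoupled model is reached over gRPC streaming, and the COMPLETE_FINAL flag sent above is what terminates the stream. A rough sketch with tritonclient (the input name target_text is hypothetical; only the waveform output and the cosyvoice2 model name appear in this commit):

import queue
import numpy as np
import tritonclient.grpc as grpcclient

responses = queue.Queue()

def on_response(result, error):
    # Called once per response_sender.send() on the server.
    responses.put(error if error is not None else result)

client = grpcclient.InferenceServerClient("localhost:8001")
client.start_stream(callback=on_response)

text = np.array([["hello"]], dtype=object)
inp = grpcclient.InferInput("target_text", list(text.shape), "BYTES")  # hypothetical name
inp.set_data_from_numpy(text)
client.async_stream_infer(model_name="cosyvoice2", inputs=[inp])

result = responses.get()  # blocks until the first streamed response arrives
if not isinstance(result, Exception):
    audio = result.as_numpy("waveform")
client.stop_stream()  # returns once the final flag has been received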
@@ -10,4 +10,5 @@ wget
 librosa
 pyworld
 openai-whisper
 tritonclient
+modelscope

@@ -1,25 +1,31 @@

 export CUDA_VISIBLE_DEVICES=0
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice:$PYTHONPATH
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice/third_party/Matcha-TTS:$PYTHONPATH
+cosyvoice_path=/workspace/CosyVoice
+export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
+export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH
 stage=$1
 stop_stage=$2

-huggingface_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/cosyvoice2_llm
-model_scope_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice2-0.5B
+huggingface_model_local_dir=./cosyvoice2_llm
+model_scope_model_local_dir=./CosyVoice2-0.5B
 trt_dtype=bfloat16
-trt_weights_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_weights_${trt_dtype}
-trt_engines_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_engines_${trt_dtype}
+trt_weights_dir=./trt_weights_${trt_dtype}
+trt_engines_dir=./trt_engines_${trt_dtype}

 model_repo=./model_repo_cosyvoice2

-if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
-    echo " "
-    huggingface-cli download --local-dir cosyvoice2_llm yuekai/cosyvoice2_llm
-    modelscope download --model iic/CosyVoice2-0.5B --local_dir ./CosyVoice2-0.5B/
-    git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
-    cd CosyVoice
+if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
+    echo "Cloning CosyVoice"
+    git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
+    cd $cosyvoice_path
+    git submodule update --init --recursive
+    cd runtime/triton_trtllm
+fi
+
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
+    echo "Downloading CosyVoice2-0.5B"
+    huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
+    modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
 fi

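The stage 0 downloads can also be scripted from Python; a sketch assuming huggingface_hub is installed (it ships with huggingface-cli) and a modelscope version whose snapshot_download accepts local_dir:

from huggingface_hub import snapshot_download as hf_download
from modelscope import snapshot_download as ms_download

# Same artifacts as stage 0 above.
hf_download(repo_id="yuekai/cosyvoice2_llm", local_dir="./cosyvoice2_llm")
ms_download("iic/CosyVoice2-0.5B", local_dir="./CosyVoice2-0.5B")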
@@ -35,17 +41,15 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
         --max_batch_size 16 \
         --max_num_tokens 32768 \
         --gemm_plugin $trt_dtype || exit 1
 fi
-
 if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
     echo "Testing TensorRT engines"
-    python3 ./test_llm.py --input_text "你好,请问你叫什么?" \
+    python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
         --tokenizer_dir $huggingface_model_local_dir \
         --top_k 50 --top_p 0.95 --temperature 0.8 \
         --engine_dir=$trt_engines_dir || exit 1
 fi
-
-if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
     echo "Creating model repository"
     rm -rf $model_repo
     mkdir -p $model_repo
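The referenced scripts/test_llm.py is not part of this diff; a comparable smoke test, assuming TensorRT-LLM's ModelRunner API (argument names vary between TensorRT-LLM releases, so treat this as a sketch):

import torch
from transformers import AutoTokenizer
from tensorrt_llm.runtime import ModelRunner

tokenizer = AutoTokenizer.from_pretrained("./cosyvoice2_llm")
runner = ModelRunner.from_dir(engine_dir="./trt_engines_bfloat16")

# Same prompt and sampling settings as the stage above.
ids = tokenizer("你好,请问你叫什么?", return_tensors="pt").input_ids.int()
outputs = runner.generate(
    batch_input_ids=[ids[0]],  # list of 1-D token-id tensors
    max_new_tokens=128,
    temperature=0.8, top_k=50, top_p=0.95,
    end_id=tokenizer.eos_token_id,
    pad_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0][0], skip_special_tokens=True))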
@@ -71,28 +75,31 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then

 fi

-if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
     echo "Starting Triton server"
     tritonserver --model-repository $model_repo
 fi

-if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
-    echo "Testing TensorRT engines"
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+    echo "Single request test http"
     python3 client_http.py \
-        --reference-audio ./prompt_audio.wav \
+        --reference-audio ./assets/prompt_audio.wav \
        --reference-text "吃燕窝就选燕之屋,本节目由26年专注高品质燕窝的燕之屋冠名播出。豆奶牛奶换着喝,营养更均衡,本节目由豆本豆豆奶特约播出。" \
        --target-text "身临其境,换新体验。塑造开源语音合成新范式,让智能语音更自然。" \
        --model-name cosyvoice2
 fi

-if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
-    echo "Running benchmark client"
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
+    echo "Running benchmark client grpc"
     num_task=4
+    # set mode=streaming when decoupled=True
+    # set mode=offline when decoupled=False
+    mode=offline
     python3 client_grpc.py \
         --server-addr localhost \
         --model-name cosyvoice2 \
         --num-tasks $num_task \
-        --mode offline \
+        --mode $mode \
         --huggingface-dataset yuekai/seed_tts_cosy2 \
-        --log-dir ./log_concurrent_tasks_${num_task}_offline_bls_4_${trt_dtype}
+        --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_4_${trt_dtype}
 fi
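client_http.py itself is not shown in this commit; a rough offline client for the single-request test, assuming hypothetical input names (reference_wav, reference_text, target_text; only the waveform output name is confirmed by the model code above) and soundfile for audio I/O:

import numpy as np
import soundfile as sf
import tritonclient.http as httpclient

def text_input(name, text):
    # BYTES tensors carry UTF-8 strings; names here are hypothetical,
    # check the model's config.pbtxt for the real ones.
    arr = np.array([[text.encode("utf-8")]], dtype=object)
    inp = httpclient.InferInput(name, list(arr.shape), "BYTES")
    inp.set_data_from_numpy(arr)
    return inp

reference_text = "..."  # the prompt transcript used in run.sh
target_text = "..."     # the text to synthesize

wav, sr = sf.read("./assets/prompt_audio.wav", dtype="float32")
wav_in = httpclient.InferInput("reference_wav", [1, len(wav)], "FP32")
wav_in.set_data_from_numpy(wav[np.newaxis, :])

client = httpclient.InferenceServerClient("localhost:8000")
result = client.infer("cosyvoice2", inputs=[
    wav_in,
    text_input("reference_text", reference_text),
    text_input("target_text", target_text),
])
# 24 kHz is CosyVoice 2's usual output rate; verify against the model config.
sf.write("output.wav", result.as_numpy("waveform").squeeze(), 24000)

For the benchmark stage, --mode streaming exercises the decoupled code path fixed in this commit, while --mode offline matches decoupled=False.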