fix decoupled mode

yuekaiz
2025-07-29 11:13:07 +08:00
parent 178da09993
commit dc196df940
3 changed files with 52 additions and 33 deletions

View File

@@ -295,11 +295,26 @@ class TritonPythonModel:
         if self.decoupled:
             response_sender = request.get_response_sender()
             request_id = request.request_id()
-            for generated_ids in generated_ids_iter:
-                raise NotImplementedError("Decoupled mode is not implemented")
+            generated_ids = []
+            for generated_id in generated_ids_iter:
+                # convert the numpy array into an int32 tensor
+                generated_id = generated_id.tolist()
+                if len(generated_id) > 0:
+                    assert len(generated_id) == 1, "Generated ID is not a single integer"
+                    generated_ids.append(generated_id[0])
+            generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(torch.int32).to(self.device)
+            prompt_spk_embedding = self._extract_spk_embedding(wav_tensor)
+            audio = self.forward_token2wav(prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding, generated_ids)
+            # Prepare response
+            audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio))
+            inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
+            response_sender.send(inference_response)
+            response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
+            self.logger.log_info("send tritonserver_response_complete_final to end")
         else:
             generated_ids = next(generated_ids_iter)
-            generated_ids = torch.tensor([generated_ids]).to(self.device)
+            generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(self.device)
         if generated_ids is None or len(generated_ids) == 0:
             raise pb_utils.TritonModelException("Generated IDs is None or empty")
@@ -311,9 +326,5 @@ class TritonPythonModel:
             inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
             responses.append(inference_response)
-        if self.decoupled:
-            response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
-            self.logger.log_info(f"send tritonserver_response_complete_final to end")
         if not self.decoupled:
             return responses
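
Note on the fix: the decoupled path previously raised NotImplementedError; it now drains the streaming iterator into a single [1, T] int32 tensor before running token2wav, sends the synthesized waveform, and closes the stream with the final flag. A minimal, runnable sketch of the accumulation step, with names mirroring the diff (the iterator normally comes from the streaming LLM generation):

    import numpy as np
    import torch

    def collect_generated_ids(generated_ids_iter, device="cpu"):
        # Drain per-step outputs (numpy arrays holding at most one token id)
        # into a single [1, T] int32 tensor, as the decoupled branch now does.
        generated_ids = []
        for generated_id in generated_ids_iter:
            generated_id = generated_id.tolist()
            if len(generated_id) > 0:
                assert len(generated_id) == 1, "Generated ID is not a single integer"
                generated_ids.append(generated_id[0])
        return torch.tensor(generated_ids).unsqueeze(0).to(torch.int32).to(device)

    # Two decode steps with one token each, plus an empty final step -> shape [1, 2]
    ids = collect_generated_ids(iter([np.array([101]), np.array([102]), np.array([])]))
    print(ids.shape)  # torch.Size([1, 2])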

View File

@@ -11,3 +11,4 @@ librosa
 pyworld
 openai-whisper
 tritonclient
+modelscope
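
Note: modelscope is required by the new download stage in run.sh, which fetches iic/CosyVoice2-0.5B via the modelscope CLI. The equivalent download from Python, as a sketch (this assumes a modelscope release whose snapshot_download accepts a local_dir argument):

    from modelscope import snapshot_download

    # Fetch the CosyVoice2-0.5B assets used by the token2wav stages
    model_dir = snapshot_download("iic/CosyVoice2-0.5B", local_dir="./CosyVoice2-0.5B")
    print(model_dir)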

View File

@@ -1,25 +1,31 @@
 export CUDA_VISIBLE_DEVICES=0
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice:$PYTHONPATH
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice/third_party/Matcha-TTS:$PYTHONPATH
+cosyvoice_path=/workspace/CosyVoice
+export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
+export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH

 stage=$1
 stop_stage=$2

-huggingface_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/cosyvoice2_llm
-model_scope_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice2-0.5B
+huggingface_model_local_dir=./cosyvoice2_llm
+model_scope_model_local_dir=./CosyVoice2-0.5B

 trt_dtype=bfloat16
-trt_weights_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_weights_${trt_dtype}
-trt_engines_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_engines_${trt_dtype}
+trt_weights_dir=./trt_weights_${trt_dtype}
+trt_engines_dir=./trt_engines_${trt_dtype}

 model_repo=./model_repo_cosyvoice2

-if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
-  echo " "
-  huggingface-cli download --local-dir cosyvoice2_llm yuekai/cosyvoice2_llm
-  modelscope download --model iic/CosyVoice2-0.5B --local_dir ./CosyVoice2-0.5B/
-  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
-  cd CosyVoice
+if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
+  echo "Cloning CosyVoice"
+  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
+  cd $cosyvoice_path
   git submodule update --init --recursive
+  cd runtime/triton_trtllm
+fi
+
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
+  echo "Downloading CosyVoice2-0.5B"
+  huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
+  modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
 fi
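
Note: cloning now lives in its own stage -1, so runs starting at stage 0 no longer re-clone the repository, and all artifact paths are relative to the working directory rather than a user-specific scratch path. After the renumbering below, a full run from scratch is bash run.sh -1 5.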
@@ -35,17 +41,15 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
         --max_batch_size 16 \
         --max_num_tokens 32768 \
         --gemm_plugin $trt_dtype || exit 1
-fi
-
-if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
   echo "Testing TensorRT engines"
-  python3 ./test_llm.py --input_text "你好,请问你叫什么?" \
+  python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
       --tokenizer_dir $huggingface_model_local_dir \
       --top_k 50 --top_p 0.95 --temperature 0.8 \
       --engine_dir=$trt_engines_dir || exit 1
 fi

-if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
   echo "Creating model repository"
   rm -rf $model_repo
   mkdir -p $model_repo
@@ -71,28 +75,31 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
 fi

-if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+  echo "Starting Triton server"
   tritonserver --model-repository $model_repo
 fi

-if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
-  echo "Testing TensorRT engines"
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+  echo "Single request test http"
   python3 client_http.py \
-      --reference-audio ./prompt_audio.wav \
+      --reference-audio ./assets/prompt_audio.wav \
       --reference-text "吃燕窝就选燕之屋本节目由26年专注高品质燕窝的燕之屋冠名播出。豆奶牛奶换着喝营养更均衡本节目由豆本豆豆奶特约播出。" \
       --target-text "身临其境,换新体验。塑造开源语音合成新范式,让智能语音更自然。" \
       --model-name cosyvoice2
 fi

-if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
-  echo "Running benchmark client"
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
+  echo "Running benchmark client grpc"
   num_task=4
+  # set mode=streaming when decoupled=True
+  # set mode=offline when decoupled=False
+  mode=offline
   python3 client_grpc.py \
       --server-addr localhost \
       --model-name cosyvoice2 \
       --num-tasks $num_task \
-      --mode offline \
+      --mode $mode \
       --huggingface-dataset yuekai/seed_tts_cosy2 \
-      --log-dir ./log_concurrent_tasks_${num_task}_offline_bls_4_${trt_dtype}
+      --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_4_${trt_dtype}
 fi
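
Note: as the new comments say, the client mode must match the server's transaction policy: streaming when the model is deployed with decoupled=True, offline otherwise. For reference, a minimal streaming call with tritonclient's gRPC API (a sketch only; the input tensor list must be built to match the cosyvoice2 model config, which this snippet does not spell out):

    import queue
    import tritonclient.grpc as grpcclient

    def stream_infer(inputs, url="localhost:8001", model_name="cosyvoice2"):
        # Collect every streamed response; the stream ends when the server
        # sends the TRITONSERVER_RESPONSE_COMPLETE_FINAL flag (see model.py above).
        results = queue.Queue()

        def on_response(result, error):
            results.put(error if error is not None else result)

        client = grpcclient.InferenceServerClient(url=url)
        client.start_stream(callback=on_response)
        client.async_stream_infer(model_name=model_name, inputs=inputs, request_id="0")
        client.stop_stream()  # blocks until in-flight responses are delivered
        chunks = []
        while not results.empty():
            chunks.append(results.get())
        return chunks

    # inputs: a list of grpcclient.InferInput objects carrying the reference
    # audio, reference text, and target text expected by cosyvoice2.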