mirror of https://github.com/FunAudioLLM/CosyVoice.git
synced 2026-02-05 18:09:24 +08:00
add prompt audio cache
@@ -15,6 +15,8 @@ trt_engines_dir=./trt_engines_${trt_dtype}
 model_repo=./model_repo_cosyvoice2
+use_spk2info_cache=True
+
 if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
 echo "Cloning CosyVoice"
 git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
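The new use_spk2info_cache flag is the switch the rest of this commit keys off: when it is True, prompt-audio features (speech tokens, speech feats, speaker embeddings) are served from a pre-built spk2info.pt cache instead of being recomputed per request. A minimal sketch for flipping it from the command line, assuming the script is saved as run.sh (the file name is not shown in this view):

# Hypothetical: 'run.sh' is an assumed name for the script in this diff.
sed -i 's/^use_spk2info_cache=True$/use_spk2info_cache=False/' run.sh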
@@ -27,6 +29,8 @@ if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
 echo "Downloading CosyVoice2-0.5B"
 huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
 modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
+# download spk2info.pt to directly use cached speech tokens, speech feats, and embeddings
+wget https://raw.githubusercontent.com/qi-hua/async_cosyvoice/main/CosyVoice2-0.5B/spk2info.pt -O $model_scope_model_local_dir/spk2info.pt
 fi
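A quick sanity check on the downloaded cache. The exact schema of spk2info.pt is an assumption here, inferred from the comment above (a dict mapping speaker ids to cached speech tokens, speech feats, and embeddings); the sketch just loads it on CPU and prints one entry:

python3 - "$model_scope_model_local_dir/spk2info.pt" <<'EOF'
import sys
import torch

# Assumed layout: {spk_id: {field_name: tensor_or_value, ...}}
spk2info = torch.load(sys.argv[1], map_location='cpu')
print(type(spk2info).__name__, len(spk2info), 'entries')
spk, info = next(iter(spk2info.items()))
print(spk, {k: getattr(v, 'shape', v) for k, v in info.items()})
EOF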
@@ -57,10 +61,12 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
 cosyvoice2_dir="cosyvoice2"

 cp -r ./model_repo/${cosyvoice2_dir} $model_repo
-cp -r ./model_repo/audio_tokenizer $model_repo
 cp -r ./model_repo/tensorrt_llm $model_repo
 cp -r ./model_repo/token2wav $model_repo
-cp -r ./model_repo/speaker_embedding $model_repo
+if [ $use_spk2info_cache == "False" ]; then
+cp -r ./model_repo/audio_tokenizer $model_repo
+cp -r ./model_repo/speaker_embedding $model_repo
+fi

 ENGINE_PATH=$trt_engines_dir
 MAX_QUEUE_DELAY_MICROSECONDS=0
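A quick way to confirm the copy step above produced the intended layout (directory names are taken directly from the cp commands in this hunk):

ls $model_repo
# with use_spk2info_cache=True : cosyvoice2  tensorrt_llm  token2wav
# with use_spk2info_cache=False: audio_tokenizer  cosyvoice2  speaker_embedding
#                                tensorrt_llm  token2wav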
@@ -71,11 +77,12 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
 DECOUPLED_MODE=True # True for streaming, False for offline

 python3 scripts/fill_template.py -i ${model_repo}/token2wav/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
-python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
 python3 scripts/fill_template.py -i ${model_repo}/${cosyvoice2_dir}/config.pbtxt model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
-python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
 python3 scripts/fill_template.py -i ${model_repo}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_beam_width:1,engine_dir:${ENGINE_PATH},max_tokens_in_paged_kv_cache:2560,max_attention_window_size:2560,kv_cache_free_gpu_mem_fraction:0.5,exclude_input_in_output:True,enable_kv_cache_reuse:False,batching_strategy:inflight_fused_batching,max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS},encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32
+if [ $use_spk2info_cache == "False" ]; then
+python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
+python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
+fi
 fi

 if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
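After fill_template.py runs, none of the template markers should survive in the generated configs; a leftover ${...} means a substitution was missed. A hedged check, assuming the config templates use the ${param} placeholder style that fill_template.py substitutes:

# Any hit here is an unfilled placeholder left in a config.pbtxt.
grep -rn '\${' $model_repo/*/config.pbtxt || echo "all placeholders filled"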
@@ -94,7 +101,7 @@ fi

 if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
 echo "Running benchmark client grpc"
-num_task=1
+num_task=4

 mode=streaming
 BLS_INSTANCE_NUM=4
@@ -104,6 +111,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
 --model-name cosyvoice2 \
 --num-tasks $num_task \
 --mode $mode \
+--use-spk2info-cache $use_spk2info_cache \
 --huggingface-dataset yuekai/seed_tts_cosy2 \
---log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}
+--log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}_spk_cache_${use_spk2info_cache}
 fi
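For reference, a full invocation sketch of the benchmark call this hunk patches. The entry point is elided above, so client_grpc.py is a hypothetical name; the flags are taken verbatim from the diff:

# 'client_grpc.py' is an assumed entry-point name (not shown in this diff).
python3 client_grpc.py \
    --model-name cosyvoice2 \
    --num-tasks $num_task \
    --mode $mode \
    --use-spk2info-cache $use_spk2info_cache \
    --huggingface-dataset yuekai/seed_tts_cosy2 \
    --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}_spk_cache_${use_spk2info_cache}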