|
|
@@ -1,25 +1,31 @@
|
|
|
|
|
|
export CUDA_VISIBLE_DEVICES=0
|
|
|
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice:$PYTHONPATH
|
|
|
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice/third_party/Matcha-TTS:$PYTHONPATH
|
|
|
+cosyvoice_path=/workspace/CosyVoice
|
|
|
+export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
|
|
|
+export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH
|
|
|
stage=$1
|
|
|
stop_stage=$2
|
|
|
|
|
|
-huggingface_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/cosyvoice2_llm
|
|
|
-model_scope_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice2-0.5B
|
|
|
+huggingface_model_local_dir=./cosyvoice2_llm
|
|
|
+model_scope_model_local_dir=./CosyVoice2-0.5B
|
|
|
trt_dtype=bfloat16
|
|
|
-trt_weights_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_weights_${trt_dtype}
|
|
|
-trt_engines_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_engines_${trt_dtype}
|
|
|
+trt_weights_dir=./trt_weights_${trt_dtype}
|
|
|
+trt_engines_dir=./trt_engines_${trt_dtype}
|
|
|
|
|
|
model_repo=./model_repo_cosyvoice2
|
|
|
|
|
|
-if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
|
|
|
- echo " "
|
|
|
- huggingface-cli download --local-dir cosyvoice2_llm yuekai/cosyvoice2_llm
|
|
|
- modelscope download --model iic/CosyVoice2-0.5B --local_dir ./CosyVoice2-0.5B/
|
|
|
- git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
|
|
|
- cd CosyVoice
|
|
|
+if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
|
|
|
+ echo "Cloning CosyVoice"
|
|
|
+ git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
|
|
|
+ cd $cosyvoice_path
|
|
|
git submodule update --init --recursive
|
|
|
+ cd runtime/triton_trtllm
|
|
|
+fi
|
|
|
+
|
|
|
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
|
|
|
+ echo "Downloading cosyvoice2_llm and CosyVoice2-0.5B"
|
|
|
+ huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
|
|
|
+ modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
|
|
|
fi
|
|
|
|
|
|
|
|
|
@@ -35,17 +41,15 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
|
|
|
--max_batch_size 16 \
|
|
|
--max_num_tokens 32768 \
|
|
|
--gemm_plugin $trt_dtype || exit 1
|
|
|
-fi
|
|
|
|
|
|
-if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
|
|
|
echo "Testing TensorRT engines"
|
|
|
- python3 ./test_llm.py --input_text "你好,请问你叫什么?" \
|
|
|
+ python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
|
|
|
--tokenizer_dir $huggingface_model_local_dir \
|
|
|
--top_k 50 --top_p 0.95 --temperature 0.8 \
|
|
|
--engine_dir=$trt_engines_dir || exit 1
|
|
|
fi
|
|
|
|
|
|
-if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
|
|
|
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
|
|
|
echo "Creating model repository"
|
|
|
rm -rf $model_repo
|
|
|
mkdir -p $model_repo
|
|
|
@@ -71,28 +75,31 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
|
|
|
|
|
|
fi
|
|
|
|
|
|
-if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
|
|
|
-
|
|
|
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
|
|
|
+ echo "Starting Triton server"
|
|
|
tritonserver --model-repository $model_repo
|
|
|
fi
|
|
|
|
|
|
-if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
|
|
|
- echo "Testing TensorRT engines"
|
|
|
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
|
|
|
+ echo "Single-request HTTP test"
|
|
|
python3 client_http.py \
|
|
|
- --reference-audio ./prompt_audio.wav \
|
|
|
+ --reference-audio ./assets/prompt_audio.wav \
|
|
|
--reference-text "吃燕窝就选燕之屋,本节目由26年专注高品质燕窝的燕之屋冠名播出。豆奶牛奶换着喝,营养更均衡,本节目由豆本豆豆奶特约播出。" \
|
|
|
--target-text "身临其境,换新体验。塑造开源语音合成新范式,让智能语音更自然。" \
|
|
|
--model-name cosyvoice2
|
|
|
fi
|
|
|
|
|
|
-if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
|
|
|
- echo "Running benchmark client"
|
|
|
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
|
|
|
+ echo "Running benchmark client grpc"
|
|
|
num_task=4
|
|
|
+ # set mode=streaming when decoupled=True
|
|
|
+ # set mode=offline when decoupled=False
|
|
|
+ mode=offline
|
|
|
python3 client_grpc.py \
|
|
|
--server-addr localhost \
|
|
|
--model-name cosyvoice2 \
|
|
|
--num-tasks $num_task \
|
|
|
- --mode offline \
|
|
|
+ --mode $mode \
|
|
|
--huggingface-dataset yuekai/seed_tts_cosy2 \
|
|
|
- --log-dir ./log_concurrent_tasks_${num_task}_offline_bls_4_${trt_dtype}
|
|
|
+ --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_4_${trt_dtype}
|
|
|
fi
|