run_cosyvoice3.sh

#!/bin/bash
# Copyright (c) 2026 NVIDIA (authors: Yuekai Zhang)
export CUDA_VISIBLE_DEVICES=0
# cosyvoice_path=/workspace/CosyVoice
cosyvoice_path=/workspace_yuekai/tts/CosyVoice
export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH

stage=$1
stop_stage=$2
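# Usage: bash run_cosyvoice3.sh <stage> <stop_stage>
# Each stage block below runs when its stage number lies in [stage, stop_stage].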
huggingface_model_local_dir=./hf_cosyvoice3_llm
model_scope_model_local_dir=/workspace_yuekai/HF/Fun-CosyVoice3-0.5B-2512
trt_dtype=bfloat16
trt_weights_dir=./trt_weights_${trt_dtype}
trt_engines_dir=./trt_engines_${trt_dtype}
model_repo_src=./model_repo_cosyvoice3
model_repo=./deploy_cosyvoice3
bls_instance_num=1
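# Stage overview (as implemented below):
#   -1  clone CosyVoice and its submodules
#    0  convert the CosyVoice3 checkpoint to HuggingFace format
#    1  convert the HF checkpoint to TensorRT-LLM weights, build and test the engines
#    2  assemble the Triton model repository from the templates in $model_repo_src
#    3  start trtllm-serve (LLM) and tritonserver together
#    4  run the gRPC benchmark client
#    5  offline batch inference with offline_inference.py (requires $step_audio_model_dir)
#    7  disaggregated serving: LLM on GPU 0, Token2wav servers on the remaining GPUs
#    8  benchmark the disaggregated setup
#   10  streaming inference with infer_cosyvoice3.py against trtllm-serve
#   30  start only the trtllm-serve LLM server
#   40  start only the Triton server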
if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  echo "Cloning CosyVoice"
  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
  cd $cosyvoice_path
  git submodule update --init --recursive
  cd runtime/triton_trtllm
fi
if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  echo "Converting CosyVoice3 checkpoint to HuggingFace format"
  # see https://github.com/nvidia-china-sae/mair-hub/blob/main/rl-tutorial/cosyvoice_llm/pretrained_to_huggingface.py
  # huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
  # modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
  # pip3 install --upgrade x_transformers s3tokenizer
  # pip install -U nvidia-modelopt[all]
  python3 scripts/convert_cosyvoice3_to_hf.py \
    --model-dir $model_scope_model_local_dir \
    --output-dir $huggingface_model_local_dir || exit 1 # TODO: output dir should be here
fi
if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  echo "Converting checkpoint to TensorRT weights"
  python3 scripts/convert_checkpoint.py --model_dir $huggingface_model_local_dir \
    --output_dir $trt_weights_dir \
    --dtype $trt_dtype || exit 1

  echo "Building TensorRT engines"
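  # Build knobs (assumed semantics of the trtllm-build flags): --max_batch_size caps how many
  # requests the engine can batch at once, and --max_num_tokens caps the total tokens scheduled
  # per forward pass; lower them if the build or runtime runs out of GPU memory.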
  trtllm-build --checkpoint_dir $trt_weights_dir \
    --output_dir $trt_engines_dir \
    --max_batch_size 64 \
    --max_num_tokens 32768 \
    --gemm_plugin $trt_dtype || exit 1

  echo "Testing TensorRT engines"
  python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
    --tokenizer_dir $huggingface_model_local_dir \
    --top_k 50 --top_p 0.95 --temperature 0.8 \
    --engine_dir=$trt_engines_dir || exit 1
fi
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  echo "Creating CosyVoice3 model repository"
  rm -rf $model_repo
  mkdir -p $model_repo

  # Copy all modules from the template source
  cp -r ${model_repo_src}/cosyvoice3 $model_repo/
  cp -r ${model_repo_src}/token2wav $model_repo/
  cp -r ${model_repo_src}/vocoder $model_repo/
  cp -r ${model_repo_src}/audio_tokenizer $model_repo/
  cp -r ${model_repo_src}/speaker_embedding $model_repo/

  MAX_QUEUE_DELAY_MICROSECONDS=0
  MODEL_DIR=$model_scope_model_local_dir
  LLM_TOKENIZER_DIR=$huggingface_model_local_dir
  BLS_INSTANCE_NUM=$bls_instance_num
  TRITON_MAX_BATCH_SIZE=1
  DECOUPLED_MODE=True
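  # fill_template.py rewrites each config.pbtxt in place (-i), substituting the key:value pairs
  # below for the corresponding placeholders in the template configs copied above.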
  python3 scripts/fill_template.py -i ${model_repo}/cosyvoice3/config.pbtxt model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/token2wav/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/vocoder/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
fi
if [ $stage -le 30 ] && [ $stop_stage -ge 30 ]; then
  echo "Starting CosyVoice3 LLM server with trtllm-serve"
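  # trtllm-serve exposes an OpenAI-compatible HTTP API (localhost:8000 by default); the
  # commented curl request in stage 3 shows a sample chat/completions call against it.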
  CUDA_VISIBLE_DEVICES=0 mpirun -np 1 --allow-run-as-root --oversubscribe trtllm-serve serve --tokenizer $huggingface_model_local_dir $trt_engines_dir --max_batch_size 64 --kv_cache_free_gpu_memory_fraction 0.4
fi
if [ $stage -le 40 ] && [ $stop_stage -ge 40 ]; then
  echo "Starting CosyVoice3 Triton server"
  CUDA_VISIBLE_DEVICES=1 tritonserver --model-repository $model_repo --http-port 18000 --grpc-port 18001 --metrics-port 18002 &
fi
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  echo "Starting CosyVoice3 Triton server and LLM using trtllm-serve"
  CUDA_VISIBLE_DEVICES=0 mpirun -np 1 --allow-run-as-root --oversubscribe trtllm-serve serve --tokenizer $huggingface_model_local_dir $trt_engines_dir --max_batch_size 64 --kv_cache_free_gpu_memory_fraction 0.4 &
  CUDA_VISIBLE_DEVICES=0,1,2,3 tritonserver --model-repository $model_repo --http-port 18000 --grpc-port 18001 --metrics-port 18002 &
  wait

  # Test using curl
  # curl http://localhost:8000/v1/chat/completions \
  #   -H "Content-Type: application/json" \
  #   -d '{
  #     "model": "",
  #     "messages":[{"role": "user", "content": "Where is New York?"},
  #                 {"role": "assistant", "content": "<|s_1708|><|s_2050|><|s_2159|>"}],
  #     "max_tokens": 512,
  #     "temperature": 0.8,
  #     "top_p": 0.95,
  #     "top_k": 50,
  #     "stop": ["<|eos1|>"],
  #     "repetition_penalty": 1.2,
  #     "stream": false
  #   }'
fi
if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  echo "Running benchmark client for CosyVoice3"
  num_task=4
  mode=streaming # offline or streaming
  BLS_INSTANCE_NUM=$bls_instance_num
  python3 client_grpc.py \
    --server-addr localhost \
    --server-port 18001 \
    --model-name cosyvoice3 \
    --num-tasks $num_task \
    --mode $mode \
    --huggingface-dataset yuekai/seed_tts_cosy2 \
    --log-dir ./log_cosyvoice3_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}
fi
if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  echo "stage 5: Offline TTS (CosyVoice2 LLM + Step-Audio2-mini DiT Token2Wav) inference using a single Python script"
  # NOTE: step_audio_model_dir is not defined in this script; set it to the Step-Audio2-mini
  # model directory before running this stage.
  datasets=(wenetspeech4tts) # wenetspeech4tts, test_zh, zero_shot_zh
  backend=trtllm # hf, trtllm, vllm, trtllm-serve
  batch_sizes=(16)
  token2wav_batch_size=1
  for batch_size in ${batch_sizes[@]}; do
    for dataset in ${datasets[@]}; do
      output_dir=./${dataset}_${backend}_llm_batch_size_${batch_size}_token2wav_batch_size_${token2wav_batch_size}
      CUDA_VISIBLE_DEVICES=1 \
      python3 offline_inference.py \
        --output-dir $output_dir \
        --llm-model-name-or-path $huggingface_model_local_dir \
        --token2wav-path $step_audio_model_dir/token2wav \
        --backend $backend \
        --batch-size $batch_size --token2wav-batch-size $token2wav_batch_size \
        --engine-dir $trt_engines_dir \
        --split-name ${dataset} || exit 1
    done
  done
fi
if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
  echo "Disaggregated Server: LLM and Token2wav on different GPUs"
  echo "Starting LLM server on GPU 0"
  export CUDA_VISIBLE_DEVICES=0
  mpirun -np 1 --allow-run-as-root --oversubscribe trtllm-serve serve --tokenizer $huggingface_model_local_dir $trt_engines_dir --max_batch_size 64 --kv_cache_free_gpu_memory_fraction 0.4 &

  echo "Starting Token2wav servers on GPUs 1-3"
  Token2wav_num_gpus=3
  http_port=17000
  grpc_port=18000
  metrics_port=16000
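  # Port layout implied by the increments below: the k-th Token2wav instance (two per GPU)
  # listens on http 17000+k, grpc 18000+k and metrics 16000+k.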
  for i in $(seq 0 $(($Token2wav_num_gpus - 1))); do
    echo "Starting servers on GPU $((i + 1))"
    http_port=$((http_port + 1))
    grpc_port=$((grpc_port + 1))
    metrics_port=$((metrics_port + 1))
    # Two instances of the Token2wav server on the same GPU
    CUDA_VISIBLE_DEVICES=$(($i + 1)) tritonserver --model-repository $model_repo --http-port $http_port --grpc-port $grpc_port --metrics-port $metrics_port &
    http_port=$((http_port + 1))
    grpc_port=$((grpc_port + 1))
    metrics_port=$((metrics_port + 1))
    CUDA_VISIBLE_DEVICES=$(($i + 1)) tritonserver --model-repository $model_repo --http-port $http_port --grpc-port $grpc_port --metrics-port $metrics_port &
  done
  wait
fi
if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
  echo "Running benchmark client for the Disaggregated Server"
  per_gpu_instances=2
  mode=streaming
  BLS_INSTANCE_NUM=$bls_instance_num
  Token2wav_num_gpus=(1 2 3)
  concurrent_tasks=(1 2 3 4 5 6)
  for n_gpu in ${Token2wav_num_gpus[@]}; do
    echo "Testing 1 GPU for the LLM server and $n_gpu GPUs for Token2wav servers"
    for concurrent_task in ${concurrent_tasks[@]}; do
      num_instances=$((per_gpu_instances * n_gpu))
      for i in $(seq 1 $num_instances); do
        # gRPC ports match those assigned to the Token2wav servers in stage 7 (18001, 18002, ...)
        port=$(($i + 18000))
        python3 client_grpc.py \
          --server-addr localhost \
          --server-port $port \
          --model-name cosyvoice3 \
          --num-tasks $concurrent_task \
          --mode $mode \
          --huggingface-dataset yuekai/seed_tts_cosy2 \
          --log-dir ./log_disagg_concurrent_tasks_${concurrent_task}_per_instance_total_token2wav_instances_${num_instances}_port_${port} &
      done
      wait
    done
  done
fi
if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then
  echo "stage 10: Python script CosyVoice3 TTS (LLM + CosyVoice3 Token2Wav) inference"
  datasets=(wenetspeech4tts) # wenetspeech4tts
  backend=trtllm-serve # hf, trtllm, vllm, trtllm-serve
  batch_sizes=(1)
  token2wav_batch_size=1
  for batch_size in ${batch_sizes[@]}; do
    for dataset in ${datasets[@]}; do
      output_dir=./cosyvoice3_${dataset}_${backend}_llm_batch_size_${batch_size}_token2wav_batch_size_${token2wav_batch_size}_streaming_trt
      CUDA_VISIBLE_DEVICES=0 \
      python3 infer_cosyvoice3.py \
        --output-dir $output_dir \
        --llm-model-name-or-path $huggingface_model_local_dir \
        --token2wav-path $model_scope_model_local_dir \
        --backend $backend \
        --batch-size $batch_size --token2wav-batch-size $token2wav_batch_size \
        --engine-dir $trt_engines_dir \
        --enable-trt --streaming \
        --epoch 1 \
        --split-name ${dataset} || exit 1
    done
  done
fi
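
# Example end-to-end flow (assuming the paths above point at your local checkouts):
#   bash run_cosyvoice3.sh -1 2   # clone CosyVoice, convert the checkpoint, build engines, create the model repo
#   bash run_cosyvoice3.sh 3 3    # launch trtllm-serve and the Triton server
#   bash run_cosyvoice3.sh 4 4    # run the gRPC benchmark client against them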