# run_cosyvoice3.sh
  1. #!/bin/bash
  2. # Copyright (c) 2026 NVIDIA (authors: Yuekai Zhang)
  3. export CUDA_VISIBLE_DEVICES=0
  4. cosyvoice_path=/workspace/CosyVoice
  5. export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
  6. export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH
  7. stage=$1
  8. stop_stage=$2
  9. huggingface_llm_local_dir=$cosyvoice_path/runtime/triton_trtllm/hf_cosyvoice3_llm
  10. cosyvoice3_official_model_dir=$cosyvoice_path/runtime/triton_trtllm/Fun-CosyVoice3-0.5B-2512
  11. trt_dtype=bfloat16
  12. trt_weights_dir=$cosyvoice_path/runtime/triton_trtllm/trt_weights_${trt_dtype}
  13. trt_engines_dir=$cosyvoice_path/runtime/triton_trtllm/trt_engines_${trt_dtype}
  14. model_repo_src=$cosyvoice_path/runtime/triton_trtllm/model_repo_cosyvoice3
  15. model_repo=$cosyvoice_path/runtime/triton_trtllm/model_repo_cosyvoice3_copy
  16. bls_instance_num=10
  17. if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  18. echo "Cloning CosyVoice"
  19. git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
  20. cd $cosyvoice_path
  21. git submodule update --init --recursive
  22. cd runtime/triton_trtllm
  23. fi
  24. if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  25. echo "Downloading CosyVoice3 Checkpoints"
  26. # if s3 tokenizer version is not 0.3.0
  27. if [ $(pip3 show s3tokenizer | grep -o "0\.2\.[0-9]") != "0.3.0" ]; then
  28. pip3 install --upgrade x_transformers s3tokenizer
  29. fi
  30. huggingface-cli download --local-dir $huggingface_llm_local_dir yuekai/Fun-CosyVoice3-0.5B-2512-LLM-HF
  31. huggingface-cli download --local-dir $cosyvoice3_official_model_dir yuekai/Fun-CosyVoice3-0.5B-2512-FP16-ONNX
  32. huggingface-cli download --local-dir $cosyvoice3_official_model_dir FunAudioLLM/Fun-CosyVoice3-0.5B-2512
  33. fi
  34. if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  35. echo "Converting checkpoint to TensorRT weights"
  36. python3 scripts/convert_checkpoint.py --model_dir $huggingface_llm_local_dir \
  37. --output_dir $trt_weights_dir \
  38. --dtype $trt_dtype || exit 1
  39. echo "Building TensorRT engines"
  40. trtllm-build --checkpoint_dir $trt_weights_dir \
  41. --output_dir $trt_engines_dir \
  42. --max_batch_size 64 \
  43. --max_num_tokens 32768 \
  44. --gemm_plugin $trt_dtype || exit 1
  45. echo "Testing TensorRT engines"
  46. python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
  47. --tokenizer_dir $huggingface_llm_local_dir \
  48. --top_k 50 --top_p 0.95 --temperature 0.8 \
  49. --engine_dir=$trt_engines_dir || exit 1
  50. fi
  51. if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  52. echo "Creating CosyVoice3 model repository"
  53. rm -rf $model_repo
  54. mkdir -p $model_repo
  55. # Copy all modules from template source
  56. cp -r ${model_repo_src}/cosyvoice3 $model_repo/
  57. cp -r ${model_repo_src}/token2wav $model_repo/
  58. cp -r ${model_repo_src}/vocoder $model_repo/
  59. cp -r ${model_repo_src}/audio_tokenizer $model_repo/
  60. cp -r ${model_repo_src}/speaker_embedding $model_repo/
  61. MAX_QUEUE_DELAY_MICROSECONDS=0
  62. MODEL_DIR=$cosyvoice3_official_model_dir
  63. LLM_TOKENIZER_DIR=$huggingface_llm_local_dir
  64. BLS_INSTANCE_NUM=$bls_instance_num
  65. TRITON_MAX_BATCH_SIZE=1
  66. DECOUPLED_MODE=True # False for offline TTS
  67. python3 scripts/fill_template.py -i ${model_repo}/cosyvoice3/config.pbtxt model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  68. python3 scripts/fill_template.py -i ${model_repo}/token2wav/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  69. python3 scripts/fill_template.py -i ${model_repo}/vocoder/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  70. python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  71. python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  72. fi
  73. if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  74. echo "Starting CosyVoice3 Triton server and LLM using trtllm-serve"
  75. CUDA_VISIBLE_DEVICES=0 mpirun -np 1 --allow-run-as-root --oversubscribe trtllm-serve serve --tokenizer $huggingface_llm_local_dir $trt_engines_dir --max_batch_size 64 --kv_cache_free_gpu_memory_fraction 0.4 &
  76. CUDA_VISIBLE_DEVICES=0 tritonserver --model-repository $model_repo --http-port 18000 --grpc-port 18001 --metrics-port 18002 &
  77. wait
  78. fi
  79. if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  80. echo "Running benchmark client for CosyVoice3"
  81. num_task=4
  82. mode=streaming
  83. BLS_INSTANCE_NUM=$bls_instance_num
  84. python3 client_grpc.py \
  85. --server-addr localhost \
  86. --server-port 18001 \
  87. --model-name cosyvoice3 \
  88. --num-tasks $num_task \
  89. --mode $mode \
  90. --huggingface-dataset yuekai/seed_tts_cosy2 \
  91. --log-dir ./log_cosyvoice3_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}
  92. fi
  93. if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  94. echo "stage 5: Python script CosyVoice3 TTS (LLM + CosyVoice3 Token2Wav) inference"
  95. datasets=(wenetspeech4tts) # wenetspeech4tts
  96. backend=trtllm # hf, trtllm, vllm, trtllm-serve
  97. batch_sizes=(16 8 4 2 1)
  98. token2wav_batch_size=1 # Only support 1 for now
  99. for batch_size in ${batch_sizes[@]}; do
  100. for dataset in ${datasets[@]}; do
  101. output_dir=./cosyvoice3_${dataset}_${backend}_llm_batch_size_${batch_size}_token2wav_batch_size_${token2wav_batch_size}_offline_tts_trt
  102. CUDA_VISIBLE_DEVICES=0 \
  103. python3 infer_cosyvoice3.py \
  104. --output-dir $output_dir \
  105. --llm-model-name-or-path $huggingface_llm_local_dir \
  106. --token2wav-path $cosyvoice3_official_model_dir \
  107. --backend $backend \
  108. --batch-size $batch_size --token2wav-batch-size $token2wav_batch_size \
  109. --engine-dir $trt_engines_dir \
  110. --enable-trt \
  111. --epoch 3 \
  112. --split-name ${dataset} || exit 1
  113. done
  114. done
  115. fi