#!/bin/bash
# Copyright (c) 2025 NVIDIA (authors: Yuekai Zhang)
export CUDA_VISIBLE_DEVICES=0
cosyvoice_path=/workspace/CosyVoice
export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH

stage=$1
stop_stage=$2

huggingface_model_local_dir=./cosyvoice2_llm
model_scope_model_local_dir=./CosyVoice2-0.5B
trt_dtype=bfloat16
trt_weights_dir=./trt_weights_${trt_dtype}
trt_engines_dir=./trt_engines_${trt_dtype}
model_repo=./model_repo_cosyvoice2
# True: reuse the cached speech tokens, speech feats, and speaker embeddings
# in spk2info.pt; False: deploy the audio_tokenizer and speaker_embedding
# models instead.
use_spk2info_cache=True
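
# Optional argument guard (minimal sketch): stages run inclusively from
# <stage> to <stop_stage>, e.g. `bash run.sh 0 3` downloads the models,
# builds the engines, and assembles the model repository.
if [ -z "$stage" ] || [ -z "$stop_stage" ]; then
    echo "Usage: $0 <stage> <stop_stage>"
    exit 1
fi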

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
    echo "Cloning CosyVoice"
    git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
    cd $cosyvoice_path || exit 1
    # Belt-and-braces: make sure submodules are present even if the
    # recursive clone was interrupted.
    git submodule update --init --recursive
    cd runtime/triton_trtllm
fi
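
# Optional sanity check (sketch): the PYTHONPATH entries above point into
# this checkout, so fail early if the Matcha-TTS submodule is missing.
if [ ! -d "${cosyvoice_path}/third_party/Matcha-TTS" ]; then
    echo "Matcha-TTS submodule missing under ${cosyvoice_path}/third_party; run stage -1 first"
    exit 1
fi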

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
    echo "Downloading the CosyVoice2 LLM and the CosyVoice2-0.5B model"
    huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
    modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
    # Download spk2info.pt so the cached speech tokens, speech feats, and
    # speaker embeddings can be used directly.
    wget https://raw.githubusercontent.com/qi-hua/async_cosyvoice/main/CosyVoice2-0.5B/spk2info.pt -O $model_scope_model_local_dir/spk2info.pt
fi
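
# Optional verification (sketch): fail fast if the downloads did not land
# where the later stages expect them. Assumes the Hugging Face checkpoint
# ships a config.json, which stage 1's convert_checkpoint.py reads.
if [ $stop_stage -ge 1 ]; then
    for f in "$huggingface_model_local_dir/config.json" "$model_scope_model_local_dir/spk2info.pt"; do
        [ -f "$f" ] || { echo "missing expected file: $f"; exit 1; }
    done
fi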

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
    echo "Converting checkpoint to TensorRT weights"
    python3 scripts/convert_checkpoint.py --model_dir $huggingface_model_local_dir \
        --output_dir $trt_weights_dir \
        --dtype $trt_dtype || exit 1

    echo "Building TensorRT engines"
    trtllm-build --checkpoint_dir $trt_weights_dir \
        --output_dir $trt_engines_dir \
        --max_batch_size 16 \
        --max_num_tokens 32768 \
        --gemm_plugin $trt_dtype || exit 1

    echo "Testing TensorRT engines"
    # input_text: "Hello, may I ask what your name is?"
    python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
        --tokenizer_dir $huggingface_model_local_dir \
        --top_k 50 --top_p 0.95 --temperature 0.8 \
        --engine_dir=$trt_engines_dir || exit 1
fi
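
# Post-build check (sketch): on a single GPU, trtllm-build emits rank0.engine
# plus config.json into the engine directory; their presence is a quick
# signal that the build completed.
if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
    ls -lh $trt_engines_dir/rank0.engine $trt_engines_dir/config.json \
        || echo "warning: expected engine artifacts not found in $trt_engines_dir"
fi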

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
    echo "Creating model repository"
    rm -rf $model_repo
    mkdir -p $model_repo
    cosyvoice2_dir="cosyvoice2"
    cp -r ./model_repo/${cosyvoice2_dir} $model_repo
    cp -r ./model_repo/tensorrt_llm $model_repo
    cp -r ./model_repo/token2wav $model_repo
    if [ "$use_spk2info_cache" = "False" ]; then
        cp -r ./model_repo/audio_tokenizer $model_repo
        cp -r ./model_repo/speaker_embedding $model_repo
    fi

    ENGINE_PATH=$trt_engines_dir
    MAX_QUEUE_DELAY_MICROSECONDS=0
    MODEL_DIR=$model_scope_model_local_dir
    LLM_TOKENIZER_DIR=$huggingface_model_local_dir
    BLS_INSTANCE_NUM=4
    TRITON_MAX_BATCH_SIZE=16
    DECOUPLED_MODE=True # True for streaming, False for offline

    python3 scripts/fill_template.py -i ${model_repo}/token2wav/config.pbtxt \
        model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
    python3 scripts/fill_template.py -i ${model_repo}/${cosyvoice2_dir}/config.pbtxt \
        model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
    python3 scripts/fill_template.py -i ${model_repo}/tensorrt_llm/config.pbtxt \
        triton_backend:tensorrtllm,triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_beam_width:1,engine_dir:${ENGINE_PATH},max_tokens_in_paged_kv_cache:2560,max_attention_window_size:2560,kv_cache_free_gpu_mem_fraction:0.5,exclude_input_in_output:True,enable_kv_cache_reuse:False,batching_strategy:inflight_fused_batching,max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS},encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32
    if [ "$use_spk2info_cache" = "False" ]; then
        python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt \
            model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
        python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt \
            model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
    fi
fi
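
# Template sanity check (sketch): fill_template.py substitutes ${...}
# placeholders in each config.pbtxt, so any survivor means a parameter was
# missed in the commands above.
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
    if grep -rn '\${' $model_repo --include=config.pbtxt; then
        echo "warning: unfilled placeholders remain in $model_repo"
    fi
fi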

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
    echo "Starting Triton server"
    # Runs in the foreground; launch stages 4/5 from another shell.
    tritonserver --model-repository $model_repo
fi
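
# Alternative launch (sketch): tritonserver blocks the shell, so to drive
# stages 3-5 from one script you could background it and poll the standard
# readiness endpoint (assumes the default HTTP port 8000):
#
#   tritonserver --model-repository $model_repo &
#   until curl -sf http://localhost:8000/v2/health/ready > /dev/null; do
#       sleep 2
#   done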

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
    echo "Single-request HTTP test; only works in offline TTS mode (DECOUPLED_MODE=False in stage 2)"
    # reference-text: "For bird's nest, choose Yan Zhi Wu. This program is
    # title-sponsored by Yan Zhi Wu, focused on premium bird's nest for 26
    # years. Alternate soy milk and milk for more balanced nutrition; this
    # program is specially sponsored by Doubendou soy milk."
    # target-text: "An immersive, renewed experience. Shaping a new paradigm
    # for open-source speech synthesis, making intelligent voices more natural."
    python3 client_http.py \
        --reference-audio ./assets/prompt_audio.wav \
        --reference-text "吃燕窝就选燕之屋,本节目由26年专注高品质燕窝的燕之屋冠名播出。豆奶牛奶换着喝,营养更均衡,本节目由豆本豆豆奶特约播出。" \
        --target-text "身临其境,换新体验。塑造开源语音合成新范式,让智能语音更自然。" \
        --model-name cosyvoice2
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
    echo "Running gRPC benchmark client"
    num_task=4
    mode=streaming
    BLS_INSTANCE_NUM=4 # only used to name the log directory; keep in sync with stage 2
    python3 client_grpc.py \
        --server-addr localhost \
        --model-name cosyvoice2 \
        --num-tasks $num_task \
        --mode $mode \
        --use-spk2info-cache $use_spk2info_cache \
        --huggingface-dataset yuekai/seed_tts_cosy2 \
        --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}_spk_cache_${use_spk2info_cache}
fi
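
# Benchmark sweep (sketch, assuming the stage 3 server stays up between runs):
# repeat the client at several concurrency levels and compare the per-run log
# directories afterwards.
#
#   for num_task in 1 2 4 8; do
#       python3 client_grpc.py --server-addr localhost --model-name cosyvoice2 \
#           --num-tasks $num_task --mode $mode \
#           --use-spk2info-cache $use_spk2info_cache \
#           --huggingface-dataset yuekai/seed_tts_cosy2 \
#           --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}_spk_cache_${use_spk2info_cache}
#   done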