#!/bin/bash
# Copyright (c) 2025 NVIDIA (authors: Yuekai Zhang)
export CUDA_VISIBLE_DEVICES=0

# cosyvoice_path=/workspace/CosyVoice  # alternative checkout location, overridden below
cosyvoice_path=/workspace_yuekai/tts/CosyVoice
stepaudio2_path=/workspace_yuekai/tts/Step-Audio2
export PYTHONPATH=${stepaudio2_path}:$PYTHONPATH
export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH

stage=$1
stop_stage=$2
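# Usage: bash run_stepaudio2_dit_token2wav.sh <stage> <stop_stage>
# Every stage whose index falls within [stage, stop_stage] is executed.
# Stage map: -1 clone CosyVoice | 0 download models | 1 build TensorRT-LLM engines |
#            2 create Triton model repository | 3/30 launch Triton server(s) |
#            4 single HTTP request test | 5/50 gRPC benchmark clients |
#            6 offline inference benchmark | 7 streaming inference |
#            8 trtllm-serve | 9 curl sanity check against trtllm-serve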
N_GPUS=2 # set the number of GPUs to use

huggingface_model_local_dir=./cosyvoice2_llm
model_scope_model_local_dir=./CosyVoice2-0.5B

trt_dtype=bfloat16
trt_weights_dir=./trt_weights_${trt_dtype}
trt_engines_dir=./trt_engines_${trt_dtype}

model_repo=./model_repo_cosyvoice2_dit
use_spk2info_cache=False

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  echo "Cloning CosyVoice"
  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
  cd $cosyvoice_path
  git submodule update --init --recursive
  cd runtime/triton_trtllm
fi

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  echo "Downloading CosyVoice2-0.5B"
  # see https://github.com/nvidia-china-sae/mair-hub/blob/main/rl-tutorial/cosyvoice_llm/pretrained_to_huggingface.py
  huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
  modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
  # download spk2info.pt to directly use cached speech tokens, speech feats, and embeddings
  wget https://raw.githubusercontent.com/qi-hua/async_cosyvoice/main/CosyVoice2-0.5B/spk2info.pt -O $model_scope_model_local_dir/spk2info.pt
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  echo "Converting checkpoint to TensorRT weights"
  python3 scripts/convert_checkpoint.py --model_dir $huggingface_model_local_dir \
    --output_dir $trt_weights_dir \
    --dtype $trt_dtype || exit 1

  echo "Building TensorRT engines"
  trtllm-build --checkpoint_dir $trt_weights_dir \
    --output_dir $trt_engines_dir \
    --max_batch_size 16 \
    --max_num_tokens 32768 \
    --gemm_plugin $trt_dtype || exit 1

  echo "Testing TensorRT engines"
  python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
    --tokenizer_dir $huggingface_model_local_dir \
    --top_k 50 --top_p 0.95 --temperature 0.8 \
    --engine_dir=$trt_engines_dir || exit 1
fi
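
# Optional sanity check (a sketch; assumes trtllm-build left config.json next to the
# generated engine files in $trt_engines_dir — uncomment if useful):
# if [ ! -f $trt_engines_dir/config.json ]; then
#   echo "TensorRT-LLM engine build looks incomplete: $trt_engines_dir/config.json not found"
#   exit 1
# fi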

# Alternative stage 2 (kept for reference): same repository setup but keeping the
# tensorrt_llm backend model and using TRITON_MAX_BATCH_SIZE=16 / BLS_INSTANCE_NUM=1.
# if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
# echo "Creating model repository"
# rm -rf $model_repo
# mkdir -p $model_repo
# cosyvoice2_dir="cosyvoice2_dit"
# token2wav_dir="token2wav_dit"
# cp -r ./model_repo/${cosyvoice2_dir} $model_repo
# cp -r ./model_repo/tensorrt_llm $model_repo
# cp -r ./model_repo/${token2wav_dir} $model_repo
# #if [ $use_spk2info_cache == "False" ]; then
# cp -r ./model_repo/audio_tokenizer $model_repo
# cp -r ./model_repo/speaker_embedding $model_repo
# #fi
# ENGINE_PATH=$trt_engines_dir
# MAX_QUEUE_DELAY_MICROSECONDS=0
# MODEL_DIR=$model_scope_model_local_dir
# LLM_TOKENIZER_DIR=$huggingface_model_local_dir
# BLS_INSTANCE_NUM=1
# TRITON_MAX_BATCH_SIZE=16
# DECOUPLED_MODE=True # True for streaming, False for offline
# STEP_AUDIO_MODEL_DIR=/workspace_yuekai/tts/CosyVoice/runtime/triton_trtllm/Step-Audio-2-mini/token2wav
# python3 scripts/fill_template.py -i ${model_repo}/${token2wav_dir}/config.pbtxt model_dir:${STEP_AUDIO_MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
# python3 scripts/fill_template.py -i ${model_repo}/${cosyvoice2_dir}/config.pbtxt model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
# python3 scripts/fill_template.py -i ${model_repo}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_beam_width:1,engine_dir:${ENGINE_PATH},max_tokens_in_paged_kv_cache:2560,max_attention_window_size:2560,kv_cache_free_gpu_mem_fraction:0.5,exclude_input_in_output:True,enable_kv_cache_reuse:False,batching_strategy:inflight_fused_batching,max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS},encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32
# #if [ $use_spk2info_cache == "False" ]; then
# python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
# python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
# #fi
# fi

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  echo "Creating model repository (async mode)"
  rm -rf $model_repo
  mkdir -p $model_repo
  cosyvoice2_dir="cosyvoice2_dit"
  token2wav_dir="token2wav_dit"

  cp -r ./model_repo/${cosyvoice2_dir} $model_repo
  cp -r ./model_repo/tensorrt_llm $model_repo
  cp -r ./model_repo/${token2wav_dir} $model_repo
  #if [ $use_spk2info_cache == "False" ]; then
  cp -r ./model_repo/audio_tokenizer $model_repo
  cp -r ./model_repo/speaker_embedding $model_repo
  #fi

  ENGINE_PATH=$trt_engines_dir
  MAX_QUEUE_DELAY_MICROSECONDS=0
  MODEL_DIR=$model_scope_model_local_dir
  LLM_TOKENIZER_DIR=$huggingface_model_local_dir
  BLS_INSTANCE_NUM=4
  TRITON_MAX_BATCH_SIZE=1
  DECOUPLED_MODE=True # True for streaming, False for offline
  STEP_AUDIO_MODEL_DIR=/workspace_yuekai/tts/CosyVoice/runtime/triton_trtllm/Step-Audio-2-mini/token2wav

  python3 scripts/fill_template.py -i ${model_repo}/${token2wav_dir}/config.pbtxt model_dir:${STEP_AUDIO_MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/${cosyvoice2_dir}/config.pbtxt model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/tensorrt_llm/config.pbtxt triton_backend:tensorrtllm,triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_beam_width:1,engine_dir:${ENGINE_PATH},max_tokens_in_paged_kv_cache:2560,max_attention_window_size:2560,kv_cache_free_gpu_mem_fraction:0.5,exclude_input_in_output:True,enable_kv_cache_reuse:False,batching_strategy:inflight_fused_batching,max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS},encoder_input_features_data_type:TYPE_FP16,logits_datatype:TYPE_FP32
  #if [ $use_spk2info_cache == "False" ]; then
  python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  #fi

  # Drop the tensorrt_llm backend model after templating; in this async setup the LLM
  # is served separately (see trtllm-serve in stage 8).
  rm -rf $model_repo/tensorrt_llm
  # mv $model_repo/cosyvoice2_dit/1 $model_repo/cosyvoice2_dit/4
fi
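
# Stages 3 and 30 launch one tritonserver process per visible GPU; the HTTP, gRPC,
# and metrics ports are offset by the GPU index (19000/18000/17000 + i) so the
# instances do not collide.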

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  echo "Starting Triton servers on $N_GPUS GPUs"
  for i in $(seq 0 $(($N_GPUS - 1))); do
    echo "Starting server on GPU $i"
    http_port=$((19000 + $i))
    grpc_port=$((18000 + $i))
    metrics_port=$((17000 + $i))
    CUDA_VISIBLE_DEVICES=$i tritonserver --model-repository $model_repo --http-port $http_port --grpc-port $grpc_port --metrics-port $metrics_port &
  done
  echo "Servers are running in the background. Press Ctrl+C to stop them and the script."
  wait
fi
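
# Optional readiness probe (Triton's standard KServe v2 health endpoint; adjust the
# port if you changed the offsets above):
#   curl -sf http://localhost:19000/v2/health/ready && echo "server on GPU 0 is ready"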

if [ $stage -le 30 ] && [ $stop_stage -ge 30 ]; then
  N_GPUS=1
  echo "Starting Triton server on $N_GPUS GPU"
  for i in $(seq 0 $(($N_GPUS - 1))); do
    echo "Starting server on GPU $i"
    http_port=$((19000 + $i))
    grpc_port=$((18000 + $i))
    metrics_port=$((17000 + $i))
    CUDA_VISIBLE_DEVICES=0 tritonserver --model-repository $model_repo --http-port $http_port --grpc-port $grpc_port --metrics-port $metrics_port &
  done
  echo "The server is running in the background. Press Ctrl+C to stop it and the script."
  wait
fi

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  echo "Single-request HTTP test (only works in offline TTS mode)"
  python3 client_http.py \
    --reference-audio ./assets/prompt_audio.wav \
    --reference-text "吃燕窝就选燕之屋,本节目由26年专注高品质燕窝的燕之屋冠名播出。豆奶牛奶换着喝,营养更均衡,本节目由豆本豆豆奶特约播出。" \
    --target-text "身临其境,换新体验。塑造开源语音合成新范式,让智能语音更自然。" \
    --model-name cosyvoice2
fi
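
# Note: the HTTP test above expects the offline (non-decoupled) configuration; if the
# repository was templated with DECOUPLED_MODE=True in stage 2, re-run stage 2 with
# DECOUPLED_MODE=False first.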

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  echo "Running gRPC benchmark clients against $N_GPUS GPUs"
  num_task=1
  mode=streaming
  BLS_INSTANCE_NUM=4
  for i in $(seq 0 $(($N_GPUS - 1))); do
    grpc_port=$((18000 + $i))
    echo "Running client for server on localhost:$grpc_port"
    python3 client_grpc.py \
      --server-addr localhost \
      --server-port $grpc_port \
      --model-name cosyvoice2_dit \
      --num-tasks $num_task \
      --mode $mode \
      --huggingface-dataset yuekai/seed_tts_cosy2 \
      --log-dir ./log_debug_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}_gpu${i} &
  done
  wait
fi

if [ $stage -le 50 ] && [ $stop_stage -ge 50 ]; then
  N_GPUS=1
  num_task=4
  mode=streaming
  BLS_INSTANCE_NUM=4
  echo "Running gRPC benchmark client against a single GPU"
  for i in $(seq 0 $(($N_GPUS - 1))); do
    grpc_port=$((18000 + $i))
    echo "Running client for server on localhost:$grpc_port"
    python3 client_grpc.py \
      --server-addr localhost \
      --server-port $grpc_port \
      --model-name cosyvoice2_dit \
      --num-tasks $num_task \
      --mode $mode \
      --huggingface-dataset yuekai/seed_tts_cosy2 \
      --log-dir ./log_single_card_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM} &
  done
  wait
fi

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  echo "Stage 6: offline inference benchmark"
  n_gpus=1
  datasets=(wenetspeech4tts) # options: wenetspeech4tts, test_zh, zero_shot_zh
  backend=trtllm-serve # options: hf, trtllm, vllm, trtllm-serve
  # batch_sizes=(16 8 4 2 1)  # full sweep, overridden below
  batch_sizes=(16 8 4 2)
  token2wav_batch_size=1
  for batch_size in ${batch_sizes[@]}; do
    for dataset in ${datasets[@]}; do
      output_dir=./${dataset}_${backend}_llm_batch_size_${batch_size}_token2wav_batch_size_${token2wav_batch_size}
      CUDA_VISIBLE_DEVICES=1 \
      python3 offline_inference.py \
        --output-dir $output_dir \
        --llm-model-name-or-path $huggingface_model_local_dir \
        --token2wav-path $model_scope_model_local_dir \
        --backend $backend \
        --batch-size $batch_size --token2wav-batch-size $token2wav_batch_size \
        --engine-dir $trt_engines_dir \
        --split-name ${dataset} || exit 1
    done
  done
fi
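
# Each (dataset, batch_size) combination gets its own $output_dir above, so runs with
# different batch sizes can be compared side by side.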

if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
  CUDA_VISIBLE_DEVICES=2 python3 streaming_inference.py --enable-trt --strategy exponential
fi

if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
  CUDA_VISIBLE_DEVICES=0 mpirun -np 1 --allow-run-as-root --oversubscribe trtllm-serve serve --tokenizer $huggingface_model_local_dir $trt_engines_dir --max_batch_size 16 --kv_cache_free_gpu_memory_fraction 0.4
fi
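
# trtllm-serve exposes an OpenAI-compatible API (port 8000 by default). Stage 9 below
# sends a chat-completions request to it, asking the LLM to continue a partial sequence
# of speech tokens (<|s_...|>) and stopping at <|eos1|>.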

if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
  curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
      "model": "trt_engines_bfloat16",
      "messages": [
        {"role": "user", "content": "Where is New York?"},
        {"role": "assistant", "content": "<|s_1708|><|s_2050|><|s_2159|>"}
      ],
      "max_tokens": 512,
      "temperature": 0.8,
      "top_p": 0.95,
      "top_k": 50,
      "stop": ["<|eos1|>"],
      "repetition_penalty": 1.2,
      "stream": false
    }'
fi