#!/bin/bash
# Copyright (c) 2025 NVIDIA (authors: Yuekai Zhang)
export CUDA_VISIBLE_DEVICES=0

cosyvoice_path=/workspace/CosyVoice
stepaudio2_path=/workspace/Step-Audio2
export PYTHONPATH=${stepaudio2_path}:$PYTHONPATH
export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH

stage=$1
stop_stage=$2
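
# Each stage below runs when stage <= N <= stop_stage, e.g.:
#   bash run_stepaudio2_dit_token2wav.sh 0 3   # run stages 0 through 3
#   bash run_stepaudio2_dit_token2wav.sh 2 2   # run only stage 2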

huggingface_model_local_dir=./cosyvoice2_llm
model_scope_model_local_dir=./CosyVoice2-0.5B
step_audio_model_dir=./Step-Audio-2-mini

trt_dtype=bfloat16
trt_weights_dir=./trt_weights_${trt_dtype}
trt_engines_dir=./trt_engines_${trt_dtype}

model_repo=./model_repo_cosyvoice2_dit
bls_instance_num=4

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  echo "Cloning Step-Audio2 (trt branch)"
  git clone https://github.com/yuekaizhang/Step-Audio2.git -b trt $stepaudio2_path
  echo "Cloning CosyVoice"
  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
  cd $cosyvoice_path
  git submodule update --init --recursive
  cd runtime/triton_trtllm
fi

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  echo "Downloading CosyVoice2-0.5B"
  # see https://github.com/nvidia-china-sae/mair-hub/blob/main/rl-tutorial/cosyvoice_llm/pretrained_to_huggingface.py
  huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
  modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir

  echo "Downloading Step-Audio-2-mini"
  huggingface-cli download --local-dir $step_audio_model_dir stepfun-ai/Step-Audio-2-mini
  cd $stepaudio2_path/token2wav
  wget https://huggingface.co/yuekai/cosyvoice2_dit_flow_matching_onnx/resolve/main/flow.decoder.estimator.fp32.dynamic_batch.onnx -O flow.decoder.estimator.fp32.dynamic_batch.onnx
  wget https://huggingface.co/yuekai/cosyvoice2_dit_flow_matching_onnx/resolve/main/flow.decoder.estimator.chunk.fp32.dynamic_batch.simplify.onnx -O flow.decoder.estimator.chunk.fp32.dynamic_batch.simplify.onnx
  cd -
fi
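
# The two ONNX flow-matching estimators (an offline variant and a chunk/streaming
# variant) should now sit next to the Step-Audio2 token2wav code; a quick check:
#   ls -lh $stepaudio2_path/token2wav/flow.decoder.estimator.*.onnx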

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  echo "Converting checkpoint to TensorRT weights"
  python3 scripts/convert_checkpoint.py --model_dir $huggingface_model_local_dir \
    --output_dir $trt_weights_dir \
    --dtype $trt_dtype || exit 1

  echo "Building TensorRT engines"
  trtllm-build --checkpoint_dir $trt_weights_dir \
    --output_dir $trt_engines_dir \
    --max_batch_size 16 \
    --max_num_tokens 32768 \
    --gemm_plugin $trt_dtype || exit 1

  echo "Testing TensorRT engines"
  # Test prompt: "Hello, may I ask what your name is?"
  python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
    --tokenizer_dir $huggingface_model_local_dir \
    --top_k 50 --top_p 0.95 --temperature 0.8 \
    --engine_dir=$trt_engines_dir || exit 1
fi
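
# A successful build leaves the serialized engine and its build config
# (typically rank0.engine and config.json) under $trt_engines_dir; trtllm-serve
# and the Triton BLS model in later stages load the engine from that directory.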

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  echo "Creating model repository (decoupled/async mode)"
  rm -rf $model_repo
  mkdir -p $model_repo
  cosyvoice2_dir="cosyvoice2_dit"
  token2wav_dir="token2wav_dit"

  cp -r ./model_repo/${cosyvoice2_dir} $model_repo
  cp -r ./model_repo/${token2wav_dir} $model_repo
  cp -r ./model_repo/audio_tokenizer $model_repo
  cp -r ./model_repo/speaker_embedding $model_repo

  ENGINE_PATH=$trt_engines_dir
  MAX_QUEUE_DELAY_MICROSECONDS=0
  MODEL_DIR=$model_scope_model_local_dir
  LLM_TOKENIZER_DIR=$huggingface_model_local_dir
  BLS_INSTANCE_NUM=$bls_instance_num
  TRITON_MAX_BATCH_SIZE=1
  DECOUPLED_MODE=True  # only streaming TTS mode is currently supported with NVIDIA Triton
  STEP_AUDIO_MODEL_DIR=$step_audio_model_dir/token2wav
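
  # fill_template.py substitutes ${key} placeholders inside each config.pbtxt in
  # place. As a sketch (placeholder name assumed from the calls below), a line like
  #   parameters { key: "model_dir" value: { string_value: "${model_dir}" } }
  # becomes
  #   parameters { key: "model_dir" value: { string_value: "./CosyVoice2-0.5B" } }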
  python3 scripts/fill_template.py -i ${model_repo}/${token2wav_dir}/config.pbtxt model_dir:${STEP_AUDIO_MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/${cosyvoice2_dir}/config.pbtxt model_dir:${MODEL_DIR},bls_instance_num:${BLS_INSTANCE_NUM},llm_tokenizer_dir:${LLM_TOKENIZER_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},decoupled_mode:${DECOUPLED_MODE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/audio_tokenizer/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
  python3 scripts/fill_template.py -i ${model_repo}/speaker_embedding/config.pbtxt model_dir:${MODEL_DIR},triton_max_batch_size:${TRITON_MAX_BATCH_SIZE},max_queue_delay_microseconds:${MAX_QUEUE_DELAY_MICROSECONDS}
fi

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  echo "Starting the Token2Wav Triton server and the CosyVoice2 LLM via trtllm-serve"
  tritonserver --model-repository $model_repo --http-port 18000 &
  mpirun -np 1 --allow-run-as-root --oversubscribe \
    trtllm-serve serve --tokenizer $huggingface_model_local_dir $trt_engines_dir \
    --max_batch_size 16 --kv_cache_free_gpu_memory_fraction 0.4 &
  wait

  # Test the OpenAI-compatible LLM endpoint with curl:
  # curl http://localhost:8000/v1/chat/completions \
  #   -H "Content-Type: application/json" \
  #   -d '{
  #     "model": "trt_engines_bfloat16",
  #     "messages": [{"role": "user", "content": "Where is New York?"},
  #                  {"role": "assistant", "content": "<|s_1708|><|s_2050|><|s_2159|>"}],
  #     "max_tokens": 512,
  #     "temperature": 0.8,
  #     "top_p": 0.95,
  #     "top_k": 50,
  #     "stop": ["<|eos1|>"],
  #     "repetition_penalty": 1.2,
  #     "stream": false
  #   }'
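
  # Quick liveness check for the Triton server (standard KServe v2 health endpoint):
  # curl -s http://localhost:18000/v2/health/ready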
fi

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  echo "Running benchmark client"
  num_task=4
  mode=streaming
  BLS_INSTANCE_NUM=$bls_instance_num
  python3 client_grpc.py \
    --server-addr localhost \
    --server-port 8001 \
    --model-name cosyvoice2_dit \
    --num-tasks $num_task \
    --mode $mode \
    --huggingface-dataset yuekai/seed_tts_cosy2 \
    --log-dir ./log_single_gpu_concurrent_tasks_${num_task}_${mode}_bls_${BLS_INSTANCE_NUM}
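
  # For a quick sanity check before a full benchmark, the same client can
  # presumably be run with a single concurrent task (same flags as above):
  # python3 client_grpc.py --server-addr localhost --server-port 8001 \
  #   --model-name cosyvoice2_dit --num-tasks 1 --mode streaming \
  #   --huggingface-dataset yuekai/seed_tts_cosy2 --log-dir ./log_sanity_check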
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  echo "Stage 5: offline TTS (CosyVoice2 LLM + Step-Audio2-mini DiT Token2Wav) inference using a single Python script"
  datasets=(wenetspeech4tts)  # wenetspeech4tts, test_zh, zero_shot_zh
  backend=trtllm  # hf, trtllm, vllm, trtllm-serve
  batch_sizes=(16)
  token2wav_batch_size=1
  for batch_size in ${batch_sizes[@]}; do
    for dataset in ${datasets[@]}; do
      output_dir=./${dataset}_${backend}_llm_batch_size_${batch_size}_token2wav_batch_size_${token2wav_batch_size}
      CUDA_VISIBLE_DEVICES=1 \
        python3 offline_inference.py \
        --output-dir $output_dir \
        --llm-model-name-or-path $huggingface_model_local_dir \
        --token2wav-path $step_audio_model_dir/token2wav \
        --backend $backend \
        --batch-size $batch_size --token2wav-batch-size $token2wav_batch_size \
        --engine-dir $trt_engines_dir \
        --split-name ${dataset} || exit 1
    done
  done
fi
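
# Other backends and datasets can be swapped in via the arrays above; each run
# presumably writes its synthesized audio under a separate, self-describing $output_dir.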

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  echo "Running Step-Audio2-mini DiT Token2Wav inference using a single Python script"
  export CUDA_VISIBLE_DEVICES=1
  # Note: uses pre-computed CosyVoice2 tokens
  python3 streaming_inference.py --enable-trt --strategy equal  # strategies: equal, exponential
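
  # The exponential strategy presumably grows the token chunk size over
  # successive chunks rather than keeping it fixed; swap it in with:
  # python3 streaming_inference.py --enable-trt --strategy exponential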

  # Offline Token2Wav inference:
  # python3 token2wav_dit.py --enable-trt
fi