#!/bin/bash
# Copyright 2024 Alibaba Inc. All Rights Reserved.
. ./path.sh || exit 1;

stage=-1
stop_stage=3

data_url=www.openslr.org/resources/60
data_dir=/mnt/lyuxiang.lx/data/tts/openslr/libritts
pretrained_model_dir=../../../pretrained_models/Fun-CosyVoice3-0.5B
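# Each numbered block below runs only when stage <= N <= stop_stage, so any
# single step can be re-run in isolation. For example, to redo only the
# parquet preparation (stage 3), edit the two variables above to:
#   stage=3
#   stop_stage=3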
if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
  echo "Data Download"
  for part in dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500; do
    local/download_and_untar.sh ${data_dir} ${data_url} ${part}
  done
fi
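# After stage -1, each subset should sit under ${data_dir}/LibriTTS/<part>
# (this layout is assumed from the --src_dir argument used in stage 0 below);
# Kaldi-style download_and_untar.sh scripts typically skip parts that were
# already downloaded and extracted.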
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
  echo "Data preparation, prepare wav.scp/text/utt2spk/spk2utt"
  for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
    mkdir -p data/$x
    # NOTE in CosyVoice3, we add instruct in sequence
    python local/prepare_data.py --src_dir $data_dir/LibriTTS/$x --des_dir data/$x --instruct "You are a helpful assistant.<|endofprompt|>"
  done
fi
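# The four files written into data/$x follow the usual Kaldi-style layout,
# one entry per line (IDs below are illustrative, not taken from the recipe):
#   wav.scp   <utt_id> <path_to_wav>
#   text      <utt_id> <transcript>
#   utt2spk   <utt_id> <spk_id>
#   spk2utt   <spk_id> <utt_id1> <utt_id2> ...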
# NOTE embedding/token extraction is not necessary now as we support online feature extraction
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
  echo "Prepare required parquet format data; you should have prepared wav.scp/text/utt2spk/spk2utt (utt2embedding.pt/spk2embedding.pt/utt2speech_token.pt are no longer required with online feature extraction)"
  for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
    mkdir -p data/$x/parquet
    ../../../tools/make_parquet_list.py --num_utts_per_parquet 1000 \
      --num_processes 10 \
      --src_dir data/$x \
      --des_dir data/$x/parquet
  done
fi
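# make_parquet_list.py shards each subset into parquet files (roughly 1000
# utterances per shard, per --num_utts_per_parquet) and writes
# data/$x/parquet/data.list, which stage 5 below concatenates into
# data/train.data.list and data/dev.data.list for train.py.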
# train llm/flow/hifigan
export CUDA_VISIBLE_DEVICES="0"
num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
job_id=1986
dist_backend="nccl"
num_workers=2
prefetch=100
train_engine=torch_ddp
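# num_gpus is derived by counting the comma-separated entries in
# CUDA_VISIBLE_DEVICES; e.g. with
#   export CUDA_VISIBLE_DEVICES="0,1,2,3"
# the awk call above yields num_gpus=4 and torchrun spawns 4 workers per node.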
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
  echo "Run train. We support llm/flow/hifigan training"
  if [ $train_engine == 'deepspeed' ]; then
    echo "Notice deepspeed has its own optimizer config. Modify conf/ds_stage2.json if necessary"
  fi
  cat data/{train-clean-100,train-clean-360,train-other-500}/parquet/data.list > data/train.data.list
  cat data/{dev-clean,dev-other}/parquet/data.list > data/dev.data.list
  for model in llm flow hifigan; do
    torchrun --nnodes=1 --nproc_per_node=$num_gpus \
        --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint="localhost:1234" \
      ../../../cosyvoice/bin/train.py \
      --train_engine $train_engine \
      --config conf/cosyvoice3.yaml \
      --train_data data/train.data.list \
      --cv_data data/dev.data.list \
      --qwen_pretrain_path $pretrained_model_dir/CosyVoice-BlankEN \
      --onnx_path $pretrained_model_dir \
      --model $model \
      --checkpoint $pretrained_model_dir/$model.pt \
      --model_dir `pwd`/exp/cosyvoice3/$model/$train_engine \
      --tensorboard_dir `pwd`/tensorboard/cosyvoice3/$model/$train_engine \
      --ddp.dist_backend $dist_backend \
      --num_workers ${num_workers} \
      --prefetch ${prefetch} \
      --pin_memory \
      --use_amp \
      --deepspeed_config ./conf/ds_stage2.json \
      --deepspeed.save_states model+optimizer
  done
fi
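# To train with DeepSpeed instead of plain DDP, set train_engine=deepspeed
# above. The --deepspeed_config / --deepspeed.save_states flags are passed
# unconditionally and presumably take effect only under that engine (assumed
# from the engine check above; verify against cosyvoice/bin/train.py).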
# average model
average_num=5
if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
  for model in llm flow hifigan; do
    decode_checkpoint=`pwd`/exp/cosyvoice3/$model/$train_engine/${model}.pt
    echo "do model average and final checkpoint is $decode_checkpoint"
    python ../../../cosyvoice/bin/average_model.py \
      --dst_model $decode_checkpoint \
      --src_path `pwd`/exp/cosyvoice3/$model/$train_engine \
      --num ${average_num} \
      --val_best
  done
fi
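# average_model.py merges ${average_num} checkpoints from --src_path into a
# single --dst_model; with --val_best the checkpoints with the best validation
# loss are chosen rather than simply the most recent ones (behavior assumed
# from the WeNet-style average_model.py this recipe follows; check the script).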
if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
  echo "Export your model for inference speedup. Remember to copy your llm or flow model to model_dir"
  python ../../../cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
  python ../../../cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
fi
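# Export reads the model files inside $pretrained_model_dir, so copy your
# averaged checkpoints from exp/cosyvoice3/... over the pretrained ones first,
# e.g. (illustrative command, not part of the recipe):
#   cp exp/cosyvoice3/llm/torch_ddp/llm.pt $pretrained_model_dir/llm.pt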