# cosyvoice2.yaml

# set random seed, so that you may reproduce your result.
__set_seed1: !apply:random.seed [1986]
__set_seed2: !apply:numpy.random.seed [1986]
__set_seed3: !apply:torch.manual_seed [1986]
__set_seed4: !apply:torch.cuda.manual_seed_all [1986]
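
# The !apply/!new/!name/!ref tags are HyperPyYAML syntax: !apply calls a function at
# load time, !new instantiates a class, !name builds a partial, and !ref points at
# another top-level key. A minimal loading sketch (exact call site and override
# values vary by repo version; the path below is illustrative only):
#   from hyperpyyaml import load_hyperpyyaml
#   with open('cosyvoice2.yaml') as f:
#       configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': '/path/to/qwen'})
#   llm, flow, hift = configs['llm'], configs['flow'], configs['hift']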

# fixed params
sample_rate: 24000
llm_input_size: 896
llm_output_size: 896
spk_embed_dim: 192
qwen_pretrain_path: ''
token_frame_rate: 25
token_mel_ratio: 2
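
# Rate arithmetic (derived from the values above): 25 speech tokens/s, upsampled by
# token_mel_ratio 2, gives 50 mel frames/s; at sample_rate 24000 that implies a mel
# hop of 24000 / 50 = 480 samples, matching feat_extractor and compute_f0 below.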

# stream related params
chunk_size: 1 # streaming inference chunk size, in seconds
num_decoding_left_chunks: 2 # number of left chunks the streaming flow decoder attends to
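
# Chunk arithmetic (derived): one chunk = chunk_size (1 s) * token_frame_rate (25)
# = 25 speech tokens = 50 mel frames after token_mel_ratio upsampling, so
# num_decoding_left_chunks: 2 keeps roughly 2 s of left context in the decoder cache.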

# model params
# for every class/function defined in this repo, we use !new: or !name: for initialization, so that users can trace each class/function back from this single yaml.
# for system/third_party classes/functions, we do not require this.

llm: !new:cosyvoice.llm.llm.Qwen2LM
    llm_input_size: !ref <llm_input_size>
    llm_output_size: !ref <llm_output_size>
    speech_token_size: 6561
    length_normalized_loss: True
    lsm_weight: 0
    mix_ratio: [5, 15]
    llm: !new:cosyvoice.llm.llm.Qwen2Encoder
        pretrain_path: !ref <qwen_pretrain_path>
    sampling: !name:cosyvoice.utils.common.ras_sampling
        top_p: 0.8
        top_k: 25
        win_size: 10
        tau_r: 0.1
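
# Note: speech_token_size 6561 = 3^8, which lines up with an FSQ-style speech token
# codebook; ras_sampling appears to implement repetition-aware sampling, i.e.
# top_p/top_k nucleus sampling with a fallback draw when the sampled token repeats
# too often within the last win_size decoded tokens (threshold governed by tau_r).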

flow: !new:cosyvoice.flow.flow.CausalMaskedDiffWithXvec
    input_size: 512
    output_size: 80
    spk_embed_dim: !ref <spk_embed_dim>
    output_type: 'mel'
    vocab_size: 6561
    input_frame_rate: !ref <token_frame_rate>
    only_mask_loss: True
    token_mel_ratio: !ref <token_mel_ratio>
    pre_lookahead_len: 3
    encoder: !new:cosyvoice.transformer.upsample_encoder.UpsampleConformerEncoder
        output_size: 512
        attention_heads: 8
        linear_units: 2048
        num_blocks: 6
        dropout_rate: 0.1
        positional_dropout_rate: 0.1
        attention_dropout_rate: 0.1
        normalize_before: True
        input_layer: 'linear'
        pos_enc_layer_type: 'rel_pos_espnet'
        selfattention_layer_type: 'rel_selfattn'
        input_size: 512
        use_cnn_module: False
        macaron_style: False
        static_chunk_size: !ref <chunk_size> * <token_frame_rate>
    decoder: !new:cosyvoice.flow.flow_matching.CausalConditionalCFM
        in_channels: 240
        n_spks: 1
        spk_emb_dim: 80
        cfm_params: !new:omegaconf.DictConfig
            content:
                sigma_min: 1e-06
                solver: 'euler'
                t_scheduler: 'cosine'
                training_cfg_rate: 0.2
                inference_cfg_rate: 0.7
                reg_loss_type: 'l1'
        estimator: !new:cosyvoice.flow.decoder.CausalConditionalDecoder
            in_channels: 320
            out_channels: 80
            channels: [256]
            dropout: 0.0
            attention_head_dim: 64
            n_blocks: 4
            num_mid_blocks: 12
            num_heads: 8
            act_fn: 'gelu'
            static_chunk_size: !ref <chunk_size> * <token_frame_rate> * <token_mel_ratio> # here we use static_chunk_size because we want to fix kv cache size during inference
            num_decoding_left_chunks: !ref <num_decoding_left_chunks>
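
# Note on the CFM block: training_cfg_rate 0.2 presumably drops the conditioning on
# ~20% of training steps to enable classifier-free guidance, which is then applied
# at inference with strength inference_cfg_rate 0.7; sampling uses an Euler ODE
# solver with a cosine timestep schedule and an L1 regression loss.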

hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
    in_channels: 80
    base_channels: 512
    nb_harmonics: 8
    sampling_rate: !ref <sample_rate>
    nsf_alpha: 0.1
    nsf_sigma: 0.003
    nsf_voiced_threshold: 10
    upsample_rates: [8, 5, 3]
    upsample_kernel_sizes: [16, 11, 7]
    istft_params:
        n_fft: 16
        hop_len: 4
    resblock_kernel_sizes: [3, 7, 11]
    resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
    source_resblock_kernel_sizes: [7, 7, 11]
    source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
    lrelu_slope: 0.1
    audio_limit: 0.99
    f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
        num_class: 1
        in_channels: 80
        cond_channels: 512
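
# Upsampling arithmetic (derived): 8 * 5 * 3 = 120x from the conv stack, times the
# iSTFT hop_len of 4, gives 480 samples per mel frame; 24000 / 480 = 50 mel frames/s,
# consistent with token_frame_rate * token_mel_ratio above.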

# gan related module
mel_spec_transform1: !name:matcha.utils.audio.mel_spectrogram
    n_fft: 1024
    num_mels: 80
    sampling_rate: !ref <sample_rate>
    hop_size: 256
    win_size: 1024
    fmin: 0
    fmax: null
    center: False

hifigan: !new:cosyvoice.hifigan.hifigan.HiFiGan
    generator: !ref <hift>
    discriminator: !new:cosyvoice.hifigan.discriminator.MultipleDiscriminator
        mpd: !new:matcha.hifigan.models.MultiPeriodDiscriminator
        mrd: !new:cosyvoice.hifigan.discriminator.MultiResolutionDiscriminator
    mel_spec_transform: [
        !ref <mel_spec_transform1>
    ]
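
# Note: mel_spec_transform1 (hop 256) is presumably the mel used for the GAN's
# spectral reconstruction loss, separate from feat_extractor (hop 480) below,
# which produces the acoustic features the flow model is trained to predict.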

# processor functions
parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
get_tokenizer: !name:cosyvoice.tokenizer.tokenizer.get_qwen_tokenizer
    token_path: !ref <qwen_pretrain_path>
    skip_special_tokens: True
allowed_special: 'all'
tokenize: !name:cosyvoice.dataset.processor.tokenize
    get_tokenizer: !ref <get_tokenizer>
    allowed_special: !ref <allowed_special>
filter: !name:cosyvoice.dataset.processor.filter
    max_length: 40960
    min_length: 100
    token_max_length: 200
    token_min_length: 1
resample: !name:cosyvoice.dataset.processor.resample
    resample_rate: !ref <sample_rate>
truncate: !name:cosyvoice.dataset.processor.truncate
    truncate_length: 24480 # must be a multiple of hop_size
feat_extractor: !name:matcha.utils.audio.mel_spectrogram
    n_fft: 1920
    num_mels: 80
    sampling_rate: !ref <sample_rate>
    hop_size: 480
    win_size: 1920
    fmin: 0
    fmax: 8000
    center: False
compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
    feat_extractor: !ref <feat_extractor>
compute_f0: !name:cosyvoice.dataset.processor.compute_f0
    sample_rate: !ref <sample_rate>
    hop_size: 480
parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
    normalize: True
shuffle: !name:cosyvoice.dataset.processor.shuffle
    shuffle_size: 1000
sort: !name:cosyvoice.dataset.processor.sort
    sort_size: 500 # sort_size should be less than shuffle_size
batch: !name:cosyvoice.dataset.processor.batch
    batch_type: 'dynamic'
    max_frames_in_batch: 2500
padding: !name:cosyvoice.dataset.processor.padding
    use_spk_embedding: False # change to True during sft
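
# Note: with batch_type 'dynamic', utterances are presumably packed into a batch
# until their total frame count reaches max_frames_in_batch, so the batch size
# varies with utterance length rather than being fixed.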

# dataset processor pipeline
data_pipeline: [
    !ref <parquet_opener>,
    !ref <tokenize>,
    !ref <filter>,
    !ref <resample>,
    !ref <compute_fbank>,
    !ref <parse_embedding>,
    !ref <shuffle>,
    !ref <sort>,
    !ref <batch>,
    !ref <padding>,
]
data_pipeline_gan: [
    !ref <parquet_opener>,
    !ref <tokenize>,
    !ref <filter>,
    !ref <resample>,
    !ref <truncate>,
    !ref <compute_fbank>,
    !ref <compute_f0>,
    !ref <parse_embedding>,
    !ref <shuffle>,
    !ref <sort>,
    !ref <batch>,
    !ref <padding>,
]
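
# The GAN pipeline differs from the base one only by truncate (fixed-length audio
# segments for vocoder/discriminator training) and compute_f0 (pitch for the
# NSF-style harmonic source in HiFTGenerator); all other stages are shared.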

# llm / flow train conf
train_conf:
    optim: adam
    optim_conf:
        lr: 1e-5 # change to 1e-5 during sft
    scheduler: constantlr # change to constantlr during sft
    scheduler_conf:
        warmup_steps: 2500
    max_epoch: 200
    grad_clip: 5
    accum_grad: 2
    log_interval: 100
    save_per_step: -1

# gan train conf
train_conf_gan:
    optim: adam
    optim_conf:
        lr: 0.0002 # use small lr for gan training
    scheduler: constantlr
    optim_d: adam
    optim_conf_d:
        lr: 0.0002 # use small lr for gan training
    scheduler_d: constantlr
    max_epoch: 200
    grad_clip: 5
    accum_grad: 1 # in gan training, accum_grad must be 1
    log_interval: 100
    save_per_step: -1
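
# Note: optim/optim_conf/scheduler configure the generator's optimizer and
# optim_d/optim_conf_d/scheduler_d the discriminator's; accum_grad stays at 1,
# presumably because alternating G/D updates do not mix with gradient accumulation.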