#!/usr/bin/env python3
# Copyright 2025 CosyVoice3 TRT-LLM Integration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  15. """
  16. Convert CosyVoice3 LLM to HuggingFace format with merged embeddings.
  17. This script:
  18. 1. Loads CosyVoice3 model
  19. 2. Extends tokenizer vocab with speech tokens
  20. 3. Merges speech_embedding into embed_tokens of Qwen2
  21. 4. Replaces lm_head with llm_decoder using extended vocab
  22. 5. Saves model in HuggingFace format for TRT-LLM conversion
  23. Usage:
  24. python scripts/convert_cosyvoice3_to_hf.py \
  25. --model-dir pretrained_models/Fun-CosyVoice3-0.5B \
  26. --output-dir pretrained_models/Fun-CosyVoice3-0.5B/hf_merged
  27. Then convert to TRT-LLM:
  28. trtllm-build --checkpoint_dir <output_dir> --output_dir <trt_engines_dir> ...
  29. """
import argparse
import json
import logging
import os
import sys

import torch
from transformers import AutoTokenizer

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'third_party/Matcha-TTS'))

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(description="Convert CosyVoice3 to HuggingFace format with merged embeddings")
    parser.add_argument(
        "--model-dir",
        type=str,
        default="pretrained_models/Fun-CosyVoice3-0.5B",
        help="Path to CosyVoice3 model directory",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default=None,
        help="Output directory for HuggingFace model (default: <model-dir>/hf_merged)",
    )
    parser.add_argument(
        "--dtype",
        type=str,
        default="bfloat16",
        choices=["float16", "bfloat16", "float32"],
        help="Output dtype for the model",
    )
    return parser.parse_args()


def load_cosyvoice3_model(model_dir: str):
    """Load the CosyVoice3 model for weight extraction."""
    from hyperpyyaml import load_hyperpyyaml

    hyper_yaml_path = os.path.join(model_dir, 'cosyvoice3.yaml')
    hf_llm_dir = os.path.join(model_dir, 'CosyVoice-BlankEN')
    if not os.path.exists(hyper_yaml_path):
        raise ValueError(f'{hyper_yaml_path} not found!')
    with open(hyper_yaml_path, 'r') as f:
        configs = load_hyperpyyaml(
            f,
            overrides={'qwen_pretrain_path': hf_llm_dir}
        )

    # Load the LLM only
    llm = configs['llm']
    llm_weights_path = os.path.join(model_dir, 'llm.pt')
    llm.load_state_dict(torch.load(llm_weights_path, map_location='cpu'), strict=True)
    llm.eval()
    logger.info(f"Loaded CosyVoice3 LLM from {model_dir}")
    return llm, hf_llm_dir, configs


def get_speech_token_size(llm) -> int:
    """Determine the speech token vocabulary size from the model."""
    # In CosyVoice3LM, both llm_decoder and speech_embedding are sized
    # speech_token_size + 200; the extra slots hold the speech special tokens.
    speech_embedding_size = llm.speech_embedding.num_embeddings
    # Use the full embedding size (it includes the speech special tokens).
    return speech_embedding_size


def convert_cosyvoice3_to_hf(
    model_dir: str,
    output_dir: str,
    dtype: str = "bfloat16",
):
    """
    Convert the CosyVoice3 LLM to HuggingFace format with merged embeddings.

    Merged layout (text_vocab_size = base vocab + CosyVoice3 text special tokens):
    - embed_tokens[0:text_vocab_size] = original text embeddings
    - embed_tokens[text_vocab_size:text_vocab_size + speech_token_size] = speech_embedding
    - lm_head[text_vocab_size:text_vocab_size + speech_token_size] = llm_decoder

    Args:
        model_dir: Path to the CosyVoice3 model
        output_dir: Path to save the HF model
        dtype: Data type for saving
    """
    logger.info(f"Loading CosyVoice3 model from {model_dir}")

    # 1. Load CosyVoice3 components
    cosyvoice3_llm, hf_llm_dir, configs = load_cosyvoice3_model(model_dir)

    # Extract the key components
    qwen_model = cosyvoice3_llm.llm.model  # Qwen2ForCausalLM
    speech_embedding = cosyvoice3_llm.speech_embedding  # Embedding for speech tokens
    llm_decoder = cosyvoice3_llm.llm_decoder  # Linear head decoding to speech tokens
    speech_token_size = get_speech_token_size(cosyvoice3_llm)
    logger.info(f"Speech token size: {speech_token_size}")
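    # For the default Fun-CosyVoice3-0.5B config this should be
    # speech_token_size (6561) + 200 = 6761, per the sizing comment in
    # get_speech_token_size above.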
    # 2. Load the tokenizer and add CosyVoice3 text special tokens + speech tokens
    tokenizer = AutoTokenizer.from_pretrained(hf_llm_dir, trust_remote_code=True)
    base_vocab_size = len(tokenizer)
    logger.info(f"Base tokenizer vocab size: {base_vocab_size}")

    # IMPORTANT:
    # - In CosyVoice3, the LLM speech special tokens (sos/eos/task_id/fill) live INSIDE
    #   speech_embedding, i.e. they are represented as <|s_6561|>, <|s_6562|>, <|s_6563|>, <|s_6564|>.
    # - Text-level special tokens like [cough]/[laughter], however, MUST exist in the tokenizer
    #   (this mirrors `CosyVoice3Tokenizer` from `cosyvoice/tokenizer/tokenizer.py`).
    special_tokens = {
        'eos_token': '<|endoftext|>',
        'pad_token': '<|endoftext|>',
        'additional_special_tokens': [
            '<|im_start|>', '<|im_end|>', '<|endofprompt|>',
            '[breath]', '<strong>', '</strong>', '[noise]',
            '[laughter]', '[cough]', '[clucking]', '[accent]',
            '[quick_breath]',
            "<laughter>", "</laughter>",
            "[hissing]", "[sigh]", "[vocalized-noise]",
            "[lipsmack]", "[mn]", "<|endofsystem|>",
            # Phoneme tokens (kept consistent with CosyVoice3Tokenizer)
            "[AA]", "[AA0]", "[AA1]", "[AA2]", "[AE]", "[AE0]", "[AE1]", "[AE2]", "[AH]", "[AH0]", "[AH1]", "[AH2]",
            "[AO]", "[AO0]", "[AO1]", "[AO2]", "[AW]", "[AW0]", "[AW1]", "[AW2]", "[AY]", "[AY0]", "[AY1]", "[AY2]",
            "[B]", "[CH]", "[D]", "[DH]", "[EH]", "[EH0]", "[EH1]", "[EH2]", "[ER]", "[ER0]", "[ER1]", "[ER2]", "[EY]",
            "[EY0]", "[EY1]", "[EY2]", "[F]", "[G]", "[HH]", "[IH]", "[IH0]", "[IH1]", "[IH2]", "[IY]", "[IY0]", "[IY1]",
            "[IY2]", "[JH]", "[K]", "[L]", "[M]", "[N]", "[NG]", "[OW]", "[OW0]", "[OW1]", "[OW2]", "[OY]", "[OY0]",
            "[OY1]", "[OY2]", "[P]", "[R]", "[S]", "[SH]", "[T]", "[TH]", "[UH]", "[UH0]", "[UH1]", "[UH2]", "[UW]",
            "[UW0]", "[UW1]", "[UW2]", "[V]", "[W]", "[Y]", "[Z]", "[ZH]",
            "[a]", "[ai]", "[an]", "[ang]", "[ao]", "[b]", "[c]", "[ch]", "[d]", "[e]", "[ei]", "[en]", "[eng]", "[f]",
            "[g]", "[h]", "[i]", "[ian]", "[in]", "[ing]", "[iu]", "[ià]", "[iàn]", "[iàng]", "[iào]", "[iá]", "[ián]",
            "[iáng]", "[iáo]", "[iè]", "[ié]", "[iòng]", "[ióng]", "[iù]", "[iú]", "[iā]", "[iān]", "[iāng]", "[iāo]",
            "[iē]", "[iě]", "[iōng]", "[iū]", "[iǎ]", "[iǎn]", "[iǎng]", "[iǎo]", "[iǒng]", "[iǔ]", "[j]", "[k]", "[l]",
            "[m]", "[n]", "[o]", "[ong]", "[ou]", "[p]", "[q]", "[r]",
            "[s]", "[sh]", "[t]", "[u]", "[uang]", "[ue]",
            "[un]", "[uo]", "[uà]", "[uài]", "[uàn]", "[uàng]", "[uá]", "[uái]", "[uán]", "[uáng]", "[uè]", "[ué]", "[uì]",
            "[uí]", "[uò]", "[uó]", "[uā]", "[uāi]", "[uān]", "[uāng]", "[uē]", "[uě]", "[uī]", "[uō]", "[uǎ]", "[uǎi]",
            "[uǎn]", "[uǎng]", "[uǐ]", "[uǒ]", "[vè]", "[w]", "[x]", "[y]", "[z]", "[zh]", "[à]", "[ài]", "[àn]", "[àng]",
            "[ào]", "[á]", "[ái]", "[án]", "[áng]", "[áo]", "[è]", "[èi]", "[èn]", "[èng]", "[èr]", "[é]", "[éi]", "[én]",
            "[éng]", "[ér]", "[ì]", "[ìn]", "[ìng]", "[í]", "[ín]", "[íng]", "[ò]", "[òng]", "[òu]", "[ó]", "[óng]", "[óu]",
            "[ù]", "[ùn]", "[ú]", "[ún]", "[ā]", "[āi]", "[ān]", "[āng]", "[āo]", "[ē]", "[ēi]", "[ēn]", "[ēng]", "[ě]",
            "[ěi]", "[ěn]", "[ěng]", "[ěr]", "[ī]", "[īn]", "[īng]", "[ō]", "[ōng]", "[ōu]", "[ū]", "[ūn]", "[ǎ]", "[ǎi]",
            "[ǎn]", "[ǎng]", "[ǎo]", "[ǐ]", "[ǐn]", "[ǐng]", "[ǒ]", "[ǒng]", "[ǒu]", "[ǔ]", "[ǔn]", "[ǘ]", "[ǚ]", "[ǜ]"
        ]
    }
    tokenizer.add_special_tokens(special_tokens)
    text_vocab_size = len(tokenizer)
    logger.info(f"Tokenizer vocab after CosyVoice3 text special tokens: {text_vocab_size}")

    # Add speech tokens: <|s_0|>, <|s_1|>, ..., <|s_{embedding_size-1}|>
    # IMPORTANT: this range must match speech_embedding.num_embeddings
    # (it includes the speech special tokens).
    actual_speech_tokens = speech_token_size  # full embedding size (with speech special tokens)
    # Rename the four speech special token slots to readable names:
    #   <|s_6561|> -> <|sos|>,     <|s_6562|> -> <|eos1|>,
    #   <|s_6563|> -> <|task_id|>, <|s_6564|> -> <|fill|>
    speech_tokens = [f"<|s_{i}|>" for i in range(actual_speech_tokens)]
    speech_tokens[6561] = "<|sos|>"
    speech_tokens[6562] = "<|eos1|>"
    speech_tokens[6563] = "<|task_id|>"
    speech_tokens[6564] = "<|fill|>"
    assert "<|s_6561|>" not in speech_tokens
    assert "<|s_6562|>" not in speech_tokens
    assert "<|s_6563|>" not in speech_tokens
    assert "<|s_6564|>" not in speech_tokens
    tokenizer.add_tokens(speech_tokens)
    new_vocab_size = len(tokenizer)
    logger.info(f"New tokenizer vocab size: {new_vocab_size}")
    logger.info(f"Added {new_vocab_size - base_vocab_size} tokens total (text special + speech tokens)")
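    # Resulting token id layout (derived from the steps above):
    #   [0, base_vocab_size)                        original Qwen2 text tokens
    #   [base_vocab_size, text_vocab_size)          CosyVoice3 text special tokens
    #   [text_vocab_size, text_vocab_size + N)      speech tokens, N = speech_embedding rows
    #   [text_vocab_size + N, padded_vocab_size)    zero padding for TensorRT alignment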
    # 3. Resize the embeddings in the Qwen model.
    # Align the padded size to a multiple of 128 for TensorRT efficiency.
    padded_vocab_size = ((new_vocab_size + 127) // 128) * 128
    qwen_model.resize_token_embeddings(padded_vocab_size)
    logger.info(f"Resized embeddings to: {padded_vocab_size}")

    # Speech tokens start after the text vocab (base + CosyVoice3 text special tokens)
    speech_token_offset = text_vocab_size

    # 4. Copy speech_embedding into the extended embed_tokens
    input_embeddings = qwen_model.get_input_embeddings()
    hidden_size = input_embeddings.weight.shape[1]
    logger.info(f"Hidden size: {hidden_size}")
    logger.info(f"speech_embedding shape: {speech_embedding.weight.shape}")
    logger.info(f"llm_decoder shape: {llm_decoder.weight.shape}")

    with torch.no_grad():
        # Copy speech_embedding weights into embed_tokens at
        # indices [speech_token_offset, speech_token_offset + src_size)
        src_size = min(speech_embedding.weight.shape[0], actual_speech_tokens)
        input_embeddings.weight[speech_token_offset:speech_token_offset + src_size] = \
            speech_embedding.weight[:src_size].to(input_embeddings.weight.dtype)
    logger.info(f"Copied speech_embedding to embed_tokens[{speech_token_offset}:{speech_token_offset + src_size}]")
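    # Sanity check (added for safety): the first speech token must land exactly at
    # speech_token_offset so that embed_tokens rows line up with speech_embedding rows.
    assert tokenizer.convert_tokens_to_ids("<|s_0|>") == speech_token_offset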
    # 5. Create a new lm_head over the extended vocab and copy llm_decoder into it.
    #    Original lm_head: hidden_size -> original_vocab_size
    #    New lm_head:      hidden_size -> padded_vocab_size
    #    llm_decoder:      hidden_size -> speech_token_size
    has_bias = llm_decoder.bias is not None
    new_lm_head = torch.nn.Linear(
        in_features=hidden_size,
        out_features=padded_vocab_size,
        bias=has_bias
    )
    with torch.no_grad():
        # Initialize the weights:
        # - Text part: copied from the original lm_head (or zeros)
        # - Speech part: copied from llm_decoder
        # - Padding: zeros
        # Fill weights with zeros and the bias with -inf (so text tokens are not generated)
        new_lm_head.weight.data.zero_()
        if has_bias:
            new_lm_head.bias.data.fill_(-float('inf'))

        # Copy the original lm_head for text tokens (optional)
        original_lm_head = qwen_model.lm_head
        if original_lm_head is not None and original_lm_head.weight.shape[0] >= text_vocab_size:
            new_lm_head.weight[:text_vocab_size] = original_lm_head.weight[:text_vocab_size]
            if has_bias and original_lm_head.bias is not None:
                new_lm_head.bias[:text_vocab_size] = original_lm_head.bias[:text_vocab_size]

        # Copy llm_decoder for speech tokens. If llm_decoder has no bias, the new
        # lm_head was created without one, so there is nothing further to copy.
        decoder_size = min(llm_decoder.weight.shape[0], actual_speech_tokens)
        new_lm_head.weight[speech_token_offset:speech_token_offset + decoder_size] = \
            llm_decoder.weight[:decoder_size].to(new_lm_head.weight.dtype)
        if has_bias:
            new_lm_head.bias[speech_token_offset:speech_token_offset + decoder_size] = \
                llm_decoder.bias[:decoder_size].to(new_lm_head.bias.dtype)

    # Replace lm_head
    qwen_model.lm_head = new_lm_head
    logger.info(f"Created new lm_head with shape: {new_lm_head.weight.shape}")
    logger.info(f"Copied llm_decoder to lm_head[{speech_token_offset}:{speech_token_offset + decoder_size}]")
    # 6. Update the model configuration
    qwen_model.config.vocab_size = padded_vocab_size
    qwen_model.config.tie_word_embeddings = False  # embeddings and lm_head now differ!

    # Set the EOS token for generation. The speech EOS occupies slot
    # base_speech_token_size + 1 of speech_embedding (renamed <|eos1|> above).
    base_speech_token_size = getattr(cosyvoice3_llm, "speech_token_size", 6561)
    eos_speech_idx = base_speech_token_size + 1
    eos_id = speech_token_offset + eos_speech_idx
    qwen_model.config.eos_token_id = eos_id

    # Generation settings
    qwen_model.generation_config.eos_token_id = eos_id
    qwen_model.generation_config.pad_token_id = eos_id
    qwen_model.generation_config.temperature = 0.8
    qwen_model.generation_config.top_p = 0.95
    qwen_model.generation_config.top_k = 25
    qwen_model.generation_config.repetition_penalty = 1.1
    qwen_model.generation_config.max_new_tokens = 2048
    # 7. Convert to the target dtype
    dtype_map = {
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "float32": torch.float32,
    }
    target_dtype = dtype_map[dtype]
    qwen_model.to(target_dtype)
    # 8. Save the model and tokenizer
    os.makedirs(output_dir, exist_ok=True)
    qwen_model.save_pretrained(output_dir)
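    # The chat template below renders user content as "<|sos|>" + text + "<|task_id|>"
    # and emits assistant content verbatim, so apply_chat_template() can build prompts
    # directly on the merged vocab (intended to match the CosyVoice3 LM prompt layout).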
    TEMPLATE = "{%- for message in messages %}{%- if message['role'] == 'user' %}{{- '<|sos|>' + message['content'] + '<|task_id|>' }}{%- elif message['role'] == 'assistant' %}{{- message['content']}}{%- endif %}{%- endfor %}"
    tokenizer.chat_template = TEMPLATE
    tokenizer.save_pretrained(output_dir)
    # Save metadata for TRT-LLM inference
    metadata = {
        "original_vocab_size": base_vocab_size,
        "text_vocab_size": text_vocab_size,
        "base_speech_token_size": base_speech_token_size,
        "embedding_size": actual_speech_tokens,
        "padded_vocab_size": padded_vocab_size,
        "eos_token_id": eos_id,
        "speech_token_offset": speech_token_offset,
        "dtype": dtype,
    }
    with open(os.path.join(output_dir, "cosyvoice3_metadata.json"), "w") as f:
        json.dump(metadata, f, indent=2)

    logger.info(f"Saved HuggingFace model to {output_dir}")
    logger.info(f"Metadata: {metadata}")
    return output_dir, metadata
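

# Illustrative only (not used by the conversion itself): how downstream inference
# code might map ids generated by the merged model back to raw CosyVoice3 speech
# token ids, using the metadata saved above. The helper name is hypothetical.
def hf_id_to_speech_token_id(token_id: int, metadata: dict) -> int:
    """Map a merged-vocab token id back to a CosyVoice3 speech token id."""
    offset = metadata["speech_token_offset"]
    assert token_id >= offset, "not a speech token id"
    return token_id - offset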


def main():
    args = parse_args()
    output_dir = args.output_dir
    if output_dir is None:
        output_dir = os.path.join(args.model_dir, "hf_merged")
    convert_cosyvoice3_to_hf(
        model_dir=args.model_dir,
        output_dir=output_dir,
        dtype=args.dtype,
    )
    print("\n" + "=" * 70)
    print("✅ Conversion complete!")
    print("=" * 70)
    print(f"\nHuggingFace model saved to: {output_dir}")
    print("\nNext steps:")
    print("1. Convert to TRT-LLM weights:")
    print("   python -c \"from tensorrt_llm.models import QWenForCausalLM; ...\"")
    print("\n2. Build TRT-LLM engines:")
    print("   trtllm-build --checkpoint_dir <trt_weights_dir> --output_dir <trt_engines_dir> ...")
    print("=" * 70)


if __name__ == "__main__":
    main()