model.py

# Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import threading
import time

import numpy as np
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

import triton_python_backend_utils as pb_utils
import torchaudio
from transformers import AutoTokenizer
from matcha.utils.audio import mel_spectrogram

# Offset added to speech token IDs so they do not collide with the
# LLM's text vocabulary.
ORIGINAL_VOCAB_SIZE = 151663

# Limit intra-op CPU threads for this Python backend process.
torch.set_num_threads(1)


class TritonPythonModel:
    """Triton Python model for Spark TTS.

    This model orchestrates the end-to-end TTS pipeline by coordinating
    between audio tokenizer, LLM, and vocoder components.
    """

    def initialize(self, args):
        """Initialize the model.

        Args:
            args: Dictionary containing model configuration
        """
        self.logger = pb_utils.Logger

        # Parse model parameters
        self.model_config = json.loads(args['model_config'])
        parameters = self.model_config['parameters']
        model_params = {k: v["string_value"] for k, v in parameters.items()}
        self.logger.log_info(f"model_params: {model_params}")
        self.dynamic_chunk_strategy = model_params.get("dynamic_chunk_strategy", "exponential")  # "exponential" or "time_based"
        self.logger.log_info(f"Using dynamic chunk strategy: {self.dynamic_chunk_strategy}")

        # Initialize tokenizer
        llm_tokenizer_dir = model_params["llm_tokenizer_dir"]
        self.tokenizer = AutoTokenizer.from_pretrained(llm_tokenizer_dir)
        self.prompt_template = "<|sos|>{input_text}<|task_id|>"
        self.eos_token_id = self.tokenizer.convert_tokens_to_ids("<|eos1|>")

        self.device = torch.device("cuda")
        self.decoupled = pb_utils.using_decoupled_model_transaction_policy(self.model_config)

        # Streaming constants: speech tokens are produced at 25 per second of
        # audio; each streamed chunk consumes `token_hop_len` tokens plus
        # `flow_pre_lookahead_len` lookahead tokens for the flow model.
        self.token_frame_rate = 25
        self.flow_pre_lookahead_len = 3
        self.token_hop_len = 15

        spk_info_path = os.path.join(model_params["model_dir"], "spk2info.pt")
        if not os.path.exists(spk_info_path):
            raise ValueError(f"spk2info.pt not found in {model_params['model_dir']}")
        spk_info = torch.load(spk_info_path, map_location="cpu", weights_only=False)
        self.default_spk_info = spk_info["001"]
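
    # Assumed layout of spk2info.pt (inferred from the lookups in execute()):
    # a dict mapping speaker IDs (e.g. "001") to per-speaker dicts providing
    # at least "prompt_text" (str) and "speech_token" (int tensor of speech
    # token IDs without the ORIGINAL_VOCAB_SIZE offset).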

    def forward_llm(self, input_ids):
        """Forward pass through the TensorRT-LLM component.

        Builds a `pb_utils.InferenceRequest` from `input_ids` plus fixed
        sampling parameters and sends it to the `tensorrt_llm` model.

        For each response from the language model:
        - Checks for errors and raises an exception if any are found.
        - Extracts the "output_ids" and "sequence_length" tensors.
        - Yields the generated token IDs truncated to the reported
          sequence length.

        In decoupled (streaming) mode this yields one array of token IDs per
        partial response; otherwise it yields the full sequence exactly once.

        Parameters
        ----------
        input_ids : torch.Tensor
            Prompt token IDs with shape [1, sequence_length].

        Yields
        ------
        np.ndarray
            Generated token IDs for one response.
        """
        # Convert input_ids to numpy, with shape [1, sequence_length]
        input_ids = input_ids.cpu().numpy()
        max_tokens = 750
        input_dict = {
            "request_output_len": np.array([[max_tokens]], dtype=np.int32),
            "end_id": np.array([[self.eos_token_id]], dtype=np.int32),
            "pad_id": np.array([[self.eos_token_id]], dtype=np.int32),
            "streaming": np.array([[self.decoupled]], dtype=np.bool_),
            "runtime_top_p": np.array([[0.95]], dtype=np.float32),
            "runtime_top_k": np.array([[50]], dtype=np.int32),
            "temperature": np.array([[0.8]], dtype=np.float32),
            "repetition_penalty": np.array([[1.1]], dtype=np.float32),
            "random_seed": np.array([[42]], dtype=np.uint64),
            "input_ids": input_ids,
            "input_lengths": np.array([[input_ids.shape[1]]], dtype=np.int32),
        }

        # Convert inputs to Triton tensors
        input_tensor_list = [pb_utils.Tensor(k, v) for k, v in input_dict.items()]

        # Create and execute inference request
        llm_request = pb_utils.InferenceRequest(
            model_name="tensorrt_llm",
            requested_output_names=["output_ids", "sequence_length"],
            inputs=input_tensor_list,
        )
        llm_responses = llm_request.exec(decoupled=self.decoupled)
        if self.decoupled:
            for llm_response in llm_responses:
                if llm_response.has_error():
                    raise pb_utils.TritonModelException(llm_response.error().message())

                # Extract and process output
                output_ids = pb_utils.get_output_tensor_by_name(
                    llm_response, "output_ids").as_numpy()
                seq_lens = pb_utils.get_output_tensor_by_name(
                    llm_response, "sequence_length").as_numpy()

                # Get actual output IDs up to the sequence length
                actual_output_ids = output_ids[0][0][:seq_lens[0][0]]
                yield actual_output_ids
        else:
            llm_response = llm_responses
            if llm_response.has_error():
                raise pb_utils.TritonModelException(llm_response.error().message())

            # Extract and process output
            output_ids = pb_utils.get_output_tensor_by_name(
                llm_response, "output_ids").as_numpy()
            seq_lens = pb_utils.get_output_tensor_by_name(
                llm_response, "sequence_length").as_numpy()

            # Get actual output IDs up to the sequence length
            actual_output_ids = output_ids[0][0][:seq_lens[0][0]]
            yield actual_output_ids
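
    # `forward_llm` returns a generator in both modes: execute() drains it on
    # a background thread when decoupled (see `_llm_gen_thread`), or with a
    # single next() call when the full sequence is returned at once.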

    def forward_audio_tokenizer(self, wav, wav_len):
        """Forward pass through the audio tokenizer component.

        Args:
            wav: Input waveform tensor
            wav_len: Waveform length tensor

        Returns:
            Prompt speech tokens tensor (on CPU)
        """
        inference_request = pb_utils.InferenceRequest(
            model_name='audio_tokenizer',
            requested_output_names=['prompt_speech_tokens'],
            inputs=[wav, wav_len]
        )
        inference_response = inference_request.exec()
        if inference_response.has_error():
            raise pb_utils.TritonModelException(inference_response.error().message())

        # Extract and convert output tensors
        prompt_speech_tokens = pb_utils.get_output_tensor_by_name(inference_response, 'prompt_speech_tokens')
        prompt_speech_tokens = from_dlpack(prompt_speech_tokens.to_dlpack()).cpu()
        return prompt_speech_tokens

    def forward_speaker_embedding(self, wav):
        """Forward pass through the speaker embedding component.

        Args:
            wav: Input waveform tensor

        Returns:
            Prompt speaker embedding tensor
        """
        inference_request = pb_utils.InferenceRequest(
            model_name='speaker_embedding',
            requested_output_names=['prompt_spk_embedding'],
            inputs=[pb_utils.Tensor.from_dlpack("reference_wav", to_dlpack(wav))]
        )
        inference_response = inference_request.exec()
        if inference_response.has_error():
            raise pb_utils.TritonModelException(inference_response.error().message())

        # Extract and convert output tensors
        prompt_spk_embedding = pb_utils.get_output_tensor_by_name(inference_response, 'prompt_spk_embedding')
        prompt_spk_embedding = from_dlpack(prompt_spk_embedding.to_dlpack())
        return prompt_spk_embedding
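
    # Note: execute() feeds the original 16 kHz reference waveform into
    # speaker_embedding, while the mel features for token2wav are computed
    # from a 24 kHz resample of the same audio.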

    def forward_token2wav(
            self,
            target_speech_tokens: torch.Tensor,
            request_id: str,
            prompt_speech_tokens: torch.Tensor = None,
            prompt_speech_feat: torch.Tensor = None,
            prompt_spk_embedding: torch.Tensor = None,
            token_offset: int = None,
            finalize: bool = None) -> torch.Tensor:
        """Forward pass through the vocoder component.

        Args:
            target_speech_tokens: Target speech tokens tensor
            request_id: Triton request ID, forwarded to token2wav
            prompt_speech_tokens: Prompt speech tokens tensor
            prompt_speech_feat: Prompt speech feature tensor
            prompt_spk_embedding: Prompt speaker embedding tensor
            token_offset: Number of tokens already vocoded in earlier chunks
                (streaming only)
            finalize: Whether this is the last chunk of the request
                (streaming only)

        Returns:
            Generated waveform tensor (on CPU)
        """
        target_speech_tokens_tensor = pb_utils.Tensor.from_dlpack("target_speech_tokens", to_dlpack(target_speech_tokens))
        inputs_tensor = [target_speech_tokens_tensor]

        # token_offset and finalize travel together in streaming mode.
        if token_offset is not None:
            assert finalize is not None
            token_offset_tensor = pb_utils.Tensor("token_offset", np.array([[token_offset]], dtype=np.int32))
            finalize_tensor = pb_utils.Tensor("finalize", np.array([[finalize]], dtype=np.bool_))
            inputs_tensor.append(token_offset_tensor)
            inputs_tensor.append(finalize_tensor)

        # Prompt conditioning tensors are only sent when a reference audio was
        # provided; otherwise token2wav is expected to use its own defaults.
        if prompt_spk_embedding is not None:
            assert prompt_speech_feat is not None
            prompt_speech_tokens_tensor = pb_utils.Tensor.from_dlpack("prompt_speech_tokens", to_dlpack(prompt_speech_tokens))
            prompt_speech_feat_tensor = pb_utils.Tensor.from_dlpack("prompt_speech_feat", to_dlpack(prompt_speech_feat))
            prompt_spk_embedding_tensor = pb_utils.Tensor.from_dlpack("prompt_spk_embedding", to_dlpack(prompt_spk_embedding))
            inputs_tensor.extend([prompt_speech_tokens_tensor, prompt_speech_feat_tensor, prompt_spk_embedding_tensor])

        # Create and execute inference request
        inference_request = pb_utils.InferenceRequest(
            model_name='token2wav',
            requested_output_names=['waveform'],
            inputs=inputs_tensor,
            request_id=request_id,
        )
        inference_response = inference_request.exec()
        if inference_response.has_error():
            raise pb_utils.TritonModelException(inference_response.error().message())

        # Extract and convert output waveform
        waveform = pb_utils.get_output_tensor_by_name(inference_response, 'waveform')
        waveform = from_dlpack(waveform.to_dlpack()).cpu()
        return waveform
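
    # In streaming mode execute() calls this repeatedly with a growing token
    # prefix and an increasing token_offset; the request_id lets the token2wav
    # model tie those chunked calls to one request, presumably to cache its
    # incremental vocoding state between chunks.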

    def parse_input(self, text, prompt_text, prompt_speech_tokens):
        """Build the LLM input IDs: the templated text prompt followed by the
        prompt speech tokens."""
        total_text = f"{prompt_text}{text}"
        prompt = self.prompt_template.format(input_text=total_text)
        input_ids = self.tokenizer.encode(prompt)
        input_ids = torch.tensor([input_ids], dtype=torch.int32)
        input_ids = torch.cat([input_ids, prompt_speech_tokens], dim=1)
        return input_ids
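
    # Resulting sequence layout (illustrative):
    #   <|sos|>{prompt_text}{target_text}<|task_id|><prompt speech token IDs>
    # The LLM then continues the sequence with speech tokens for the target
    # text, which are decoded to audio by token2wav.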

    def _extract_speech_feat(self, speech):
        speech_feat = mel_spectrogram(
            speech,
            n_fft=1920,
            num_mels=80,
            sampling_rate=24000,
            hop_size=480,
            win_size=1920,
            fmin=0,
            fmax=8000,
        ).squeeze(dim=0).transpose(0, 1).to(self.device)
        speech_feat = speech_feat.unsqueeze(dim=0)
        return speech_feat
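
    # Frame-rate arithmetic: hop_size=480 at 24 kHz gives 50 mel frames per
    # second, i.e. 2 mel frames per speech token at 25 tokens/s. This is why
    # execute() pairs `token_len` tokens with `2 * token_len` feature frames.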

    def _llm_gen_thread(self, generated_ids_iter, semantic_token_ids_arr, llm_is_done_flag):
        """Drain the LLM generator on a background thread, appending tokens to
        the shared list and raising the done flag when generation ends."""
        for generated_ids in generated_ids_iter:
            generated_ids = generated_ids.tolist()
            if len(generated_ids) == 0:
                break
            semantic_token_ids_arr.extend(generated_ids)
        llm_is_done_flag[0] = True
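
    # Streaming chunk scheduling, a sketch of the two strategies used below:
    #
    # "exponential": the first chunk uses token_hop_len (15 tokens, i.e. 0.6 s
    # of audio at 25 tokens/s); subsequent hops grow as
    # token_frame_rate * 2**chunk_index, giving 15, 25, 50, 100, 200, ...
    # tokens, so first-chunk latency stays low and throughput improves later.
    #
    # "time_based" (after https://github.com/qi-hua/async_cosyvoice/blob/main/model.py#L306):
    # compares the audio duration already emitted with the wall-clock time
    # spent. `multiples` estimates how many average chunk-processing times of
    # audio lead we have; with a comfortable lead (> 4) the next hop takes all
    # pending tokens rounded up to a multiple of token_hop_len, with a smaller
    # lead (> 2) it rounds down, and otherwise it falls back to token_hop_len.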

    def execute(self, requests):
        """Execute inference on the batched requests.

        Args:
            requests: List of inference requests

        Returns:
            List of inference responses containing generated audio
        """
        responses = []

        for request in requests:
            request_id = request.request_id()
            # Extract input tensors
            wav = pb_utils.get_input_tensor_by_name(request, "reference_wav")

            # Process reference audio through audio tokenizer
            if wav is not None:
                wav_len = pb_utils.get_input_tensor_by_name(request, "reference_wav_len")

                prompt_speech_tokens = self.forward_audio_tokenizer(wav, wav_len)
                prompt_speech_tokens = prompt_speech_tokens.unsqueeze(0)

                wav_tensor = wav.as_numpy()
                wav_tensor = torch.from_numpy(wav_tensor)[:, :wav_len.as_numpy()[0][0]]
                prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=24000)(wav_tensor)
                speech_feat = self._extract_speech_feat(prompt_speech_resample)

                # Align tokens and features: 2 mel frames per speech token.
                token_len = min(int(speech_feat.shape[1] / 2), prompt_speech_tokens.shape[-1])
                prompt_speech_feat = speech_feat[:, :2 * token_len].contiguous().half()
                prompt_speech_tokens = prompt_speech_tokens[:, :token_len].contiguous()

                reference_text = pb_utils.get_input_tensor_by_name(request, "reference_text").as_numpy()
                reference_text = reference_text[0][0].decode('utf-8')

                prompt_spk_embedding = self.forward_speaker_embedding(wav_tensor)
            else:
                # Use the pre-cached default speaker prompt
                reference_text = self.default_spk_info["prompt_text"]
                prompt_speech_tokens = self.default_spk_info["speech_token"] + ORIGINAL_VOCAB_SIZE
                prompt_speech_feat = None
                prompt_spk_embedding = None

            target_text = pb_utils.get_input_tensor_by_name(request, "target_text").as_numpy()
            target_text = target_text[0][0].decode('utf-8')

            # Prepare prompt for LLM
            input_ids = self.parse_input(
                text=target_text,
                prompt_text=reference_text,
                prompt_speech_tokens=prompt_speech_tokens,
            )

            # Generate semantic tokens with LLM
            generated_ids_iter = self.forward_llm(input_ids)

            if self.decoupled:
                response_sender = request.get_response_sender()

                # Collect LLM tokens on a background thread while this thread
                # vocodes and streams audio chunks.
                semantic_token_ids_arr = []
                llm_is_done_flag = [False]
                llm_thread = threading.Thread(
                    target=self._llm_gen_thread,
                    args=(generated_ids_iter, semantic_token_ids_arr, llm_is_done_flag)
                )
                llm_thread.start()

                token_offset, chunk_index = 0, 0
                start_time = time.time()
                this_token_hop_len = self.token_hop_len

                while True:
                    pending_num = len(semantic_token_ids_arr) - token_offset

                    if llm_is_done_flag[0]:
                        break

                    if pending_num >= this_token_hop_len + self.flow_pre_lookahead_len:
                        this_tts_speech_token = semantic_token_ids_arr[:token_offset + this_token_hop_len + self.flow_pre_lookahead_len]
                        this_tts_speech_token = torch.tensor(this_tts_speech_token).unsqueeze(dim=0).to(torch.int32).to(self.device)

                        sub_tts_speech = self.forward_token2wav(
                            this_tts_speech_token, request_id, prompt_speech_tokens,
                            prompt_speech_feat, prompt_spk_embedding, token_offset, False
                        )

                        audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(sub_tts_speech))
                        inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                        response_sender.send(inference_response)

                        token_offset += this_token_hop_len
                        self.logger.log_info(f"chunk_index: {chunk_index}, current_token_hop_len: {this_token_hop_len}")

                        # Pick the next chunk size (see the strategy notes above execute()).
                        if self.dynamic_chunk_strategy == "exponential":
                            this_token_hop_len = self.token_frame_rate * (2 ** chunk_index)
                        elif self.dynamic_chunk_strategy == "time_based":
                            # see https://github.com/qi-hua/async_cosyvoice/blob/main/model.py#L306
                            cost_time = time.time() - start_time
                            duration = token_offset / self.token_frame_rate
                            if chunk_index > 0 and cost_time > 0:
                                avg_chunk_processing_time = cost_time / (chunk_index + 1)
                                if avg_chunk_processing_time > 0:
                                    multiples = (duration - cost_time) / avg_chunk_processing_time
                                    self.logger.log_info(f"multiples: {multiples}")
                                    next_pending_num = len(semantic_token_ids_arr) - token_offset
                                    if multiples > 4:
                                        this_token_hop_len = (next_pending_num // self.token_hop_len + 1) * self.token_hop_len
                                    elif multiples > 2:
                                        this_token_hop_len = (next_pending_num // self.token_hop_len) * self.token_hop_len
                                    else:
                                        this_token_hop_len = self.token_hop_len
                                    this_token_hop_len = max(self.token_hop_len, this_token_hop_len)
                        chunk_index += 1
                    else:
                        time.sleep(0.02)

                # Final chunk: flush all remaining tokens with finalize=True.
                this_tts_speech_token = torch.tensor(semantic_token_ids_arr).unsqueeze(dim=0).to(torch.int32).to(self.device)
                sub_tts_speech = self.forward_token2wav(this_tts_speech_token, request_id, prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding, token_offset, True)
                audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(sub_tts_speech))
                inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                response_sender.send(inference_response)

                llm_thread.join()
                response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
                self.logger.log_info("send TRITONSERVER_RESPONSE_COMPLETE_FINAL to end")
            else:
                generated_ids = next(generated_ids_iter)
                # Validate before converting: after unsqueeze(0) the length
                # check would always see 1, so check the raw output first.
                if generated_ids is None or len(generated_ids) == 0:
                    raise pb_utils.TritonModelException("Generated IDs is None or empty")
                generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(self.device)

                audio = self.forward_token2wav(generated_ids, request_id, prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding)

                # Prepare response
                audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio))
                inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                responses.append(inference_response)

        if not self.decoupled:
            return responses