model.py

# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#               2025 Alibaba Inc (authors: Xiang Lyu, Bofan Zhou)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Generator
import torch
import numpy as np
import threading
import time
from torch.nn import functional as F
from contextlib import nullcontext
import uuid
from cosyvoice.utils.common import fade_in_out, TrtContextWrapper
from cosyvoice.utils.file_utils import convert_onnx_to_trt, export_cosyvoice2_vllm


class CosyVoiceModel:

    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module,
                 fp16: bool = False,
                 trt_concurrent: int = 1):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift
        self.fp16 = fp16
        self.trt_concurrent = trt_concurrent
        if self.fp16 is True:
            self.llm.half()
            self.flow.half()
        self.token_min_hop_len = 2 * self.flow.input_frame_rate
        self.token_max_hop_len = 4 * self.flow.input_frame_rate
        self.token_overlap_len = 20
        # mel fade in/out
        self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
        self.mel_window = np.hamming(2 * self.mel_overlap_len)
        # hift cache
        self.mel_cache_len = 20
        self.source_cache_len = int(self.mel_cache_len * 256)
        # speech fade in/out
        self.speech_window = np.hamming(2 * self.source_cache_len)
        # rtf and decoding related
        self.stream_scale_factor = 1
        assert self.stream_scale_factor >= 1, 'stream_scale_factor should be no less than 1, change it according to your actual rtf'
        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
        self.lock = threading.Lock()
        # dicts used to store per-session variables
        self.tts_speech_token_dict = {}
        self.llm_end_dict = {}
        self.mel_overlap_dict = {}
        self.flow_cache_dict = {}
        self.hift_cache_dict = {}
    def load(self, llm_model, flow_model, hift_model):
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=True)
        self.llm.to(self.device).eval()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
        self.flow.to(self.device).eval()
        # in case hift_model is a hifigan model
        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
        self.hift.load_state_dict(hift_state_dict, strict=True)
        self.hift.to(self.device).eval()
    def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
        llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
        self.llm.text_encoder = llm_text_encoder
        llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
        self.llm.llm = llm_llm
        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
        self.flow.encoder = flow_encoder
    def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, fp16):
        assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
        if not os.path.exists(flow_decoder_estimator_model) or os.path.getsize(flow_decoder_estimator_model) == 0:
            convert_onnx_to_trt(flow_decoder_estimator_model, self.get_trt_kwargs(), flow_decoder_onnx_model, fp16)
        del self.flow.decoder.estimator
        import tensorrt as trt
        with open(flow_decoder_estimator_model, 'rb') as f:
            estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
        assert estimator_engine is not None, 'failed to load trt {}'.format(flow_decoder_estimator_model)
        self.flow.decoder.estimator = TrtContextWrapper(estimator_engine, trt_concurrent=self.trt_concurrent, device=self.device)
    def get_trt_kwargs(self):
        min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4)]
        opt_shape = [(2, 80, 500), (2, 1, 500), (2, 80, 500), (2, 80, 500)]
        max_shape = [(2, 80, 3000), (2, 1, 3000), (2, 80, 3000), (2, 80, 3000)]
        input_names = ["x", "mask", "mu", "cond"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}
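    # Reading of the profile above (an editor's interpretation, not part of the original source):
    # the tuples describe the TensorRT dynamic-shape profile for the four estimator inputs
    # (x, mask, mu, cond). Batch appears fixed at 2 because the flow decoder batches the
    # conditional and unconditional passes of classifier-free guidance together; the feature
    # dim is 80 mel bins (1 for the mask); and the mel frame axis may range from 4 to 3000
    # frames, with 500 as the optimization target.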
    def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
        with self.llm_context, torch.cuda.amp.autocast(self.fp16):
            if isinstance(text, Generator):
                assert isinstance(self, CosyVoice2Model), 'streaming input text is only implemented for CosyVoice2!'
                for i in self.llm.inference_bistream(text=text,
                                                     prompt_text=prompt_text.to(self.device),
                                                     prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
                                                     prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                                     prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
                                                     embedding=llm_embedding.to(self.device)):
                    self.tts_speech_token_dict[uuid].append(i)
            else:
                for i in self.llm.inference(text=text.to(self.device),
                                            text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
                                            prompt_text=prompt_text.to(self.device),
                                            prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
                                            prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                            prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
                                            embedding=llm_embedding.to(self.device),
                                            uuid=uuid):
                    self.tts_speech_token_dict[uuid].append(i)
        self.llm_end_dict[uuid] = True
    def vc_job(self, source_speech_token, uuid):
        self.tts_speech_token_dict[uuid] = source_speech_token.flatten().tolist()
        self.llm_end_dict[uuid] = True
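    # How the pieces fit together (editor's note): llm_job and vc_job are token producers that
    # run on a background thread and append speech tokens to tts_speech_token_dict[uuid], while
    # tts() below consumes those tokens and vocodes them chunk by chunk via token2wav;
    # llm_end_dict[uuid] signals that production has finished.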
    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
        with torch.cuda.amp.autocast(self.fp16):
            tts_mel, self.flow_cache_dict[uuid] = self.flow.inference(token=token.to(self.device),
                                                                      token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                                                      prompt_token=prompt_token.to(self.device),
                                                                      prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                                                      prompt_feat=prompt_feat.to(self.device),
                                                                      prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                                                      embedding=embedding.to(self.device),
                                                                      flow_cache=self.flow_cache_dict[uuid])
        # mel overlap fade in/out
        if self.mel_overlap_dict[uuid].shape[2] != 0:
            tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
        # append hift cache
        if self.hift_cache_dict[uuid] is not None:
            hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
            tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
        else:
            hift_cache_source = torch.zeros(1, 1, 0)
        # keep overlap mel and hift cache
        if finalize is False:
            self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
            tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
            self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
                                          'source': tts_source[:, :, -self.source_cache_len:],
                                          'speech': tts_speech[:, -self.source_cache_len:]}
            tts_speech = tts_speech[:, :-self.source_cache_len]
        else:
            if speed != 1.0:
                assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
                tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
        return tts_speech
    def tts(self, text=torch.zeros(1, 0, dtype=torch.int32), flow_embedding=torch.zeros(0, 192), llm_embedding=torch.zeros(0, 192),
            prompt_text=torch.zeros(1, 0, dtype=torch.int32),
            llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            prompt_speech_feat=torch.zeros(1, 0, 80), source_speech_token=torch.zeros(1, 0, dtype=torch.int32), stream=False, speed=1.0, **kwargs):
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
            self.hift_cache_dict[this_uuid] = None
            self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
            self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
        if source_speech_token.shape[1] == 0:
            p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
        else:
            p = threading.Thread(target=self.vc_job, args=(source_speech_token, this_uuid))
        p.start()
        if stream is True:
            token_hop_len = self.token_min_hop_len
            while True:
                time.sleep(0.1)
                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
                        .unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
                    # increase token_hop_len for better speech quality
                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
                    break
            p.join()
            # deal with remaining tokens; make sure the remaining token length equals token_hop_len when cache_speech is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens
            p.join()
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.mel_overlap_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
            self.flow_cache_dict.pop(this_uuid)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.current_stream().synchronize()
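
# A minimal usage sketch (editor's illustration; the checkpoint paths, the `handle` callback,
# and the input tensors are hypothetical — in practice the CosyVoice frontend builds the
# token/embedding inputs from text and a prompt wav):
#
#     model = CosyVoiceModel(llm, flow, hift, fp16=False)
#     model.load('pretrained_models/CosyVoice-300M/llm.pt',
#                'pretrained_models/CosyVoice-300M/flow.pt',
#                'pretrained_models/CosyVoice-300M/hift.pt')
#     for chunk in model.tts(text=text_token, flow_embedding=spk_embedding,
#                            llm_embedding=spk_embedding, stream=True):
#         handle(chunk['tts_speech'])  # each chunk is a (1, num_samples) waveform tensor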


class CosyVoice2Model(CosyVoiceModel):

    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module,
                 fp16: bool = False,
                 trt_concurrent: int = 1):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift
        self.fp16 = fp16
        self.trt_concurrent = trt_concurrent
        if self.fp16 is True:
            self.llm.half()
            self.flow.half()
        # NOTE: must match the static_chunk_size used in training
        self.token_hop_len = 25
        # hift cache
        self.mel_cache_len = 8
        self.source_cache_len = int(self.mel_cache_len * 480)
        # speech fade in/out
        self.speech_window = np.hamming(2 * self.source_cache_len)
        # rtf and decoding related
        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
        self.lock = threading.Lock()
        # dicts used to store per-session variables
        self.tts_speech_token_dict = {}
        self.llm_end_dict = {}
        self.hift_cache_dict = {}
    def load_jit(self, flow_encoder_model):
        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
        self.flow.encoder = flow_encoder
    def load_vllm(self, model_dir):
        export_cosyvoice2_vllm(self.llm, model_dir, self.device)
        from vllm import EngineArgs, LLMEngine
        engine_args = EngineArgs(model=model_dir,
                                 skip_tokenizer_init=True,
                                 enable_prompt_embeds=True,
                                 gpu_memory_utilization=0.2)
        self.llm.vllm = LLMEngine.from_engine_args(engine_args)
        # vllm now serves the transformer layers, so drop the original copies to save memory
        del self.llm.llm.model.model.layers
    def token2wav(self, token, prompt_token, prompt_feat, embedding, token_offset, uuid, stream=False, finalize=False, speed=1.0):
        with torch.cuda.amp.autocast(self.fp16):
            tts_mel, _ = self.flow.inference(token=token.to(self.device),
                                             token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                             prompt_token=prompt_token.to(self.device),
                                             prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                             prompt_feat=prompt_feat.to(self.device),
                                             prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                             embedding=embedding.to(self.device),
                                             streaming=stream,
                                             finalize=finalize)
        tts_mel = tts_mel[:, :, token_offset * self.flow.token_mel_ratio:]
        # append hift cache
        if self.hift_cache_dict[uuid] is not None:
            hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
            tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
        else:
            hift_cache_source = torch.zeros(1, 1, 0)
        # keep overlap mel and hift cache
        if finalize is False:
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
            self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
                                          'source': tts_source[:, :, -self.source_cache_len:],
                                          'speech': tts_speech[:, -self.source_cache_len:]}
            tts_speech = tts_speech[:, :-self.source_cache_len]
        else:
            if speed != 1.0:
                assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
                tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
        return tts_speech
    def tts(self, text=torch.zeros(1, 0, dtype=torch.int32), flow_embedding=torch.zeros(0, 192), llm_embedding=torch.zeros(0, 192),
            prompt_text=torch.zeros(1, 0, dtype=torch.int32),
            llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            prompt_speech_feat=torch.zeros(1, 0, 80), source_speech_token=torch.zeros(1, 0, dtype=torch.int32), stream=False, speed=1.0, **kwargs):
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
            self.hift_cache_dict[this_uuid] = None
        if source_speech_token.shape[1] == 0:
            p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
        else:
            p = threading.Thread(target=self.vc_job, args=(source_speech_token, this_uuid))
        p.start()
        if stream is True:
            token_offset = 0
            # pad the first hop so that chunk boundaries stay aligned with token_hop_len after the prompt
            prompt_token_pad = int(np.ceil(flow_prompt_speech_token.shape[1] / self.token_hop_len) * self.token_hop_len - flow_prompt_speech_token.shape[1])
            while True:
                time.sleep(0.1)
                this_token_hop_len = self.token_hop_len + prompt_token_pad if token_offset == 0 else self.token_hop_len
                if len(self.tts_speech_token_dict[this_uuid]) - token_offset >= this_token_hop_len + self.flow.pre_lookahead_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_offset + this_token_hop_len + self.flow.pre_lookahead_len]).unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     token_offset=token_offset,
                                                     uuid=this_uuid,
                                                     stream=stream,
                                                     finalize=False)
                    token_offset += this_token_hop_len
                    yield {'tts_speech': this_tts_speech.cpu()}
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) - token_offset < this_token_hop_len + self.flow.pre_lookahead_len:
                    break
            p.join()
            # deal with remaining tokens; make sure the remaining token length equals token_hop_len when cache_speech is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             token_offset=token_offset,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens
            p.join()
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             token_offset=0,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.current_stream().synchronize()
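
# A minimal usage sketch for CosyVoice2Model (editor's illustration; the paths and file names
# are hypothetical, llm/flow/hift are the modules built from the repo's yaml config, and the
# frontend producing the input tensors is not shown):
#
#     model = CosyVoice2Model(llm, flow, hift, fp16=True)
#     model.load('pretrained_models/CosyVoice2-0.5B/llm.pt',
#                'pretrained_models/CosyVoice2-0.5B/flow.pt',
#                'pretrained_models/CosyVoice2-0.5B/hift.pt')
#     # optionally replace the flow decoder estimator with a TensorRT engine:
#     # model.load_trt('flow.decoder.estimator.fp16.plan', 'flow.decoder.estimator.fp32.onnx', fp16=True)
#     for chunk in model.tts(text=text_token, flow_embedding=spk_embedding,
#                            llm_embedding=spk_embedding, stream=True):
#         handle(chunk['tts_speech'])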