# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import threading
import time
from torch.nn import functional as F
from contextlib import nullcontext
import uuid
from cosyvoice.utils.common import fade_in_out


class CosyVoiceModel:
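    """Three-stage streaming TTS pipeline: the LLM predicts discrete speech
    tokens, the flow-matching model converts tokens to mel spectrograms, and
    the HiFT vocoder converts mel to waveform. Per-session state keyed by a
    uuid enables chunked (streaming) synthesis with overlap fade-in/fade-out.
    """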
    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module,
                 fp16: bool):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift
        self.fp16 = fp16
        self.token_min_hop_len = 2 * self.flow.input_frame_rate
        self.token_max_hop_len = 4 * self.flow.input_frame_rate
        self.token_overlap_len = 20
        # mel fade in out
        self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
        self.mel_window = np.hamming(2 * self.mel_overlap_len)
        # hift cache
        self.mel_cache_len = 20
        self.source_cache_len = int(self.mel_cache_len * 256)
        # speech fade in out
        self.speech_window = np.hamming(2 * self.source_cache_len)
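        # Worked example of the cache sizes above (illustrative, assuming
        # input_frame_rate=50 tokens/s and a 22.05 kHz, 256-sample-hop mel):
        # mel_overlap_len  = int(20 / 50 * 22050 / 256) = 34 mel frames
        # source_cache_len = 20 * 256 = 5120 waveform samples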
        # rtf and decoding related
        self.stream_scale_factor = 1
        assert self.stream_scale_factor >= 1, 'stream_scale_factor should be at least 1, change it according to your actual rtf'
        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
        self.lock = threading.Lock()
        # dicts used to store session-related variables
        self.tts_speech_token_dict = {}
        self.llm_end_dict = {}
        self.mel_overlap_dict = {}
        self.hift_cache_dict = {}

    def load(self, llm_model, flow_model, hift_model):
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=False)
        self.llm.to(self.device).eval()
        if self.fp16 is True:
            self.llm.half()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=False)
        self.flow.to(self.device).eval()
        # in case hift_model is a hifigan model
        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
        self.hift.load_state_dict(hift_state_dict, strict=False)
        self.hift.to(self.device).eval()

    def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
        assert self.fp16 is True, "we only provide fp16 jit model, set fp16=True if you want to use jit model"
        llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
        self.llm.text_encoder = llm_text_encoder
        llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
        self.llm.llm = llm_llm
        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
        self.flow.encoder = flow_encoder

    def load_onnx(self, flow_decoder_estimator_model):
        import onnxruntime
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        providers = ['CUDAExecutionProvider' if torch.cuda.is_available() else 'CPUExecutionProvider']
        del self.flow.decoder.estimator
        self.flow.decoder.estimator = onnxruntime.InferenceSession(flow_decoder_estimator_model, sess_options=option, providers=providers)

    def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
        if self.fp16 is True:
            llm_embedding = llm_embedding.half()
        with self.llm_context:
            for i in self.llm.inference(text=text.to(self.device),
                                        text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
                                        prompt_text=prompt_text.to(self.device),
                                        prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
                                        prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                        prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
                                        embedding=llm_embedding.to(self.device)):
                self.tts_speech_token_dict[uuid].append(i)
        self.llm_end_dict[uuid] = True

    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
        tts_mel = self.flow.inference(token=token.to(self.device),
                                      token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                      prompt_token=prompt_token.to(self.device),
                                      prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                      prompt_feat=prompt_feat.to(self.device),
                                      prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                      embedding=embedding.to(self.device))
        # mel overlap fade in out
        if self.mel_overlap_dict[uuid] is not None:
            tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
        # append hift cache
        if self.hift_cache_dict[uuid] is not None:
            hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
            tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
        else:
            hift_cache_source = torch.zeros(1, 1, 0)
        # keep overlap mel and hift cache
        if finalize is False:
            self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
            tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
            self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
                                          'source': tts_source[:, :, -self.source_cache_len:],
                                          'speech': tts_speech[:, -self.source_cache_len:]}
            tts_speech = tts_speech[:, :-self.source_cache_len]
        else:
            if speed != 1.0:
                assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
                tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
        return tts_speech

    def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
            prompt_text=torch.zeros(1, 0, dtype=torch.int32),
            llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
            self.mel_overlap_dict[this_uuid], self.hift_cache_dict[this_uuid] = None, None
        p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
        p.start()
        if stream is True:
            token_hop_len = self.token_min_hop_len
            while True:
                time.sleep(0.1)
                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
                        .unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
                    # increase token_hop_len for better speech quality
                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
                    break
            p.join()
            # deal with the remaining tokens; make sure the remaining token length equals token_hop_len when the speech cache is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens at once
            p.join()
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.mel_overlap_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)

    def vc(self, source_speech_token, flow_prompt_speech_token, prompt_speech_feat, flow_embedding, stream=False, speed=1.0, **kwargs):
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = source_speech_token.flatten().tolist(), True
            self.mel_overlap_dict[this_uuid], self.hift_cache_dict[this_uuid] = None, None
        if stream is True:
            token_hop_len = self.token_min_hop_len
            while True:
                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
                        .unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
                    # increase token_hop_len for better speech quality
                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
                    break
            # deal with the remaining tokens; make sure the remaining token length equals token_hop_len when the speech cache is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens at once
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.mel_overlap_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
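

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the upstream API): how the
# streaming `tts` generator is meant to be consumed. `build_llm`, `build_flow`,
# `build_hift`, the checkpoint paths, and the `text_tokens` / `spk_embedding`
# tensors are hypothetical placeholders; in the repo the sub-modules are
# normally instantiated from the hyperpyyaml config by a higher-level wrapper.
#
# if __name__ == '__main__':
#     llm, flow, hift = build_llm(), build_flow(), build_hift()  # hypothetical builders
#     model = CosyVoiceModel(llm=llm, flow=flow, hift=hift, fp16=True)
#     model.load('llm.pt', 'flow.pt', 'hift.pt')  # hypothetical checkpoint paths
#     chunks = []
#     for out in model.tts(text=text_tokens, flow_embedding=spk_embedding, stream=True):
#         chunks.append(out['tts_speech'])  # each chunk is a [1, T] waveform tensor on cpu
#     waveform = torch.concat(chunks, dim=1)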