# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import threading
import time
from contextlib import nullcontext
import uuid
from cosyvoice.utils.common import fade_in_out


class CosyVoiceModel:

    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift
        self.token_min_hop_len = 100
        self.token_max_hop_len = 200
        self.token_overlap_len = 20
        # mel fade in out
        self.mel_overlap_len = 34
        self.mel_window = np.hamming(2 * self.mel_overlap_len)
        # hift cache
        self.mel_cache_len = 20
        self.source_cache_len = int(self.mel_cache_len * 256)
        # rtf and decoding related
        self.stream_scale_factor = 1
        assert self.stream_scale_factor >= 1, 'stream_scale_factor should be greater than or equal to 1, change it according to your actual rtf'
        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
        self.lock = threading.Lock()
        # dict used to store session related variable
        self.tts_speech_token_dict = {}
        self.llm_end_dict = {}
        self.mel_overlap_dict = {}
        self.hift_cache_dict = {}

    def load(self, llm_model, flow_model, hift_model):
        # load checkpoints onto the inference device; the llm is cast to fp16
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device))
        self.llm.to(self.device).eval()
        self.llm.half()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device))
        self.flow.to(self.device).eval()
        self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
        self.hift.to(self.device).eval()

    def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
        # swap in TorchScript-exported modules for faster inference
        llm_text_encoder = torch.jit.load(llm_text_encoder_model)
        self.llm.text_encoder = llm_text_encoder
        llm_llm = torch.jit.load(llm_llm_model)
        self.llm.llm = llm_llm
        flow_encoder = torch.jit.load(flow_encoder_model)
        self.flow.encoder = flow_encoder

    def load_onnx(self, flow_decoder_estimator_model):
        # replace the flow decoder estimator with an onnxruntime session
        import onnxruntime
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        providers = ['CUDAExecutionProvider' if torch.cuda.is_available() else 'CPUExecutionProvider']
        del self.flow.decoder.estimator
        self.flow.decoder.estimator = onnxruntime.InferenceSession(flow_decoder_estimator_model, sess_options=option, providers=providers)

    def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
        # run llm inference (in a background thread), streaming speech tokens into the session dict
        with self.llm_context:
            for i in self.llm.inference(text=text.to(self.device),
                                        text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
                                        prompt_text=prompt_text.to(self.device),
                                        prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
                                        prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                        prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
                                        embedding=llm_embedding.to(self.device).half(),
                                        sampling=25,
                                        max_token_text_ratio=30,
                                        min_token_text_ratio=3):
                self.tts_speech_token_dict[uuid].append(i)
        self.llm_end_dict[uuid] = True

    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False):
        # flow predicts mel from speech tokens, hift vocodes mel to waveform;
        # per-session caches keep the mel overlap and vocoder source for streaming continuity
        tts_mel = self.flow.inference(token=token.to(self.device),
                                      token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                      prompt_token=prompt_token.to(self.device),
                                      prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                      prompt_feat=prompt_feat.to(self.device),
                                      prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                      embedding=embedding.to(self.device))
        # mel overlap fade in out
        if self.mel_overlap_dict[uuid] is not None:
            tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
        # append hift cache
        if self.hift_cache_dict[uuid] is not None:
            hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
            tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
        else:
            hift_cache_source = torch.zeros(1, 1, 0)
        # keep overlap mel and hift cache
        if finalize is False:
            self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
            tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
            tts_speech, tts_source = self.hift.inference(mel=tts_mel, cache_source=hift_cache_source)
            self.hift_cache_dict[uuid] = {'source': tts_source[:, :, -self.source_cache_len:], 'mel': tts_mel[:, :, -self.mel_cache_len:]}
            tts_speech = tts_speech[:, :-self.source_cache_len]
        else:
            tts_speech, tts_source = self.hift.inference(mel=tts_mel, cache_source=hift_cache_source)
        return tts_speech

    def inference(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
                  prompt_text=torch.zeros(1, 0, dtype=torch.int32),
                  llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
                  flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
                  prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, **kwargs):
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
            self.mel_overlap_dict[this_uuid], self.hift_cache_dict[this_uuid] = None, None
        p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
        p.start()
        if stream is True:
            token_hop_len = self.token_min_hop_len
            while True:
                time.sleep(0.1)
                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
                    this_tts_speech_token = torch.concat(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len], dim=1)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
                    # increase token_hop_len for better speech quality
                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
                    break
            p.join()
            # deal with the remaining tokens; make sure the remaining token length equals token_hop_len when cache_speech is not None
            this_tts_speech_token = torch.concat(self.tts_speech_token_dict[this_uuid], dim=1)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens at once
            p.join()
            this_tts_speech_token = torch.concat(self.tts_speech_token_dict[this_uuid], dim=1)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.mel_overlap_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
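

# A minimal usage sketch, not part of the original file: the yaml/checkpoint paths are
# assumptions based on the CosyVoice pretrained-model layout, and the input tensors are
# placeholders for what cosyvoice.cli.frontend normally prepares from text and prompts.
if __name__ == '__main__':
    from hyperpyyaml import load_hyperpyyaml
    model_dir = 'pretrained_models/CosyVoice-300M'  # hypothetical local path
    with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
        configs = load_hyperpyyaml(f)
    model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
    model.load('{}/llm.pt'.format(model_dir),
               '{}/flow.pt'.format(model_dir),
               '{}/hift.pt'.format(model_dir))
    # text tokens and speaker embeddings would normally come from the frontend;
    # random values are used here only to illustrate the expected shapes
    text_token = torch.randint(0, 100, (1, 20), dtype=torch.int32)
    spk_embedding = torch.zeros(1, 192)
    for chunk in model.inference(text=text_token, flow_embedding=spk_embedding,
                                 llm_embedding=spk_embedding, stream=True):
        print(chunk['tts_speech'].shape)  # (1, num_samples) waveform chunk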