# cosyvoice.py

# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import queue
import time
from typing import Generator

import torch
from tqdm import tqdm
from hyperpyyaml import load_hyperpyyaml
from modelscope import snapshot_download

from cosyvoice.cli.frontend import CosyVoiceFrontEnd
from cosyvoice.cli.model import CosyVoiceModel, CosyVoice2Model, VllmCosyVoice2Model
from cosyvoice.utils.file_utils import logging
from cosyvoice.utils.class_utils import get_model_type


class CosyVoice:

    def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False):
        self.instruct = '-Instruct' in model_dir
        self.model_dir = model_dir
        self.fp16 = fp16
        if not os.path.exists(model_dir):
            model_dir = snapshot_download(model_dir)
        with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
            configs = load_hyperpyyaml(f)
        assert get_model_type(configs) != CosyVoice2Model, 'do not use {} for CosyVoice initialization!'.format(model_dir)
        self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                          configs['feat_extractor'],
                                          '{}/campplus.onnx'.format(model_dir),
                                          '{}/speech_tokenizer_v1.onnx'.format(model_dir),
                                          '{}/spk2info.pt'.format(model_dir),
                                          configs['allowed_special'])
        self.sample_rate = configs['sample_rate']
        if not torch.cuda.is_available() and (load_jit or load_trt or fp16):
            load_jit, load_trt, fp16 = False, False, False
            logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
        self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16)
        self.model.load('{}/llm.pt'.format(model_dir),
                        '{}/flow.pt'.format(model_dir),
                        '{}/hift.pt'.format(model_dir))
        if load_jit:
            self.model.load_jit('{}/llm.text_encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 else 'fp32'),
                                '{}/llm.llm.{}.zip'.format(model_dir, 'fp16' if self.fp16 else 'fp32'),
                                '{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 else 'fp32'))
        if load_trt:
            self.estimator_count = configs['flow']['decoder']['estimator'].get('estimator_count', 1)
            self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 else 'fp32'),
                                '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
                                self.fp16, self.estimator_count)
        del configs
        # pool of CUDA streams so concurrent inference calls do not serialize on the
        # default stream; the pool requires a CUDA device
        self.device = torch.device('cuda')
        thread_count = 10
        self.stream_pool = queue.Queue(maxsize=thread_count)
        for _ in range(thread_count):
            self.stream_pool.put(torch.cuda.Stream(self.device))

    def list_available_spks(self):
        spks = list(self.frontend.spk2info.keys())
        return spks

    def add_spk_info(self, spk_id, spk_info):
        self.frontend.add_spk_info(spk_id, spk_info)

    def inference_sft(self, tts_text, spk_id, stream=False, speed=1.0, text_frontend=True):
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    model_input = self.frontend.frontend_sft(i, spk_id)
                    start_time = time.time()
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                        yield model_output
                        start_time = time.time()
                cuda_stream.synchronize()
        finally:
            # always return the stream to the pool, even if the consumer abandons the generator
            self.stream_pool.put(cuda_stream)

    def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                prompt_text = self.frontend.text_normalize(prompt_text, split=False, text_frontend=text_frontend)
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    if (not isinstance(i, Generator)) and len(i) < 0.5 * len(prompt_text):
                        logging.warning('synthesis text {} is much shorter than prompt text {}, this may lead to bad performance'.format(i, prompt_text))
                    model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k, self.sample_rate)
                    start_time = time.time()
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                        yield model_output
                        start_time = time.time()
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)

    def inference_zero_shot_by_spk_id(self, tts_text, spk_id, stream=False, speed=1.0, text_frontend=True):
        """Run zero-shot inference with a predefined speaker id."""
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    model_input = self.frontend.frontend_zero_shot_by_spk_id(i, spk_id)
                    start_time = time.time()
                    last_time = start_time
                    chunk_index = 0
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech index:{}, len {:.2f}, rtf {:.3f}, cost {:.3f}s, all cost time {:.3f}s'.format(
                            chunk_index, speech_len, (time.time() - last_time) / speech_len, time.time() - last_time, time.time() - start_time))
                        yield model_output
                        last_time = time.time()
                        chunk_index += 1
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)

    def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k, self.sample_rate)
                    start_time = time.time()
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                        yield model_output
                        start_time = time.time()
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)

    def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False, speed=1.0, text_frontend=True):
        # validate before taking a stream from the pool
        assert isinstance(self.model, CosyVoiceModel), 'inference_instruct is only implemented for CosyVoice!'
        if self.instruct is False:
            raise ValueError('{} does not support instruct inference'.format(self.model_dir))
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                instruct_text = self.frontend.text_normalize(instruct_text, split=False, text_frontend=text_frontend)
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
                    start_time = time.time()
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                        yield model_output
                        start_time = time.time()
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)

    def inference_vc(self, source_speech_16k, prompt_speech_16k, stream=False, speed=1.0):
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                model_input = self.frontend.frontend_vc(source_speech_16k, prompt_speech_16k, self.sample_rate)
                start_time = time.time()
                for model_output in self.model.vc(**model_input, stream=stream, speed=speed):
                    speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                    logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                    yield model_output
                    start_time = time.time()
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)
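
# Example usage (a minimal sketch; the model path and speaker name below are
# illustrative and must match a model you have actually downloaded):
#
#   import torchaudio
#   cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT')
#   print(cosyvoice.list_available_spks())
#   for idx, out in enumerate(cosyvoice.inference_sft('你好，欢迎使用。', '中文女')):
#       # each chunk is a dict whose 'tts_speech' tensor has shape [1, num_samples]
#       torchaudio.save('sft_{}.wav'.format(idx), out['tts_speech'], cosyvoice.sample_rate)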


class CosyVoice2(CosyVoice):

    def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False, use_vllm=False):
        self.instruct = '-Instruct' in model_dir
        self.model_dir = model_dir
        self.fp16 = fp16
        if not os.path.exists(model_dir):
            model_dir = snapshot_download(model_dir)
        with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
            configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'CosyVoice-BlankEN')})
        assert get_model_type(configs) == CosyVoice2Model, 'do not use {} for CosyVoice2 initialization!'.format(model_dir)
        self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                          configs['feat_extractor'],
                                          '{}/campplus.onnx'.format(model_dir),
                                          '{}/speech_tokenizer_v2.onnx'.format(model_dir),
                                          '{}/spk2info.pt'.format(model_dir),
                                          configs['allowed_special'])
        self.sample_rate = configs['sample_rate']
        if not torch.cuda.is_available() and (load_jit or load_trt or fp16):
            load_jit, load_trt, fp16 = False, False, False
            logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
        if use_vllm:
            try:
                self.model = VllmCosyVoice2Model(model_dir, configs['flow'], configs['hift'], fp16)
            except Exception as e:
                logging.warning(f'use vllm inference failed. \n{e}')
                raise
        else:
            self.model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'], fp16)
        self.model.load('{}/llm.pt'.format(model_dir),
                        '{}/flow.pt'.format(model_dir),
                        '{}/hift.pt'.format(model_dir))
        if load_jit:
            self.model.load_jit('{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 else 'fp32'))
        if load_trt:
            self.estimator_count = configs['flow']['decoder']['estimator'].get('estimator_count', 1)
            self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 else 'fp32'),
                                '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
                                self.fp16, self.estimator_count)
        del configs
        # same CUDA stream pool as CosyVoice (requires a CUDA device)
        self.device = torch.device('cuda')
        thread_count = 10
        self.stream_pool = queue.Queue(maxsize=thread_count)
        for _ in range(thread_count):
            self.stream_pool.put(torch.cuda.Stream(self.device))

    def inference_instruct(self, *args, **kwargs):
        raise NotImplementedError('inference_instruct is not implemented for CosyVoice2!')

    def inference_instruct2(self, tts_text, instruct_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
        assert isinstance(self.model, CosyVoice2Model), 'inference_instruct2 is only implemented for CosyVoice2!'
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    model_input = self.frontend.frontend_instruct2(i, instruct_text, prompt_speech_16k, self.sample_rate)
                    start_time = time.time()
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                        yield model_output
                        start_time = time.time()
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)

    def inference_instruct2_by_spk_id(self, tts_text, instruct_text, spk_id, stream=False, speed=1.0, text_frontend=True):
        assert isinstance(self.model, CosyVoice2Model), 'inference_instruct2_by_spk_id is only implemented for CosyVoice2!'
        cuda_stream = self.stream_pool.get()
        try:
            with torch.cuda.stream(cuda_stream):
                for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
                    model_input = self.frontend.frontend_instruct2_by_spk_id(i, instruct_text, spk_id)
                    start_time = time.time()
                    logging.info('synthesis text {}'.format(i))
                    for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                        speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
                        logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                        yield model_output
                        start_time = time.time()
                cuda_stream.synchronize()
        finally:
            self.stream_pool.put(cuda_stream)
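

# Minimal smoke test for CosyVoice2 zero-shot synthesis; a sketch, not part of the API.
# The checkpoint path and prompt wav below are placeholders: adjust them to your local
# setup before running.
if __name__ == '__main__':
    import torchaudio
    from cosyvoice.utils.file_utils import load_wav

    cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B', load_jit=False, load_trt=False, fp16=False)
    # the zero-shot frontend expects 16 kHz prompt audio
    prompt_speech_16k = load_wav('./asset/zero_shot_prompt.wav', 16000)
    for idx, out in enumerate(cosyvoice.inference_zero_shot('收到好友从远方寄来的生日礼物。',
                                                            '希望你以后能够做的比我还好呦。',
                                                            prompt_speech_16k, stream=False)):
        torchaudio.save('zero_shot_{}.wav'.format(idx), out['tts_speech'], cosyvoice.sample_rate)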