# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Generator
import json
import os
import re

import inflect
import numpy as np
import onnxruntime
import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
import whisper

try:
    import ttsfrd
    use_ttsfrd = True
except ImportError:
    print("failed to import ttsfrd, using wetext instead")
    from wetext import Normalizer as ZhNormalizer
    from wetext import Normalizer as EnNormalizer
    use_ttsfrd = False

from cosyvoice.utils.file_utils import logging, load_wav
from cosyvoice.utils.frontend_utils import (contains_chinese, replace_blank, replace_corner_mark, remove_bracket,
                                            spell_out_number, split_paragraph, is_only_punctuation)


class CosyVoiceFrontEnd:
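    """Frontend for CosyVoice inference.

    Bundles text tokenization and normalization, speaker-embedding extraction
    (CAM++ ONNX model), discrete speech-token extraction, and mel-feature
    extraction, and assembles the input dicts consumed by the llm and flow
    modules.
    """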

    def __init__(self,
                 get_tokenizer: Callable,
                 feat_extractor: Callable,
                 campplus_model: str,
                 speech_tokenizer_model: str,
                 spk2info: str = '',
                 allowed_special: str = 'all'):
        self.tokenizer = get_tokenizer()
        self.feat_extractor = feat_extractor
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
        self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option,
                                                                     providers=["CUDAExecutionProvider" if torch.cuda.is_available() else
                                                                                "CPUExecutionProvider"])
        if os.path.exists(spk2info):
            self.spk2info = torch.load(spk2info, map_location=self.device)
        else:
            self.spk2info = {}
        self.allowed_special = allowed_special
        self.use_ttsfrd = use_ttsfrd
        if self.use_ttsfrd:
            self.frd = ttsfrd.TtsFrontendEngine()
            ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
            assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, \
                'failed to initialize ttsfrd resource'
            self.frd.set_lang_type('pinyinvg')
        else:
            self.zh_tn_model = ZhNormalizer(remove_erhua=False)
            self.en_tn_model = EnNormalizer()
            self.inflect_parser = inflect.engine()

    def _extract_text_token(self, text):
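        """Tokenize `text` into id tensors on `self.device`.

        If `text` is a generator (streaming input), returns a token generator
        together with a dummy zero-length tensor for interface compatibility.
        """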
        if isinstance(text, Generator):
            logging.info('get tts_text generator, will return _extract_text_token_generator!')
            # NOTE add a dummy text_token_len for compatibility
            return self._extract_text_token_generator(text), torch.tensor([0], dtype=torch.int32).to(self.device)
        else:
            text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
            text_token = torch.tensor([text_token], dtype=torch.int32).to(self.device)
            text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device)
            return text_token, text_token_len

    def _extract_text_token_generator(self, text_generator):
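        """Yield text tokens one id at a time from a streaming text generator."""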
        for text in text_generator:
            text_token, _ = self._extract_text_token(text)
            for i in range(text_token.shape[1]):
                yield text_token[:, i: i + 1]

    def _extract_speech_token(self, prompt_wav):
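        """Extract discrete speech tokens from a prompt wav (16 kHz, at most 30 s).

        Computes a 128-bin Whisper log-mel spectrogram and runs it through the
        speech tokenizer ONNX session.
        """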
        speech = load_wav(prompt_wav, 16000)
        assert speech.shape[1] / 16000 <= 30, 'speech token extraction is not supported for audio longer than 30s'
        feat = whisper.log_mel_spectrogram(speech, n_mels=128)
        speech_token = self.speech_tokenizer_session.run(None,
                                                         {self.speech_tokenizer_session.get_inputs()[0].name:
                                                          feat.detach().cpu().numpy(),
                                                          self.speech_tokenizer_session.get_inputs()[1].name:
                                                          np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
        speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device)
        speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device)
        return speech_token, speech_token_len

    def _extract_spk_embedding(self, prompt_wav):
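        """Extract a speaker embedding from a prompt wav with the CAM++ ONNX model.

        Uses 80-dim kaldi fbank features with per-utterance mean normalization.
        """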
        speech = load_wav(prompt_wav, 16000)
        feat = kaldi.fbank(speech,
                           num_mel_bins=80,
                           dither=0,
                           sample_frequency=16000)
        feat = feat - feat.mean(dim=0, keepdim=True)
        embedding = self.campplus_session.run(None,
                                              {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
        embedding = torch.tensor([embedding]).to(self.device)
        return embedding

    def _extract_speech_feat(self, prompt_wav):
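        """Extract mel features from a prompt wav at 24 kHz for flow-model prompt conditioning."""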
        speech = load_wav(prompt_wav, 24000)
        speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device)
        speech_feat = speech_feat.unsqueeze(dim=0)
        speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device)
        return speech_feat, speech_feat_len

    def text_normalize(self, text, split=True, text_frontend=True):
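        """Normalize `text` and optionally split it into synthesis-friendly chunks.

        Normalization is skipped for generator inputs and for text containing
        '<|...|>' control tokens. Returns a list of chunks when `split` is True,
        otherwise the normalized string.
        """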
        if isinstance(text, Generator):
            logging.info('get tts_text generator, will skip text_normalize!')
            return [text]
        # NOTE: skip the text frontend when SSML-style control tokens appear in the text
        if '<|' in text and '|>' in text:
            text_frontend = False
        if text_frontend is False or text == '':
            return [text] if split is True else text
        text = text.strip()
        if self.use_ttsfrd:
            texts = [i["text"] for i in json.loads(self.frd.do_voicegen_frd(text))["sentences"]]
            text = ''.join(texts)
        else:
            if contains_chinese(text):
                text = self.zh_tn_model.normalize(text)
                text = text.replace("\n", "")
                text = replace_blank(text)
                text = replace_corner_mark(text)
                text = text.replace(".", "。")
                text = text.replace(" - ", "，")
                text = remove_bracket(text)
                text = re.sub(r'[，,、]+$', '。', text)
                texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh", token_max_n=80,
                                             token_min_n=60, merge_len=20, comma_split=False))
            else:
                text = self.en_tn_model.normalize(text)
                text = spell_out_number(text, self.inflect_parser)
                texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
                                             token_min_n=60, merge_len=20, comma_split=False))
        texts = [i for i in texts if not is_only_punctuation(i)]
        return texts if split is True else text

    def frontend_sft(self, tts_text, spk_id):
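        """Build model inputs for SFT inference from a precomputed speaker in `spk2info`."""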
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        embedding = self.spk2info[spk_id]['embedding']
        model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding}
        return model_input

    def frontend_zero_shot(self, tts_text, prompt_text, prompt_wav, resample_rate, zero_shot_spk_id):
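        """Build model inputs for zero-shot cloning from a prompt wav and its transcript.

        When `zero_shot_spk_id` is non-empty, the prompt features are taken from
        the cached `spk2info` entry instead of being extracted from audio.
        """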
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        if zero_shot_spk_id == '':
            prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text)
            speech_feat, speech_feat_len = self._extract_speech_feat(prompt_wav)
            speech_token, speech_token_len = self._extract_speech_token(prompt_wav)
            if resample_rate == 24000:
                # cosyvoice2: force the speech_feat / speech_token length ratio to exactly 2
                token_len = min(int(speech_feat.shape[1] / 2), speech_token.shape[1])
                speech_feat, speech_feat_len[:] = speech_feat[:, :2 * token_len], 2 * token_len
                speech_token, speech_token_len[:] = speech_token[:, :token_len], token_len
            embedding = self._extract_spk_embedding(prompt_wav)
            model_input = {'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
                           'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
                           'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
                           'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
                           'llm_embedding': embedding, 'flow_embedding': embedding}
        else:
            model_input = self.spk2info[zero_shot_spk_id]
        model_input['text'] = tts_text_token
        model_input['text_len'] = tts_text_token_len
        return model_input

    def frontend_cross_lingual(self, tts_text, prompt_wav, resample_rate, zero_shot_spk_id):
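        """Build zero-shot inputs for cross-lingual synthesis (no text or speech prompt for the llm)."""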
        model_input = self.frontend_zero_shot(tts_text, '', prompt_wav, resample_rate, zero_shot_spk_id)
        # in cross-lingual mode, remove the text and speech prompt from the llm input
        del model_input['prompt_text']
        del model_input['prompt_text_len']
        del model_input['llm_prompt_speech_token']
        del model_input['llm_prompt_speech_token_len']
        return model_input

    def frontend_instruct(self, tts_text, spk_id, instruct_text):
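        """Build SFT inputs with an instruction text used as the llm prompt."""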
        model_input = self.frontend_sft(tts_text, spk_id)
        # in instruct mode, remove the speaker embedding from the llm input to avoid information leakage
        del model_input['llm_embedding']
        instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text)
        model_input['prompt_text'] = instruct_text_token
        model_input['prompt_text_len'] = instruct_text_token_len
        return model_input

    def frontend_instruct2(self, tts_text, instruct_text, prompt_wav, resample_rate, zero_shot_spk_id):
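        """Build zero-shot inputs with an instruction text, dropping the llm speech prompt."""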
        model_input = self.frontend_zero_shot(tts_text, instruct_text, prompt_wav, resample_rate, zero_shot_spk_id)
        del model_input['llm_prompt_speech_token']
        del model_input['llm_prompt_speech_token_len']
        return model_input

    def frontend_vc(self, source_speech_16k, prompt_wav, resample_rate):
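        """Build voice-conversion inputs: source speech tokens plus prompt conditioning for the flow model."""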
        prompt_speech_token, prompt_speech_token_len = self._extract_speech_token(prompt_wav)
        prompt_speech_feat, prompt_speech_feat_len = self._extract_speech_feat(prompt_wav)
        embedding = self._extract_spk_embedding(prompt_wav)
        source_speech_token, source_speech_token_len = self._extract_speech_token(source_speech_16k)
        model_input = {'source_speech_token': source_speech_token, 'source_speech_token_len': source_speech_token_len,
                       'flow_prompt_speech_token': prompt_speech_token, 'flow_prompt_speech_token_len': prompt_speech_token_len,
                       'prompt_speech_feat': prompt_speech_feat, 'prompt_speech_feat_len': prompt_speech_feat_len,
                       'flow_embedding': embedding}
        return model_input
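

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the upstream module). The
# tokenizer and feature-extractor callables are normally built from the
# model's YAML config, and the checkpoint paths below are assumed placeholder
# names -- substitute the components that ship with your model directory.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model_dir = 'pretrained_models/CosyVoice2-0.5B'  # hypothetical checkpoint directory
    frontend = CosyVoiceFrontEnd(
        get_tokenizer=...,  # callable returning a tokenizer with .encode(text, allowed_special=...)
        feat_extractor=...,  # mel feature extractor module, e.g. from the flow config
        campplus_model=os.path.join(model_dir, 'campplus.onnx'),
        speech_tokenizer_model=os.path.join(model_dir, 'speech_tokenizer_v2.onnx'),
        spk2info=os.path.join(model_dir, 'spk2info.pt'),
    )
    # Normalize and split a mixed-language sentence into synthesis chunks.
    for chunk in frontend.text_normalize('CosyVoice supports English. 也支持中文。', split=True):
        print(chunk)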