# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Generator
import json
import os
import re

import inflect
import numpy as np
import onnxruntime
import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
import whisper

try:
    import ttsfrd
    use_ttsfrd = True
except ImportError:
    print("failed to import ttsfrd, falling back to wetext")
    from wetext import Normalizer as ZhNormalizer
    from wetext import Normalizer as EnNormalizer
    use_ttsfrd = False

from cosyvoice.utils.file_utils import logging
from cosyvoice.utils.frontend_utils import (contains_chinese, replace_blank, replace_corner_mark, remove_bracket,
                                            spell_out_number, split_paragraph, is_only_punctuation)


class CosyVoiceFrontEnd:
    """Frontend for CosyVoice: text tokenization/normalization plus prompt-side
    speech token, speaker embedding, and mel-feature extraction."""

    def __init__(self,
                 get_tokenizer: Callable,
                 feat_extractor: Callable,
                 campplus_model: str,
                 speech_tokenizer_model: str,
                 spk2info: str = '',
                 allowed_special: str = 'all'):
        self.tokenizer = get_tokenizer()
        self.feat_extractor = feat_extractor
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        # the CAM++ speaker-embedding model always runs on CPU; the speech tokenizer
        # uses CUDA when available
        self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option,
                                                             providers=["CPUExecutionProvider"])
        self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option,
                                                                     providers=["CUDAExecutionProvider" if torch.cuda.is_available() else
                                                                                "CPUExecutionProvider"])
        if os.path.exists(spk2info):
            self.spk2info = torch.load(spk2info, map_location=self.device)
        else:
            self.spk2info = {}
        self.allowed_special = allowed_special
        self.use_ttsfrd = use_ttsfrd
        if self.use_ttsfrd:
            self.frd = ttsfrd.TtsFrontendEngine()
            ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
            assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, \
                'failed to initialize ttsfrd resource'
            self.frd.set_lang_type('pinyinvg')
        else:
            self.zh_tn_model = ZhNormalizer(remove_erhua=False, overwrite_cache=False)
            self.en_tn_model = EnNormalizer()
            self.inflect_parser = inflect.engine()

    def _extract_text_token(self, text):
        if isinstance(text, Generator):
            logging.info('got tts_text generator, will return _extract_text_token_generator!')
            # NOTE add a dummy text_token_len for compatibility
            return self._extract_text_token_generator(text), torch.tensor([0], dtype=torch.int32).to(self.device)
        else:
            text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
            text_token = torch.tensor([text_token], dtype=torch.int32).to(self.device)
            text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device)
            return text_token, text_token_len

    def _extract_text_token_generator(self, text_generator):
        for text in text_generator:
            text_token, _ = self._extract_text_token(text)
            for i in range(text_token.shape[1]):
                yield text_token[:, i: i + 1]
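
    # _extract_speech_token discretizes a 16 kHz prompt waveform with the ONNX speech
    # tokenizer, feeding it whisper's 128-bin log-mel features; whisper uses a
    # 160-sample hop at 16 kHz, so e.g. a 5 s clip (80000 samples) yields a
    # (128, 500) feature matrix, and the 30 s cap matches whisper's 30 s window.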
    def _extract_speech_token(self, speech):
        assert speech.shape[1] / 16000 <= 30, 'speech token extraction does not support audio longer than 30s'
        feat = whisper.log_mel_spectrogram(speech, n_mels=128)
        speech_token = self.speech_tokenizer_session.run(None,
                                                         {self.speech_tokenizer_session.get_inputs()[0].name:
                                                          feat.detach().cpu().numpy(),
                                                          self.speech_tokenizer_session.get_inputs()[1].name:
                                                          np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
        speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device)
        speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device)
        return speech_token, speech_token_len
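
    # _extract_spk_embedding computes 80-dim Kaldi fbank features, applies per-utterance
    # mean normalization, then runs the CAM++ ONNX model to obtain a fixed-size speaker
    # embedding (192-dim for the CAM++ checkpoints typically shipped with CosyVoice;
    # the exact size depends on the model file).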
    def _extract_spk_embedding(self, speech):
        feat = kaldi.fbank(speech,
                           num_mel_bins=80,
                           dither=0,
                           sample_frequency=16000)
        feat = feat - feat.mean(dim=0, keepdim=True)
        embedding = self.campplus_session.run(None,
                                              {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
        embedding = torch.tensor([embedding]).to(self.device)
        return embedding

    def _extract_speech_feat(self, speech):
        speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device)
        speech_feat = speech_feat.unsqueeze(dim=0)
        speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device)
        return speech_feat, speech_feat_len

    def text_normalize(self, text, split=True, text_frontend=True):
        if isinstance(text, Generator):
            logging.info('got tts_text generator, will skip text_normalize!')
            return [text]
        if text_frontend is False or text == '':
            return [text] if split is True else text
        text = text.strip()
        if self.use_ttsfrd:
            texts = [i["text"] for i in json.loads(self.frd.do_voicegen_frd(text))["sentences"]]
            text = ''.join(texts)
        else:
            if contains_chinese(text):
                text = self.zh_tn_model.normalize(text)
                text = text.replace("\n", "")
                text = replace_blank(text)
                text = replace_corner_mark(text)
                text = text.replace(".", "。")
                text = text.replace(" - ", ",")
                text = remove_bracket(text)
                text = re.sub(r'[,,、]+$', '。', text)
                texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh",
                                             token_max_n=80, token_min_n=60, merge_len=20, comma_split=False))
            else:
                text = self.en_tn_model.normalize(text)
                text = spell_out_number(text, self.inflect_parser)
                texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en",
                                             token_max_n=80, token_min_n=60, merge_len=20, comma_split=False))
        texts = [i for i in texts if not is_only_punctuation(i)]
        return texts if split is True else text
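
    # The frontend_* methods below assemble the model_input dict consumed by the
    # downstream LLM and flow-matching stages; key prefixes such as 'llm_' and 'flow_'
    # indicate which stage each tensor feeds.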
    def frontend_sft(self, tts_text, spk_id):
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        embedding = self.spk2info[spk_id]['embedding']
        model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding}
        return model_input

    def frontend_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        if zero_shot_spk_id == '':
            prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text)
            prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=resample_rate)(prompt_speech_16k)
            speech_feat, speech_feat_len = self._extract_speech_feat(prompt_speech_resample)
            speech_token, speech_token_len = self._extract_speech_token(prompt_speech_16k)
            if resample_rate == 24000:
                # cosyvoice2: trim so that speech_feat has exactly twice as many frames as speech_token
                token_len = min(int(speech_feat.shape[1] / 2), speech_token.shape[1])
                speech_feat, speech_feat_len[:] = speech_feat[:, :2 * token_len], 2 * token_len
                speech_token, speech_token_len[:] = speech_token[:, :token_len], token_len
            embedding = self._extract_spk_embedding(prompt_speech_16k)
            model_input = {'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
                           'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
                           'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
                           'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
                           'llm_embedding': embedding, 'flow_embedding': embedding}
        else:
            model_input = self.spk2info[zero_shot_spk_id]
        model_input['text'] = tts_text_token
        model_input['text_len'] = tts_text_token_len
        return model_input

    def frontend_cross_lingual(self, tts_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
        model_input = self.frontend_zero_shot(tts_text, '', prompt_speech_16k, resample_rate, zero_shot_spk_id)
        # in cross-lingual mode, drop the text prompt and the LLM speech-token prompt
        del model_input['prompt_text']
        del model_input['prompt_text_len']
        del model_input['llm_prompt_speech_token']
        del model_input['llm_prompt_speech_token_len']
        return model_input

    def frontend_instruct(self, tts_text, spk_id, instruct_text):
        model_input = self.frontend_sft(tts_text, spk_id)
        # in instruct mode, remove the LLM speaker embedding to avoid information leakage
        del model_input['llm_embedding']
        instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
        model_input['prompt_text'] = instruct_text_token
        model_input['prompt_text_len'] = instruct_text_token_len
        return model_input

    def frontend_instruct2(self, tts_text, instruct_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
        model_input = self.frontend_zero_shot(tts_text, instruct_text + '<|endofprompt|>', prompt_speech_16k, resample_rate, zero_shot_spk_id)
        # keep the instruct text as the prompt, but drop the speech-token prompt from the LLM
        del model_input['llm_prompt_speech_token']
        del model_input['llm_prompt_speech_token_len']
        return model_input

    def frontend_vc(self, source_speech_16k, prompt_speech_16k, resample_rate):
        prompt_speech_token, prompt_speech_token_len = self._extract_speech_token(prompt_speech_16k)
        prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=resample_rate)(prompt_speech_16k)
        prompt_speech_feat, prompt_speech_feat_len = self._extract_speech_feat(prompt_speech_resample)
        embedding = self._extract_spk_embedding(prompt_speech_16k)
        source_speech_token, source_speech_token_len = self._extract_speech_token(source_speech_16k)
        model_input = {'source_speech_token': source_speech_token, 'source_speech_token_len': source_speech_token_len,
                       'flow_prompt_speech_token': prompt_speech_token, 'flow_prompt_speech_token_len': prompt_speech_token_len,
                       'prompt_speech_feat': prompt_speech_feat, 'prompt_speech_feat_len': prompt_speech_feat_len,
                       'flow_embedding': embedding}
        return model_input
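

# Minimal usage sketch (not part of the original module): shows how the frontend might be
# constructed and driven for zero-shot synthesis. The ONNX paths, prompt wav, stub
# tokenizer, and stand-in feat_extractor are all placeholders; in the CosyVoice repo
# these come from the pretrained model directory and its YAML config.
if __name__ == '__main__':

    class _StubTokenizer:
        # toy stand-in exposing the encode() interface the frontend expects;
        # a real run must use the model's own text tokenizer
        def encode(self, text, allowed_special='all'):
            return [ord(c) for c in text]

    frontend = CosyVoiceFrontEnd(
        get_tokenizer=_StubTokenizer,
        feat_extractor=torchaudio.transforms.MelSpectrogram(sample_rate=24000, n_mels=80),  # stand-in mel config
        campplus_model='pretrained_models/campplus.onnx',  # placeholder path
        speech_tokenizer_model='pretrained_models/speech_tokenizer_v2.onnx')  # placeholder path
    prompt_speech_16k, sr = torchaudio.load('prompt.wav')  # placeholder 16 kHz mono prompt
    assert sr == 16000, 'prompt audio must be 16 kHz'
    for text in frontend.text_normalize('Hello world, this is a test.', split=True):
        model_input = frontend.frontend_zero_shot(text, 'transcript of prompt.wav', prompt_speech_16k,
                                                  resample_rate=24000, zero_shot_spk_id='')
        # model_input now holds the text, prompt, and embedding tensors expected downstream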