webui.py

# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
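# Gradio web demo for CosyVoice text-to-speech.
# Typical usage (defaults taken from the argument parser at the bottom of this file):
#   python webui.py --port 8000 --model_dir speech_tts/CosyVoice-300M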
import os
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/third_party/AcademiCodec'.format(ROOT_DIR))
sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
import argparse
import gradio as gr
import numpy as np
import torch
import torchaudio
import random
import librosa
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')
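
# Produce a Gradio update payload that sets the seed Number component to a fresh random value.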
def generate_seed():
    seed = random.randint(1, 100000000)
    return {
        "__type__": "update",
        "value": seed
    }
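
# Seed Python, NumPy and PyTorch (including all CUDA devices) for reproducible inference.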
def set_all_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
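
# Trim leading/trailing silence from the prompt audio, peak-normalise it to max_val,
# and append a short tail of silence before it is fed to the model.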
max_val = 0.8
def postprocess(speech, top_db=60, hop_length=220, win_length=440):
    speech, _ = librosa.effects.trim(
        speech, top_db=top_db,
        frame_length=win_length,
        hop_length=hop_length
    )
    if speech.abs().max() > max_val:
        speech = speech / speech.abs().max() * max_val
    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
    return speech
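
# Inference modes shown in the UI, with per-mode operating instructions (in Chinese):
# pre-trained voice, 3 s fast voice cloning, cross-lingual cloning, natural-language control.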
inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
instruct_dict = {'预训练音色': '1. 选择预训练音色\n2.点击生成音频按钮',
                 '3s极速复刻': '1. 选择prompt音频文件,或录入prompt音频,若同时提供,优先选择prompt音频文件\n2. 输入prompt文本\n3.点击生成音频按钮',
                 '跨语种复刻': '1. 选择prompt音频文件,或录入prompt音频,若同时提供,优先选择prompt音频文件\n2.点击生成音频按钮',
                 '自然语言控制': '1. 输入instruct文本\n2.点击生成音频按钮'}
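
# Show the step-by-step instructions for whichever inference mode the user selects.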
def change_instruction(mode_checkbox_group):
    return instruct_dict[mode_checkbox_group]
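
# Validate the inputs for the selected mode, run CosyVoice inference, and return
# (sample_rate, waveform) for the Gradio Audio output; on invalid input, emit a
# warning and return a silent clip instead.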
def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed):
    if prompt_wav_upload is not None:
        prompt_wav = prompt_wav_upload
    elif prompt_wav_record is not None:
        prompt_wav = prompt_wav_record
    else:
        prompt_wav = None
    # if instruct mode, please make sure that model is speech_tts/CosyVoice-300M-Instruct and not cross_lingual mode
    if mode_checkbox_group in ['自然语言控制']:
        if cosyvoice.frontend.instruct is False:
            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用speech_tts/CosyVoice-300M-Instruct模型'.format(args.model_dir))
            return (target_sr, default_data)
        if instruct_text == '':
            gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')
            return (target_sr, default_data)
        if prompt_wav is not None or prompt_text != '':
            gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')
    # if cross_lingual mode, please make sure that model is speech_tts/CosyVoice-300M and that tts_text and prompt_text are in different languages
    if mode_checkbox_group in ['跨语种复刻']:
        if cosyvoice.frontend.instruct is True:
            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用speech_tts/CosyVoice-300M模型'.format(args.model_dir))
            return (target_sr, default_data)
        if instruct_text != '':
            gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')
        if prompt_wav is None:
            gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频')
            return (target_sr, default_data)
        gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和prompt文本为不同语言')
    # in zero_shot and cross_lingual mode, make sure that prompt_text and prompt_wav meet the requirements
    if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']:
        if prompt_wav is None:
            gr.Warning('prompt音频为空,您是否忘记输入prompt音频?')
            return (target_sr, default_data)
        if torchaudio.info(prompt_wav).sample_rate < prompt_sr:
            gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))
            return (target_sr, default_data)
    # sft mode only uses sft_dropdown
    if mode_checkbox_group in ['预训练音色']:
        if instruct_text != '' or prompt_wav is not None or prompt_text != '':
            gr.Info('您正在使用预训练音色模式,prompt文本/prompt音频/instruct文本会被忽略!')
    # zero_shot mode only uses prompt_wav and prompt_text
    if mode_checkbox_group in ['3s极速复刻']:
        if prompt_text == '':
            gr.Warning('prompt文本为空,您是否忘记输入prompt文本?')
            return (target_sr, default_data)
        if instruct_text != '':
            gr.Info('您正在使用3s极速复刻模式,预训练音色/instruct文本会被忽略!')
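
    # run the inference call that matches the selected mode, seeded for reproducibility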
    if mode_checkbox_group == '预训练音色':
        logging.info('get sft inference request')
        set_all_random_seed(seed)
        output = cosyvoice.inference_sft(tts_text, sft_dropdown)
    elif mode_checkbox_group == '3s极速复刻':
        logging.info('get zero_shot inference request')
        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
        set_all_random_seed(seed)
        output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
    elif mode_checkbox_group == '跨语种复刻':
        logging.info('get cross_lingual inference request')
        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
        set_all_random_seed(seed)
        output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
    else:
        logging.info('get instruct inference request')
        set_all_random_seed(seed)
        output = cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text)
    audio_data = output['tts_speech'].numpy().flatten()
    return (target_sr, audio_data)
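
# Assemble the Gradio interface and wire the buttons to the handlers above.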
def main():
    with gr.Blocks() as demo:
        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) 预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M) [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M-Instruct) [CosyVoice-300M-SFT](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M-SFT)")
        gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作")
        tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。")
        with gr.Row():
            mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0])
            instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=0.5)
            sft_dropdown = gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=0.25)
            with gr.Column(scale=0.25):
                seed_button = gr.Button(value="\U0001F3B2")
                seed = gr.Number(value=0, label="随机推理种子")
        with gr.Row():
            prompt_wav_upload = gr.Audio(sources='upload', type='filepath', label='选择prompt音频文件,注意采样率不低于16khz')
            prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='录制prompt音频文件')
        prompt_text = gr.Textbox(label="输入prompt文本", lines=1, placeholder="请输入prompt文本,需与prompt音频内容一致,暂时不支持自动识别...", value='')
        instruct_text = gr.Textbox(label="输入instruct文本", lines=1, placeholder="请输入instruct文本.", value='')

        generate_button = gr.Button("生成音频")
        audio_output = gr.Audio(label="合成音频")

        seed_button.click(generate_seed, inputs=[], outputs=seed)
        generate_button.click(generate_audio,
                              inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed],
                              outputs=[audio_output])
        mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])
    demo.queue(max_size=4, default_concurrency_limit=2)
    demo.launch(server_port=args.port)
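
# Entry point: parse CLI arguments, load the CosyVoice model, then start the demo.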
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--port',
                        type=int,
                        default=8000)
    parser.add_argument('--model_dir',
                        type=str,
                        default='speech_tts/CosyVoice-300M',
                        help='local path or modelscope repo id')
    args = parser.parse_args()
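
    # module-level names used by generate_audio(): the loaded model, its speaker list,
    # the prompt/output sample rates, and a one-second silent fallback clip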
    cosyvoice = CosyVoice(args.model_dir)
    sft_spk = cosyvoice.list_avaliable_spks()
    prompt_sr, target_sr = 16000, 22050
    default_data = np.zeros(target_sr)
    main()