lyuxiang.lx, 1 year ago
parent
commit
39f02fa1f9
5 changed files with 33 additions and 17 deletions
  1. FAQ.md (+16, -0)
  2. README.md (+9, -9)
  3. runtime/python/Dockerfile (+1, -1)
  4. runtime/python/server.py (+1, -1)
  5. webui.py (+6, -6)

+ 16 - 0
FAQ.md

@@ -0,0 +1,16 @@
+## ModuleNotFoundError: No module named 'matcha'
+
+Matcha-TTS is a third-party submodule that lives under `third_party`. If `third_party/Matcha-TTS` is missing, execute `git submodule update --init --recursive`.
+
+Run `export PYTHONPATH=third_party/Matcha-TTS` if you want to use `from cosyvoice.cli.cosyvoice import CosyVoice` in a Python script.
+
+## Cannot find resource.zip or cannot unzip resource.zip
+
+Please make sure you have git-lfs installed, then execute:
+
+```sh
+git clone https://www.modelscope.cn/iic/CosyVoice-ttsfrd.git pretrained_models/CosyVoice-ttsfrd
+cd pretrained_models/CosyVoice-ttsfrd/
+unzip resource.zip -d .
+pip install ttsfrd-0.3.6-cp38-cp38-linux_x86_64.whl
+```
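
The FAQ entry above amounts to making `third_party/Matcha-TTS` importable. As a minimal sketch of the same fix applied inside a script instead of the shell (assuming it runs from the CosyVoice repository root with the submodule checked out), `sys.path` can be extended directly:

```python
# Sketch: extend sys.path at runtime instead of exporting PYTHONPATH.
# Assumes this file sits in the CosyVoice repository root and that
# `git submodule update --init --recursive` has already been run.
import sys
from pathlib import Path

repo_root = Path(__file__).resolve().parent
sys.path.append(str(repo_root / 'third_party' / 'Matcha-TTS'))

# With the path in place, the import from the FAQ should now resolve.
from cosyvoice.cli.cosyvoice import CosyVoice
```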

+ 9 - 9
README.md

@@ -67,10 +67,10 @@ pip install ttsfrd-0.3.6-cp38-cp38-linux_x86_64.whl
 For zero_shot/cross_lingual inference, please use `CosyVoice-300M` model.
 For sft inference, please use `CosyVoice-300M-SFT` model.
 For instruct inference, please use `CosyVoice-300M-Instruct` model.
-First, add `third_party/AcademiCodec` and `third_party/Matcha-TTS` to your `PYTHONPATH`.
+First, add `third_party/Matcha-TTS` to your `PYTHONPATH`.
 
 ``` sh
-export PYTHONPATH=third_party/AcademiCodec:third_party/Matcha-TTS
+export PYTHONPATH=third_party/Matcha-TTS
 ```
 
 ``` python
@@ -78,13 +78,13 @@ from cosyvoice.cli.cosyvoice import CosyVoice
 from cosyvoice.utils.file_utils import load_wav
 import torchaudio
 
-cosyvoice = CosyVoice('speech_tts/CosyVoice-300M-SFT')
+cosyvoice = CosyVoice('iic/CosyVoice-300M-SFT')
 # sft usage
 print(cosyvoice.list_avaliable_spks())
 output = cosyvoice.inference_sft('你好,我是通义生成式语音大模型,请问有什么可以帮您的吗?', '中文女')
 torchaudio.save('sft.wav', output['tts_speech'], 22050)
 
-cosyvoice = CosyVoice('speech_tts/CosyVoice-300M')
+cosyvoice = CosyVoice('iic/CosyVoice-300M')
 # zero_shot usage
 prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)
 output = cosyvoice.inference_zero_shot('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', '希望你以后能够做的比我还好呦。', prompt_speech_16k)
@@ -94,7 +94,7 @@ prompt_speech_16k = load_wav('cross_lingual_prompt.wav', 16000)
 output = cosyvoice.inference_cross_lingual('<|en|>And then later on, fully acquiring that company. So keeping management in line, interest in line with the asset that\'s coming into the family is a reason why sometimes we don\'t buy the whole thing.', prompt_speech_16k)
 torchaudio.save('cross_lingual.wav', output['tts_speech'], 22050)
 
-cosyvoice = CosyVoice('speech_tts/CosyVoice-300M-Instruct')
+cosyvoice = CosyVoice('iic/CosyVoice-300M-Instruct')
 # instruct usage
 output = cosyvoice.inference_instruct('在面对挑战时,他展现了非凡的<strong>勇气</strong>与<strong>智慧</strong>。', '中文男', 'Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.')
 torchaudio.save('instruct.wav', output['tts_speech'], 22050)
@@ -108,8 +108,8 @@ We support sft/zero_shot/cross_lingual/instruct inference in web demo.
 Please see the demo website for details.
 
 ``` python
-# change speech_tts/CosyVoice-300M-SFT for sft inference, or speech_tts/CosyVoice-300M-Instruct for instruct inference
-python3 webui.py --port 50000 --model_dir speech_tts/CosyVoice-300M
+# change iic/CosyVoice-300M-SFT for sft inference, or iic/CosyVoice-300M-Instruct for instruct inference
+python3 webui.py --port 50000 --model_dir iic/CosyVoice-300M
 ```
 
 **Advanced Usage**
@@ -125,8 +125,8 @@ you can run following steps. Otherwise, you can just ignore this step.
 ``` sh
 cd runtime/python
 docker build -t cosyvoice:v1.0 .
-# change speech_tts/CosyVoice-300M to speech_tts/CosyVoice-300M-Instruct if you want to use instruct inference
-docker run -d --runtime=nvidia -p 50000:50000 cosyvoice:v1.0 /bin/bash -c "cd /opt/CosyVoice/CosyVoice/runtime/python && python3 server.py --port 50000 --max_conc 4 --model_dir speech_tts/CosyVoice-300M && sleep infinity"
+# change iic/CosyVoice-300M to iic/CosyVoice-300M-Instruct if you want to use instruct inference
+docker run -d --runtime=nvidia -p 50000:50000 cosyvoice:v1.0 /bin/bash -c "cd /opt/CosyVoice/CosyVoice/runtime/python && python3 server.py --port 50000 --max_conc 4 --model_dir iic/CosyVoice-300M && sleep infinity"
 python3 client.py --port 50000 --mode <sft|zero_shot|cross_lingual|instruct>
 ```
 

+ 1 - 1
runtime/python/Dockerfile

@@ -12,4 +12,4 @@ RUN apt install git-lfs && git lfs install
 RUN cd CosyVoice && git clone https://www.modelscope.cn/iic/CosyVoice-ttsfrd.git pretrained_models/CosyVoice-ttsfrd
 RUN cd CosyVoice/pretrained_models/CosyVoice-ttsfrd && unzip resource.zip -d . && pip3 install ttsfrd-0.3.6-cp38-cp38-linux_x86_64.whl
 RUN cd CosyVoice/runtime/python && python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. cosyvoice.proto
-CMD ["/bin/bash", "-c", "cd /opt/CosyVoice/CosyVoice/runtime/python && . ./path/sh && python3 server.py --port 50000 --max_conc 4 --model_dir speech_tts/CosyVoice-300M && sleep infinity"]
+CMD ["/bin/bash", "-c", "cd /opt/CosyVoice/CosyVoice/runtime/python && . ./path/sh && python3 server.py --port 50000 --max_conc 4 --model_dir iic/CosyVoice-300M && sleep infinity"]

+ 1 - 1
runtime/python/server.py

@@ -79,7 +79,7 @@ if __name__ == '__main__':
     parser.add_argument('--model_dir',
                         type=str,
                         required=True,
-                        default='speech_tts/CosyVoice-300M',
+                        default='iic/CosyVoice-300M',
                         help='local path or modelscope repo id')
     args = parser.parse_args()
     main()
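
One side note on this hunk: argparse never applies a `default` to an argument declared with `required=True`, so the updated `default='iic/CosyVoice-300M'` is effectively documentation and callers must still pass `--model_dir`. A standalone sketch (hypothetical script, not part of the repo) illustrating that behavior:

```python
# Sketch: with required=True, argparse ignores the default value.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model_dir',
                    type=str,
                    required=True,                  # omitting the flag is an error...
                    default='iic/CosyVoice-300M',   # ...so this default is never used
                    help='local path or modelscope repo id')

args = parser.parse_args(['--model_dir', 'iic/CosyVoice-300M-Instruct'])
print(args.model_dir)  # -> iic/CosyVoice-300M-Instruct

# parser.parse_args([])  # exits: "the following arguments are required: --model_dir"
```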

+ 6 - 6
webui.py

@@ -74,20 +74,20 @@ def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, pro
         prompt_wav = prompt_wav_record
     else:
         prompt_wav = None
-    # if instruct mode, please make sure that model is speech_tts/CosyVoice-300M-Instruct and not cross_lingual mode
+    # if instruct mode, please make sure that model is iic/CosyVoice-300M-Instruct and not cross_lingual mode
     if mode_checkbox_group in ['自然语言控制']:
         if cosyvoice.frontend.instruct is False:
-            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用speech_tts/CosyVoice-300M-Instruct模型'.format(args.model_dir))
+            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M-Instruct模型'.format(args.model_dir))
             return (target_sr, default_data)
         if instruct_text == '':
             gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')
             return (target_sr, default_data)
         if prompt_wav is not None or prompt_text != '':
             gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')
-    # if cross_lingual mode, please make sure that model is speech_tts/CosyVoice-300M and tts_text prompt_text are different language
+    # if cross_lingual mode, please make sure that model is iic/CosyVoice-300M and tts_text prompt_text are different language
     if mode_checkbox_group in ['跨语种复刻']:
         if cosyvoice.frontend.instruct is True:
-            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用speech_tts/CosyVoice-300M模型'.format(args.model_dir))
+            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M模型'.format(args.model_dir))
             return (target_sr, default_data)
         if instruct_text != '':
             gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')
@@ -138,7 +138,7 @@ def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, pro
 
 def main():
     with gr.Blocks() as demo:
-        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) 预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M) [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M-Instruct) [CosyVoice-300M-SFT](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M-SFT)")
+        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) 预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)")
         gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作")
 
         tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。")
@@ -176,7 +176,7 @@ if __name__ == '__main__':
                         default=8000)
     parser.add_argument('--model_dir',
                         type=str,
-                        default='speech_tts/CosyVoice-300M',
+                        default='iic/CosyVoice-300M',
                         help='local path or modelscope repo id')
     args = parser.parse_args()
     cosyvoice = CosyVoice(args.model_dir)
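
The two webui warnings updated above encode a simple pairing: 自然语言控制 (natural-language control) requires the Instruct model, while 跨语种复刻 (cross-lingual cloning) requires the base model. A hypothetical helper, not part of the repo, that picks a matching `--model_dir` from those two mode labels:

```python
# Sketch: map the two mode checks visible in this diff to compatible repos.
MODE_TO_MODEL = {
    '自然语言控制': 'iic/CosyVoice-300M-Instruct',  # natural-language control -> instruct model
    '跨语种复刻': 'iic/CosyVoice-300M',              # cross-lingual cloning -> base model
}

def model_dir_for(mode: str) -> str:
    """Return a ModelScope repo id compatible with the given webui mode."""
    return MODE_TO_MODEL.get(mode, 'iic/CosyVoice-300M')

print(model_dir_for('自然语言控制'))  # -> iic/CosyVoice-300M-Instruct
```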