Explorar o código

Merge branch 'main' into dev/lyuxiang.lx

lyuxiang.lx hai 10 meses
pai
achega
24f796a2b1
Modificáronse 3 ficheiros con 9 adicións e 2 borrados:
  1. README.md (+1, −1)
  2. cosyvoice/llm/llm.py (+4, −1)
  3. runtime/python/fastapi/server.py (+4, −0)

README.md (+1, −1)

@@ -151,7 +151,7 @@ def text_generator():
     yield '那份意外的惊喜与深深的祝福'
     yield '让我心中充满了甜蜜的快乐,'
     yield '笑容如花儿般绽放。'
-for i, j in enumerate(cosyvoice.inference_zero_shot(text_generator, '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)):
+for i, j in enumerate(cosyvoice.inference_zero_shot(text_generator(), '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)):
     torchaudio.save('zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
 ```
 

cosyvoice/llm/llm.py (+4, −1)

@@ -382,7 +382,10 @@ class Qwen2LM(TransformerLM):
                     if text_cache.size(1) >= self.mix_ratio[0]:
                         lm_input_text = text_cache[:, :self.mix_ratio[0]]
                         logging.info('append {} text token'.format(lm_input_text.size(1)))
-                        lm_input = torch.concat([lm_input, lm_input_text], dim=1)
+                        if len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2:
+                            lm_input = lm_input_text
+                        else:
+                            lm_input = torch.concat([lm_input, lm_input_text], dim=1)
                         text_cache = text_cache[:, self.mix_ratio[0]:]
                     else:
                         logging.info('not enough text token to decode, wait for more')

runtime/python/fastapi/server.py (+4, −0)

@@ -44,12 +44,14 @@ def generate_data(model_output):
 
 
 @app.get("/inference_sft")
+@app.post("/inference_sft")
 async def inference_sft(tts_text: str = Form(), spk_id: str = Form()):
     model_output = cosyvoice.inference_sft(tts_text, spk_id)
     return StreamingResponse(generate_data(model_output))
 
 
 @app.get("/inference_zero_shot")
+@app.post("/inference_zero_shot")
 async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(), prompt_wav: UploadFile = File()):
     prompt_speech_16k = load_wav(prompt_wav.file, 16000)
     model_output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
@@ -57,6 +59,7 @@ async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(),
 
 
 @app.get("/inference_cross_lingual")
+@app.post("/inference_cross_lingual")
 async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile = File()):
     prompt_speech_16k = load_wav(prompt_wav.file, 16000)
     model_output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
@@ -64,6 +67,7 @@ async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile
 
 
 @app.get("/inference_instruct")
+@app.post("/inference_instruct")
 async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instruct_text: str = Form()):
     model_output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
     return StreamingResponse(generate_data(model_output))