
fix lint

lyuxiang.lx, 7 months ago
parent
commit
e1ffb1e978
2 changed files with 6 additions and 6 deletions
  1. cosyvoice/cli/frontend.py (+4, -4)
  2. runtime/python/fastapi/server.py (+2, -2)

cosyvoice/cli/frontend.py (+4, -4)

@@ -168,10 +168,10 @@ class CosyVoiceFrontEnd:
                 speech_token, speech_token_len[:] = speech_token[:, :token_len], token_len
             embedding = self._extract_spk_embedding(prompt_speech_16k)
             model_input = {'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
-                        'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
-                        'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
-                        'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
-                        'llm_embedding': embedding, 'flow_embedding': embedding}
+                           'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
+                           'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
+                           'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
+                           'llm_embedding': embedding, 'flow_embedding': embedding}
         else:
             model_input = self.spk2info[zero_shot_spk_id]
         model_input['text'] = tts_text_token
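
The frontend.py change is whitespace only: the continuation lines of the model_input dict are re-aligned to satisfy the linter, with no behavioural change. For context, that dict is the prompt bundle the frontend assembles for zero-shot synthesis. Below is a minimal usage sketch of that path through the repository's high-level CosyVoice class; the model directory, prompt files and output sample rate are placeholders and assumptions, not part of this commit:

    # Sketch only: model directory, prompt files and sample rate are placeholders.
    import torchaudio
    from cosyvoice.cli.cosyvoice import CosyVoice
    from cosyvoice.utils.file_utils import load_wav

    cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M')      # assumed local model dir
    prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)    # 16 kHz reference audio
    for i, out in enumerate(cosyvoice.inference_zero_shot('Hello there.',
                                                          'transcript of the prompt audio',
                                                          prompt_speech_16k)):
        # output rate depends on the model variant; 22050 is an assumption here
        torchaudio.save('zero_shot_{}.wav'.format(i), out['tts_speech'], 22050)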

runtime/python/fastapi/server.py (+2, -2)

@@ -72,6 +72,7 @@ async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instr
     model_output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
     return StreamingResponse(generate_data(model_output))
 
+
 @app.get("/inference_instruct2")
 @app.post("/inference_instruct2")
 async def inference_instruct2(tts_text: str = Form(), instruct_text: str = Form(), prompt_wav: UploadFile = File()):
@@ -80,7 +81,6 @@ async def inference_instruct2(tts_text: str = Form(), instruct_text: str = Form(
     return StreamingResponse(generate_data(model_output))
 
 
-
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--port',
@@ -98,4 +98,4 @@ if __name__ == '__main__':
             cosyvoice = CosyVoice2(args.model_dir)
         except Exception:
             raise TypeError('no valid model_type!')
-    uvicorn.run(app, host="0.0.0.0", port=args.port)
\ No newline at end of file
+    uvicorn.run(app, host="0.0.0.0", port=args.port)
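
The server.py edits are also lint whitespace only: two blank lines between top-level routes, and what appears to be a restored newline at end of file; the /inference_instruct2 route itself is untouched. A minimal client sketch for that route is shown below. The host, port and file names are assumptions (match them to the --port the server was started with), and it assumes this runtime's generate_data helper streams raw 16-bit PCM bytes:

    # Sketch only: host, port and file names are assumptions; adjust to the running server.
    import requests

    url = 'http://127.0.0.1:50000/inference_instruct2'             # match the server's --port value
    data = {'tts_text': 'Hello there.', 'instruct_text': 'Please speak in a cheerful tone.'}
    with open('prompt.wav', 'rb') as wav:
        resp = requests.post(url, data=data, files={'prompt_wav': wav}, stream=True)
    resp.raise_for_status()
    with open('instruct2_output.pcm', 'wb') as out:                # assumed raw 16-bit PCM stream
        for chunk in resp.iter_content(chunk_size=16000):
            out.write(chunk)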