
update fastapi

lyuxiang.lx, 1 year ago
commit 7555afb90a

+ 5 - 4
cosyvoice/cli/cosyvoice.py

@@ -13,6 +13,7 @@
 # limitations under the License.
 import os
 import time
+from tqdm import tqdm
 from hyperpyyaml import load_hyperpyyaml
 from modelscope import snapshot_download
 from cosyvoice.cli.frontend import CosyVoiceFrontEnd
@@ -52,7 +53,7 @@ class CosyVoice:
         return spks
 
     def inference_sft(self, tts_text, spk_id, stream=False):
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_sft(i, spk_id)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -64,7 +65,7 @@ class CosyVoice:
 
     def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False):
         prompt_text = self.frontend.text_normalize(prompt_text, split=False)
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -77,7 +78,7 @@ class CosyVoice:
     def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False):
         if self.frontend.instruct is True:
             raise ValueError('{} do not support cross_lingual inference'.format(self.model_dir))
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
@@ -91,7 +92,7 @@ class CosyVoice:
         if self.frontend.instruct is False:
             raise ValueError('{} do not support instruct inference'.format(self.model_dir))
         instruct_text = self.frontend.text_normalize(instruct_text, split=False)
-        for i in self.frontend.text_normalize(tts_text, split=True):
+        for i in tqdm(self.frontend.text_normalize(tts_text, split=True)):
             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
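
A minimal sketch of the pattern introduced in the four loops above: wrapping any iterable in tqdm() draws a progress bar over the normalized text segments without changing the loop body (the segment list and description below are illustrative).

from tqdm import tqdm

segments = ['first sentence.', 'second sentence.', 'third sentence.']  # stand-in for text_normalize(tts_text, split=True)
for seg in tqdm(segments, desc='synthesis'):
    pass  # per-segment frontend processing and model inference would run here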

+ 1 - 1
cosyvoice/hifigan/generator.py

@@ -340,7 +340,7 @@ class HiFTGenerator(nn.Module):
         s = self._f02source(f0)
 
         # use cache_source to avoid glitch
-        if cache_source.shape[2] == 0:
+        if cache_source.shape[2] != 0:
             s[:, :, :cache_source.shape[2]] = cache_source
 
         s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
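
The one-character change above fixes an inverted guard that made the source cache dead code: with '== 0' the slice assignment only ran when the cache was empty (assigning an empty slice, a no-op), so the source samples carried over from the previous streamed chunk were never re-applied and chunk boundaries could glitch. A hedged sketch of the corrected logic with dummy tensors:

import torch

s = torch.randn(1, 1, 480)            # source excitation for the current chunk (illustrative shape)
cache_source = torch.randn(1, 1, 64)  # samples carried over from the previous chunk; empty on the first chunk

if cache_source.shape[2] != 0:        # only splice the cache in when it actually holds samples
    s[:, :, :cache_source.shape[2]] = cache_source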

+ 6 - 0
examples/libritts/cosyvoice/run.sh

@@ -102,4 +102,10 @@ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
       --deepspeed_config ./conf/ds_stage2.json \
       --deepspeed.save_states model+optimizer
   done
+fi
+
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+  echo "Export your model for inference speedup. Remember copy your llm or flow model to model_dir"
+  python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
+  python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
 fi

+ 6 - 0
examples/magicdata-read/cosyvoice/run.sh

@@ -102,4 +102,10 @@ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
       --deepspeed_config ./conf/ds_stage2.json \
       --deepspeed.save_states model+optimizer
   done
+fi
+
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+  echo "Export your model for inference speedup. Remember copy your llm or flow model to model_dir"
+  python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
+  python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
 fi
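
The new stage 6 (added identically to both recipes above) exports the model after training: copy your trained llm/flow checkpoint into $pretrained_model_dir, then export_jit.py and export_onnx.py presumably produce TorchScript and ONNX graphs for faster inference loading. As a rough, hedged illustration of what a TorchScript export step amounts to (the module and filename below are placeholders, not the actual export_jit.py code):

import torch
import torch.nn as nn

module = nn.Sequential(nn.Linear(8, 8), nn.ReLU()).eval()  # stand-in for an llm/flow sub-module
scripted = torch.jit.script(module)                        # compile the module to a TorchScript graph
scripted.save('model.jit.zip')                             # illustrative output path loaded at inference time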

+ 46 - 34
runtime/python/fastapi/client.py

@@ -1,56 +1,68 @@
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import argparse
 import logging
 import requests
+import torch
+import torchaudio
+import numpy as np
 
-def saveResponse(path, response):
-    # open the file in binary write mode
-    with open(path, 'wb') as file:
-        # write the response's binary content to the file
-        file.write(response.content)
 
 def main():
-    api = args.api_base
+    url = "http://{}:{}/inference_{}".format(args.host, args.port, args.mode)
     if args.mode == 'sft':
-        url = api + "/api/inference/sft"
-        payload={
-            'tts': args.tts_text,
-            'role': args.spk_id
+        payload = {
+            'tts_text': args.tts_text,
+            'spk_id': args.spk_id
         }
-        response = requests.request("POST", url, data=payload)
-        saveResponse(args.tts_wav, response)
+        response = requests.request("GET", url, data=payload, stream=True)
     elif args.mode == 'zero_shot':
-        url = api + "/api/inference/zero-shot"
-        payload={
-            'tts': args.tts_text,
-            'prompt': args.prompt_text
+        payload = {
+            'tts_text': args.tts_text,
+            'prompt_text': args.prompt_text
         }
-        files=[('audio', ('prompt_audio.wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
-        response = requests.request("POST", url, data=payload, files=files)
-        saveResponse(args.tts_wav, response)
+        files = [('prompt_wav', ('prompt_wav', open(args.prompt_wav, 'rb'), 'application/octet-stream'))]
+        response = requests.request("GET", url, data=payload, files=files, stream=True)
     elif args.mode == 'cross_lingual':
-        url = api + "/api/inference/cross-lingual"
-        payload={
-            'tts': args.tts_text,
+        payload = {
+            'tts_text': args.tts_text,
         }
-        files=[('audio', ('prompt_audio.wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
-        response = requests.request("POST", url, data=payload, files=files)
-        saveResponse(args.tts_wav, response)
+        files = [('prompt_wav', ('prompt_wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
+        response = requests.request("GET", url, data=payload, files=files, stream=True)
     else:
-        url = api + "/api/inference/instruct"
         payload = {
-            'tts': args.tts_text,
-            'role': args.spk_id,
-            'instruct': args.instruct_text
+            'tts_text': args.tts_text,
+            'spk_id': args.spk_id,
+            'instruct_text': args.instruct_text
         }
-        response = requests.request("POST", url, data=payload)
-        saveResponse(args.tts_wav, response)
-    logging.info("Response save to {}", args.tts_wav)
+        response = requests.request("GET", url, data=payload, stream=True)
+    tts_audio = b''
+    for r in response.iter_content(chunk_size=16000):
+        tts_audio += r
+    tts_speech = torch.from_numpy(np.array(np.frombuffer(tts_audio, dtype=np.int16))).unsqueeze(dim=0)
+    logging.info('save response to {}'.format(args.tts_wav))
+    torchaudio.save(args.tts_wav, tts_speech, target_sr)
+    logging.info('get response')
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('--api_base',
+    parser.add_argument('--host',
                         type=str,
-                        default='http://127.0.0.1:6006')
+                        default='0.0.0.0')
+    parser.add_argument('--port',
+                        type=int,
+                        default='50000')
     parser.add_argument('--mode',
                         default='sft',
                         choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
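
The rewritten client (shown in part above) targets the new /inference_{mode} routes, sends form fields whose names match the server's parameters, and reassembles the streamed body of headerless 16-bit PCM before saving a WAV. A hedged end-to-end sketch for the sft mode, assuming a server is running locally on port 50000 and produces 22050 Hz audio (both assumptions, as is the speaker id):

import numpy as np
import requests
import torch
import torchaudio

url = 'http://127.0.0.1:50000/inference_sft'
payload = {'tts_text': 'Hello from CosyVoice.', 'spk_id': '中文女'}  # illustrative speaker id
response = requests.request('GET', url, data=payload, stream=True)

tts_audio = b''
for chunk in response.iter_content(chunk_size=16000):
    tts_audio += chunk                                              # raw int16 PCM, no WAV header
tts_speech = torch.from_numpy(np.frombuffer(tts_audio, dtype=np.int16).copy()).unsqueeze(dim=0)
torchaudio.save('demo.wav', tts_speech, 22050)                      # assumed output sample rate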

+ 61 - 103
runtime/python/fastapi/server.py

@@ -1,119 +1,77 @@
-# Set inference model
-# export MODEL_DIR=pretrained_models/CosyVoice-300M-Instruct
-# For development
-# fastapi dev --port 6006 fastapi_server.py
-# For production deployment
-# fastapi run --port 6006 fastapi_server.py
-
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import os
 import sys
-import io,time
-from fastapi import FastAPI, Response, File, UploadFile, Form
-from fastapi.responses import HTMLResponse
-from fastapi.middleware.cors import CORSMiddleware  # import the CORS middleware module
-from contextlib import asynccontextmanager
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 sys.path.append('{}/../../..'.format(ROOT_DIR))
 sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
-from cosyvoice.cli.cosyvoice import CosyVoice
-from cosyvoice.utils.file_utils import load_wav
-import numpy as np
-import torch
-import torchaudio
+import argparse
 import logging
 logging.getLogger('matplotlib').setLevel(logging.WARNING)
+from fastapi import FastAPI, UploadFile, Form, File
+from fastapi.responses import StreamingResponse
+from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import numpy as np
+from cosyvoice.cli.cosyvoice import CosyVoice
+from cosyvoice.utils.file_utils import load_wav
 
-class LaunchFailed(Exception):
-    pass
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    model_dir = os.getenv("MODEL_DIR", "pretrained_models/CosyVoice-300M-SFT")
-    if model_dir:
-        logging.info("MODEL_DIR is {}", model_dir)
-        app.cosyvoice = CosyVoice(model_dir)
-        # sft usage
-        logging.info("Avaliable speakers {}", app.cosyvoice.list_avaliable_spks())
-    else:
-        raise LaunchFailed("MODEL_DIR environment must set")
-    yield
-
-app = FastAPI(lifespan=lifespan)
-
-# domains allowed to access the API
-origins = ["*"]  # "*" means any origin; this can be narrowed to specific allowed IPs.
+app = FastAPI()
+# set cross region allowance
 app.add_middleware(
-    CORSMiddleware, 
-    allow_origins=origins,  # allowed request origins
+    CORSMiddleware,
+    allow_origins=["*"],
     allow_credentials=True,
-    allow_methods=["*"],  # HTTP methods allowed for cross-origin requests, e.g. GET, POST, PUT
-    allow_headers=["*"])  # headers allowed for cross-origin requests, e.g. for identifying the caller
-
-def buildResponse(output):
-    buffer = io.BytesIO()
-    torchaudio.save(buffer, output, 22050, format="wav")
-    buffer.seek(0)
-    return Response(content=buffer.read(-1), media_type="audio/wav")
-
-@app.post("/api/inference/sft")
-@app.get("/api/inference/sft")
-async def sft(tts: str = Form(), role: str = Form()):
-    start = time.process_time()
-    output = app.cosyvoice.inference_sft(tts, role)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
-
-@app.post("/api/inference/zero-shot")
-async def zeroShot(tts: str = Form(), prompt: str = Form(), audio: UploadFile = File()):
-    start = time.process_time()
-    prompt_speech = load_wav(audio.file, 16000)
-    prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
-    prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
-    prompt_speech_16k = prompt_speech_16k.float() / (2**15)
+    allow_methods=["*"],
+    allow_headers=["*"])
 
-    output = app.cosyvoice.inference_zero_shot(tts, prompt, prompt_speech_16k)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
+def generate_data(model_output):
+    for i in model_output:
+        tts_audio = (i['tts_speech'].numpy() * (2 ** 15)).astype(np.int16).tobytes()
+        yield tts_audio
 
-@app.post("/api/inference/cross-lingual")
-async def crossLingual(tts: str = Form(), audio: UploadFile = File()):
-    start = time.process_time()
-    prompt_speech = load_wav(audio.file, 16000)
-    prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
-    prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
-    prompt_speech_16k = prompt_speech_16k.float() / (2**15)
+@app.get("/inference_sft")
+async def inference_sft(tts_text: str = Form(), spk_id: str = Form()):
+    model_output = cosyvoice.inference_sft(tts_text, spk_id)
+    return StreamingResponse(generate_data(model_output))
 
-    output = app.cosyvoice.inference_cross_lingual(tts, prompt_speech_16k)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
+@app.get("/inference_zero_shot")
+async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(), prompt_wav: UploadFile = File()):
+    prompt_speech_16k = load_wav(prompt_wav.file, 16000)
+    model_output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
+    return StreamingResponse(generate_data(model_output))
 
-@app.post("/api/inference/instruct")
-@app.get("/api/inference/instruct")
-async def instruct(tts: str = Form(), role: str = Form(), instruct: str = Form()):
-    start = time.process_time()
-    output = app.cosyvoice.inference_instruct(tts, role, instruct)
-    end = time.process_time()
-    logging.info("infer time is {} seconds", end-start)
-    return buildResponse(output['tts_speech'])
+@app.get("/inference_cross_lingual")
+async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile = File()):
+    prompt_speech_16k = load_wav(prompt_wav.file, 16000)
+    model_output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
+    return StreamingResponse(generate_data(model_output))
 
-@app.get("/api/roles")
-async def roles():
-    return {"roles": app.cosyvoice.list_avaliable_spks()}
+@app.get("/inference_instruct")
+async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instruct_text: str = Form()):
+    model_output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
+    return StreamingResponse(generate_data(model_output))
 
-@app.get("/", response_class=HTMLResponse)
-async def root():
-    return """
-    <!DOCTYPE html>
-    <html lang=zh-cn>
-        <head>
-            <meta charset=utf-8>
-            <title>Api information</title>
-        </head>
-        <body>
-            Get the supported tones from the Roles API first, then enter the tones and textual content in the TTS API for synthesis. <a href='./docs'>Documents of API</a>
-        </body>
-    </html>
-    """
+if __name__=='__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--port',
+                        type=int,
+                        default=50000)
+    parser.add_argument('--model_dir',
+                        type=str,
+                        default='iic/CosyVoice-300M',
+                        help='local path or modelscope repo id')
+    args = parser.parse_args()
+    cosyvoice = CosyVoice(args.model_dir)
+    uvicorn.run(app, host="127.0.0.1", port=args.port)
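
In the rewritten server the CosyVoice model is built once from --model_dir before uvicorn starts, and every route returns a StreamingResponse over generate_data(), i.e. headerless 16-bit PCM obtained by scaling the float waveform by 2 ** 15. A consumer that wants float audio in [-1, 1] back has to undo that scaling; a minimal hedged sketch (the byte string is illustrative):

import numpy as np
import torch

pcm_bytes = b'\x00\x00\xff\x7f\x00\x80'                              # three illustrative int16 samples
pcm = np.frombuffer(pcm_bytes, dtype=np.int16).copy()
speech = torch.from_numpy(pcm).unsqueeze(dim=0).float() / (2 ** 15)  # back to float in roughly [-1, 1]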