lyuxiang.lx 7 months ago
parent
commit
9ebcf7b1ad

+ 2 - 2
cosyvoice/bin/inference.py

@@ -63,12 +63,12 @@ def main():
     try:
         with open(args.config, 'r') as f:
             configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': args.qwen_pretrain_path})
-        model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'], fp16=False)
+        model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'])
     except Exception:
         try:
             with open(args.config, 'r') as f:
                 configs = load_hyperpyyaml(f)
-            model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16=False)
+            model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
         except Exception:
             raise TypeError('no valid model_type!')
 

+ 3 - 3
cosyvoice/cli/model.py

@@ -30,7 +30,7 @@ class CosyVoiceModel:
                  llm: torch.nn.Module,
                  flow: torch.nn.Module,
                  hift: torch.nn.Module,
-                 fp16: bool):
+                 fp16: bool = False):
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         self.llm = llm
         self.flow = flow
@@ -240,8 +240,8 @@ class CosyVoice2Model(CosyVoiceModel):
                  llm: torch.nn.Module,
                  flow: torch.nn.Module,
                  hift: torch.nn.Module,
-                 fp16: bool,
-                 use_flow_cache: bool):
+                 fp16: bool = False,
+                 use_flow_cache: bool = False):
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         self.llm = llm
         self.flow = flow