diff --git a/cosyvoice/bin/inference.py b/cosyvoice/bin/inference.py
index f6ec39f..00b3372 100644
--- a/cosyvoice/bin/inference.py
+++ b/cosyvoice/bin/inference.py
@@ -63,12 +63,12 @@ def main():
     try:
         with open(args.config, 'r') as f:
             configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': args.qwen_pretrain_path})
-        model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'], fp16=False)
+        model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'])
     except Exception:
         try:
             with open(args.config, 'r') as f:
                 configs = load_hyperpyyaml(f)
-            model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16=False)
+            model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
         except Exception:
             raise TypeError('no valid model_type!')
diff --git a/cosyvoice/cli/model.py b/cosyvoice/cli/model.py
index a14cfbe..20ddad0 100644
--- a/cosyvoice/cli/model.py
+++ b/cosyvoice/cli/model.py
@@ -30,7 +30,7 @@ class CosyVoiceModel:
                  llm: torch.nn.Module,
                  flow: torch.nn.Module,
                  hift: torch.nn.Module,
-                 fp16: bool):
+                 fp16: bool = False):
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         self.llm = llm
         self.flow = flow
@@ -240,8 +240,8 @@ class CosyVoice2Model(CosyVoiceModel):
                  llm: torch.nn.Module,
                  flow: torch.nn.Module,
                  hift: torch.nn.Module,
-                 fp16: bool,
-                 use_flow_cache: bool):
+                 fp16: bool = False,
+                 use_flow_cache: bool = False):
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         self.llm = llm
         self.flow = flow