diff --git a/cosyvoice/cli/frontend.py b/cosyvoice/cli/frontend.py
index 7ad6f7c..6d397cc 100644
--- a/cosyvoice/cli/frontend.py
+++ b/cosyvoice/cli/frontend.py
@@ -47,7 +47,7 @@ class CosyVoiceFrontEnd:
                                                                      providers=["CUDAExecutionProvider" if torch.cuda.is_available() else
                                                                                 "CPUExecutionProvider"])
         if os.path.exists(spk2info):
-            self.spk2info = torch.load(spk2info, map_location=self.device)
+            self.spk2info = torch.load(spk2info, map_location=self.device, weights_only=True)
         else:
             self.spk2info = {}
         self.allowed_special = allowed_special
diff --git a/cosyvoice/cli/model.py b/cosyvoice/cli/model.py
index 0dc2561..4a37ba9 100644
--- a/cosyvoice/cli/model.py
+++ b/cosyvoice/cli/model.py
@@ -63,12 +63,12 @@ class CosyVoiceModel:
         self.silent_tokens = []
 
     def load(self, llm_model, flow_model, hift_model):
-        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=True)
+        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device, weights_only=True), strict=True)
         self.llm.to(self.device).eval()
-        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
+        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device, weights_only=True), strict=True)
         self.flow.to(self.device).eval()
         # in case hift_model is a hifigan model
-        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
+        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device, weights_only=True).items()}
         self.hift.load_state_dict(hift_state_dict, strict=True)
         self.hift.to(self.device).eval()
 
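
Why weights_only=True: it restricts torch.load's unpickler to tensors and primitive containers (the argument is available since PyTorch 1.13), so a tampered checkpoint can no longer execute arbitrary code when loaded. A minimal sketch of the enforced behavior; the file names below are illustrative, not from the repo:

    import torch

    # A plain state-dict style payload round-trips fine under weights_only=True.
    torch.save({'weight': torch.zeros(2, 2)}, 'ckpt.pt')
    state = torch.load('ckpt.pt', map_location='cpu', weights_only=True)

    # An arbitrary pickled object is rejected instead of being executed.
    class Foo:
        pass

    torch.save(Foo(), 'obj.pt')
    try:
        torch.load('obj.pt', map_location='cpu', weights_only=True)
    except Exception as e:  # pickle.UnpicklingError on recent PyTorch
        print(type(e).__name__)

The checkpoints touched here (the llm/flow/hift state dicts and the spk2info map) should stay within what the restricted unpickler allows, assuming they contain only tensors and plain containers; if any of them pickles custom objects, loading will now raise instead of silently executing them.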