diff --git a/README.md b/README.md
index 7df61d1..65728e2 100644
--- a/README.md
+++ b/README.md
@@ -124,8 +124,8 @@ from cosyvoice.utils.file_utils import load_wav
 import torchaudio
 
 ## cosyvoice2 usage
-cosyvoice2 = CosyVoice('pretrained_models/CosyVoice-300M-SFT', load_jit=False, load_onnx=False, load_trt=False)
-# sft usage
+cosyvoice2 = CosyVoice2('pretrained_models/CosyVoice2-0.5B', load_jit=False, load_onnx=False, load_trt=False)
+# zero_shot usage
 prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)
 for i, j in enumerate(cosyvoice2.inference_zero_shot('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=True)):
     torchaudio.save('zero_shot_{}.wav'.format(i), j['tts_speech'], cosyvoice2.sample_rate)
diff --git a/cosyvoice/cli/cosyvoice.py b/cosyvoice/cli/cosyvoice.py
index c95439a..7c2531d 100644
--- a/cosyvoice/cli/cosyvoice.py
+++ b/cosyvoice/cli/cosyvoice.py
@@ -128,7 +128,7 @@ class CosyVoice2(CosyVoice):
         if not os.path.exists(model_dir):
             model_dir = snapshot_download(model_dir)
         with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
-            configs = load_hyperpyyaml(f)
+            configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'Qwen2-0.5B-CosyVoice-BlankEN')})
         self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                           configs['feat_extractor'],
                                           '{}/campplus.onnx'.format(model_dir),
diff --git a/requirements.txt b/requirements.txt
index 04c600e..7fd5596 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
 --extra-index-url https://download.pytorch.org/whl/cu121
+--extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/ # https://github.com/microsoft/onnxruntime/issues/21684
 conformer==0.3.2
 deepspeed==0.14.2; sys_platform == 'linux'
 diffusers==0.27.2