Mirror of https://github.com/FunAudioLLM/CosyVoice.git, synced 2026-02-04 17:39:25 +08:00
add vc code
@@ -25,6 +25,7 @@ class CosyVoice:
 
     def __init__(self, model_dir, load_jit=True, load_onnx=False):
         instruct = True if '-Instruct' in model_dir else False
+        vc = True if '-VC' in model_dir else False
         self.model_dir = model_dir
         if not os.path.exists(model_dir):
             model_dir = snapshot_download(model_dir)
@@ -36,6 +37,7 @@ class CosyVoice:
                                           '{}/speech_tokenizer_v1.onnx'.format(model_dir),
                                           '{}/spk2info.pt'.format(model_dir),
                                           instruct,
+                                          vc,
                                           configs['allowed_special'])
         self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
         self.model.load('{}/llm.pt'.format(model_dir),
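For orientation, a minimal caller-side sketch of what the two __init__ changes do: the new vc flag is derived purely from the model directory name, mirroring the existing '-Instruct' check, and is then passed through to CosyVoiceFrontEnd. The directory names below are placeholders, not confirmed release names.

from cosyvoice.cli.cosyvoice import CosyVoice

# '-VC' anywhere in the directory name flips the new vc flag (placeholder paths)
cosyvoice_vc = CosyVoice('pretrained_models/CosyVoice-300M-VC')   # vc == True
cosyvoice_tts = CosyVoice('pretrained_models/CosyVoice-300M')     # vc == False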
@@ -58,7 +60,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_sft(i, spk_id)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
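As a worked reading of the logging line kept here as context: speech_len is the duration of the yielded chunk in seconds (samples divided by the 22050 Hz output rate), and the reported rtf is wall-clock synthesis time divided by that duration. For example, a chunk of 44100 samples is 2.0 s of audio; if it took 0.5 s to produce, the logged rtf is 0.25, i.e. four times faster than real time.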
@@ -70,7 +72,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
@@ -83,7 +85,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
@@ -97,8 +99,17 @@ class CosyVoice:
             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
                 start_time = time.time()
+
+    def inference_vc(self, source_speech_16k, prompt_speech_16k, stream=False, speed=1.0):
+        model_input = self.frontend.frontend_vc(source_speech_16k, prompt_speech_16k)
+        start_time = time.time()
+        for model_output in self.model.vc(**model_input, stream=stream, speed=speed):
+            speech_len = model_output['tts_speech'].shape[1] / 22050
+            logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+            yield model_output
+            start_time = time.time()
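A hedged usage sketch of the new inference_vc path, modeled on the existing zero-shot examples in this repository: load_wav comes from cosyvoice.utils.file_utils, the audio file names and the model directory are placeholders, and 22050 is the output sample rate implied by the speech_len computation above.

import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav

cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-VC')  # placeholder model dir with the '-VC' suffix
source_speech_16k = load_wav('source.wav', 16000)             # speech whose content is kept
prompt_speech_16k = load_wav('prompt.wav', 16000)             # speech whose timbre is borrowed
for i, out in enumerate(cosyvoice.inference_vc(source_speech_16k, prompt_speech_16k, stream=False)):
    # each yielded chunk is a dict carrying a 'tts_speech' tensor at 22050 Hz
    torchaudio.save('vc_{}.wav'.format(i), out['tts_speech'], 22050)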