Mirror of https://github.com/FunAudioLLM/CosyVoice.git, synced 2026-02-05 01:49:25 +08:00
update model inference
@@ -46,9 +46,9 @@ class CosyVoice:
         return spks

     def inference_sft(self, tts_text, spk_id, stream=False):
-        start_time = time.time()
         for i in self.frontend.text_normalize(tts_text, split=True):
             model_input = self.frontend.frontend_sft(i, spk_id)
+            start_time = time.time()
             for model_output in self.model.inference(**model_input, stream=stream):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
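A minimal sketch of the real-time-factor (RTF) bookkeeping this hunk moves around: the timer is now reset once per normalized text segment, and every yielded chunk is scored against the time elapsed since that reset; values below 1.0 mean synthesis runs faster than playback. The helper below is illustrative only (not repository code); the 22050 sample rate is taken from the diff.

import time

SAMPLE_RATE = 22050  # output sample rate used in the logging lines above

def rtf(num_samples: int, start_time: float) -> float:
    # Duration of the chunk just produced, in seconds of audio.
    speech_len = num_samples / SAMPLE_RATE
    # Wall-clock seconds spent per second of generated audio.
    return (time.time() - start_time) / speech_len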
@@ -56,10 +56,10 @@ class CosyVoice:
                 start_time = time.time()

     def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False):
-        start_time = time.time()
         prompt_text = self.frontend.text_normalize(prompt_text, split=False)
         for i in self.frontend.text_normalize(tts_text, split=True):
             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
+            start_time = time.time()
             for model_output in self.model.inference(**model_input, stream=stream):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
@@ -69,9 +69,9 @@ class CosyVoice:
     def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False):
         if self.frontend.instruct is True:
             raise ValueError('{} do not support cross_lingual inference'.format(self.model_dir))
-        start_time = time.time()
         for i in self.frontend.text_normalize(tts_text, split=True):
             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
+            start_time = time.time()
             for model_output in self.model.inference(**model_input, stream=stream):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
@@ -81,10 +81,10 @@ class CosyVoice:
     def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False):
         if self.frontend.instruct is False:
             raise ValueError('{} do not support instruct inference'.format(self.model_dir))
-        start_time = time.time()
         instruct_text = self.frontend.text_normalize(instruct_text, split=False)
         for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
+            start_time = time.time()
             for model_output in self.model.inference(**model_input, stream=stream):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
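For orientation, a hedged usage sketch of the call path the four wrappers above (sft, zero_shot, cross_lingual, instruct) now share: each normalizes the text, builds model_input, and yields dicts whose 'tts_speech' tensor is one audio chunk. The helper name and the assumption that a CosyVoice instance is already constructed are mine, not part of the commit.

import torch

def collect_stream(cosyvoice, text, spk_id):
    # Gather streamed chunks into one waveform; each chunk has shape (1, num_samples).
    chunks = [out['tts_speech'] for out in cosyvoice.inference_sft(text, spk_id, stream=True)]
    return torch.concat(chunks, dim=1)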
@@ -13,6 +13,9 @@
 # limitations under the License.
 import torch
 import numpy as np
+import threading
+import time
+from contextlib import nullcontext


 class CosyVoiceModel:
@@ -25,10 +28,13 @@ class CosyVoiceModel:
         self.llm = llm
         self.flow = flow
         self.hift = hift
-        self.stream_win_len = 60
-        self.stream_hop_len = 50
-        self.overlap = 4395  # 10 token equals 4395 sample point
+        self.stream_win_len = 60 * 4
+        self.stream_hop_len = 50 * 4
+        self.overlap = 4395 * 4  # 10 token equals 4395 sample point
         self.window = np.hamming(2 * self.overlap)
+        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
+        self.flow_hift_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
+        self.lock = threading.Lock()

     def load(self, llm_model, flow_model, hift_model):
         self.llm.load_state_dict(torch.load(llm_model, map_location=self.device))
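A quick sanity check of the streaming geometry after the 4x change, assuming the 22050 Hz output rate used elsewhere in the diff (the derived durations are mine, not quoted from the commit). Note that the 40 tokens retained per hop (win - hop) correspond exactly to the 17580-sample overlap region.

SAMPLE_RATE = 22050
SAMPLES_PER_TOKEN = 4395 / 10          # "10 token equals 4395 sample point" -> ~439.5 samples/token
stream_win_len = 60 * 4                # 240 speech tokens vocoded per synthesis window
stream_hop_len = 50 * 4                # 200 tokens consumed per hop
overlap = 4395 * 4                     # 17580 samples cross-faded between consecutive chunks

window_sec = stream_win_len * SAMPLES_PER_TOKEN / SAMPLE_RATE   # ~4.78 s per window
hop_sec = stream_hop_len * SAMPLES_PER_TOKEN / SAMPLE_RATE      # ~3.99 s of new audio per chunk
overlap_sec = overlap / SAMPLE_RATE                             # ~0.80 s overlap
print(f'window {window_sec:.2f}s, hop {hop_sec:.2f}s, overlap {overlap_sec:.2f}s')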
@@ -38,13 +44,8 @@ class CosyVoiceModel:
         self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
         self.hift.to(self.device).eval()

-    def inference(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192),
-                  prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
-                  llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
-                  flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
-                  prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32), stream=False):
-        if stream is True:
-            tts_speech_token, cache_speech = [], None
+    def llm_job(self, text, text_len, prompt_text, prompt_text_len, llm_prompt_speech_token, llm_prompt_speech_token_len, llm_embedding):
+        with self.llm_context:
             for i in self.llm.inference(text=text.to(self.device),
                                         text_len=text_len.to(self.device),
                                         prompt_text=prompt_text.to(self.device),
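The constructor lines above pick a dedicated CUDA side stream when a GPU is present and fall back to a no-op context manager on CPU, so the same `with` blocks work in both cases and the LLM thread can overlap with flow/hift work. A minimal sketch of that pattern (standalone, not repository code):

import torch
from contextlib import nullcontext

# On GPU: run the enclosed kernels on their own stream; on CPU: nullcontext() makes the block a no-op.
llm_context = torch.cuda.stream(torch.cuda.Stream()) if torch.cuda.is_available() else nullcontext()

with llm_context:
    pass  # LLM token generation would run here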
@@ -56,10 +57,56 @@ class CosyVoiceModel:
                                         sampling=25,
                                         max_token_text_ratio=30,
                                         min_token_text_ratio=3,
-                                        stream=stream):
-                tts_speech_token.append(i)
-                if len(tts_speech_token) == self.stream_win_len:
-                    this_tts_speech_token = torch.concat(tts_speech_token, dim=1)
+                                        stream=True):
+                self.tts_speech_token.append(i)
+        self.llm_end = True
+
+    def token2wav(self, token, prompt_token, prompt_token_len, prompt_feat, prompt_feat_len, embedding):
+        with self.flow_hift_context:
+            tts_mel = self.flow.inference(token=token.to(self.device),
+                                          token_len=torch.tensor([token.size(1)], dtype=torch.int32).to(self.device),
+                                          prompt_token=prompt_token.to(self.device),
+                                          prompt_token_len=prompt_token_len.to(self.device),
+                                          prompt_feat=prompt_feat.to(self.device),
+                                          prompt_feat_len=prompt_feat_len.to(self.device),
+                                          embedding=embedding.to(self.device))
+            tts_speech = self.hift.inference(mel=tts_mel).cpu()
+        return tts_speech
+
+    def inference(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192),
+                  prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
+                  llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
+                  flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
+                  prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32), stream=False):
+        if stream is True:
+            self.tts_speech_token, self.llm_end, cache_speech = [], False, None
+            p = threading.Thread(target=self.llm_job, args=(text.to(self.device), text_len.to(self.device), prompt_text.to(self.device), prompt_text_len.to(self.device),
+                                                            llm_prompt_speech_token.to(self.device), llm_prompt_speech_token_len.to(self.device), llm_embedding.to(self.device)))
+            p.start()
+            while True:
+                time.sleep(0.1)
+                if len(self.tts_speech_token) >= self.stream_win_len:
+                    this_tts_speech_token = torch.concat(self.tts_speech_token[:self.stream_win_len], dim=1)
+                    with self.flow_hift_context:
+                        this_tts_speech = self.token2wav(token=this_tts_speech_token,
+                                                         prompt_token=flow_prompt_speech_token.to(self.device),
+                                                         prompt_token_len=flow_prompt_speech_token_len.to(self.device),
+                                                         prompt_feat=prompt_speech_feat.to(self.device),
+                                                         prompt_feat_len=prompt_speech_feat_len.to(self.device),
+                                                         embedding=flow_embedding.to(self.device))
+                    # fade in/out if necessary
+                    if cache_speech is not None:
+                        this_tts_speech[:, :self.overlap] = this_tts_speech[:, :self.overlap] * self.window[:self.overlap] + cache_speech * self.window[-self.overlap:]
+                    yield {'tts_speech': this_tts_speech[:, :-self.overlap]}
+                    cache_speech = this_tts_speech[:, -self.overlap:]
+                    with self.lock:
+                        self.tts_speech_token = self.tts_speech_token[self.stream_hop_len:]
+                if self.llm_end is True:
+                    break
+            # deal with remain tokens
+            if cache_speech is None or len(self.tts_speech_token) > self.stream_win_len - self.stream_hop_len:
+                this_tts_speech_token = torch.concat(self.tts_speech_token, dim=1)
+                with self.flow_hift_context:
                     this_tts_mel = self.flow.inference(token=this_tts_speech_token,
                                                        token_len=torch.tensor([this_tts_speech_token.size(1)], dtype=torch.int32).to(self.device),
                                                        prompt_token=flow_prompt_speech_token.to(self.device),
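Stripped of the model specifics, the streaming branch above is a producer/consumer loop: a background thread appends LLM speech tokens to a shared list, while the main generator waits until a full window is buffered, vocodes it, cross-fades it against the previous chunk, yields it, and then drops one hop of tokens. A self-contained sketch of that control flow only (the dummy producer and the window/hop values are placeholders, not repository code):

import threading
import time

tokens, done, lock = [], False, threading.Lock()
WIN, HOP = 240, 200

def producer():
    # Stand-in for the LLM thread: emit dummy tokens, then signal completion.
    global done
    for t in range(1000):
        tokens.append(t)
    done = True

threading.Thread(target=producer).start()
while True:
    time.sleep(0.01)
    if len(tokens) >= WIN:
        window = tokens[:WIN]      # these WIN tokens would be vocoded into one audio chunk
        with lock:
            del tokens[:HOP]       # keep WIN - HOP tokens so the next window overlaps this one
    if done:
        break
# any leftover tokens would be vocoded as the final chunk here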
@@ -68,29 +115,14 @@ class CosyVoiceModel:
                                                        prompt_feat_len=prompt_speech_feat_len.to(self.device),
                                                        embedding=flow_embedding.to(self.device))
                     this_tts_speech = self.hift.inference(mel=this_tts_mel).cpu()
                 # fade in/out if necessary
                 if cache_speech is not None:
                     this_tts_speech[:, :self.overlap] = this_tts_speech[:, :self.overlap] * self.window[:self.overlap] + cache_speech * self.window[-self.overlap:]
-                    yield {'tts_speech': this_tts_speech[:, :-self.overlap]}
-                    cache_speech = this_tts_speech[:, -self.overlap:]
-                    tts_speech_token = tts_speech_token[-(self.stream_win_len - self.stream_hop_len):]
-            # deal with remain tokens
-            if cache_speech is None or len(tts_speech_token) > self.stream_win_len - self.stream_hop_len:
-                this_tts_speech_token = torch.concat(tts_speech_token, dim=1)
-                this_tts_mel = self.flow.inference(token=this_tts_speech_token,
-                                                   token_len=torch.tensor([this_tts_speech_token.size(1)], dtype=torch.int32).to(self.device),
-                                                   prompt_token=flow_prompt_speech_token.to(self.device),
-                                                   prompt_token_len=flow_prompt_speech_token_len.to(self.device),
-                                                   prompt_feat=prompt_speech_feat.to(self.device),
-                                                   prompt_feat_len=prompt_speech_feat_len.to(self.device),
-                                                   embedding=flow_embedding.to(self.device))
-                this_tts_speech = self.hift.inference(mel=this_tts_mel).cpu()
-                if cache_speech is not None:
-                    this_tts_speech[:, :self.overlap] = this_tts_speech[:, :self.overlap] * self.window[:self.overlap] + cache_speech * self.window[-self.overlap:]
                 yield {'tts_speech': this_tts_speech}
             else:
-                assert len(tts_speech_token) == self.stream_win_len - self.stream_hop_len, 'tts_speech_token not equal to {}'.format(self.stream_win_len - self.stream_hop_len)
+                assert len(self.tts_speech_token) == self.stream_win_len - self.stream_hop_len, 'tts_speech_token not equal to {}'.format(self.stream_win_len - self.stream_hop_len)
                 yield {'tts_speech': cache_speech}
+            p.join()
+            torch.cuda.synchronize()
         else:
             tts_speech_token = []
             for i in self.llm.inference(text=text.to(self.device),
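The fade-in/fade-out lines that appear both in the loop and in the tail use the two halves of a Hamming window of length 2 * overlap: the head of the new chunk is weighted by the rising half and summed with the cached tail of the previous chunk weighted by the falling half, hiding the seam between consecutive windows. A small shape-only sketch of that blend (tiny sizes stand in for the real 17580-sample overlap; not repository code):

import numpy as np
import torch

overlap = 8
window = np.hamming(2 * overlap)      # rising half: window[:overlap], falling half: window[-overlap:]

prev = torch.ones(1, 32)              # previous chunk, shape (1, samples)
cur = torch.ones(1, 32)               # current chunk

cache = prev[:, -overlap:]            # tail kept from the previous chunk
cur[:, :overlap] = cur[:, :overlap] * window[:overlap] + cache * window[-overlap:]
# yield cur[:, :-overlap] and keep cur[:, -overlap:] as the next cache, as the diff does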