# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Generator
import torch
import numpy as np
import threading
import time
from torch.nn import functional as F
from contextlib import nullcontext
import uuid
from cosyvoice.utils.common import fade_in_out
from cosyvoice.utils.file_utils import convert_onnx_to_trt


class CosyVoiceModel:
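    """Three-stage TTS pipeline: an LLM predicts speech tokens, a flow-matching
    model turns tokens into mel spectrograms, and a HiFT vocoder renders waveforms.

    Streaming synthesis runs the LLM in a background thread and converts the
    growing token buffer to audio chunk by chunk, cross-fading chunk boundaries.

    Usage sketch (illustrative only; in this repo the sub-models and the frontend
    that produces text tokens and embeddings are assembled in cosyvoice/cli):

        model = CosyVoiceModel(llm, flow, hift, fp16=False)
        model.load('llm.pt', 'flow.pt', 'hift.pt')
        for chunk in model.tts(text=text_tokens, flow_embedding=spk_embedding):
            wav = chunk['tts_speech']  # (1, T) waveform tensor
    """
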
    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module,
                 fp16: bool):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift
        self.fp16 = fp16
        if self.fp16 is True:
            self.llm.half()
            self.flow.half()
        self.token_min_hop_len = 2 * self.flow.input_frame_rate
        self.token_max_hop_len = 4 * self.flow.input_frame_rate
        self.token_overlap_len = 20
        # mel fade in/out
        self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
        self.mel_window = np.hamming(2 * self.mel_overlap_len)
        # hift cache
        self.mel_cache_len = 20
        self.source_cache_len = int(self.mel_cache_len * 256)
        # speech fade in/out
        self.speech_window = np.hamming(2 * self.source_cache_len)
        # rtf and decoding related
        self.stream_scale_factor = 1
        assert self.stream_scale_factor >= 1, 'stream_scale_factor should be at least 1, change it according to your actual rtf'
        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
        self.lock = threading.Lock()
        # dicts used to store per-session (per-uuid) state
        self.tts_speech_token_dict = {}
        self.llm_end_dict = {}
        self.mel_overlap_dict = {}
        self.flow_cache_dict = {}
        self.hift_cache_dict = {}

    def load(self, llm_model, flow_model, hift_model):
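        """Load state dicts for the three sub-models and move them to the device in eval mode."""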
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=True)
        self.llm.to(self.device).eval()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
        self.flow.to(self.device).eval()
        # in case hift_model is a hifigan checkpoint, strip the 'generator.' prefix from its keys
        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
        self.hift.load_state_dict(hift_state_dict, strict=True)
        self.hift.to(self.device).eval()

    def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
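        """Swap in TorchScript (torch.jit) versions of the text encoder, LLM and flow encoder."""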
        llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
        self.llm.text_encoder = llm_text_encoder
        llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
        self.llm.llm = llm_llm
        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
        self.flow.encoder = flow_encoder

    def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, fp16):
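        """Replace the flow decoder estimator with a TensorRT engine, converting from ONNX if no engine file exists yet."""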
        assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
        if not os.path.exists(flow_decoder_estimator_model):
            convert_onnx_to_trt(flow_decoder_estimator_model, self.get_trt_kwargs(), flow_decoder_onnx_model, fp16)
        if os.path.getsize(flow_decoder_estimator_model) == 0:
            raise ValueError('{} is an empty file, delete it and export again!'.format(flow_decoder_estimator_model))
        del self.flow.decoder.estimator
        import tensorrt as trt
        with open(flow_decoder_estimator_model, 'rb') as f:
            self.flow.decoder.estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
        assert self.flow.decoder.estimator_engine is not None, 'failed to load trt {}'.format(flow_decoder_estimator_model)
        self.flow.decoder.estimator = self.flow.decoder.estimator_engine.create_execution_context()

    def get_trt_kwargs(self):
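        """Shape profiles (min/opt/max) and input names used when building the TensorRT engine."""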
        min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4)]
        opt_shape = [(2, 80, 200), (2, 1, 200), (2, 80, 200), (2, 80, 200)]
        max_shape = [(2, 80, 3000), (2, 1, 3000), (2, 80, 3000), (2, 80, 3000)]
        input_names = ["x", "mask", "mu", "cond"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}

    def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
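        """Producer side of streaming synthesis: run LLM inference and append generated
        speech tokens to this session's buffer, setting llm_end_dict[uuid] when done."""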
        with self.llm_context, torch.cuda.amp.autocast(self.fp16):
            if isinstance(text, Generator):
                assert isinstance(self, CosyVoice2Model), 'streaming input text is only implemented for CosyVoice2!'
                for i in self.llm.inference_bistream(text=text,
                                                     prompt_text=prompt_text.to(self.device),
                                                     prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
                                                     prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                                     prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
                                                     embedding=llm_embedding.to(self.device)):
                    self.tts_speech_token_dict[uuid].append(i)
            else:
                for i in self.llm.inference(text=text.to(self.device),
                                            text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
                                            prompt_text=prompt_text.to(self.device),
                                            prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
                                            prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                            prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
                                            embedding=llm_embedding.to(self.device)):
                    self.tts_speech_token_dict[uuid].append(i)
        self.llm_end_dict[uuid] = True

    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
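        """Convert speech tokens to a waveform chunk: flow matching produces a mel
        spectrogram, HiFT vocodes it. In streaming mode (finalize=False) the tail of
        each chunk is cached and cross-faded into the next one to hide chunk seams."""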
        with torch.cuda.amp.autocast(self.fp16):
            tts_mel, self.flow_cache_dict[uuid] = self.flow.inference(token=token.to(self.device),
                                                                      token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                                                      prompt_token=prompt_token.to(self.device),
                                                                      prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                                                      prompt_feat=prompt_feat.to(self.device),
                                                                      prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                                                      embedding=embedding.to(self.device),
                                                                      flow_cache=self.flow_cache_dict[uuid])

        # mel overlap fade in/out
        if self.mel_overlap_dict[uuid].shape[2] != 0:
            tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
        # append hift cache
        if self.hift_cache_dict[uuid] is not None:
            hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
            tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
        else:
            hift_cache_source = torch.zeros(1, 1, 0)
        # keep overlap mel and hift cache
        if finalize is False:
            self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
            tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
            self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
                                          'source': tts_source[:, :, -self.source_cache_len:],
                                          'speech': tts_speech[:, -self.source_cache_len:]}
            tts_speech = tts_speech[:, :-self.source_cache_len]
        else:
            if speed != 1.0:
                assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
                tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
        return tts_speech

    def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
            prompt_text=torch.zeros(1, 0, dtype=torch.int32),
            llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
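        """Synthesize speech from text tokens; yields dicts with a 'tts_speech' waveform tensor.

        With stream=True, audio is yielded chunk by chunk while the LLM is still
        generating; with stream=False, a single finalized waveform is yielded."""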
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
            self.hift_cache_dict[this_uuid] = None
            self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
            self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
        p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
        p.start()
        if stream is True:
            token_hop_len = self.token_min_hop_len
            while True:
                time.sleep(0.1)
                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
                        .unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
                    # increase token_hop_len for better speech quality
                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
                    break
            p.join()
            # deal with the remaining tokens; ensure the remaining token length equals token_hop_len when cache_speech is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens at once
            p.join()
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.mel_overlap_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
            self.flow_cache_dict.pop(this_uuid)
        torch.cuda.empty_cache()

    def vc(self, source_speech_token, flow_prompt_speech_token, prompt_speech_feat, flow_embedding, stream=False, speed=1.0, **kwargs):
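        """Voice conversion: skip the LLM and feed pre-extracted source speech tokens
        straight to token2wav, conditioned on the prompt speaker's tokens, features and embedding."""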
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = source_speech_token.flatten().tolist(), True
            self.hift_cache_dict[this_uuid] = None
            self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
            self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
        if stream is True:
            token_hop_len = self.token_min_hop_len
            while True:
                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
                        .unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
                    # increase token_hop_len for better speech quality
                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
                    break
            # deal with the remaining tokens; ensure the remaining token length equals token_hop_len when cache_speech is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens at once
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.mel_overlap_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
            self.flow_cache_dict.pop(this_uuid)
        torch.cuda.empty_cache()


class CosyVoice2Model(CosyVoiceModel):
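    """CosyVoice2 variant: the flow encoder/decoder run with chunked conv/KV caches
    (init_flow_cache / trim_flow_cache) instead of mel-overlap fading, the hop size is
    tied to the flow encoder's static_chunk_size, and the LLM additionally accepts
    streaming text input via inference_bistream."""
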
    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module,
                 fp16: bool):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift
        self.fp16 = fp16
        if self.fp16 is True:
            self.llm.half()
            self.flow.half()
        self.token_hop_len = self.flow.encoder.static_chunk_size
        # flow decoder required_cache_size
        # TODO: the base model was trained without num_decoding_left_chunks set; it must be retrained before flow_decoder_required_cache_size can be specified properly
        self.flow_decoder_required_cache_size = 999
        # hift cache
        self.mel_cache_len = 8
        self.source_cache_len = int(self.mel_cache_len * 480)
        # speech fade in/out
        self.speech_window = np.hamming(2 * self.source_cache_len)
        # rtf and decoding related
        self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
        self.lock = threading.Lock()
        # dicts used to store per-session (per-uuid) state
        self.tts_speech_token_dict = {}
        self.llm_end_dict = {}
        self.flow_cache_dict = {}
        self.hift_cache_dict = {}

    def init_flow_cache(self):
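        """Build zero-initialized per-session conv and KV caches for the flow encoder and
        decoder; the hard-coded shapes match the chunked-inference graph of this checkpoint."""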
        encoder_cache = {'offset': 0,
                         'pre_lookahead_layer_conv2_cache': torch.zeros(1, 512, 2).to(self.device),
                         'encoders_kv_cache': torch.zeros(6, 1, 8, 0, 64 * 2).to(self.device),
                         'upsample_offset': 0,
                         'upsample_conv_cache': torch.zeros(1, 512, 4).to(self.device),
                         'upsample_kv_cache': torch.zeros(4, 1, 8, 0, 64 * 2).to(self.device)}
        decoder_cache = {'offset': 0,
                         'down_blocks_conv_cache': torch.zeros(10, 1, 2, 832, 2).to(self.device),
                         'down_blocks_kv_cache': torch.zeros(10, 1, 4, 2, 0, 512, 2).to(self.device),
                         'mid_blocks_conv_cache': torch.zeros(10, 12, 2, 512, 2).to(self.device),
                         'mid_blocks_kv_cache': torch.zeros(10, 12, 4, 2, 0, 512, 2).to(self.device),
                         'up_blocks_conv_cache': torch.zeros(10, 1, 2, 1024, 2).to(self.device),
                         'up_blocks_kv_cache': torch.zeros(10, 1, 4, 2, 0, 512, 2).to(self.device),
                         'final_blocks_conv_cache': torch.zeros(10, 2, 256, 2).to(self.device)}
        if self.fp16 is True:
            for cache in [encoder_cache, decoder_cache]:
                for k, v in cache.items():
                    if isinstance(v, torch.Tensor):
                        cache[k] = v.half()
        cache = {'encoder_cache': encoder_cache, 'decoder_cache': decoder_cache}
        return cache

    def trim_flow_cache(self, cache):
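        """Cap the decoder KV caches at flow_decoder_required_cache_size frames, keeping the most recent entries."""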
        if cache['decoder_cache']['down_blocks_kv_cache'].size(4) > self.flow_decoder_required_cache_size:
            cache['decoder_cache']['down_blocks_kv_cache'] = cache['decoder_cache']['down_blocks_kv_cache'][:, :, :, :, -self.flow_decoder_required_cache_size:]
            cache['decoder_cache']['mid_blocks_kv_cache'] = cache['decoder_cache']['mid_blocks_kv_cache'][:, :, :, :, -self.flow_decoder_required_cache_size:]
            cache['decoder_cache']['up_blocks_kv_cache'] = cache['decoder_cache']['up_blocks_kv_cache'][:, :, :, :, -self.flow_decoder_required_cache_size:]
        return cache

    def load_jit(self, flow_encoder_model):
        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
        self.flow.encoder = flow_encoder

    def get_trt_kwargs(self):
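        """TensorRT shape profiles for CosyVoice2: adds the decoder KV-cache tensors as extra engine inputs."""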
        min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4), (1, 4, 2, 0, 512, 2), (12, 4, 2, 0, 512, 2), (1, 4, 2, 0, 512, 2)]
        opt_shape = [(2, 80, 200), (2, 1, 200), (2, 80, 200), (2, 80, 200), (1, 4, 2, 100, 512, 2), (12, 4, 2, 100, 512, 2), (1, 4, 2, 100, 512, 2)]
        max_shape = [(2, 80, 1500), (2, 1, 1500), (2, 80, 1500), (2, 80, 1500), (1, 4, 2, 200, 512, 2), (12, 4, 2, 200, 512, 2), (1, 4, 2, 200, 512, 2)]
        input_names = ["x", "mask", "mu", "cond", "down_blocks_kv_cache", "mid_blocks_kv_cache", "up_blocks_kv_cache"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}

    def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
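        """Same as the base class, but the flow model carries its own chunk cache
        (trimmed after each call), so no mel-overlap fade is needed."""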
        with torch.cuda.amp.autocast(self.fp16):
            tts_mel, self.flow_cache_dict[uuid] = self.flow.inference(token=token.to(self.device),
                                                                      token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                                                      prompt_token=prompt_token.to(self.device),
                                                                      prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                                                      prompt_feat=prompt_feat.to(self.device),
                                                                      prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                                                      embedding=embedding.to(self.device),
                                                                      cache=self.flow_cache_dict[uuid],
                                                                      finalize=finalize)
        self.flow_cache_dict[uuid] = self.trim_flow_cache(self.flow_cache_dict[uuid])
        # append hift cache
        if self.hift_cache_dict[uuid] is not None:
            hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
            tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
        else:
            hift_cache_source = torch.zeros(1, 1, 0)
        # keep overlap mel and hift cache
        if finalize is False:
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
            self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
                                          'source': tts_source[:, :, -self.source_cache_len:],
                                          'speech': tts_speech[:, -self.source_cache_len:]}
            tts_speech = tts_speech[:, :-self.source_cache_len]
        else:
            if speed != 1.0:
                assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
                tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
            tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
            if self.hift_cache_dict[uuid] is not None:
                tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
        return tts_speech

    def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
            prompt_text=torch.zeros(1, 0, dtype=torch.int32),
            llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
            prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
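        """Synthesize speech with chunk-cached flow inference; yields dicts with a 'tts_speech' tensor.

        The flow prompt is trimmed to a whole number of static chunks, and in streaming
        mode the prompt is only fed with the first chunk (the caches carry it forward)."""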
        # this_uuid is used to track variables related to this inference thread
        this_uuid = str(uuid.uuid1())
        # NOTE the flow model is only trained with static_chunk_size, so we need to trim the flow prompt
        n_chunk = int(flow_prompt_speech_token.size(1) / self.token_hop_len)
        flow_prompt_speech_token = flow_prompt_speech_token[:, :n_chunk * self.token_hop_len]
        prompt_speech_feat = prompt_speech_feat[:, :n_chunk * self.token_hop_len * 2]
        with self.lock:
            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
            self.hift_cache_dict[this_uuid] = None
            self.flow_cache_dict[this_uuid] = self.init_flow_cache()
        p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
        p.start()
        if stream is True:
            while True:
                time.sleep(0.1)
                if len(self.tts_speech_token_dict[this_uuid]) >= self.token_hop_len + self.flow.pre_lookahead_len:
                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:self.token_hop_len + self.flow.pre_lookahead_len]).unsqueeze(dim=0)
                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                     prompt_token=flow_prompt_speech_token,
                                                     prompt_feat=prompt_speech_feat,
                                                     embedding=flow_embedding,
                                                     uuid=this_uuid,
                                                     finalize=False)
                    # NOTE in cache inference mode, flow_prompt_speech_token/prompt_speech_feat are only used in the first chunk
                    flow_prompt_speech_token = torch.zeros(1, 0, dtype=torch.int32).to(self.device)
                    prompt_speech_feat = torch.zeros(1, 0, 80).to(self.device)
                    yield {'tts_speech': this_tts_speech.cpu()}
                    with self.lock:
                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][self.token_hop_len:]
                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < self.token_hop_len + self.flow.pre_lookahead_len:
                    break
            p.join()
            # deal with the remaining tokens; ensure the remaining token length equals token_hop_len when cache_speech is not None
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True)
            yield {'tts_speech': this_tts_speech.cpu()}
        else:
            # deal with all tokens at once
            p.join()
            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
            this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                             prompt_token=flow_prompt_speech_token,
                                             prompt_feat=prompt_speech_feat,
                                             embedding=flow_embedding,
                                             uuid=this_uuid,
                                             finalize=True,
                                             speed=speed)
            yield {'tts_speech': this_tts_speech.cpu()}
        with self.lock:
            self.tts_speech_token_dict.pop(this_uuid)
            self.llm_end_dict.pop(this_uuid)
            self.hift_cache_dict.pop(this_uuid)
            self.flow_cache_dict.pop(this_uuid)
        torch.cuda.empty_cache()