Mirror of https://github.com/FunAudioLLM/CosyVoice.git (synced 2026-02-04 17:39:25 +08:00)
add cosyvoice code
cosyvoice/cli/__init__.py — new file, 0 lines

cosyvoice/cli/cosyvoice.py — new file, 83 lines
@@ -0,0 +1,83 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import torch
from hyperpyyaml import load_hyperpyyaml
from modelscope import snapshot_download

from cosyvoice.cli.frontend import CosyVoiceFrontEnd
from cosyvoice.cli.model import CosyVoiceModel


class CosyVoice:

    def __init__(self, model_dir):
        # '-Instruct' model variants support instruct inference but not cross-lingual inference
        instruct = True if '-Instruct' in model_dir else False
        self.model_dir = model_dir
        if not os.path.exists(model_dir):
            # treat model_dir as a modelscope model id and download a local snapshot
            model_dir = snapshot_download(model_dir)
        with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
            configs = load_hyperpyyaml(f)
        self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                          configs['feat_extractor'],
                                          '{}/campplus.onnx'.format(model_dir),
                                          '{}/speech_tokenizer_v1.onnx'.format(model_dir),
                                          '{}/spk2info.pt'.format(model_dir),
                                          instruct,
                                          configs['allowed_special'])
        self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
        self.model.load('{}/llm.pt'.format(model_dir),
                        '{}/flow.pt'.format(model_dir),
                        '{}/hift.pt'.format(model_dir))
        del configs

    def list_avaliable_spks(self):
        spks = list(self.frontend.spk2info.keys())
        return spks

    def inference_sft(self, tts_text, spk_id):
        tts_speeches = []
        # synthesize each normalized text segment, then concatenate along the time axis
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_sft(i, spk_id)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}

    def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k):
        prompt_text = self.frontend.text_normalize(prompt_text, split=False)
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}

    def inference_cross_lingual(self, tts_text, prompt_speech_16k):
        if self.frontend.instruct is True:
            raise ValueError('{} does not support cross_lingual inference'.format(self.model_dir))
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}

    def inference_instruct(self, tts_text, spk_id, instruct_text):
        if self.frontend.instruct is False:
            raise ValueError('{} does not support instruct inference'.format(self.model_dir))
        instruct_text = self.frontend.text_normalize(instruct_text, split=False)
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}
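For context, a minimal usage sketch of the CosyVoice class defined above. The model id and speaker id are illustrative assumptions (any local path or modelscope id works), and the 22050 Hz output sample rate is inferred from the 22.05 kHz feature pipeline in frontend.py below, not stated in this commit.

import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice

# hypothetical model id; downloaded via snapshot_download if not a local path
cosyvoice = CosyVoice('iic/CosyVoice-300M-SFT')
print(cosyvoice.list_avaliable_spks())  # list of speaker ids in spk2info.pt
output = cosyvoice.inference_sft('Hello, this is a test.', cosyvoice.list_avaliable_spks()[0])
# assumed 22050 Hz, matching the frontend's 22.05 kHz resampling
torchaudio.save('sft.wav', output['tts_speech'], 22050)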
cosyvoice/cli/frontend.py — new file, 146 lines
@@ -0,0 +1,146 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import onnxruntime
import torch
import numpy as np
import whisper
from typing import Callable
import torchaudio.compliance.kaldi as kaldi
import torchaudio
import os
import inflect
import ttsfrd
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph


class CosyVoiceFrontEnd:

    def __init__(self,
                 get_tokenizer: Callable,
                 feat_extractor: Callable,
                 campplus_model: str,
                 speech_tokenizer_model: str,
                 spk2info: str = '',
                 instruct: bool = False,
                 allowed_special: str = 'all'):
        self.tokenizer = get_tokenizer()
        self.feat_extractor = feat_extractor
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        # the campplus speaker-embedding model runs on CPU, the speech tokenizer on CUDA
        self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
        self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider"])
        if os.path.exists(spk2info):
            self.spk2info = torch.load(spk2info, map_location=self.device)
        self.instruct = instruct
        self.allowed_special = allowed_special
        self.inflect_parser = inflect.engine()
        # ttsfrd handles Chinese text normalization; inflect spells out English numbers
        self.frd = ttsfrd.TtsFrontendEngine()
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        assert self.frd.initialize('{}/../../pretrained_models/speech_kantts_ttsfrd/resource'.format(ROOT_DIR)) is True, 'failed to initialize ttsfrd resource'
        self.frd.set_lang_type('pinyin')
        self.frd.enable_pinyin_mix(True)
        self.frd.set_breakmodel_index(1)

    def _extract_text_token(self, text):
        text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
        text_token = torch.tensor([text_token], dtype=torch.int32).to(self.device)
        text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device)
        return text_token, text_token_len

    def _extract_speech_token(self, speech):
        # discrete speech tokens come from an ONNX tokenizer fed with whisper log-mel features
        feat = whisper.log_mel_spectrogram(speech, n_mels=128)
        speech_token = self.speech_tokenizer_session.run(None, {self.speech_tokenizer_session.get_inputs()[0].name: feat.detach().cpu().numpy(),
                                                                self.speech_tokenizer_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
        speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device)
        speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device)
        return speech_token, speech_token_len

    def _extract_spk_embedding(self, speech):
        feat = kaldi.fbank(speech,
                           num_mel_bins=80,
                           dither=0,
                           sample_frequency=16000)
        feat = feat - feat.mean(dim=0, keepdim=True)
        embedding = self.campplus_session.run(None, {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
        embedding = torch.tensor([embedding]).to(self.device)
        return embedding

    def _extract_speech_feat(self, speech):
        speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device)
        speech_feat = speech_feat.unsqueeze(dim=0)
        speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device)
        return speech_feat, speech_feat_len

    def text_normalize(self, text, split=True):
        text = text.strip()
        if contains_chinese(text):
            # Chinese path: ttsfrd normalization plus punctuation cleanup
            text = self.frd.get_frd_extra_info(text, 'input').replace("\n", "")
            text = replace_blank(text)
            text = replace_corner_mark(text)
            text = text.replace(".", "、")
            text = text.replace(" - ", ",")
            text = remove_bracket(text)
            texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh", token_max_n=80,
                                                token_min_n=60, merge_len=20,
                                                comma_split=False)]
        else:
            # English path: spell out digits, then split into token-bounded segments
            text = spell_out_number(text, self.inflect_parser)
            texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
                                                token_min_n=60, merge_len=20,
                                                comma_split=False)]
        if split is False:
            return text
        return texts

    def frontend_sft(self, tts_text, spk_id):
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        embedding = self.spk2info[spk_id]['embedding']
        model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding}
        return model_input

    def frontend_zero_shot(self, tts_text, prompt_text, prompt_speech_16k):
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text)
        # the flow model consumes 22.05 kHz features, so the 16 kHz prompt is resampled
        prompt_speech_22050 = torchaudio.transforms.Resample(orig_freq=16000, new_freq=22050)(prompt_speech_16k)
        speech_feat, speech_feat_len = self._extract_speech_feat(prompt_speech_22050)
        speech_token, speech_token_len = self._extract_speech_token(prompt_speech_16k)
        embedding = self._extract_spk_embedding(prompt_speech_16k)
        model_input = {'text': tts_text_token, 'text_len': tts_text_token_len,
                       'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
                       'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
                       'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
                       'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
                       'llm_embedding': embedding, 'flow_embedding': embedding}
        return model_input

    def frontend_cross_lingual(self, tts_text, prompt_speech_16k):
        model_input = self.frontend_zero_shot(tts_text, '', prompt_speech_16k)
        # in cross-lingual mode, we remove the prompt in the llm
        del model_input['prompt_text']
        del model_input['prompt_text_len']
        del model_input['llm_prompt_speech_token']
        del model_input['llm_prompt_speech_token_len']
        return model_input

    def frontend_instruct(self, tts_text, spk_id, instruct_text):
        model_input = self.frontend_sft(tts_text, spk_id)
        # in instruct mode, we remove spk_embedding in the llm due to information leakage
        del model_input['llm_embedding']
        instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
        model_input['prompt_text'] = instruct_text_token
        model_input['prompt_text_len'] = instruct_text_token_len
        return model_input
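A hedged note on input preparation: the frontend methods above expect the reference audio as a 16 kHz mono tensor (prompt_speech_16k). A minimal loading sketch, with the file name purely illustrative:

import torchaudio

speech, sr = torchaudio.load('prompt.wav')   # shape (channels, samples)
speech = speech.mean(dim=0, keepdim=True)    # downmix to mono if needed
if sr != 16000:
    speech = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(speech)
# 'speech' can now be passed as prompt_speech_16k to
# frontend_zero_shot or frontend_cross_lingual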
cosyvoice/cli/model.py — new file, 59 lines
@@ -0,0 +1,59 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch


class CosyVoiceModel:

    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift

    def load(self, llm_model, flow_model, hift_model):
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device))
        self.llm.to(self.device).eval()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device))
        self.flow.to(self.device).eval()
        self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
        self.hift.to(self.device).eval()

    def inference(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192),
                  prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
                  llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                  flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                  prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)):
        # stage 1: the autoregressive LLM predicts discrete speech tokens from text
        tts_speech_token = self.llm.inference(text=text.to(self.device),
                                              text_len=text_len.to(self.device),
                                              prompt_text=prompt_text.to(self.device),
                                              prompt_text_len=prompt_text_len.to(self.device),
                                              prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                              prompt_speech_token_len=llm_prompt_speech_token_len.to(self.device),
                                              embedding=llm_embedding.to(self.device),
                                              beam_size=1,
                                              sampling=25,
                                              max_token_text_ratio=30,
                                              min_token_text_ratio=3)
        # stage 2: the flow-matching model converts speech tokens to a mel spectrogram
        tts_mel = self.flow.inference(token=tts_speech_token,
                                      token_len=torch.tensor([tts_speech_token.size(1)], dtype=torch.int32).to(self.device),
                                      prompt_token=flow_prompt_speech_token.to(self.device),
                                      prompt_token_len=flow_prompt_speech_token_len.to(self.device),
                                      prompt_feat=prompt_speech_feat.to(self.device),
                                      prompt_feat_len=prompt_speech_feat_len.to(self.device),
                                      embedding=flow_embedding.to(self.device))
        # stage 3: the HiFT vocoder turns the mel spectrogram into a waveform
        tts_speech = self.hift.inference(mel=tts_mel).cpu()
        return {'tts_speech': tts_speech}
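Putting the three files together, a hedged end-to-end zero-shot sketch. The model path, audio file, and texts are illustrative assumptions, and the 22050 Hz output rate is inferred from the frontend's resampling rather than stated in this commit; prompt loading follows the sketch after frontend.py above.

import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice

cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M')   # hypothetical local path
prompt, sr = torchaudio.load('prompt.wav')
prompt = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(prompt.mean(dim=0, keepdim=True))
output = cosyvoice.inference_zero_shot('Text to speak in the prompt voice.',
                                       'Transcript of the prompt audio.', prompt)
torchaudio.save('zero_shot.wav', output['tts_speech'], 22050)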