mirror of https://github.com/FunAudioLLM/CosyVoice.git
synced 2026-02-05 18:09:24 +08:00

Merge pull request #436 from FunAudioLLM/dev/lyuxiang.lx

Dev/lyuxiang.lx

README.md (14 lines changed)
@@ -22,12 +22,9 @@ For `SenseVoice`, visit [SenseVoice repo](https://github.com/FunAudioLLM/SenseVoice).
     - [ ] 25hz cosyvoice base model
     - [ ] 25hz cosyvoice voice conversion model
 
-- [ ] 2024/10
-
-    - [ ] 50hz llama based llm model which supports lora finetune
-
 - [ ] TBD
 
+    - [ ] 25hz llama based llm model which supports lora finetune
     - [ ] Support more instruction mode
     - [ ] Voice conversion
     - [ ] Music generation
@@ -74,6 +71,7 @@ If you are expert in this field, and you are only interested in training your own
 # SDK模型下载
 from modelscope import snapshot_download
 snapshot_download('iic/CosyVoice-300M', local_dir='pretrained_models/CosyVoice-300M')
+snapshot_download('iic/CosyVoice-300M-25Hz', local_dir='pretrained_models/CosyVoice-300M-25Hz')
 snapshot_download('iic/CosyVoice-300M-SFT', local_dir='pretrained_models/CosyVoice-300M-SFT')
 snapshot_download('iic/CosyVoice-300M-Instruct', local_dir='pretrained_models/CosyVoice-300M-Instruct')
 snapshot_download('iic/CosyVoice-ttsfrd', local_dir='pretrained_models/CosyVoice-ttsfrd')
@@ -83,6 +81,7 @@ snapshot_download('iic/CosyVoice-ttsfrd', local_dir='pretrained_models/CosyVoice-ttsfrd')
 # git模型下载,请确保已安装git lfs
 mkdir -p pretrained_models
 git clone https://www.modelscope.cn/iic/CosyVoice-300M.git pretrained_models/CosyVoice-300M
+git clone https://www.modelscope.cn/iic/CosyVoice-300M-25Hz.git pretrained_models/CosyVoice-300M-25Hz
 git clone https://www.modelscope.cn/iic/CosyVoice-300M-SFT.git pretrained_models/CosyVoice-300M-SFT
 git clone https://www.modelscope.cn/iic/CosyVoice-300M-Instruct.git pretrained_models/CosyVoice-300M-Instruct
 git clone https://www.modelscope.cn/iic/CosyVoice-ttsfrd.git pretrained_models/CosyVoice-ttsfrd
@@ -121,7 +120,7 @@ print(cosyvoice.list_avaliable_spks())
 for i, j in enumerate(cosyvoice.inference_sft('你好,我是通义生成式语音大模型,请问有什么可以帮您的吗?', '中文女', stream=False)):
     torchaudio.save('sft_{}.wav'.format(i), j['tts_speech'], 22050)
 
-cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M')
+cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-25Hz') # or change to pretrained_models/CosyVoice-300M for 50Hz inference
 # zero_shot usage, <|zh|><|en|><|jp|><|yue|><|ko|> for Chinese/English/Japanese/Cantonese/Korean
 prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)
 for i, j in enumerate(cosyvoice.inference_zero_shot('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', '希望你以后能够做的比我还好呦。', prompt_speech_16k, stream=False)):
@@ -130,6 +129,11 @@ for i, j in enumerate(cosyvoice.inference_zero_shot('收到好友从远方寄来
 prompt_speech_16k = load_wav('cross_lingual_prompt.wav', 16000)
 for i, j in enumerate(cosyvoice.inference_cross_lingual('<|en|>And then later on, fully acquiring that company. So keeping management in line, interest in line with the asset that\'s coming into the family is a reason why sometimes we don\'t buy the whole thing.', prompt_speech_16k, stream=False)):
     torchaudio.save('cross_lingual_{}.wav'.format(i), j['tts_speech'], 22050)
+# vc usage
+prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)
+source_speech_16k = load_wav('cross_lingual_prompt.wav', 16000)
+for i, j in enumerate(cosyvoice.inference_vc(source_speech_16k, prompt_speech_16k, stream=False)):
+    torchaudio.save('vc_{}.wav'.format(i), j['tts_speech'], 22050)
 
 cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-Instruct')
 # instruct usage, support <laughter></laughter><strong></strong>[laughter][breath]
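The new `inference_vc` snippet above saves each yielded chunk as its own wav file, like the other README examples. A minimal sketch of collecting the chunks into a single file instead (the output name and the concatenation step are illustrative, not part of the diff; it assumes `cosyvoice`, `source_speech_16k` and `prompt_speech_16k` are set up as in the snippet):

```python
import torch
import torchaudio

chunks = [j['tts_speech'] for j in cosyvoice.inference_vc(source_speech_16k, prompt_speech_16k, stream=False)]
torchaudio.save('vc_output.wav', torch.concat(chunks, dim=1), 22050)  # all outputs in this PR are 22.05 kHz
```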
cosyvoice/cli/cosyvoice.py
@@ -58,7 +58,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_sft(i, spk_id)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
@@ -70,7 +70,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
@@ -83,7 +83,7 @@ class CosyVoice:
             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
@@ -97,8 +97,17 @@ class CosyVoice:
             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
             start_time = time.time()
             logging.info('synthesis text {}'.format(i))
-            for model_output in self.model.inference(**model_input, stream=stream, speed=speed):
+            for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
                 speech_len = model_output['tts_speech'].shape[1] / 22050
                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
                 yield model_output
             start_time = time.time()
+
+    def inference_vc(self, source_speech_16k, prompt_speech_16k, stream=False, speed=1.0):
+        model_input = self.frontend.frontend_vc(source_speech_16k, prompt_speech_16k)
+        start_time = time.time()
+        for model_output in self.model.vc(**model_input, stream=stream, speed=speed):
+            speech_len = model_output['tts_speech'].shape[1] / 22050
+            logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+            yield model_output
+            start_time = time.time()
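The renamed `model.tts` (formerly `model.inference`) keeps the text-to-speech path, while the new `inference_vc` wrapper glues together the frontend and model pieces added further down in this diff. A hedged sketch of that equivalence, assuming a constructed `cosyvoice` object and two 16 kHz waveforms loaded with `load_wav`:

```python
# roughly what inference_vc does per call, using the two new pieces directly
model_input = cosyvoice.frontend.frontend_vc(source_speech_16k, prompt_speech_16k)  # added in frontend.py below
for model_output in cosyvoice.model.vc(**model_input, stream=False, speed=1.0):     # added in model.py below
    print(model_output['tts_speech'].shape)  # (1, num_samples) chunks at 22050 Hz
```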
cosyvoice/cli/frontend.py
@@ -55,6 +55,8 @@ class CosyVoiceFrontEnd:
                                                           "CPUExecutionProvider"])
         if os.path.exists(spk2info):
             self.spk2info = torch.load(spk2info, map_location=self.device)
+        else:
+            self.spk2info = {}
         self.instruct = instruct
         self.allowed_special = allowed_special
         self.inflect_parser = inflect.engine()
@@ -172,3 +174,15 @@ class CosyVoiceFrontEnd:
         model_input['prompt_text'] = instruct_text_token
         model_input['prompt_text_len'] = instruct_text_token_len
         return model_input
+
+    def frontend_vc(self, source_speech_16k, prompt_speech_16k):
+        prompt_speech_token, prompt_speech_token_len = self._extract_speech_token(prompt_speech_16k)
+        prompt_speech_22050 = torchaudio.transforms.Resample(orig_freq=16000, new_freq=22050)(prompt_speech_16k)
+        prompt_speech_feat, prompt_speech_feat_len = self._extract_speech_feat(prompt_speech_22050)
+        embedding = self._extract_spk_embedding(prompt_speech_16k)
+        source_speech_token, source_speech_token_len = self._extract_speech_token(source_speech_16k)
+        model_input = {'source_speech_token': source_speech_token, 'source_speech_token_len': source_speech_token_len,
+                       'flow_prompt_speech_token': prompt_speech_token, 'flow_prompt_speech_token_len': prompt_speech_token_len,
+                       'prompt_speech_feat': prompt_speech_feat, 'prompt_speech_feat_len': prompt_speech_feat_len,
+                       'flow_embedding': embedding}
+        return model_input
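frontend_vc extracts speech tokens and the speaker embedding from 16 kHz audio, but computes the prompt mel features from a 22.05 kHz resample, matching the sample rate the flow decoder and HiFT vocoder work at (the same 22050 that appears in the constants elsewhere in this diff). A small standalone sketch of that resampling step; `prompt_speech_16k` is assumed to be a (1, T) float tensor as returned by `load_wav`:

```python
import torchaudio

# token extraction and the speaker-embedding model consume 16 kHz input;
# the mel features that condition the flow decoder live on the 22.05 kHz grid
resampler = torchaudio.transforms.Resample(orig_freq=16000, new_freq=22050)
prompt_speech_22050 = resampler(prompt_speech_16k)
```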
cosyvoice/cli/model.py
@@ -35,7 +35,7 @@ class CosyVoiceModel:
         self.token_max_hop_len = 200
         self.token_overlap_len = 20
         # mel fade in out
-        self.mel_overlap_len = 34
+        self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
         self.mel_window = np.hamming(2 * self.mel_overlap_len)
         # hift cache
         self.mel_cache_len = 20
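The hard-coded overlap of 34 mel frames only held for 50 Hz speech tokens; deriving it from the flow model's input_frame_rate keeps the streaming crossfade window consistent for the new 25 Hz models as well. A quick check of the arithmetic (the 22050 Hz sample rate and 256-sample hop are taken from the expression itself):

```python
# 20 overlap tokens -> seconds -> mel frames (22050 Hz audio, 256-sample hop)
int(20 / 50 * 22050 / 256)  # = 34, matching the old hard-coded value for 50 Hz models
int(20 / 25 * 22050 / 256)  # = 68, what the new expression yields for 25 Hz models
```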
@@ -63,11 +63,11 @@ class CosyVoiceModel:
         self.hift.to(self.device).eval()
 
     def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
-        llm_text_encoder = torch.jit.load(llm_text_encoder_model)
+        llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
         self.llm.text_encoder = llm_text_encoder
-        llm_llm = torch.jit.load(llm_llm_model)
+        llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
         self.llm.llm = llm_llm
-        flow_encoder = torch.jit.load(flow_encoder_model)
+        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
         self.flow.encoder = flow_encoder
 
     def load_onnx(self, flow_decoder_estimator_model):
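Without map_location, torch.jit.load restores each scripted module onto the device it was exported from, which can fail on hosts that lack that device; pinning it to self.device lets the JIT path run on CPU-only machines too. A tiny illustration (the file name is a placeholder, not taken from the repo):

```python
import torch

# torch.jit.load accepts the same map_location argument as torch.load
module = torch.jit.load('some_scripted_module.pt', map_location=torch.device('cpu'))
```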
@@ -131,11 +131,11 @@ class CosyVoiceModel:
             tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
         return tts_speech
 
-    def inference(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
+    def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
             prompt_text=torch.zeros(1, 0, dtype=torch.int32),
             llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
             flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
             prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
         # this_uuid is used to track variables related to this inference thread
         this_uuid = str(uuid.uuid1())
         with self.lock:
@@ -148,7 +148,8 @@ class CosyVoiceModel:
             while True:
                 time.sleep(0.1)
                 if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
-                    this_tts_speech_token = torch.concat(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len], dim=1)
+                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
+                        .unsqueeze(dim=0)
                     this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                                      prompt_token=flow_prompt_speech_token,
                                                      prompt_feat=prompt_speech_feat,
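This pairs with the TransformerLM hunk further down: the LLM generator now yields plain integer token ids instead of (1, 1) tensors, so the per-uuid buffer is a Python list of ints and the batch tensor has to be rebuilt with torch.tensor(...).unsqueeze(0) rather than torch.concat. A small sketch of the equivalence (the token values are made up):

```python
import torch

buffered_ids = [101, 57, 988, 4]                     # ints appended as the llm yields them
batch = torch.tensor(buffered_ids).unsqueeze(dim=0)  # shape (1, 4), the layout token2wav expects
assert batch.shape == (1, len(buffered_ids))
```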
@@ -164,7 +165,7 @@ class CosyVoiceModel:
                     break
             p.join()
             # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
-            this_tts_speech_token = torch.concat(self.tts_speech_token_dict[this_uuid], dim=1)
+            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
             this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                              prompt_token=flow_prompt_speech_token,
                                              prompt_feat=prompt_speech_feat,
@@ -175,7 +176,58 @@ class CosyVoiceModel:
         else:
             # deal with all tokens
             p.join()
-            this_tts_speech_token = torch.concat(self.tts_speech_token_dict[this_uuid], dim=1)
+            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
+            this_tts_speech = self.token2wav(token=this_tts_speech_token,
+                                             prompt_token=flow_prompt_speech_token,
+                                             prompt_feat=prompt_speech_feat,
+                                             embedding=flow_embedding,
+                                             uuid=this_uuid,
+                                             finalize=True,
+                                             speed=speed)
+            yield {'tts_speech': this_tts_speech.cpu()}
+        with self.lock:
+            self.tts_speech_token_dict.pop(this_uuid)
+            self.llm_end_dict.pop(this_uuid)
+            self.mel_overlap_dict.pop(this_uuid)
+            self.hift_cache_dict.pop(this_uuid)
+
+    def vc(self, source_speech_token, flow_prompt_speech_token, prompt_speech_feat, flow_embedding, stream=False, speed=1.0, **kwargs):
+        # this_uuid is used to track variables related to this inference thread
+        this_uuid = str(uuid.uuid1())
+        with self.lock:
+            self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = source_speech_token.flatten().tolist(), True
+            self.mel_overlap_dict[this_uuid], self.hift_cache_dict[this_uuid] = None, None
+        if stream is True:
+            token_hop_len = self.token_min_hop_len
+            while True:
+                if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
+                    this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
+                        .unsqueeze(dim=0)
+                    this_tts_speech = self.token2wav(token=this_tts_speech_token,
+                                                     prompt_token=flow_prompt_speech_token,
+                                                     prompt_feat=prompt_speech_feat,
+                                                     embedding=flow_embedding,
+                                                     uuid=this_uuid,
+                                                     finalize=False)
+                    yield {'tts_speech': this_tts_speech.cpu()}
+                    with self.lock:
+                        self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
+                    # increase token_hop_len for better speech quality
+                    token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
+                if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
+                    break
+            # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
+            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
+            this_tts_speech = self.token2wav(token=this_tts_speech_token,
+                                             prompt_token=flow_prompt_speech_token,
+                                             prompt_feat=prompt_speech_feat,
+                                             embedding=flow_embedding,
+                                             uuid=this_uuid,
+                                             finalize=True)
+            yield {'tts_speech': this_tts_speech.cpu()}
+        else:
+            # deal with all tokens
+            this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
             this_tts_speech = self.token2wav(token=this_tts_speech_token,
                                              prompt_token=flow_prompt_speech_token,
                                              prompt_feat=prompt_speech_feat,
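The new vc() is essentially tts() with the LLM stage removed: the speech tokens extracted from the source recording are dropped straight into the per-uuid buffer and llm_end is marked True up front, so only the flow + HiFT (token2wav) stages run. A schematic sketch of that seeding step, assuming source_speech_token is the (1, T) tensor built by frontend_vc; this is an outline of the idea, not the actual implementation:

```python
import torch

token_buffer = source_speech_token.flatten().tolist()  # tokens come from the source audio, not from the LLM
llm_finished = True                                     # nothing left to generate; only resynthesis remains
batch = torch.tensor(token_buffer).unsqueeze(dim=0)     # (1, T) input handed to token2wav / the flow decoder
```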
cosyvoice/flow/flow.py
@@ -124,15 +124,14 @@ class MaskedDiffWithXvec(torch.nn.Module):
         # text encode
         h, h_lengths = self.encoder(token, token_len)
         h = self.encoder_proj(h)
-        mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / 50 * 22050 / 256)
-        h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2)
+        mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256)
+        h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate)
 
         # get conditions
         conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device)
         conds[:, :mel_len1] = prompt_feat
         conds = conds.transpose(1, 2)
 
-        # mask = (~make_pad_mask(feat_len)).to(h)
         mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
         feat = self.decoder(
             mu=h.transpose(1, 2).contiguous(),
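mel_len2 is just the duration of the generated tokens converted to mel frames: token count divided by the token rate gives seconds, times 22 050 samples per second, divided by the 256-sample hop of the mel front end. Reading the rate from self.input_frame_rate lets the same formula serve the 50 Hz and 25 Hz models; a worked example with a made-up token count:

```python
# 100 generated speech tokens
int(100 / 50 * 22050 / 256)  # = 172 mel frames for a 50 Hz token model (2.0 s of audio)
int(100 / 25 * 22050 / 256)  # = 344 mel frames for a 25 Hz token model (4.0 s of audio)
```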
cosyvoice/flow/length_regulator.py
@@ -49,13 +49,14 @@ class InterpolateRegulator(nn.Module):
         olens = ylens
         return out * mask, olens
 
-    def inference(self, x1, x2, mel_len1, mel_len2):
+    def inference(self, x1, x2, mel_len1, mel_len2, input_frame_rate=50):
         # in inference mode, interploate prompt token and token(head/mid/tail) seprately, so we can get a clear separation point of mel
         # x in (B, T, D)
         if x2.shape[1] > 40:
-            x2_head = F.interpolate(x2[:, :20].transpose(1, 2).contiguous(), size=34, mode='linear')
-            x2_mid = F.interpolate(x2[:, 20:-20].transpose(1, 2).contiguous(), size=mel_len2 - 34 * 2, mode='linear')
-            x2_tail = F.interpolate(x2[:, -20:].transpose(1, 2).contiguous(), size=34, mode='linear')
+            x2_head = F.interpolate(x2[:, :20].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear')
+            x2_mid = F.interpolate(x2[:, 20:-20].transpose(1, 2).contiguous(), size=mel_len2 - int(20 / input_frame_rate * 22050 / 256) * 2,
+                                   mode='linear')
+            x2_tail = F.interpolate(x2[:, -20:].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear')
             x2 = torch.concat([x2_head, x2_mid, x2_tail], dim=2)
         else:
             x2 = F.interpolate(x2.transpose(1, 2).contiguous(), size=mel_len2, mode='linear')
cosyvoice/llm/llm.py
@@ -206,7 +206,7 @@ class TransformerLM(torch.nn.Module):
             if top_ids == self.speech_token_size:
                 break
             # in stream mode, yield token one by one
-            yield torch.tensor([[top_ids]], dtype=torch.int64, device=device)
+            yield top_ids
             out_tokens.append(top_ids)
             offset += lm_input.size(1)
             lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
cosyvoice/tokenizer/assets/multilingual_zh_ja_yue_char_del.tiktoken (new file, 58836 lines)
File diff suppressed because it is too large

cosyvoice/tokenizer/tokenizer.py (new file, 236 lines)
@@ -0,0 +1,236 @@
+import base64
+import os
+from functools import lru_cache
+from typing import Optional
+from whisper.tokenizer import Tokenizer
+
+import tiktoken
+
+LANGUAGES = {
+    "en": "english",
+    "zh": "chinese",
+    "de": "german",
+    "es": "spanish",
+    "ru": "russian",
+    "ko": "korean",
+    "fr": "french",
+    "ja": "japanese",
+    "pt": "portuguese",
+    "tr": "turkish",
+    "pl": "polish",
+    "ca": "catalan",
+    "nl": "dutch",
+    "ar": "arabic",
+    "sv": "swedish",
+    "it": "italian",
+    "id": "indonesian",
+    "hi": "hindi",
+    "fi": "finnish",
+    "vi": "vietnamese",
+    "he": "hebrew",
+    "uk": "ukrainian",
+    "el": "greek",
+    "ms": "malay",
+    "cs": "czech",
+    "ro": "romanian",
+    "da": "danish",
+    "hu": "hungarian",
+    "ta": "tamil",
+    "no": "norwegian",
+    "th": "thai",
+    "ur": "urdu",
+    "hr": "croatian",
+    "bg": "bulgarian",
+    "lt": "lithuanian",
+    "la": "latin",
+    "mi": "maori",
+    "ml": "malayalam",
+    "cy": "welsh",
+    "sk": "slovak",
+    "te": "telugu",
+    "fa": "persian",
+    "lv": "latvian",
+    "bn": "bengali",
+    "sr": "serbian",
+    "az": "azerbaijani",
+    "sl": "slovenian",
+    "kn": "kannada",
+    "et": "estonian",
+    "mk": "macedonian",
+    "br": "breton",
+    "eu": "basque",
+    "is": "icelandic",
+    "hy": "armenian",
+    "ne": "nepali",
+    "mn": "mongolian",
+    "bs": "bosnian",
+    "kk": "kazakh",
+    "sq": "albanian",
+    "sw": "swahili",
+    "gl": "galician",
+    "mr": "marathi",
+    "pa": "punjabi",
+    "si": "sinhala",
+    "km": "khmer",
+    "sn": "shona",
+    "yo": "yoruba",
+    "so": "somali",
+    "af": "afrikaans",
+    "oc": "occitan",
+    "ka": "georgian",
+    "be": "belarusian",
+    "tg": "tajik",
+    "sd": "sindhi",
+    "gu": "gujarati",
+    "am": "amharic",
+    "yi": "yiddish",
+    "lo": "lao",
+    "uz": "uzbek",
+    "fo": "faroese",
+    "ht": "haitian creole",
+    "ps": "pashto",
+    "tk": "turkmen",
+    "nn": "nynorsk",
+    "mt": "maltese",
+    "sa": "sanskrit",
+    "lb": "luxembourgish",
+    "my": "myanmar",
+    "bo": "tibetan",
+    "tl": "tagalog",
+    "mg": "malagasy",
+    "as": "assamese",
+    "tt": "tatar",
+    "haw": "hawaiian",
+    "ln": "lingala",
+    "ha": "hausa",
+    "ba": "bashkir",
+    "jw": "javanese",
+    "su": "sundanese",
+    "yue": "cantonese",
+    "minnan": "minnan",
+    "wuyu": "wuyu",
+    "dialect": "dialect",
+    "zh/en": "zh/en",
+    "en/zh": "en/zh",
+}
+
+# language code lookup by name, with a few language aliases
+TO_LANGUAGE_CODE = {
+    **{language: code for code, language in LANGUAGES.items()},
+    "burmese": "my",
+    "valencian": "ca",
+    "flemish": "nl",
+    "haitian": "ht",
+    "letzeburgesch": "lb",
+    "pushto": "ps",
+    "panjabi": "pa",
+    "moldavian": "ro",
+    "moldovan": "ro",
+    "sinhalese": "si",
+    "castilian": "es",
+    "mandarin": "zh",
+}
+
+AUDIO_EVENT = {
+    "ASR": "ASR",
+    "AED": "AED",
+    "SER": "SER",
+    "Speech": "Speech",
+    "/Speech": "/Speech",
+    "BGM": "BGM",
+    "/BGM": "/BGM",
+    "Laughter": "Laughter",
+    "/Laughter": "/Laughter",
+    "Applause": "Applause",
+    "/Applause": "/Applause",
+}
+
+EMOTION = {
+    "HAPPY": "HAPPY",
+    "SAD": "SAD",
+    "ANGRY": "ANGRY",
+    "NEUTRAL": "NEUTRAL",
+}
+
+TTS_Vocal_Token = {
+    "TTS/B": "TTS/B",
+    "TTS/O": "TTS/O",
+    "TTS/Q": "TTS/Q",
+    "TTS/A": "TTS/A",
+    "TTS/CO": "TTS/CO",
+    "TTS/CL": "TTS/CL",
+    "TTS/H": "TTS/H",
+    **{f"TTS/SP{i:02d}": f"TTS/SP{i:02d}" for i in range(1, 14)}
+}
+
+
+@lru_cache(maxsize=None)
+def get_encoding(name: str = "gpt2", num_languages: int = 99):
+    vocab_path = os.path.join(os.path.dirname(__file__), "assets", f"{name}.tiktoken")
+    ranks = {
+        base64.b64decode(token): int(rank)
+        for token, rank in (line.split() for line in open(vocab_path) if line)
+    }
+    n_vocab = len(ranks)
+    special_tokens = {}
+
+    specials = [
+        "<|endoftext|>",
+        "<|startoftranscript|>",
+        *[f"<|{lang}|>" for lang in list(LANGUAGES.keys())[:num_languages]],
+        *[f"<|{audio_event}|>" for audio_event in list(AUDIO_EVENT.keys())],
+        *[f"<|{emotion}|>" for emotion in list(EMOTION.keys())],
+        "<|translate|>",
+        "<|transcribe|>",
+        "<|startoflm|>",
+        "<|startofprev|>",
+        "<|nospeech|>",
+        "<|notimestamps|>",
+        *[f"<|SPECIAL_TOKEN_{i}|>" for i in range(1, 31)],  # register special tokens for ASR
+        *[f"<|{tts}|>" for tts in list(TTS_Vocal_Token.keys())],  # register special tokens for TTS
+        *[f"<|{i * 0.02:.2f}|>" for i in range(1501)],
+    ]
+
+    for token in specials:
+        special_tokens[token] = n_vocab
+        n_vocab += 1
+
+    return tiktoken.Encoding(
+        name=os.path.basename(vocab_path),
+        explicit_n_vocab=n_vocab,
+        pat_str=r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""",
+        mergeable_ranks=ranks,
+        special_tokens=special_tokens,
+    )
+
+
+@lru_cache(maxsize=None)
+def get_tokenizer(
+    multilingual: bool,
+    *,
+    num_languages: int = 99,
+    language: Optional[str] = None,
+    task: Optional[str] = None,  # Literal["transcribe", "translate", None]
+) -> Tokenizer:
+    if language is not None:
+        language = language.lower()
+        if language not in LANGUAGES:
+            if language in TO_LANGUAGE_CODE:
+                language = TO_LANGUAGE_CODE[language]
+            else:
+                raise ValueError(f"Unsupported language: {language}")
+
+    if multilingual:
+        encoding_name = "multilingual_zh_ja_yue_char_del"
+        language = language or "en"
+        task = task or "transcribe"
+    else:
+        encoding_name = "gpt2"
+        language = None
+        task = None
+
+    encoding = get_encoding(name=encoding_name, num_languages=num_languages)
+
+    return Tokenizer(
+        encoding=encoding, num_languages=num_languages, language=language, task=task
+    )
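The new tokenizer module mirrors Whisper's tiktoken-based setup but points it at the bundled multilingual_zh_ja_yue_char_del vocabulary and registers extra special tokens for audio events, emotions and TTS control. A hedged usage sketch; the exact ids depend on the shipped vocabulary, so treat the printed values as illustrative:

```python
from cosyvoice.tokenizer.tokenizer import get_tokenizer

tokenizer = get_tokenizer(multilingual=True)  # loads assets/multilingual_zh_ja_yue_char_del.tiktoken
ids = tokenizer.encode("hello world")         # plain text -> token ids
print(len(ids), tokenizer.decode(ids))
```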
cosyvoice/utils/common.py
@@ -15,8 +15,10 @@
 # Modified from ESPnet(https://github.com/espnet/espnet)
 """Unility functions for Transformer."""
 
+import random
 from typing import List
 
+import numpy as np
 import torch
 
 IGNORE_ID = -1
@@ -142,3 +144,10 @@ def fade_in_out(fade_in_mel, fade_out_mel, window):
     fade_in_mel[..., :mel_overlap_len] = fade_in_mel[..., :mel_overlap_len] * window[:mel_overlap_len] + \
         fade_out_mel[..., -mel_overlap_len:] * window[mel_overlap_len:]
     return fade_in_mel.to(device)
+
+
+def set_all_random_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
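set_all_random_seed moves from webui.py into the shared utils module so any entry point can pin the Python, NumPy and PyTorch RNGs before sampling from the LLM. A minimal usage sketch (the seed value is arbitrary):

```python
from cosyvoice.utils.common import set_all_random_seed

set_all_random_seed(1234)  # same seed -> reproducible token sampling for the same text and prompt
```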
webui.py (8 lines changed)
@@ -24,6 +24,7 @@ ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
 from cosyvoice.cli.cosyvoice import CosyVoice
 from cosyvoice.utils.file_utils import load_wav, logging
+from cosyvoice.utils.common import set_all_random_seed
 
 inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
 instruct_dict = {'预训练音色': '1. 选择预训练音色\n2. 点击生成音频按钮',
@@ -42,13 +43,6 @@ def generate_seed():
 }
 
 
-def set_all_random_seed(seed):
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-    torch.cuda.manual_seed_all(seed)
-
-
 def postprocess(speech, top_db=60, hop_length=220, win_length=440):
     speech, _ = librosa.effects.trim(
         speech, top_db=top_db,