Mirror of https://github.com/FunAudioLLM/CosyVoice.git, synced 2026-02-04 09:29:25 +08:00
add flow trt wrapper
212 cosyvoice/llm/llm_vllm.py Normal file
@@ -0,0 +1,212 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import queue
import asyncio
import threading
from typing import List, Generator, AsyncGenerator
import torch
from cosyvoice.utils.file_utils import logging
from cosyvoice.llm.llm import Qwen2LM

# Enable the vLLM V1 engine
import os
os.environ["VLLM_USE_V1"] = '1'
from vllm import ModelRegistry
from vllm import LLMEngine, AsyncLLMEngine, CompletionOutput
from vllm.engine.arg_utils import EngineArgs, AsyncEngineArgs
from vllm.sampling_params import SamplingParams

from cosyvoice.llm.vllm_use_cosyvoice2_model import CosyVoice2Model as CosyVoice2LLM
ModelRegistry.register_model("CosyVoice2Model", CosyVoice2LLM)

# EngineArgs
ENGINE_ARGS = {
    "block_size": 16,
    "swap_space": 0,
    # "enforce_eager": True,
    "gpu_memory_utilization": 0.4,
    "max_num_batched_tokens": 1024,
    "max_model_len": 1024,
    "max_num_seqs": 256,
    "disable_log_requests": True,
    "disable_log_stats": True,
    "dtype": "float16"
}

from vllm.sampling_params import RequestOutputKind
# SamplingParams
SAMPLING_PARAMS = {
    "temperature": 1,    # do not go below 0.8, otherwise the model emits large amounts of empty audio or fails to produce speech tokens
    "top_p": 1,          # do not go below 0.8, otherwise the model emits large amounts of empty audio or fails to produce speech tokens
    "top_k": 25,
    # "min_tokens": 80,          # setting a minimum token count is not supported; enabling it crashes vLLM at startup
    # "presence_penalty": 1.0,   # not supported
    # "frequency_penalty": 0.0,  # not supported
    "max_tokens": 1024,
    "detokenize": False,  # currently has no effect in vLLM 0.7.3 V1; revisit in a later release to save computation
    "ignore_eos": False,
    "output_kind": RequestOutputKind.DELTA  # set to DELTA; if you change this, also update the handling code in llm_inference
}


def tensor_to_list(tensor: torch.Tensor):
    return tensor.view(-1).cpu().numpy().tolist()


class VllmQwen2LM(Qwen2LM):
    def __init__(
            self,
            model_dir,
            mix_ratio: List[int] = [5, 15],
    ):
        self.fp16 = False
        self.half = lambda: None
        self.mix_ratio = mix_ratio
        # ---------------------------------------------
        # vLLM engine configuration
        engine_args = AsyncEngineArgs(
            model=model_dir,
            **ENGINE_ARGS,
        )
        self.llm_engine: AsyncLLMEngine = AsyncLLMEngine.from_engine_args(engine_args)

        self.speech_token_size = 6564   # 6561 + 3
        self.llm_token_size = 151936    # llm vocab_size
        self.sos_eos_token_id = self.speech_token_size + self.llm_token_size + 1
        self.task_token_id = self.sos_eos_token_id + 1
        self.zero_token_id = self.task_token_id + 1

        # vLLM inference has to run inside one fixed event loop, so start a dedicated background thread for it
        self.loop = asyncio.new_event_loop()
        self.loop_thread = threading.Thread(target=self._run_event_loop, daemon=True)
        self.loop_thread.start()

    def _run_event_loop(self):
        asyncio.set_event_loop(self.loop)
        self.loop.run_forever()

    async def async_llm_inference(self, out_queue, prompt_token_ids, request_id, stop_token_ids, max_tokens):
        sampling_params = SamplingParams(**SAMPLING_PARAMS)
        sampling_params.stop_token_ids = stop_token_ids or [6561]
        if max_tokens:
            sampling_params.max_tokens = max_tokens
        async for output in self.llm_engine.generate(
                {
                    "prompt_token_ids": prompt_token_ids,
                },
                sampling_params=sampling_params,
                request_id=request_id or f"{time.time()}",
        ):
            out_queue.put((output.outputs[0], output.finished))

    def llm_inference(self, prompt_token_ids: List[int], request_id: str = None, stop_token_ids=None, max_tokens=None):
        out_queue = queue.Queue()
        asyncio.run_coroutine_threadsafe(
            self.async_llm_inference(out_queue, prompt_token_ids, request_id, stop_token_ids, max_tokens), self.loop
        )
        # collect the results pushed into out_queue by the async task
        finished = False
        while not finished:
            (output, finished) = out_queue.get_nowait() if not out_queue.empty() else out_queue.get()
            yield output

    def inference(
            self,
            text: torch.Tensor,
            text_len: torch.Tensor,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor | int, None, None]:
        prompt_text = tensor_to_list(prompt_text + torch.tensor(6564))
        prompt_speech_token = tensor_to_list(prompt_speech_token)

        text = tensor_to_list(text + torch.tensor(6564))
        prompt_token_ids = [self.sos_eos_token_id] + prompt_text + text + \
                           [self.task_token_id] + prompt_speech_token
        max_tokens = len(text) * 20
        for output in self.llm_inference(
                prompt_token_ids,
                stop_token_ids=[6561],
                max_tokens=max_tokens,
        ):
            if output.token_ids[-1] == 6561:
                need_add_tokens = output.token_ids[:-1]
            else:
                need_add_tokens = output.token_ids
            for token in need_add_tokens:
                yield token

    def inference_bistream(
            self,
            text: Generator,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor, None, None]:
        prompt_text = tensor_to_list(prompt_text + torch.tensor(6564))
        prompt_speech_token = tensor_to_list(prompt_speech_token)

        last_tokens = []
        prompt_token_ids = [self.sos_eos_token_id]
        text_tokens_cache = prompt_text
        for this_text in text:
            this_text = tensor_to_list(this_text + torch.tensor(6564))
            # incoming text must already be token ids
            assert isinstance(this_text, list), "text must be a list of token ids (List[int])."
            text_tokens_cache += this_text
            while len(prompt_speech_token) != 0:
                if len(text_tokens_cache) >= self.mix_ratio[0]:
                    text_input_token = text_tokens_cache[:self.mix_ratio[0]]
                    speech_input_token = prompt_speech_token[:self.mix_ratio[1]]
                    prompt_token_ids += text_input_token + speech_input_token
                    # reset the cache
                    text_tokens_cache = text_tokens_cache[self.mix_ratio[0]:]
                    prompt_speech_token = prompt_speech_token[self.mix_ratio[1]:]
                else:
                    break
            if len(prompt_speech_token) == 0:
                if (len(last_tokens) > 0 and last_tokens[-1] == 6563) or len(prompt_token_ids) == 1:
                    if len(text_tokens_cache) >= self.mix_ratio[0]:
                        text_tokens_temp = text_tokens_cache[:self.mix_ratio[0]]
                        prompt_token_ids += text_tokens_temp
                        text_tokens_cache = text_tokens_cache[self.mix_ratio[0]:]
                    else:
                        continue
                for output in self.llm_inference(prompt_token_ids, stop_token_ids=[6563]):
                    last_tokens = output.token_ids
                    if last_tokens[-1] == 6563:
                        need_add_tokens = last_tokens[:-1]
                    else:
                        need_add_tokens = last_tokens
                    for token in need_add_tokens:
                        yield token
                    prompt_token_ids.extend(need_add_tokens)
        prompt_token_ids += text_tokens_cache + [self.task_token_id]
        for output in self.llm_inference(prompt_token_ids, stop_token_ids=[6561]):
            if output.token_ids[-1] == 6561:
                need_add_tokens = output.token_ids[:-1]
            else:
                need_add_tokens = output.token_ids
            for token in need_add_tokens:
                yield token
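A minimal usage sketch of the wrapper above (not part of the commit): it assumes a vLLM-loadable CosyVoice2 LLM export, and the path and tensor shapes below are placeholders for illustration only.

# Hedged sketch: driving VllmQwen2LM directly. `model_dir` and all tensors are dummies.
import torch
from cosyvoice.llm.llm_vllm import VllmQwen2LM

llm = VllmQwen2LM(model_dir="pretrained_models/CosyVoice2-0.5B/llm")  # hypothetical export path
speech_tokens = list(llm.inference(
    text=torch.randint(0, 1000, (1, 32)),                  # text token ids from the tokenizer
    text_len=torch.tensor([32]),
    prompt_text=torch.randint(0, 1000, (1, 8)),
    prompt_text_len=torch.tensor([8]),
    prompt_speech_token=torch.randint(0, 6561, (1, 50)),    # prompt speech tokens
    prompt_speech_token_len=torch.tensor([50]),
    embedding=torch.zeros(1, 192),                          # speaker embedding; unused by the vLLM path
))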
263 cosyvoice/llm/vllm_use_cosyvoice2_model.py Normal file
@@ -0,0 +1,263 @@
# SPDX-License-Identifier: Apache-2.0

# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/qwen2/modeling_qwen2.py
# Copyright 2024 The Qwen team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Qwen2 model compatible with HuggingFace weights."""
|
||||
from typing import Iterable, List, Optional, Set, Tuple, Union, Iterator, overload, TypedDict, Mapping, Any
|
||||
from typing_extensions import TypeVar
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
|
||||
from vllm.attention import AttentionMetadata
|
||||
from vllm.config import VllmConfig
|
||||
from vllm.logger import init_logger
|
||||
from vllm.model_executor.layers.logits_processor import LogitsProcessor
|
||||
from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler
|
||||
from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
|
||||
from vllm.model_executor.sampling_metadata import SamplingMetadata
|
||||
from vllm.sequence import IntermediateTensors
|
||||
|
||||
from vllm.model_executor.models.interfaces import T
|
||||
from vllm.model_executor.models.qwen2 import Qwen2Model
|
||||
|
||||
from vllm.model_executor.models.utils import AutoWeightsLoader, maybe_prefix, merge_multimodal_embeddings
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
IGNORE_ID = -1
|
||||
|
||||
|
||||
class CosyVoice2Model(nn.Module):

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config

        self.config = config
        self.lora_config = lora_config
        self.quant_config = quant_config

        self.llm_input_size = 896
        self.llm_output_size = 896

        self.speech_token_size = 6561 + 3
        self.llm_token_size = config.vocab_size

        # 2. build speech token language model related modules
        self.sos_eos = 0
        self.task_id = 1
        self.fill_token = 2

        self.allow_patterns_overrides = ["llm.*"]
        self.llm_embedding = torch.nn.Embedding(2, self.llm_input_size)
        self.model = Qwen2Model(vllm_config=vllm_config,
                                prefix=maybe_prefix(prefix, "model"))

        # self.llm_decoder = nn.Linear(self.llm_output_size, self.speech_token_size)
        self.llm_decoder = ParallelLMHead(self.speech_token_size,
                                          self.llm_output_size,
                                          bias=True,
                                          quant_config=quant_config,
                                          prefix=maybe_prefix(
                                              prefix, "llm_decoder"))
        self.logits_processor = LogitsProcessor(self.speech_token_size)

        # length_normalized_loss: bool = True,
        # lsm_weight: float = 0.0,
        # self.criterion_ce = LabelSmoothingLoss(
        #     size=self.speech_token_size,
        #     padding_idx=IGNORE_ID,
        #     smoothing=lsm_weight,
        #     normalize_length=length_normalized_loss,
        # )

        # 3. [Optional] build speech token related modules
        self.speech_embedding = torch.nn.Embedding(self.speech_token_size, self.llm_input_size)

        # 4. sampling method
        # use vLLM's sampling method
        self.sampler = get_sampler()
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors)

        self.mix_ratio: List[int] = [5, 15]

        # special-token id constants
        self.llm_token_id_delta = torch.tensor(self.speech_token_size, dtype=torch.int32)
        self.sos_eos_token_id = torch.tensor((self.llm_token_id_delta + self.llm_token_size + 1), dtype=torch.int32)  # 163840 + 6564 = 170404
        self.task_token_id = self.sos_eos_token_id + torch.tensor(1, dtype=torch.int32)  # 170405
        self.zero_token_id = self.task_token_id + torch.tensor(1, dtype=torch.int32)

        self.zero_embed_buffer = torch.zeros(
            (vllm_config.scheduler_config.max_num_seqs, self.llm_input_size),
            dtype=self.llm_embedding.weight.dtype,
            device=self.llm_embedding.weight.device
        )
        self.inputs_embed_buffer = torch.zeros(
            (vllm_config.scheduler_config.max_num_batched_tokens, self.llm_input_size),
            dtype=self.llm_embedding.weight.dtype,
            device=self.llm_embedding.weight.device,
        )

    def get_sos_eos_emb(self):
        return self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)

    def get_task_id_emb(self):
        return self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)

    def get_input_embeddings(
            self,
            input_ids: torch.Tensor,
            multimodal_embeddings: Optional[T] = None,
            attn_metadata: Optional["AttentionMetadata"] = None,
    ) -> torch.Tensor:
        """
        Returns the input embeddings merged from the text embeddings from
        input_ids and the multimodal embeddings generated from multimodal
        kwargs.
        """
        # mask marking which token ids are speech tokens
        mask = input_ids < self.speech_token_size

        # remember the original shape of input_ids
        input_shape = input_ids.shape
        # flatten input_ids and the mask for uniform processing
        flat_input_ids = input_ids.view(-1)
        flat_mask = mask.view(-1)

        inputs_embeds = self.inputs_embed_buffer[:flat_input_ids.shape[0]]
        inputs_embeds.zero_()

        # process speech tokens
        if flat_mask.any():
            speech_token_ids = flat_input_ids[flat_mask]
            inputs_embeds[flat_mask] = self.speech_embedding(speech_token_ids)

        # handle token ids at or above the speech-token range (text and special tokens)
        if (~flat_mask).any():
            llm_token_ids = flat_input_ids[~flat_mask]
            llm_embeds = torch.zeros_like(inputs_embeds[~flat_mask])

            sos_eos_mask = llm_token_ids == self.sos_eos_token_id
            task_mask = llm_token_ids == self.task_token_id
            zero_mask = llm_token_ids == self.zero_token_id
            normal_mask = ~(sos_eos_mask | task_mask | zero_mask)

            # handle each special-token class in turn
            # 1) SOS/EOS tokens
            if sos_eos_mask.any():
                llm_embeds[sos_eos_mask] = self.llm_embedding.weight[self.sos_eos].unsqueeze(0)

            # 2) task tokens
            if task_mask.any():
                llm_embeds[task_mask] = self.llm_embedding.weight[self.task_id].unsqueeze(0)

            # 3) zero (empty-audio) tokens
            if zero_mask.any():
                llm_embeds[zero_mask] = self.zero_embed_buffer[:len(llm_embeds[zero_mask])]

            # 4) regular LLM text tokens
            if normal_mask.any():
                original_ids = llm_token_ids[normal_mask] - self.llm_token_id_delta
                # print('original_ids: ', original_ids)
                llm_embeds[normal_mask] = self.model.get_input_embeddings(original_ids)

            inputs_embeds[~flat_mask] = llm_embeds

        inputs_embeds = inputs_embeds.view(*input_shape, self.llm_input_size)

        # merge multimodal embeddings, if any
        if multimodal_embeddings is not None:
            inputs_embeds = merge_multimodal_embeddings(
                input_ids, inputs_embeds, multimodal_embeddings,
                self.config.audio_token_index
            )
        return inputs_embeds

    def forward(
            self,
            input_ids: torch.Tensor,
            positions: torch.Tensor,
            kv_caches: List[torch.Tensor],
            attn_metadata: AttentionMetadata,
            intermediate_tensors: Optional[IntermediateTensors] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings(
                input_ids,
                attn_metadata=attn_metadata,
            )
        return self.model(input_ids, positions, kv_caches,
                          attn_metadata, intermediate_tensors,
                          inputs_embeds)

    def compute_logits(
            self,
            hidden_states: torch.Tensor,
            sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.llm_decoder, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
            self,
            logits: torch.Tensor,
            sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    @staticmethod
    def convert_weights(weights: Iterable[Tuple[str, torch.Tensor]]) -> Iterable[Tuple[str, torch.Tensor]]:
        for name, param in weights:
            # keep the Qwen2Model core weights and remap their prefix
            if name.startswith("llm."):
                if name.startswith("llm.model.model."):
                    name = name.replace("llm.model.model.", "model.")
                else:
                    continue
            # print('weights name: ', name)
            yield name, param

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        weights = self.convert_weights(weights)
        loader = AutoWeightsLoader(self)
        loader.load_weights(weights)
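For reference, a small sketch (not part of the commit) of the shared token-id layout both files rely on, mirroring the branching in get_input_embeddings; the vocabulary size is whatever the loaded Qwen2 config reports (llm_vllm.py hard-codes 151936).

# Hedged sketch of the combined token-id space; constants copied from the code above.
speech_token_size = 6561 + 3             # speech ids occupy [0, 6564)
llm_token_id_delta = speech_token_size   # text ids are shifted up by this delta
llm_token_size = 151936                  # Qwen2 vocab size assumed by llm_vllm.py
sos_eos_token_id = llm_token_id_delta + llm_token_size + 1
task_token_id = sos_eos_token_id + 1
zero_token_id = task_token_id + 1

def route(token_id: int) -> str:
    """Which embedding a given id is looked up in."""
    if token_id < speech_token_size:
        return "speech_embedding"
    if token_id == sos_eos_token_id:
        return "llm_embedding[sos_eos]"
    if token_id == task_token_id:
        return "llm_embedding[task_id]"
    if token_id == zero_token_id:
        return "zero embedding"
    return f"Qwen2 text embedding of id {token_id - llm_token_id_delta}"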