mirror of https://github.com/FunAudioLLM/CosyVoice.git (synced 2026-02-04 17:39:25 +08:00)
add online trt export
@@ -53,7 +53,9 @@ class CosyVoice:
                                 '{}/llm.llm.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
                                 '{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
         if load_trt:
-            self.model.load_trt('{}/flow.decoder.estimator.{}.v100.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
+            self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
+                                self.fp16)
         del configs

     def list_available_spks(self):
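With this change, passing load_trt=True no longer depends on a device-specific pre-built plan (the old v100.plan): if flow.decoder.estimator.{fp16,fp32}.mygpu.plan is missing, it is exported on the fly from flow.decoder.estimator.fp32.onnx for whatever GPU is present. A minimal usage sketch; the flags load_jit/load_trt/fp16 appear in this diff, while the model directory and the inference_sft call follow the upstream README and may differ between revisions:

# Sketch: the first run with load_trt=True builds the .mygpu.plan from the
# fp32 ONNX on the local GPU; later runs just deserialize the cached plan.
from cosyvoice.cli.cosyvoice import CosyVoice

cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT',
                      load_jit=False, load_trt=True, fp16=True)
for i, out in enumerate(cosyvoice.inference_sft('你好，欢迎使用语音合成。', '中文女', stream=False)):
    print(i, out['tts_speech'].shape)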
@@ -149,7 +151,9 @@ class CosyVoice2(CosyVoice):
         if load_jit:
             self.model.load_jit('{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
         if load_trt:
-            self.model.load_trt('{}/flow.decoder.estimator'.format(model_dir), self.fp16)
+            self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
+                                self.fp16)
         del configs

     def inference_instruct(self, *args, **kwargs):
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
 import torch
 import numpy as np
 import threading
@@ -19,7 +20,7 @@ from torch.nn import functional as F
 from contextlib import nullcontext
 import uuid
 from cosyvoice.utils.common import fade_in_out
-from cosyvoice.trt.estimator_trt import EstimatorTRT
+from cosyvoice.utils.file_utils import convert_onnx_to_trt


 class CosyVoiceModel:
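The EstimatorTRT wrapper import is dropped in favour of convert_onnx_to_trt from cosyvoice.utils.file_utils, which is what makes the export "online": it builds a TensorRT engine from the estimator ONNX on the local GPU. That helper is not part of this diff; the following is only a rough sketch of what such a conversion typically looks like (input names, dynamic-shape handling, and build settings are assumptions, not the repo's actual implementation):

import tensorrt as trt

def convert_onnx_to_trt(trt_model, onnx_model, fp16):
    # Sketch only: parse the estimator ONNX and serialize a .plan for this GPU.
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, logger)
    with open(onnx_model, 'rb') as f:
        if not parser.parse(f.read()):
            raise ValueError('failed to parse {}'.format(onnx_model))
    config = builder.create_builder_config()
    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    # The estimator has dynamic time dimensions, so a real implementation would
    # also register an optimization profile (min/opt/max shapes per input) via
    # config.add_optimization_profile(profile) before building.
    plan = builder.build_serialized_network(network, config)
    if plan is None:
        raise ValueError('failed to build trt engine from {}'.format(onnx_model))
    with open(trt_model, 'wb') as f:
        f.write(plan)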
@@ -36,6 +37,9 @@ class CosyVoiceModel:
         self.fp16 = fp16
         self.llm.fp16 = fp16
         self.flow.fp16 = fp16
+        if self.fp16 is True:
+            self.llm.half()
+            self.flow.half()
         self.token_min_hop_len = 2 * self.flow.input_frame_rate
         self.token_max_hop_len = 4 * self.flow.input_frame_rate
         self.token_overlap_len = 20
@@ -70,9 +74,6 @@ class CosyVoiceModel:
         hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
         self.hift.load_state_dict(hift_state_dict, strict=True)
         self.hift.to(self.device).eval()
-        if self.fp16 is True:
-            self.llm.half()
-            self.flow.half()

     def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
         llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
@@ -82,9 +83,17 @@ class CosyVoiceModel:
         flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
         self.flow.encoder = flow_encoder

-    def load_trt(self, flow_decoder_estimator_model, fp16):
+    def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, fp16):
         assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
+        if not os.path.exists(flow_decoder_estimator_model):
+            convert_onnx_to_trt(flow_decoder_estimator_model, flow_decoder_onnx_model, fp16)
         del self.flow.decoder.estimator
-        self.flow.decoder.estimator = EstimatorTRT(flow_decoder_estimator_model, self.device, fp16)
+        import tensorrt as trt
+        with open(flow_decoder_estimator_model, 'rb') as f:
+            self.flow.decoder.estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
+        if self.flow.decoder.estimator_engine is None:
+            raise ValueError('failed to load trt {}'.format(flow_decoder_estimator_model))
+        self.flow.decoder.estimator = self.flow.decoder.estimator_engine.create_execution_context()

     def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
         with self.llm_context:
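After this rewrite, self.flow.decoder.estimator is no longer a torch module (the old EstimatorTRT wrapper) but a raw TensorRT execution context created from the deserialized engine, so the flow-matching decoder has to drive it with explicit bindings. The call site itself is outside this diff; a hedged sketch of how such an execution context is generally invoked (input names, their order, and the single output are assumptions that would have to match the ONNX export):

def run_trt_estimator(context, inputs, output):
    # Sketch: 'context' is the IExecutionContext created in load_trt above,
    # 'inputs' maps ONNX input names to contiguous CUDA tensors, and 'output'
    # is a pre-allocated CUDA tensor. Bindings must follow the engine's I/O order.
    for name, tensor in inputs.items():
        context.set_input_shape(name, tuple(tensor.shape))
    bindings = [t.contiguous().data_ptr() for t in inputs.values()]
    bindings.append(output.data_ptr())
    assert context.execute_v2(bindings), 'trt estimator execution failed'
    return output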
@@ -269,6 +278,9 @@ class CosyVoice2Model(CosyVoiceModel):
         self.fp16 = fp16
         self.llm.fp16 = fp16
         self.flow.fp16 = fp16
+        if self.fp16 is True:
+            self.llm.half()
+            self.flow.half()
         self.token_hop_len = 2 * self.flow.input_frame_rate
         # here we fix flow encoder/decoder decoding_chunk_size, in the future we will send it as arguments, or use cache
         self.flow.encoder.static_chunk_size = 2 * self.flow.input_frame_rate
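The same adjustment appears in both model classes: the half-precision conversion now runs in __init__ rather than in load(), so a model constructed with fp16=True is already half precision even before (or without) loading checkpoints through load(). A small sketch of the effect, assuming the llm/flow/hift modules are built from the repo's YAML config and a downloaded pretrained_models/CosyVoice-300M directory:

import torch
from hyperpyyaml import load_hyperpyyaml
from cosyvoice.cli.model import CosyVoiceModel

# Sketch: fp16 is applied at construction time, not inside load().
with open('pretrained_models/CosyVoice-300M/cosyvoice.yaml', 'r') as f:
    configs = load_hyperpyyaml(f)
model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16=True)
assert next(model.flow.parameters()).dtype == torch.float16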