diff --git a/cosyvoice/bin/export_jit.py b/cosyvoice/bin/export_jit.py
index 0013d64..8dd6531 100644
--- a/cosyvoice/bin/export_jit.py
+++ b/cosyvoice/bin/export_jit.py
@@ -62,14 +62,6 @@ def main():
     model = AutoModel(model_dir=args.model_dir)

     if get_model_type(model.model) == CosyVoiceModel:
-        # 1. export flow encoder
-        flow_encoder = model.model.flow.encoder
-        script = get_optimized_script(flow_encoder)
-        script.save('{}/flow.encoder.fp32.zip'.format(args.model_dir))
-        script = get_optimized_script(flow_encoder.half())
-        script.save('{}/flow.encoder.fp16.zip'.format(args.model_dir))
-        logging.info('successfully export flow_encoder')
-    elif get_model_type(model.model) == CosyVoice2Model:
         # 1. export llm text_encoder
         llm_text_encoder = model.model.llm.text_encoder
         script = get_optimized_script(llm_text_encoder)
@@ -93,6 +85,14 @@ def main():
         script = get_optimized_script(flow_encoder.half())
         script.save('{}/flow.encoder.fp16.zip'.format(args.model_dir))
         logging.info('successfully export flow_encoder')
+    elif get_model_type(model.model) == CosyVoice2Model:
+        # 1. export flow encoder
+        flow_encoder = model.model.flow.encoder
+        script = get_optimized_script(flow_encoder)
+        script.save('{}/flow.encoder.fp32.zip'.format(args.model_dir))
+        script = get_optimized_script(flow_encoder.half())
+        script.save('{}/flow.encoder.fp16.zip'.format(args.model_dir))
+        logging.info('successfully export flow_encoder')
     else:
         raise ValueError('unsupported model type')
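
For context, the get_optimized_script helper used throughout this patch is not shown in the hunks above. Below is a minimal sketch of the export pattern, assuming the helper wraps the standard TorchScript pipeline (torch.jit.script, then torch.jit.freeze, then torch.jit.optimize_for_inference); the toy encoder module and the output paths are purely illustrative, not taken from the repo.

# Sketch only: the body of get_optimized_script is an assumption, not copied from the repo.
import torch


def get_optimized_script(module: torch.nn.Module) -> torch.jit.ScriptModule:
    # Script the module in eval mode, freeze it, then run inference-oriented passes.
    script = torch.jit.script(module.eval())
    script = torch.jit.optimize_for_inference(torch.jit.freeze(script))
    return script


if __name__ == '__main__':
    # Toy stand-in for model.model.flow.encoder; exports fp32 and fp16 variants
    # the same way the patched branches do.
    encoder = torch.nn.Sequential(torch.nn.Linear(80, 256), torch.nn.ReLU())
    get_optimized_script(encoder).save('flow.encoder.fp32.zip')
    get_optimized_script(encoder.half()).save('flow.encoder.fp16.zip')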