Fix LoRA finetune inference bug (#224)

This commit is contained in:
qianyu chen
2024-06-07 18:00:22 +08:00
committed by GitHub
parent 31eaa26ee1
commit 9bd93a281c
6 changed files with 61 additions and 17 deletions

View File

@@ -40,7 +40,6 @@ torchrun $DISTRIBUTED_ARGS finetune.py \
--lora_target_modules "llm\..*layers\.\d+\.self_attn\.(q_proj|k_proj)" \
--model_max_length 2048 \
--max_slice_nums 9 \
--scale_resolution 448 \
--max_steps 10000 \
--eval_steps 1000 \
--output_dir output/output_minicpmv2_lora \