Mirror of https://github.com/OpenBMB/MiniCPM-V.git (synced 2026-02-05 18:29:18 +08:00)
Update to MiniCPM-V 2.6
@@ -6,13 +6,14 @@ NODE_RANK=0
 MASTER_ADDR=localhost
 MASTER_PORT=6001
 
-MODEL="openbmb/MiniCPM-Llama3-V-2_5" # or openbmb/MiniCPM-V-2
+MODEL="openbmb/MiniCPM-V-2_6" # or openbmb/MiniCPM-V-2, openbmb/MiniCPM-Llama3-V-2_5
 # ATTENTION: specify the path to your training data, which should be a json file consisting of a list of conversations.
 # See the section for finetuning in README for more information.
 DATA="path/to/trainging_data"
 EVAL_DATA="path/to/test_data"
-LLM_TYPE="llama3" # if use openbmb/MiniCPM-V-2, please set LLM_TYPE=minicpm
-
+LLM_TYPE="qwen2"
+# if use openbmb/MiniCPM-V-2, please set LLM_TYPE=minicpm
+#if use openbmb/MiniCPM-Llama3-V-2_5, please set LLM_TYPE=llama3
 DISTRIBUTED_ARGS="
     --nproc_per_node $GPUS_PER_NODE \
     --nnodes $NNODES \
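
The comments added above spell out the model-to-LLM_TYPE mapping in prose. A minimal shell sketch of the same selection, not part of the commit and with glob patterns that are only illustrative:

# Sketch only: derive LLM_TYPE from MODEL, following the mapping given in the comments above.
case "$MODEL" in
  *MiniCPM-V-2_6*)        LLM_TYPE="qwen2"   ;;  # MiniCPM-V 2.6 builds on Qwen2
  *MiniCPM-Llama3-V-2_5*) LLM_TYPE="llama3"  ;;  # 2.5 builds on Llama 3
  *MiniCPM-V-2*)          LLM_TYPE="minicpm" ;;  # 2.0 uses the MiniCPM LLM; kept last so it does not shadow 2_6
esac
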
@@ -42,12 +43,12 @@ torchrun $DISTRIBUTED_ARGS finetune.py \
     --max_slice_nums 9 \
     --max_steps 10000 \
     --eval_steps 1000 \
-    --output_dir output/output_minicpmv2_lora \
-    --logging_dir output/output_minicpmv2_lora \
+    --output_dir output/output__lora \
+    --logging_dir output/output_lora \
     --logging_strategy "steps" \
-    --per_device_train_batch_size 2 \
+    --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
-    --gradient_accumulation_steps 8 \
+    --gradient_accumulation_steps 1 \
     --evaluation_strategy "steps" \
     --save_strategy "steps" \
     --save_steps 1000 \
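
The hyperparameter changes in this hunk also shrink the effective global batch size, which is per_device_train_batch_size × gradient_accumulation_steps × nproc_per_node × nnodes. A quick sketch, assuming GPUS_PER_NODE=8 and NNODES=1 (both are set near the top of the script and are not shown in these hunks):

# Sketch only: effective global batch size before and after this commit.
GPUS_PER_NODE=8   # assumed value; see the top of the script
NNODES=1          # assumed value
echo "old: $((2 * 8 * GPUS_PER_NODE * NNODES)) samples per optimizer step"   # batch 2, accumulation 8
echo "new: $((1 * 1 * GPUS_PER_NODE * NNODES)) samples per optimizer step"   # batch 1, accumulation 1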