Model Fine-tuning Memory Usage Statistics (#160)

qianyu chen
2024-05-28 11:41:27 +08:00
committed by GitHub
parent 7e12387362
commit f592fedb2e
4 changed files with 30 additions and 10 deletions


@@ -42,7 +42,7 @@ torchrun $DISTRIBUTED_ARGS finetune.py \
     --output_dir output/output_minicpmv2_lora \
     --logging_dir output/output_minicpmv2_lora \
     --logging_strategy "steps" \
-    --per_device_train_batch_size w \
+    --per_device_train_batch_size 2 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 1 \
     --evaluation_strategy "steps" \
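The first hunk fixes a typo: "w" is not a valid value for --per_device_train_batch_size, which is parsed as an integer, so the launch command failed at startup. With the corrected value, the effective global batch size can be sanity-checked as below (a sketch only; the world size is set via $DISTRIBUTED_ARGS, which this excerpt does not show, so 8 GPUs is an assumption):

    # Effective global batch = per-device batch * grad accumulation * world size.
    # WORLD_SIZE=8 is an assumption; the real value comes from $DISTRIBUTED_ARGS.
    PER_DEVICE_BS=2; GRAD_ACCUM=1; WORLD_SIZE=8
    echo $(( PER_DEVICE_BS * GRAD_ACCUM * WORLD_SIZE ))   # prints 16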
@@ -57,5 +57,4 @@ torchrun $DISTRIBUTED_ARGS finetune.py \
     --logging_steps 1 \
     --gradient_checkpointing true \
     --deepspeed ds_config_zero2.json \
-    --report_to "tensorboard" \ # wandb
+    --report_to "tensorboard" # wandb
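The second hunk removes a stray backslash in front of the trailing comment. In bash, a backslash followed by a space escapes the space, so the old line passed a lone-space argument (" ") to finetune.py before the "#" started the comment. A minimal sketch of the difference, independent of the repo:

    printf '%s|' a \ # comment    # prints "a| |": the escaped space becomes an extra argument
    printf '%s|' a   # comment    # prints "a|": the comment is ignored as intended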