diff --git a/finetune/__init__.py b/finetune/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/omnilmm/finetune/dataset.py b/finetune/dataset.py
similarity index 98%
rename from omnilmm/finetune/dataset.py
rename to finetune/dataset.py
index 19592eb..b93035a 100644
--- a/omnilmm/finetune/dataset.py
+++ b/finetune/dataset.py
@@ -1,13 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright @2024 AI, ZHIHU Inc. (zhihu.com)
-#
-# @author: wangchongyi
-# @author: chenqianyu
-# @date: 2024/5/06
-#
-
 import os
 import math
 import json
diff --git a/omnilmm/finetune/ds_config_zero2.json b/finetune/ds_config_zero2.json
similarity index 100%
rename from omnilmm/finetune/ds_config_zero2.json
rename to finetune/ds_config_zero2.json
diff --git a/omnilmm/finetune/ds_config_zero3.json b/finetune/ds_config_zero3.json
similarity index 100%
rename from omnilmm/finetune/ds_config_zero3.json
rename to finetune/ds_config_zero3.json
diff --git a/omnilmm/finetune/finetune.py b/finetune/finetune.py
similarity index 95%
rename from omnilmm/finetune/finetune.py
rename to finetune/finetune.py
index 9f2f7ec..6a751e3 100644
--- a/omnilmm/finetune/finetune.py
+++ b/finetune/finetune.py
@@ -1,11 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright @2024 AI, ZHIHU Inc. (zhihu.com)
-#
-# @author: chenqianyu
-# @date: 2024/5/03
-#
 import os
 import glob
 import json
diff --git a/omnilmm/finetune/finetune_ds.sh b/finetune/finetune_ds.sh
similarity index 100%
rename from omnilmm/finetune/finetune_ds.sh
rename to finetune/finetune_ds.sh
diff --git a/omnilmm/finetune/readme.md b/finetune/readme.md
similarity index 96%
rename from omnilmm/finetune/readme.md
rename to finetune/readme.md
index 136aa84..26c3c47 100644
--- a/omnilmm/finetune/readme.md
+++ b/finetune/readme.md
@@ -64,6 +64,3 @@ sh finetune_ds.sh
 ```
 #### Customizing Hyperparameters
 To tailor the training process according to your specific requirements, you can adjust various hyperparameters. For comprehensive documentation on available hyperparameters and their functionalities, you can refer to the [official Transformers documentation](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments). Experimentation and fine-tuning of these parameters are essential for achieving optimal model performance tailored to your specific task and dataset.
-### LoRA finetuning
-
-**This part is still unfinished, and we will complete it as soon as possible.**
diff --git a/omnilmm/finetune/trainer.py b/finetune/trainer.py
similarity index 97%
rename from omnilmm/finetune/trainer.py
rename to finetune/trainer.py
index e8794dc..53ffaa6 100644
--- a/omnilmm/finetune/trainer.py
+++ b/finetune/trainer.py
@@ -1,11 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright @2024 AI, ZHIHU Inc. (zhihu.com)
-#
-# @author: chenqianyu
-# @date: 2024/5/03
-#
 import torch
 import torch.nn as nn
 from typing import Tuple, Union, Optional, List, Dict, Any
diff --git a/omnilmm/finetune/__init__.py b/omnilmm/finetune/__init__.py
deleted file mode 100644
index 9b79fbc..0000000
--- a/omnilmm/finetune/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright @2024 AI, ZHIHU Inc. (zhihu.com)
-#
-# @author: chenqianyu
-# @date: 2024/5/02
-#