Modify eval_mm for MiniCPM-o 2.6

This commit is contained in:
Poppy Xu
2025-01-21 15:34:54 +08:00
parent ec68cefc17
commit d8f382e157
82 changed files with 14279 additions and 843 deletions

View File

@@ -22,7 +22,7 @@ from eval_utils.vqa_evaluate import *
def get_model(args):
if args.model_name == '':
raise Exception('Model name cannot be empty str!')
from models.MiniCPM.minicpmv import MiniCPM_V, MiniCPM_V_2_6
from models.MiniCPM.minicpmv import MiniCPM_V, MiniCPM_V_2_6, MiniCPM_o_2_6
model_path = args.model_path
ckpt = args.ckpt
@@ -30,6 +30,8 @@ def get_model(args):
model = MiniCPM_V(model_path=model_path, ckpt=ckpt, device=args.device)
elif args.model_name == 'minicpmv26':
model = MiniCPM_V_2_6(model_path=model_path, ckpt=ckpt, device=args.device)
elif args.model_name == 'minicpmo26':
model = MiniCPM_o_2_6(model_path=model_path, ckpt=ckpt, device=args.device)
else:
raise Exception(f"Unexpected Moedel Name {args.model_name}!")
@@ -67,15 +69,16 @@ def main(args):
dataset = docVQADataset(args.docVQA_image_dir, args.docVQA_ann_path)
if max_sample_num is not None:
dataset = torch.utils.data.Subset(dataset, range(max_sample_num))
acc = evaluate_VQA(model, dataset, args.model_name, 'docVQA', time, batch_size=args.batchsize, generate_method=args.generate_method, answer_path=args.answer_path)
acc = evaluate_VQA(model, dataset, args.model_name, 'docVQA', time,
batch_size=args.batchsize, generate_method=args.generate_method, answer_path=args.answer_path)
result['docVQA'] = acc
if args.eval_docVQATest or args.eval_all:
target_dataset = "docVQATest"
dataset = docVQATESTDataset(args.docVQATest_image_dir, args.docVQATest_ann_path)
if max_sample_num is not None:
dataset = torch.utils.data.Subset(dataset, range(max_sample_num))
acc = evaluate_VQA(model, dataset, args.model_name, target_dataset, time, batch_size=args.batchsize, generate_method=args.generate_method, answer_path=args.answer_path)
acc = evaluate_VQA(model, dataset, args.model_name, target_dataset, time,
batch_size=args.batchsize, generate_method=args.generate_method, answer_path=args.answer_path)
result['docVQATest'] = acc
if torch.distributed.is_initialized():

View File

@@ -370,8 +370,6 @@ def evaluate_VQA(
generate_method="interleave",
answer_path='./answers',
):
print(f"answer path:{answer_path}")
sampler = None
if torch.distributed.is_initialized():
sampler=InferenceSampler(len(dataset))
@@ -383,8 +381,6 @@ def evaluate_VQA(
collate_fn=collate_fn_vqa
)
now_rank = torch.distributed.get_rank()
answer_dir = os.path.join(answer_path, model_name, time)
os.makedirs(answer_dir, exist_ok=True)
@@ -395,21 +391,15 @@ def evaluate_VQA(
predictions = []
for batch in tqdm(dataloader, desc="Running inference"):
image_paths, questions, gt_answers, ocr_tokens_list, question_ids, question_type = batch
with torch.no_grad():
if model_name != "minicpm":
if model_name != "codellama":
outputs = model.generate(images=image_paths, questions=questions, datasetname=dataset_name)
else:
outputs = model.generate()
elif model_name == "minicpm":
if generate_method == "old":
outputs = model.generate(images=image_paths, questions=questions, datasetname=dataset_name)
elif generate_method == "interleave":
outputs = model.generate_with_interleaved(images=image_paths, questions=questions, datasetname=dataset_name)
else:
raise Exception(f"Wrong generate paradigm {generate_method}!")
if generate_method == "old":
outputs = model.generate(images=image_paths, questions=questions, datasetname=dataset_name)
elif generate_method == "interleave":
outputs = model.generate_with_interleaved(images=image_paths, questions=questions, datasetname=dataset_name)
else:
raise Exception(f"Wrong generate paradigm {generate_method}!")
for i in range(len(outputs)):
answer_dict = {
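With the per-model branching gone, evaluate_VQA now assumes every wrapper exposes the same two entry points; a minimal sketch of that implicit interface (the Protocol wrapper is our illustration, the method names and signature come from the calls above):

from typing import List, Protocol

class VQAModel(Protocol):
    # Implicit contract evaluate_VQA relies on after this commit.
    def generate(self, images: List[str], questions: List[str], datasetname: str) -> List[str]: ...
    def generate_with_interleaved(self, images: List[str], questions: List[str], datasetname: str) -> List[str]: ...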

View File

@@ -33,14 +33,9 @@ class MiniCPM_V:
def generate(self, images, questions, datasetname):
image = Image.open(images[0]).convert('RGB')
try:
max_new_tokens = max_token[datasetname]
except:
max_new_tokens = 1024
if (datasetname == 'docVQA') or (datasetname == "docVQATest") :
prompt = "Answer the question directly with single word." + "\n" + questions[0]
elif (datasetname == 'textVQA') :
prompt = "Answer the question directly with single word." + '\n'+ questions[0]
max_new_tokens = max_token[datasetname]
prompt = "Answer the question directly with single word." + '\n' + questions[0]
msgs = [{'role': 'user', 'content': prompt}]
default_kwargs = dict(
@@ -59,10 +54,7 @@ class MiniCPM_V:
return [res]
def generate_with_interleaved(self, images, questions, datasetname):
try:
max_new_tokens = max_token[datasetname]
except:
max_new_tokens = 1024
max_new_tokens = max_token[datasetname]
prompt = "Answer the question directly with single word."
@@ -103,11 +95,10 @@ class MiniCPM_V:
class MiniCPM_V_2_6:
def __init__(self, model_path, ckpt, device=None) -> None:
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
self.model_path = model_path
self.ckpt = ckpt
@@ -125,14 +116,17 @@ class MiniCPM_V_2_6:
def generate(self, images, questions, datasetname):
image = Image.open(images[0]).convert('RGB')
try:
max_new_tokens = max_token[datasetname]
except:
max_new_tokens = 1024
if (datasetname == 'docVQA') or (datasetname == "docVQATest") :
prompt = "Answer the question directly with single word." + "\n" + questions[0]
elif (datasetname == 'textVQA') :
prompt = "Answer the question directly with single word." + '\n'+ questions[0]
img_width, img_height = image.width, image.height
if (img_width * img_height) < (1344 * 1344):
ratio = math.sqrt((1344 * 1344) / (img_width * img_height))
max_img_width = int(img_width * ratio)
new_img_width = random.randint(img_width, max_img_width)
new_img_height = int(new_img_width / img_width * img_height)
image = image.resize((new_img_width, new_img_height))
max_new_tokens = max_token[datasetname]
prompt = "Answer the question directly with single word." + '\n' + questions[0]
msgs = [{'role': 'user', 'content': prompt}]
default_kwargs = dict(
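The new branch in generate upscales any image whose area falls below 1344 x 1344, picking a random width between the original and the full upscale while preserving the aspect ratio. A standalone sketch of the same recipe (the function name is ours):

import math
import random
from PIL import Image

def upsample_small_image(image: Image.Image, min_area: int = 1344 * 1344) -> Image.Image:
    # Mirrors the branch above: images already at or above min_area pass through unchanged.
    w, h = image.width, image.height
    if w * h >= min_area:
        return image
    ratio = math.sqrt(min_area / (w * h))
    max_w = int(w * ratio)
    new_w = random.randint(w, max_w)   # randomized width in [w, w * ratio]
    new_h = int(new_w / w * h)         # keep aspect ratio
    return image.resize((new_w, new_h))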
@@ -151,10 +145,7 @@ class MiniCPM_V_2_6:
return [res]
def generate_with_interleaved(self, images, questions, datasetname):
try:
max_new_tokens = max_token[datasetname]
except:
max_new_tokens = 1024
max_new_tokens = max_token[datasetname]
prompt = "Answer the question directly with single word."
@@ -197,5 +188,117 @@ class MiniCPM_V_2_6:
if isinstance(res, tuple) and len(res) > 0:
res = res[0]
print(f"Q: {content}, \nA: {res}")
return [res]
class MiniCPM_o_2_6:
def __init__(self, model_path, ckpt, device=None) -> None:
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
self.model_path = model_path
self.ckpt = ckpt
self.model = AutoModel.from_pretrained(
self.model_path,
trust_remote_code=True,
attn_implementation='sdpa',
torch_dtype=torch.bfloat16,
init_vision=True,
init_audio=False,
init_tts=False
)
if self.ckpt is not None:
state_dict = torch.load(self.ckpt, map_location=torch.device('cpu'))
self.model.load_state_dict(state_dict)
self.model = self.model.eval().to(device)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True)
torch.cuda.empty_cache()
def generate(self, images, questions, datasetname):
image = Image.open(images[0]).convert('RGB')
img_width, img_height = image.width, image.height
if (img_width * img_height) < (1344 * 1344):
ratio = math.sqrt((1344 * 1344) / (img_width * img_height))
max_img_width = int(img_width * ratio)
new_img_width = random.randint(img_width, max_img_width)
new_img_height = int(new_img_width / img_width * img_height)
image = image.resize((new_img_width, new_img_height))
max_new_tokens = max_token[datasetname]
prompt = "Answer the question directly with single word." + '\n' + questions[0]
msgs = [{'role': 'user', 'content': prompt}]
default_kwargs = dict(
max_new_tokens=max_new_tokens,
sampling=False,
num_beams=3,
max_inp_length=8192,
use_image_id=True,
max_slice_nums=None
)
res = self.model.chat(
image=image,
msgs=msgs,
context=None,
tokenizer=self.tokenizer,
**default_kwargs
)
return [res]
def generate_with_interleaved(self, images, questions, datasetname):
max_new_tokens = max_token[datasetname]
prompt = "Answer the question directly with single word."
default_kwargs = dict(
max_new_tokens=max_new_tokens,
sampling=False,
num_beams=3,
max_inp_length=8192,
use_image_id=True,
max_slice_nums=None
)
content = []
message = [
{'type': 'text', 'value': prompt},
{'type': 'image', 'value': images[0]},
{'type': 'text', 'value': questions[0]}
]
for x in message:
if x['type'] == 'text':
content.append(x['value'])
elif x['type'] == 'image':
image = Image.open(x['value']).convert('RGB')
img_width, img_height = image.width, image.height
if (img_width * img_height) >= (1344 * 1344):
content.append(image)
else:
ratio = math.sqrt((1344 * 1344) / (img_width * img_height))
max_img_width = int(img_width * ratio)
new_img_width = random.randint(img_width, max_img_width)
new_img_height = int(new_img_width / img_width * img_height)
resized_image = image.resize((new_img_width, new_img_height))
content.append(resized_image)
msgs = [{'role': 'user', 'content': content}]
res = self.model.chat(
image=None,
msgs=msgs,
context=None,
tokenizer=self.tokenizer,
**default_kwargs
)
if isinstance(res, tuple) and len(res) > 0:
res = res[0]
print(f"Q: {content}, \nA: {res}")
return [res]
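Taken together, a hypothetical smoke test for the new wrapper (the image path and question are placeholders; the repo id is an assumption):

# Sketch only: instantiate the wrapper and run one docVQA-style query.
model = MiniCPM_o_2_6(model_path='openbmb/MiniCPM-o-2_6', ckpt=None, device='cuda:0')
answers = model.generate(
    images=['./sample/doc_page.png'],      # placeholder path
    questions=['What is the invoice number?'],
    datasetname='docVQA',
)
print(answers[0])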

View File

@@ -26,7 +26,7 @@ pyyaml==6.0
regex==2022.10.31
tokenizers==0.13.2
tqdm==4.64.1
transformers
transformers==4.44.2
timm==0.6.13
spacy==3.5.1
webdataset==0.2.48

View File

@@ -12,4 +12,4 @@ python -m torch.distributed.launch \
--eval_textVQA \
--eval_docVQA \
--answer_path ./answers \
--batchsize 1

View File

@@ -1,3 +1,3 @@
python ./transform_docvqatest_for_submission.py \
--input_file_path \
--output_file_path