MiniCPM-o/eval_mm/vlmevalkit/vlmeval/dataset/utils/judge_util.py

import os
from ...api import OpenAIWrapper
from ...smp import load_env

INTERNAL = os.environ.get('INTERNAL', 0)


def build_judge(**kwargs):
    model = kwargs.pop('model', None)
    kwargs.pop('nproc', None)
    load_env()
    LOCAL_LLM = os.environ.get('LOCAL_LLM', None)
    if LOCAL_LLM is None:
        # Map short judge names to concrete OpenAI model versions
        model_map = {
            'gpt-4-turbo': 'gpt-4-1106-preview',
            'gpt-4-0613': 'gpt-4-0613',
            'gpt-4-0125': 'gpt-4-0125-preview',
            'gpt-4-0409': 'gpt-4-turbo-2024-04-09',
            'chatgpt-1106': 'gpt-3.5-turbo-1106',
            'chatgpt-0125': 'gpt-3.5-turbo-0125',
            'gpt-4o': 'gpt-4o-2024-05-13',
            'gpt-4o-mini': 'gpt-4o-mini-2024-07-18',
        }
        model_version = model_map[model]
    else:
        # A locally served LLM (set via the LOCAL_LLM env variable) overrides the model map
        model_version = LOCAL_LLM
    model = OpenAIWrapper(model_version, **kwargs)
    return model
DEBUG_MESSAGE = """
To debug the OpenAI API, you can try the following scripts in python:
```python
from vlmeval.api import OpenAIWrapper
model = OpenAIWrapper('gpt-4-1106-preview', verbose=True)
msgs = [dict(type='text', value='Hello!')]
code, answer, resp = model.generate_inner(msgs)
print(code, answer, resp)
```
You can see the specific error if the API call fails.
"""