Mirror of https://github.com/OpenBMB/MiniCPM-V.git, synced 2026-02-04 09:49:20 +08:00
Merge pull request #18 from YuzaChongyi/main
docs(readme): update OmniLMM-3B evaluation table
@@ -209,6 +209,7 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<tr>
<th align="left">Model</th>
<th>Size</th>
<th nowrap="nowrap" >Visual Tokens</th>
<th>MME</th>
<th nowrap="nowrap" >MMB dev (en)</th>
<th nowrap="nowrap" >MMB dev (zh)</th>
@@ -220,6 +221,7 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<tr>
<td align="left">LLaVA-Phi</td>
<td align="right">3B</td>
<td>576</td>
<td>1335</td>
<td>59.8</td>
<td>- </td>
@@ -229,6 +231,7 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<tr>
<td nowrap="nowrap" align="left">MobileVLM</td>
<td align="right">3B</td>
<td>144</td>
<td>1289</td>
<td>59.6</td>
<td>- </td>
@@ -238,6 +241,7 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<tr>
<td nowrap="nowrap" align="left" >Imp-v1</td>
<td align="right">3B</td>
<td>576</td>
<td>1434</td>
<td>66.5</td>
<td>- </td>
@@ -245,8 +249,9 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<td>- </td>
</tr>
<tr>
<td align="left" >Qwen-VL-Chat</td>
<td nowrap="nowrap" align="left" >Qwen-VL-Chat</td>
<td align="right" >9.6B</td>
<td>256</td>
<td>1487</td>
<td>60.6 </td>
<td>56.7 </td>
@@ -256,6 +261,7 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<tr>
<td nowrap="nowrap" align="left" >CogVLM</td>
<td align="right">17.4B </td>
<td>1225</td>
<td>1438 </td>
<td>63.7 </td>
<td>53.8 </td>
@@ -265,6 +271,7 @@ We combine the OmniLMM-12B and GPT-3.5 (text-only) into a **real-time multimodal
<tr>
<td nowrap="nowrap" align="left" ><b>OmniLMM-3B</b></td>
<td align="right">3B </td>
<td>64</td>
<td>1452 </td>
<td>67.3 </td>
<td>61.9 </td>
@@ -214,6 +214,7 @@
<tr>
<th align="left">Model</th>
<th>Size</th>
<th nowrap="nowrap" >Visual Tokens</th>
<th>MME</th>
<th nowrap="nowrap" >MMB dev (en)</th>
<th nowrap="nowrap" >MMB dev (zh)</th>
@@ -225,6 +226,7 @@
<tr>
<td align="left">LLaVA-Phi</td>
<td align="right">3B</td>
<td>576</td>
<td>1335</td>
<td>59.8</td>
<td>- </td>
@@ -234,6 +236,7 @@
<tr>
<td nowrap="nowrap" align="left">MobileVLM</td>
<td align="right">3B</td>
<td>144</td>
<td>1289</td>
<td>59.6</td>
<td>- </td>
@@ -243,6 +246,7 @@
<tr>
<td nowrap="nowrap" align="left" >Imp-v1</td>
<td align="right">3B</td>
<td>576</td>
<td>1434</td>
<td>66.5</td>
<td>- </td>
@@ -250,8 +254,9 @@
<td>- </td>
</tr>
<tr>
<td align="left" >Qwen-VL-Chat</td>
<td nowrap="nowrap" align="left" >Qwen-VL-Chat</td>
<td align="right" >9.6B</td>
<td>256</td>
<td>1487</td>
<td>60.6 </td>
<td>56.7 </td>
@@ -261,6 +266,7 @@
<tr>
<td nowrap="nowrap" align="left" >CogVLM</td>
<td align="right">17.4B </td>
<td>1225</td>
<td>1438 </td>
<td>63.7 </td>
<td>53.8 </td>
@@ -270,6 +276,7 @@
<tr>
<td nowrap="nowrap" align="left" ><b>OmniLMM-3B</b></td>
<td align="right">3B </td>
<td>64</td>
<td>1452 </td>
<td>67.3 </td>
<td>61.9 </td>
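For readability, the evaluation rows visible in these hunks reconstruct to the following summary. Only the columns shown in the diff are included; the full README table may contain additional columns that the hunks cut off.

| Model | Size | Visual Tokens | MME | MMB dev (en) | MMB dev (zh) |
|---|---|---|---|---|---|
| LLaVA-Phi | 3B | 576 | 1335 | 59.8 | - |
| MobileVLM | 3B | 144 | 1289 | 59.6 | - |
| Imp-v1 | 3B | 576 | 1434 | 66.5 | - |
| Qwen-VL-Chat | 9.6B | 256 | 1487 | 60.6 | 56.7 |
| CogVLM | 17.4B | 1225 | 1438 | 63.7 | 53.8 |
| **OmniLMM-3B** | 3B | 64 | 1452 | 67.3 | 61.9 |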