From 1b593c318e76cef5a63075a29a8989dea80eeaa1 Mon Sep 17 00:00:00 2001
From: yiranyyu <2606375857@qq.com>
Date: Tue, 6 Aug 2024 20:31:00 +0800
Subject: [PATCH] update readme

---
 README.md    |  2 +-
 README_en.md | 14 +++++++++++---
 README_zh.md |  2 +-
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index e14a97f..97a02d8 100644
--- a/README.md
+++ b/README.md
@@ -841,7 +841,7 @@ We deploy MiniCPM-V 2.6 on end devices. The demo video is the raw screen recordi

-
+

diff --git a/README_en.md b/README_en.md
index bcfd9bb..97a02d8 100644
--- a/README_en.md
+++ b/README_en.md
@@ -279,7 +279,7 @@ MiniCPM-V 2.6 can be easily used in various ways: (1) [llama.cpp](https://github
         34B
         157
         -
-        2141
+        2141.0
         59.3
         518
         48.0
@@ -830,11 +830,18 @@ We deploy MiniCPM-V 2.6 on end devices. The demo video is the raw screen recordi

+
+

+
+      
+
+

+

-
+

@@ -1416,7 +1423,7 @@ model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=Tru
 model = model.eval().cuda()
 tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True)
 
-MAX_NUM_FRAMES=64
+MAX_NUM_FRAMES=64 # if cuda OOM set a smaller number
 
 def encode_video(video_path):
     def uniform_sample(l, n):
@@ -1442,6 +1449,7 @@ msgs = [
 ]
 
 # Set decode params for video
+params = {}
 params["use_image_id"] = False
 params["max_slice_nums"] = 2 # use 1 if cuda OOM and video resolution > 448*448
 
diff --git a/README_zh.md b/README_zh.md
index e1af1d6..c0b504d 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -841,7 +841,7 @@

-
+
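The functional change in this patch is the fix to the video-inference example in README_en.md: as previously written, `params["use_image_id"] = False` would raise a `NameError` because `params` was never created, so the patch adds `params = {}` first and notes that `MAX_NUM_FRAMES` can be lowered on CUDA OOM. Below is a minimal sketch of how the patched example reads end to end; the decord-based frame sampling and the `model.chat` call mirror the surrounding README code and are assumptions here, not part of this diff.

```python
# Sketch of the video-inference example after applying this patch.
# Frame sampling (decord) and model.chat follow the surrounding README code
# and are illustrative assumptions, not lines touched by this diff.
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
from decord import VideoReader, cpu  # pip install decord

model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True,
                                  attn_implementation='sdpa', torch_dtype=torch.bfloat16)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True)

MAX_NUM_FRAMES = 64  # if cuda OOM set a smaller number

def encode_video(video_path):
    # Uniformly sample at most MAX_NUM_FRAMES frames from the video.
    def uniform_sample(l, n):
        gap = len(l) / n
        idxs = [int(i * gap + gap / 2) for i in range(n)]
        return [l[i] for i in idxs]

    vr = VideoReader(video_path, ctx=cpu(0))
    sample_fps = round(vr.get_avg_fps() / 1)  # roughly one frame per second
    frame_idx = list(range(0, len(vr), sample_fps))
    if len(frame_idx) > MAX_NUM_FRAMES:
        frame_idx = uniform_sample(frame_idx, MAX_NUM_FRAMES)
    frames = vr.get_batch(frame_idx).asnumpy()
    return [Image.fromarray(f.astype('uint8')) for f in frames]

frames = encode_video("video_test.mp4")  # hypothetical local path
msgs = [{'role': 'user', 'content': frames + ["Describe the video"]}]

# Set decode params for video
params = {}                      # added by this patch; without it the next
params["use_image_id"] = False   # two assignments raise a NameError
params["max_slice_nums"] = 2     # use 1 if cuda OOM and video resolution > 448*448

answer = model.chat(image=None, msgs=msgs, tokenizer=tokenizer, **params)
print(answer)
```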