Update README.md
README.md CHANGED
@@ -10,33 +10,32 @@ tags:
   - visual-question-answering
 pipeline_tag: visual-question-answering
 base_model:
-  - Qwen/Qwen3-0.6B
+  - Qwen/Qwen3-0.6B-Base
 model-index:
-  […25 removed lines, not rendered in this diff view…]
+  - name: MicroLLaVA-Qwen3-0.6B-base-siglip2-so400m
+    results:
+      - task:
+          type: visual-question-answering
+          name: VQAv2
+        dataset:
+          name: VQAv2
+          type: vqav2
+        metrics:
+          - name: Overall Accuracy
+            type: accuracy
+            value: 78.52
+          - name: Yes/No Accuracy
+            type: accuracy
+            value: 91.56
+          - name: Number Accuracy
+            type: accuracy
+            value: 65.69
+          - name: Other Accuracy
+            type: accuracy
+            value: 70.28
+        source:
+          name: Internal Evaluation on VQAv2 test-dev
+          url: https://visualqa.org/download.html
 ---
 # 🦙📷 MicroLLaVA-Qwen3-0.6B-base-siglip2-so400m

@@ -170,5 +169,4 @@ If you find this model helpful, please consider citing or referencing this repo:
 }
 ```

-Please also support my release of https://huggingface.co/keeeeenw/MicroLlava which is based on own https://huggingface.co/keeeeenw/MicroLlama for the language capabilities.
-
+Please also support my release of https://huggingface.co/keeeeenw/MicroLlava which is based on own https://huggingface.co/keeeeenw/MicroLlama for the language capabilities.
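The `model-index` block added in this commit follows the Hub's standard evaluation-metadata schema, so the VQAv2 numbers can be read back programmatically rather than copied off the rendered card. Below is a minimal sketch using `huggingface_hub`; the repo id is an assumption inferred from the card title and the author's other links, not something stated in this diff.

```python
# Minimal sketch: read the eval results declared in the model-index front matter.
# NOTE: the repo id below is assumed from the card title; adjust it if the model
# is hosted under a different name.
from huggingface_hub import ModelCard

repo_id = "keeeeenw/MicroLLaVA-Qwen3-0.6B-base-siglip2-so400m"  # assumed repo id

card = ModelCard.load(repo_id)

# `eval_results` is parsed from the `model-index` section of the YAML front matter.
for result in card.data.eval_results or []:
    print(f"{result.dataset_name} | {result.metric_name}: {result.metric_value}")
```

With the metadata above, this should print the four VQAv2 accuracy figures (78.52 overall, 91.56 yes/no, 65.69 number, 70.28 other).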
|
|