Commit d0d545b (verified) · 1 parent: 8df3dc1
sergiopaniego (HF Staff) committed: Training in progress, step 10
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ base_model: Qwen/Qwen2-VL-2B-Instruct
+ library_name: transformers
+ model_name: Qwen2-VL-2B-Instruct-Thinking
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen2-VL-2B-Instruct-Thinking
+
+ This model is a fine-tuned version of [Qwen/Qwen2-VL-2B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="sergiopaniego/Qwen2-VL-2B-Instruct-Thinking", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
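The Quick start snippet above exercises only the text path. Since the base model is a vision-language model, image inputs go through the processor instead; below is a minimal sketch of that route, not an official example from this card. The image URL is a placeholder, and the base model id is used for brevity (attaching this repo's LoRA adapter is sketched under `adapter_model.safetensors` further down).

```python
# Sketch only: multimodal inference with the base Qwen2-VL model.
# The image URL is a placeholder, not a real asset.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-2B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")

image = Image.open(requests.get("https://example.com/demo.png", stream=True).raw)
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "Describe this image."},
]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the prompt.
print(processor.batch_decode(out[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)[0])
```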
+ ## Training procedure
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
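For readers unfamiliar with the setup, here is a minimal sketch of how a GRPO fine-tune is typically launched with TRL's `GRPOTrainer`. The dataset, reward function, and hyperparameters below are illustrative assumptions, not the recipe used for this checkpoint (which targets `q_proj`/`v_proj`, per the adapter config in this commit).

```python
# Illustrative GRPO setup with TRL; the dataset and reward are placeholders,
# not the actual training recipe for this checkpoint.
from datasets import load_dataset
from peft import LoraConfig
from trl import GRPOConfig, GRPOTrainer

def reward_len(completions, **kwargs):
    # Toy reward: prefer completions close to 100 characters.
    return [-abs(100 - len(completion)) for completion in completions]

dataset = load_dataset("trl-lib/tldr", split="train")  # placeholder dataset

trainer = GRPOTrainer(
    model="Qwen/Qwen2-VL-2B-Instruct",
    reward_funcs=reward_len,
    args=GRPOConfig(output_dir="Qwen2-VL-2B-Instruct-Thinking", logging_steps=10),
    train_dataset=dataset,
    peft_config=LoraConfig(target_modules=["q_proj", "v_proj"]),
)
trainer.train()
```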
+ ### Framework versions
+
+ - TRL: 0.20.0.dev0
+ - Transformers: 4.53.0
+ - Pytorch: 2.6.0+cu124
+ - Datasets: 4.0.0
+ - Tokenizers: 0.21.2
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title  = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year   = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
adapter_config.json CHANGED
@@ -25,8 +25,8 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "q_proj",
- "v_proj"
+ "v_proj",
+ "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:76b2792cfc903006489ecd64f6481e5eee149daac9fe3922bb908aad28a06b06
- size 4372840
+ oid sha256:0921cee8b015b9c091d809f1a7fac984690d48de3281f5b4cd1ac01bd5e67180
+ size 4374520
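Note that in `adapter_config.json` above only the serialization order of `target_modules` changed (the set is unordered), so the adapter still targets the same `q_proj`/`v_proj` projections; the weight file itself grew slightly. A sketch of attaching these adapter weights with PEFT and merging them for plain `transformers` inference:

```python
# Sketch: load this repo's LoRA adapter onto the base model and merge it.
from peft import PeftModel
from transformers import Qwen2VLForConditionalGeneration

base = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
model = PeftModel.from_pretrained(base, "sergiopaniego/Qwen2-VL-2B-Instruct-Thinking")
merged = model.merge_and_unload()  # fold the q_proj/v_proj deltas into the base weights
merged.save_pretrained("qwen2-vl-2b-thinking-merged")
```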
chat_template.jinja ADDED
@@ -0,0 +1,7 @@
+ {% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+ You are a helpful assistant.<|im_end|>
+ {% endif %}<|im_start|>{{ message['role'] }}
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+ {% endif %}
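This standalone template matches the inline `chat_template` string removed from `tokenizer_config.json` below. To check how it renders a conversation, one can apply it through the processor; a small sketch (the expected output in the comments follows from the template logic above):

```python
# Sketch: render the chat template via the processor saved in this repo.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("sergiopaniego/Qwen2-VL-2B-Instruct-Thinking")
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "What is in this picture?"},
]}]
print(processor.apply_chat_template(messages, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>What is in this picture?<|im_end|>
# <|im_start|>assistant
```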
preprocessor_config.json CHANGED
@@ -1,4 +1,10 @@
  {
+ "crop_size": null,
+ "data_format": "channels_first",
+ "default_to_square": true,
+ "device": null,
+ "disable_grouping": null,
+ "do_center_crop": null,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
@@ -8,12 +14,13 @@
  0.4578275,
  0.40821073
  ],
- "image_processor_type": "Qwen2VLImageProcessor",
+ "image_processor_type": "Qwen2VLImageProcessorFast",
  "image_std": [
  0.26862954,
  0.26130258,
  0.27577711
  ],
+ "input_data_format": null,
  "max_pixels": 12845056,
  "merge_size": 2,
  "min_pixels": 3136,
@@ -21,9 +28,10 @@
  "processor_class": "Qwen2VLProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
+ "return_tensors": null,
  "size": {
- "max_pixels": 12845056,
- "min_pixels": 3136
+ "longest_edge": 12845056,
+ "shortest_edge": 3136
  },
  "temporal_patch_size": 2
  }
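The changes above migrate the config to the fast image-processor format (`Qwen2VLImageProcessorFast`, with `size` expressed as `longest_edge`/`shortest_edge` instead of top-level `max_pixels`/`min_pixels`). A quick way to confirm which class Transformers resolves from this config (assuming the repo id as above):

```python
# Sketch: check which image processor class is loaded from this repo's config.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("sergiopaniego/Qwen2-VL-2B-Instruct-Thinking")
print(type(image_processor).__name__)  # expected: Qwen2VLImageProcessorFast
```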
runs/Jul09_16-26-55_4137bc6253df/events.out.tfevents.1752078434.4137bc6253df.1221.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcbafdd5495767223a924618b236fda5e68a3a9fba3dae13ae2e50d4340ecfde
+ size 10057
tokenizer_config.json CHANGED
@@ -130,10 +130,10 @@
  "<|video_pad|>"
  ],
  "bos_token": null,
- "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
+ "extra_special_tokens": {},
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6730ac38775f35d048b780843e3840177304a2e68360e603ba1bd9498f309d7d
- size 6072
+ oid sha256:464a755bbb220bdbbd1b509b3017e935da00021c77af8eab707e79f40656ad33
+ size 6520
video_preprocessor_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "crop_size": null,
+ "data_format": "channels_first",
+ "default_to_square": true,
+ "device": null,
+ "do_center_crop": null,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_pad": null,
+ "do_rescale": true,
+ "do_resize": true,
+ "do_sample_frames": false,
+ "fps": null,
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "input_data_format": null,
+ "max_frames": 768,
+ "max_pixels": 12845056,
+ "merge_size": 2,
+ "min_frames": 4,
+ "min_pixels": 3136,
+ "num_frames": null,
+ "patch_size": 14,
+ "processor_class": "Qwen2VLProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "longest_edge": 12845056,
+ "shortest_edge": 3136
+ },
+ "size_divisor": null,
+ "temporal_patch_size": 2,
+ "video_metadata": null,
+ "video_processor_type": "Qwen2VLVideoProcessor"
+ }