danielhanchen committed
Commit ddaa9e0 · verified · 1 Parent(s): 79ebe4e

Upload folder using huggingface_hub

config.json CHANGED
@@ -28,48 +28,9 @@
   },
   "rope_theta": 1000000.0,
   "sliding_window": 32768,
-  "text_config": {
-    "architectures": [
-      "Qwen2_5_VLForConditionalGeneration"
-    ],
-    "attention_dropout": 0.0,
-    "bos_token_id": 151643,
-    "eos_token_id": 151645,
-    "hidden_act": "silu",
-    "hidden_size": 3584,
-    "image_token_id": null,
-    "initializer_range": 0.02,
-    "intermediate_size": 18944,
-    "max_position_embeddings": 128000,
-    "max_window_layers": 28,
-    "model_type": "qwen2_5_vl_text",
-    "num_attention_heads": 28,
-    "num_hidden_layers": 28,
-    "num_key_value_heads": 4,
-    "rms_norm_eps": 1e-06,
-    "rope_scaling": {
-      "mrope_section": [
-        16,
-        24,
-        24
-      ],
-      "rope_type": "default",
-      "type": "default"
-    },
-    "rope_theta": 1000000.0,
-    "sliding_window": 32768,
-    "torch_dtype": "bfloat16",
-    "use_cache": true,
-    "use_sliding_window": false,
-    "video_token_id": null,
-    "vision_end_token_id": 151653,
-    "vision_start_token_id": 151652,
-    "vision_token_id": 151654,
-    "vocab_size": 152064
-  },
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.52.0.dev0",
+  "transformers_version": "4.51.3",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
@@ -86,7 +47,6 @@
   "hidden_size": 1280,
   "in_channels": 3,
   "in_chans": 3,
-  "initializer_range": 0.02,
   "intermediate_size": 3420,
   "model_type": "qwen2_5_vl",
   "num_heads": 16,
generation_config.json CHANGED
@@ -9,5 +9,5 @@
   "pad_token_id": 151654,
   "repetition_penalty": 1.05,
   "temperature": 1e-06,
-  "transformers_version": "4.52.0.dev0"
+  "transformers_version": "4.51.3"
 }
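Only the transformers_version field changes here; the decoding defaults shown in the diff (near-greedy temperature of 1e-06, repetition_penalty 1.05, pad_token_id 151654) are untouched. A small sketch, assuming transformers is installed and the command is run from the directory that contains generation_config.json (the "." path is a placeholder):

# Sketch: load the shipped generation defaults via transformers.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(".")  # directory containing generation_config.json
print(gen_cfg.temperature)          # 1e-06 (effectively greedy decoding)
print(gen_cfg.repetition_penalty)   # 1.05
print(gen_cfg.pad_token_id)         # 151654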
model-00001-of-00005.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e97b877e47fde53a6c6e77aafb36e58e91ee9d95c4a3eeac6f1b5c0e6a1c986e
- size 3900233256

model-00002-of-00005.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a9a300a43b4724eee2abe7c18ceb26768d0ab011eb0cad19d9bfd2476a24d024
- size 3864726320

model-00003-of-00005.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:111223d173e00bbee81cba1216fad28668df3476706b7fd26f4d5b50f8b3a507
- size 3864726424

model-00004-of-00005.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ef47f634fa57d46ee134edcc09f34085a47da1e16c12a2abe0d67118be6d72ed
- size 3864733680

model-00005-of-00005.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0c859795ad3a627a9b95bcb762e059d5b768a4a36fdd4affeff269d93fdecc67
- size 1089994880
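The five entries above are Git LFS pointer files, not the weights themselves: each holds only the spec version, a sha256 oid, and the payload size in bytes. A throwaway sketch of that three-field layout, using one of the deleted pointers as sample input (the parser and names are illustrative only, not tooling from this repo):

# Sketch: parse the Git LFS pointer layout shown in the deleted files above.
def parse_lfs_pointer(text: str) -> dict:
    # Each line is "<key> <value>", e.g. "oid sha256:..." or "size 1089994880".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"], "size_bytes": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:0c859795ad3a627a9b95bcb762e059d5b768a4a36fdd4affeff269d93fdecc67
size 1089994880"""

print(parse_lfs_pointer(pointer))   # {'oid': 'sha256:0c85...', 'size_bytes': 1089994880}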
 
 
 
 
tokenizer_config.json CHANGED
@@ -195,6 +195,7 @@
     "<|video_pad|>"
   ],
   "bos_token": null,
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
@@ -205,6 +206,5 @@
   "processor_class": "Qwen2_5_VLProcessor",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
-  "unk_token": null,
-  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+  "unk_token": null
 }
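The chat_template string itself is unchanged by this commit; it only moves earlier in the file (after bos_token instead of trailing behind unk_token), so the plain tokenizer renders multimodal conversations the same way before and after. A short sketch, assuming a local checkout and a transformers version with apply_chat_template support (the path and the example message are illustrative):

# Sketch: render a prompt with the chat template stored in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # directory containing tokenizer_config.json

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this picture."},
    ]},
]

prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Per the template above, this expands to roughly:
# <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
# <|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this picture.<|im_end|>\n
# <|im_start|>assistant\n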