Commit 8b748e4 (verified) · committed by warshanks · 1 parent: cddea0c

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,42 @@
+ ---
+ license: other
+ license_name: health-ai-developer-foundations
+ license_link: https://developers.google.com/health-ai-developer-foundations/terms
+ library_name: transformers
+ pipeline_tag: image-text-to-text
+ language: en
+ extra_gated_heading: Access MedGemma on Hugging Face
+ extra_gated_prompt: To access MedGemma on Hugging Face, you're required to review
+   and agree to [Health AI Developer Foundation's terms of use](https://developers.google.com/health-ai-developer-foundations/terms).
+   To do this, please ensure you're logged in to Hugging Face and click below. Requests
+   are processed immediately.
+ extra_gated_button_content: Acknowledge license
+ tags:
+ - medical
+ - x-ray
+ - pathology
+ - dermatology
+ - fundus
+ - radiology report generation
+ - chest-x-ray
+ - medical-embeddings
+ - image-classification
+ - zero-shot-image-classification
+ - image-feature-extraction
+ - image-text-to-text
+ - mlx
+ base_model: google/gemma-3-27b-pt
+ ---
+ 
+ # mlx-community/medgemma-27b-it-bf16
+ This model was converted to MLX format from [`google/medgemma-27b-it`](https://huggingface.co/google/medgemma-27b-it) using mlx-vlm version **0.3.0**.
+ Refer to the [original model card](https://huggingface.co/google/medgemma-27b-it) for more details on the model.
+ ## Use with mlx
+ 
+ ```bash
+ pip install -U mlx-vlm
+ ```
+ 
+ ```bash
+ python -m mlx_vlm.generate --model mlx-community/medgemma-27b-it-bf16 --max-tokens 100 --temperature 0.0 --prompt "Describe this image." --image <path_to_image>
+ ```
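Beyond the CLI shown in the README, mlx-vlm also exposes a Python API. A minimal sketch, assuming the mlx-vlm 0.3.x `load`/`generate` helpers (the image path is illustrative, and keyword names may differ slightly between versions):

```python
# Minimal sketch, assuming the mlx-vlm 0.3.x Python API.
from mlx_vlm import load, generate
from mlx_vlm.prompt_utils import apply_chat_template
from mlx_vlm.utils import load_config

model_path = "mlx-community/medgemma-27b-it-bf16"
model, processor = load(model_path)   # downloads the weights on first use (~58 GB)
config = load_config(model_path)

images = ["chest_xray.png"]           # illustrative local path
prompt = "Describe this image."

# Wrap the raw prompt in the model's chat template (see chat_template.jinja below).
formatted = apply_chat_template(processor, config, prompt, num_images=len(images))

output = generate(model, processor, formatted, images, max_tokens=100, verbose=False)
print(output)
```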
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+     {%- if messages[0]['content'] is string -%}
+         {%- set first_user_prefix = messages[0]['content'] + '
+ 
+ ' -%}
+     {%- else -%}
+         {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+ 
+ ' -%}
+     {%- endif -%}
+     {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+     {%- set first_user_prefix = "" -%}
+     {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+     {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+         {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+     {%- endif -%}
+     {%- if (message['role'] == 'assistant') -%}
+         {%- set role = "model" -%}
+     {%- else -%}
+         {%- set role = message['role'] -%}
+     {%- endif -%}
+     {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+     {%- if message['content'] is string -%}
+         {{ message['content'] | trim }}
+     {%- elif message['content'] is iterable -%}
+         {%- for item in message['content'] -%}
+             {%- if item['type'] == 'image' -%}
+                 {{ '<start_of_image>' }}
+             {%- elif item['type'] == 'text' -%}
+                 {{ item['text'] | trim }}
+             {%- endif -%}
+         {%- endfor -%}
+     {%- else -%}
+         {{ raise_exception("Invalid content type") }}
+     {%- endif -%}
+     {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+     {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
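The template maps the `assistant` role to Gemma's `model` turn marker and folds a leading system message into the first user turn. A minimal sketch that renders it standalone with plain jinja2, no model download needed (message content is illustrative; transformers injects `raise_exception` the same way):

```python
# Minimal sketch: render chat_template.jinja with jinja2 to inspect the prompt layout.
from jinja2 import Environment, StrictUndefined

def raise_exception(msg):
    raise ValueError(msg)

env = Environment(undefined=StrictUndefined)
env.globals["raise_exception"] = raise_exception
template = env.from_string(open("chat_template.jinja").read())

messages = [
    {"role": "system", "content": "You are a helpful medical assistant."},
    {"role": "user", "content": [
        {"type": "image"},                                  # rendered as <start_of_image>
        {"type": "text", "text": "Describe this X-ray."},
    ]},
]
print(template.render(messages=messages, bos_token="<bos>", add_generation_prompt=True))
# <bos><start_of_turn>user
# You are a helpful medical assistant.
#
# <start_of_image>Describe this X-ray.<end_of_turn>
# <start_of_turn>model
```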
config.json ADDED
@@ -0,0 +1,297 @@
+ {
+   "add_cross_attention": false,
+   "architectures": [
+     "Gemma3ForConditionalGeneration"
+   ],
+   "bad_words_ids": null,
+   "begin_suppress_tokens": null,
+   "boi_token_index": 255999,
+   "bos_token_id": null,
+   "chunk_size_feed_forward": 0,
+   "cross_attention_hidden_size": null,
+   "decoder_start_token_id": null,
+   "diversity_penalty": 0.0,
+   "do_sample": false,
+   "early_stopping": false,
+   "encoder_no_repeat_ngram_size": 0,
+   "eoi_token_index": 256000,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "exponential_decay_length_penalty": null,
+   "finetuning_task": null,
+   "forced_bos_token_id": null,
+   "forced_eos_token_id": null,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1"
+   },
+   "image_token_index": 262144,
+   "initializer_range": 0.02,
+   "is_decoder": false,
+   "is_encoder_decoder": false,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1
+   },
+   "length_penalty": 1.0,
+   "max_length": 20,
+   "min_length": 0,
+   "mm_tokens_per_image": 256,
+   "model_type": "gemma3",
+   "no_repeat_ngram_size": 0,
+   "num_beam_groups": 1,
+   "num_beams": 1,
+   "num_return_sequences": 1,
+   "output_attentions": false,
+   "output_hidden_states": false,
+   "output_scores": false,
+   "pad_token_id": null,
+   "prefix": null,
+   "problem_type": null,
+   "pruned_heads": {},
+   "remove_invalid_values": false,
+   "repetition_penalty": 1.0,
+   "return_dict": true,
+   "return_dict_in_generate": false,
+   "sep_token_id": null,
+   "suppress_tokens": null,
+   "task_specific_params": null,
+   "temperature": 1.0,
+   "text_config": {
+     "return_dict": true,
+     "output_hidden_states": false,
+     "torchscript": false,
+     "torch_dtype": "bfloat16",
+     "use_bfloat16": false,
+     "tf_legacy_loss": false,
+     "pruned_heads": {},
+     "tie_word_embeddings": true,
+     "chunk_size_feed_forward": 0,
+     "is_encoder_decoder": false,
+     "is_decoder": false,
+     "cross_attention_hidden_size": null,
+     "add_cross_attention": false,
+     "tie_encoder_decoder": false,
+     "max_length": 20,
+     "min_length": 0,
+     "do_sample": false,
+     "early_stopping": false,
+     "num_beams": 1,
+     "num_beam_groups": 1,
+     "diversity_penalty": 0.0,
+     "temperature": 1.0,
+     "top_k": 50,
+     "top_p": 1.0,
+     "typical_p": 1.0,
+     "repetition_penalty": 1.0,
+     "length_penalty": 1.0,
+     "no_repeat_ngram_size": 0,
+     "encoder_no_repeat_ngram_size": 0,
+     "bad_words_ids": null,
+     "num_return_sequences": 1,
+     "output_scores": false,
+     "return_dict_in_generate": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "remove_invalid_values": false,
+     "exponential_decay_length_penalty": null,
+     "suppress_tokens": null,
+     "begin_suppress_tokens": null,
+     "architectures": null,
+     "finetuning_task": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "tokenizer_class": null,
+     "prefix": null,
+     "bos_token_id": 2,
+     "pad_token_id": 0,
+     "eos_token_id": 1,
+     "sep_token_id": null,
+     "decoder_start_token_id": null,
+     "task_specific_params": null,
+     "problem_type": null,
+     "_name_or_path": "",
+     "model_type": "gemma3_text",
+     "vocab_size": 262208,
+     "max_position_embeddings": 131072,
+     "hidden_size": 5376,
+     "intermediate_size": 21504,
+     "num_hidden_layers": 62,
+     "num_attention_heads": 32,
+     "head_dim": 128,
+     "num_key_value_heads": 16,
+     "initializer_range": 0.02,
+     "rms_norm_eps": 1e-06,
+     "use_cache": true,
+     "rope_theta": 1000000,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "query_pre_attn_scalar": 168,
+     "sliding_window": 1024,
+     "final_logit_softcapping": null,
+     "attn_logit_softcapping": null,
+     "layer_types": [
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention"
+     ],
+     "rope_local_base_freq": 10000,
+     "rope_scaling": {
+       "factor": 8.0,
+       "rope_type": "linear"
+     },
+     "output_attentions": false
+   },
+   "tf_legacy_loss": false,
+   "tie_encoder_decoder": false,
+   "tie_word_embeddings": true,
+   "tokenizer_class": null,
+   "top_k": 50,
+   "top_p": 1.0,
+   "torchscript": false,
+   "transformers_version": "4.53.1",
+   "typical_p": 1.0,
+   "use_bfloat16": false,
+   "vision_config": {
+     "return_dict": true,
+     "output_hidden_states": false,
+     "torchscript": false,
+     "torch_dtype": "bfloat16",
+     "use_bfloat16": false,
+     "tf_legacy_loss": false,
+     "pruned_heads": {},
+     "tie_word_embeddings": true,
+     "chunk_size_feed_forward": 0,
+     "is_encoder_decoder": false,
+     "is_decoder": false,
+     "cross_attention_hidden_size": null,
+     "add_cross_attention": false,
+     "tie_encoder_decoder": false,
+     "max_length": 20,
+     "min_length": 0,
+     "do_sample": false,
+     "early_stopping": false,
+     "num_beams": 1,
+     "num_beam_groups": 1,
+     "diversity_penalty": 0.0,
+     "temperature": 1.0,
+     "top_k": 50,
+     "top_p": 1.0,
+     "typical_p": 1.0,
+     "repetition_penalty": 1.0,
+     "length_penalty": 1.0,
+     "no_repeat_ngram_size": 0,
+     "encoder_no_repeat_ngram_size": 0,
+     "bad_words_ids": null,
+     "num_return_sequences": 1,
+     "output_scores": false,
+     "return_dict_in_generate": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "remove_invalid_values": false,
+     "exponential_decay_length_penalty": null,
+     "suppress_tokens": null,
+     "begin_suppress_tokens": null,
+     "architectures": null,
+     "finetuning_task": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "tokenizer_class": null,
+     "prefix": null,
+     "bos_token_id": null,
+     "pad_token_id": null,
+     "eos_token_id": null,
+     "sep_token_id": null,
+     "decoder_start_token_id": null,
+     "task_specific_params": null,
+     "problem_type": null,
+     "_name_or_path": "",
+     "model_type": "siglip_vision_model",
+     "vision_use_head": false,
+     "hidden_size": 1152,
+     "intermediate_size": 4304,
+     "num_hidden_layers": 27,
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "patch_size": 14,
+     "image_size": 896,
+     "attention_dropout": 0.0,
+     "layer_norm_eps": 1e-06,
+     "hidden_act": "gelu_pytorch_tanh",
+     "output_attentions": false
+   }
+ }
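One detail worth pulling out of `text_config`: the 62 decoder layers interleave five sliding-window layers (window 1024, local RoPE base 10000) with one global-attention layer (RoPE theta 1e6 with linear scaling factor 8), and the `layer_types` list above follows that 5:1 pattern exactly. A minimal sketch that reconstructs it:

```python
# Minimal sketch: rebuild layer_types from the 5:1 sliding/global pattern
# declared in text_config (62 layers, every 6th layer is full attention).
num_hidden_layers = 62
layer_types = [
    "full_attention" if (i + 1) % 6 == 0 else "sliding_attention"
    for i in range(num_hidden_layers)
]
assert layer_types.count("full_attention") == 10       # layers 6, 12, ..., 60
assert layer_types[-2:] == ["sliding_attention"] * 2   # trailing partial group
```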
generation_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "pad_token_id": 0,
+   "transformers_version": "4.54.0.dev0"
+ }
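The two stop ids should correspond to Gemma's `<eos>` (1) and `<end_of_turn>` (106) tokens, so generation halts either at end-of-sequence or at the close of a model turn. A quick check, assuming transformers is installed and the repo is reachable:

```python
# Minimal sketch: confirm what the two eos ids decode to.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/medgemma-27b-it-bf16")
print(tok.convert_ids_to_tokens([1, 106]))  # expected: ['<eos>', '<end_of_turn>']
```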
model-00001-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca9c7807e4b247af5edca10bf8988805220711ce68f7cbe342e2ee6103eca6c5
+ size 5348690938
model-00002-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca22834430de8cf7085e3169e795bdaf8c187142028a95904e4f9f8ee2e66268
+ size 5274085604
model-00003-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8a6de37ca10bba45a57820ce1fb4b8210f70b56cfbb9802fbda107802929b2a
+ size 5186004140
model-00004-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:900af1f1e0b030ea6ea448f3d72f97ce44f610bb301c993edb270a56e6ab5aae
+ size 5318169575
model-00005-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05dc6c3161c7e69be99b8fbcdb2851f0da010e72df605e3ff836bfcf522e99a3
+ size 5186004133
model-00006-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca42be10e93143a6faa2a01ade22c7dc091db60d8b641de71d3431c99c01db7d
+ size 5186004165
model-00007-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e091379c1402e83cac116e30d3234902a9252422b8939ea43f9b20c1ace11ea
+ size 5318169539
model-00008-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c064ba0168f4b735570b611466401f2648c5f145ba9ecab6a32be09db56a18b
+ size 5186004119
model-00009-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c953dd54530c0e0c3fabd86efcaa353fb066cadf05132e31eb109b576e2ac361
+ size 5186004129
model-00010-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b738bd4591e25c4c663d703231e78c08529065ad2aa97395d4e527e4713f981
+ size 5318169549
model-00011-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4aca0986c6d0422c850143349fda77a17c8043ca65ddf49686d4758b60060ff9
+ size 5176934889
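Each shard diff above is a git-LFS pointer (version, oid, size), not the weights themselves. A minimal sketch that parses the pointer format and totals the shard sizes (run it in a plain, non-LFS checkout of the repo; the eleven sizes sum to roughly 57.7 GB, consistent with ~27B bf16 parameters plus the vision tower):

```python
# Minimal sketch: parse git-LFS pointer files and sum the shard sizes.
from pathlib import Path

def parse_lfs_pointer(path: Path) -> dict:
    # A pointer file is three "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in path.read_text().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

shards = sorted(Path(".").glob("model-*-of-00011.safetensors"))
total_bytes = sum(parse_lfs_pointer(p)["size"] for p in shards)
print(f"{len(shards)} shards, {total_bytes / 1e9:.1f} GB")  # 11 shards, ~57.7 GB
```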
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_pan_and_scan": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Gemma3ImageProcessor",
+   "image_seq_length": 256,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "pan_and_scan_max_num_crops": null,
+   "pan_and_scan_min_crop_size": null,
+   "pan_and_scan_min_ratio_to_activate": null,
+   "processor_class": "Gemma3Processor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 896,
+     "width": 896
+   }
+ }
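In plain terms: images are bilinearly resized to 896×896 (`resample: 2` is PIL's `BILINEAR`), rescaled by 1/255, then normalized with mean = std = 0.5, which maps pixel values into [-1, 1]; each image then occupies 256 soft tokens. A minimal sketch of that pixel pipeline, with NumPy/PIL as stand-ins for the real `Gemma3ImageProcessor`:

```python
# Minimal sketch of the preprocessing implied by preprocessor_config.json.
import numpy as np
from PIL import Image

def preprocess(path: str) -> np.ndarray:
    img = Image.open(path).convert("RGB").resize((896, 896), Image.BILINEAR)
    x = np.asarray(img, dtype=np.float32) * 0.00392156862745098  # rescale_factor = 1/255
    x = (x - 0.5) / 0.5                                          # image_mean / image_std
    return x.transpose(2, 0, 1)                                  # CHW, values in [-1, 1]
```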
processor_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "image_seq_length": 256,
+   "processor_class": "Gemma3Processor"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff