warshanks committed
Commit 3d23a48 · verified · 1 Parent(s): effe616

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ license: other
+ license_name: health-ai-developer-foundations
+ license_link: https://developers.google.com/health-ai-developer-foundations/terms
+ library_name: mlx
+ pipeline_tag: text-generation
+ extra_gated_heading: Access MedGemma on Hugging Face
+ extra_gated_prompt: To access MedGemma on Hugging Face, you're required to review
+   and agree to [Health AI Developer Foundation's terms of use](https://developers.google.com/health-ai-developer-foundations/terms).
+   To do this, please ensure you're logged in to Hugging Face and click below. Requests
+   are processed immediately.
+ extra_gated_button_content: Acknowledge license
+ base_model: google/medgemma-27b-text-it
+ tags:
+ - medical
+ - clinical-reasoning
+ - thinking
+ - mlx
+ ---
+
+ # mlx-community/medgemma-27b-text-it-6bit
+
+ This model [mlx-community/medgemma-27b-text-it-6bit](https://huggingface.co/mlx-community/medgemma-27b-text-it-6bit) was
+ converted to MLX format from [google/medgemma-27b-text-it](https://huggingface.co/google/medgemma-27b-text-it)
+ using mlx-lm version **0.24.1**.
+
+ ## Use with mlx
+
+ ```bash
+ pip install mlx-lm
+ ```
+
+ ```python
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("mlx-community/medgemma-27b-text-it-6bit")
+
+ prompt = "hello"
+
+ if tokenizer.chat_template is not None:
+     messages = [{"role": "user", "content": prompt}]
+     prompt = tokenizer.apply_chat_template(
+         messages, add_generation_prompt=True
+     )
+
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
+ ```
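Beyond single-shot `generate`, mlx-lm also exposes a streaming API in the 0.24 series used for this conversion. A minimal sketch, assuming the same model and chat-templated prompt as in the README above:

```python
from mlx_lm import load, stream_generate

model, tokenizer = load("mlx-community/medgemma-27b-text-it-6bit")

messages = [{"role": "user", "content": "hello"}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

# Print tokens as they are produced instead of waiting for the full response.
for chunk in stream_generate(model, tokenizer, prompt=prompt, max_tokens=256):
    print(chunk.text, end="", flush=True)
```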
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+     {%- if messages[0]['content'] is string -%}
+         {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+     {%- else -%}
+         {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+     {%- endif -%}
+     {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+     {%- set first_user_prefix = "" -%}
+     {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+     {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+         {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+     {%- endif -%}
+     {%- if (message['role'] == 'assistant') -%}
+         {%- set role = "model" -%}
+     {%- else -%}
+         {%- set role = message['role'] -%}
+     {%- endif -%}
+     {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+     {%- if message['content'] is string -%}
+         {{ message['content'] | trim }}
+     {%- elif message['content'] is iterable -%}
+         {%- for item in message['content'] -%}
+             {%- if item['type'] == 'image' -%}
+                 {{ '<start_of_image>' }}
+             {%- elif item['type'] == 'text' -%}
+                 {{ item['text'] | trim }}
+             {%- endif -%}
+         {%- endfor -%}
+     {%- else -%}
+         {{ raise_exception("Invalid content type") }}
+     {%- endif -%}
+     {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+     {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
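For reference, this template can be rendered outside of mlx-lm with plain jinja2. A minimal sketch, assuming the file above is saved locally as `chat_template.jinja`; `raise_exception` is a helper we define ourselves, since Jinja does not provide one:

```python
from jinja2 import Environment, FileSystemLoader


def raise_exception(message):
    # The template calls this when roles do not alternate user/assistant.
    raise ValueError(message)


env = Environment(loader=FileSystemLoader("."))
env.globals["raise_exception"] = raise_exception
template = env.get_template("chat_template.jinja")

rendered = template.render(
    bos_token="<bos>",
    messages=[{"role": "user", "content": "hello"}],
    add_generation_prompt=True,
)
print(rendered)
# Expected shape: <bos><start_of_turn>user\nhello<end_of_turn>\n<start_of_turn>model\n
```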
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "architectures": [
+     "Gemma3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attn_logit_softcapping": null,
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "eos_token_id": 1,
+   "final_logit_softcapping": null,
+   "head_dim": 128,
+   "hidden_activation": "gelu_pytorch_tanh",
+   "hidden_size": 5376,
+   "initializer_range": 0.02,
+   "intermediate_size": 21504,
+   "max_position_embeddings": 131072,
+   "model_type": "gemma3_text",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 62,
+   "num_key_value_heads": 16,
+   "pad_token_id": 0,
+   "quantization": {
+     "group_size": 64,
+     "bits": 6
+   },
+   "quantization_config": {
+     "group_size": 64,
+     "bits": 6
+   },
+   "query_pre_attn_scalar": 168,
+   "rms_norm_eps": 1e-06,
+   "rope_local_base_freq": 10000,
+   "rope_scaling": {
+     "factor": 8.0,
+     "rope_type": "linear"
+   },
+   "rope_theta": 1000000,
+   "sliding_window": 1024,
+   "sliding_window_pattern": 6,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.0.dev0",
+   "use_cache": true,
+   "vocab_size": 262144
+ }
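The `quantization` block (6 bits, group size 64) records how the bf16 weights were quantized during conversion. A minimal sketch of how such a conversion is typically produced with mlx-lm's Python API; treat the exact parameter names as an assumption based on the mlx-lm 0.24 series:

```python
from mlx_lm import convert

# Quantize the bf16 weights to 6 bits with a group size of 64,
# matching the "quantization" block recorded in config.json above.
convert(
    hf_path="google/medgemma-27b-text-it",
    mlx_path="medgemma-27b-text-it-6bit",
    quantize=True,
    q_bits=6,
    q_group_size=64,
)
```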
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.52.0.dev0"
+ }
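These sampling defaults (top-k 64, top-p 0.95, sampling enabled) can be reproduced at generation time with mlx-lm's sampler utilities. A minimal sketch, assuming `make_sampler` from `mlx_lm.sample_utils` as shipped in the 0.24 series:

```python
from mlx_lm import load, generate
from mlx_lm.sample_utils import make_sampler

model, tokenizer = load("mlx-community/medgemma-27b-text-it-6bit")

# Mirror the defaults from generation_config.json: top_k=64, top_p=0.95.
sampler = make_sampler(temp=1.0, top_p=0.95, top_k=64)

response = generate(model, tokenizer, prompt="hello", sampler=sampler, verbose=True)
```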
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0656cb2cf43632b336b9f19332ad57534bd91320d7eaf68c9932f6890486e526
+ size 5318759265
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc45d8b779245a0e47f0c2ca208aecd986588837eaccff524d49970bf80cb85b
+ size 5368144440
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bdb18514486d08795ad5e49039769209ee20f5c97a6ecc91ff9af57ea037358
+ size 5368144484
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e9e75f94fe2eac8279078f0c91f9ceebb681435759abcbada7121664ec768f4
+ size 5368144472
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82c5b1d9d2af6be5d140ecf816e8097aebba7c23a3325ea3cebf4c7837227782
+ size 1668468291
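Each entry above is a Git LFS pointer: the repository tracks only the sha256 `oid` and byte `size`, while the actual shard lives in LFS storage. A downloaded shard can be checked against its pointer with nothing but the standard library; a minimal sketch, using the file name and digest from the last pointer above:

```python
import hashlib

# sha256 oid recorded in the LFS pointer for shard 5 of 5.
EXPECTED_OID = "82c5b1d9d2af6be5d140ecf816e8097aebba7c23a3325ea3cebf4c7837227782"

h = hashlib.sha256()
with open("model-00005-of-00005.safetensors", "rb") as f:
    # Hash in 1 MiB chunks to keep memory flat for multi-GB shards.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "shard is corrupt or incomplete"
```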
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
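These special tokens can be inspected without loading the 27B weights by instantiating only the tokenizer. A minimal sketch using Hugging Face `transformers`, which reads this file alongside tokenizer.json (assumes you have accepted the gated license and are logged in):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/medgemma-27b-text-it-6bit")

# Should match special_tokens_map.json above.
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)

# The image soft token from added_tokens.json sits at the top of the vocab.
print(tok.convert_tokens_to_ids("<image_soft_token>"))  # 262144
```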
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff