neilmehta24 committed
Commit a4033dd · verified · parent: 28a1774

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
   *.zip filter=lfs diff=lfs merge=lfs -text
   *.zst filter=lfs diff=lfs merge=lfs -text
   *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,36 @@
+ ---
+ license: gemma
+ library_name: transformers
+ pipeline_tag: image-text-to-text
+ extra_gated_heading: Access Gemma on Hugging Face
+ extra_gated_prompt: To access Gemma on Hugging Face, you’re required to review and
+   agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging
+   Face and click below. Requests are processed immediately.
+ extra_gated_button_content: Acknowledge license
+ base_model: google/gemma-3n-E2B-it
+ tags:
+ - automatic-speech-recognition
+ - automatic-speech-translation
+ - audio-text-to-text
+ - video-text-to-text
+ - mlx
+ ---
+ ## 💫 Community Model> gemma-3n-E2B-it by google
+
+ *👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)*.
+
+ **Model creator:** [google](https://huggingface.co/google)<br>
+ **Original model:** [gemma-3n-E2B-it](https://huggingface.co/google/gemma-3n-E2B-it)<br>
+ **MLX quantization:** provided by [LM Studio team](https://x.com/lmstudio) using [mlx_vlm](https://github.com/Blaizzy/mlx-vlm)<br>
+
+ ## Technical Details
+
+ 8-bit quantized version of gemma-3n-E2B-it using MLX, optimized for Apple Silicon.
+
+ ## Special thanks
+
+ 🙏 Special thanks to the [Apple Machine Learning Research](https://github.com/ml-explore) team for creating [MLX](https://github.com/ml-explore/mlx).
+
+ ## Disclaimers
+
+ LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
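To try the checkpoint outside the LM Studio app, the mlx_vlm project linked above exposes a small Python API. A minimal sketch follows, assuming mlx-vlm's documented `load`/`load_config`/`apply_chat_template`/`generate` entry points (signatures vary across releases) and a hypothetical repo id — substitute the actual path of this upload:

```python
# Minimal sketch using mlx-vlm (https://github.com/Blaizzy/mlx-vlm).
# The repo id below is hypothetical -- point it at this upload's actual path.
from mlx_vlm import load, generate
from mlx_vlm.prompt_utils import apply_chat_template
from mlx_vlm.utils import load_config

model_path = "lmstudio-community/gemma-3n-E2B-it-MLX-8bit"  # hypothetical id
model, processor = load(model_path)
config = load_config(model_path)

image = ["cat.png"]  # local path or URL
# Format the request with the model's chat template (see chat_template.jinja
# below), then generate; argument names may differ between mlx-vlm versions.
prompt = apply_chat_template(processor, config, "Describe this image.",
                             num_images=len(image))
output = generate(model, processor, prompt, image, max_tokens=256, verbose=False)
print(output)
```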
chat_template.jinja ADDED
@@ -0,0 +1,49 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+     {%- if messages[0]['content'] is string -%}
+         {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+     {%- else -%}
+         {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+     {%- endif -%}
+     {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+     {%- set first_user_prefix = "" -%}
+     {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+     {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+         {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+     {%- endif -%}
+     {%- if (message['role'] == 'assistant') -%}
+         {%- set role = "model" -%}
+     {%- else -%}
+         {%- set role = message['role'] -%}
+     {%- endif -%}
+     {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+     {%- if message['content'] is string -%}
+         {{ message['content'] | trim }}
+     {%- elif message['content'] is iterable -%}
+         {%- for item in message['content'] -%}
+             {%- if item['type'] == 'audio' -%}
+                 {{ '<audio_soft_token>' }}
+             {%- elif item['type'] == 'image' -%}
+                 {{ '<image_soft_token>' }}
+             {%- elif item['type'] == 'text' -%}
+                 {{ item['text'] | trim }}
+             {%- endif -%}
+         {%- endfor -%}
+     {%- else -%}
+         {{ raise_exception("Invalid content type") }}
+     {%- endif -%}
+     {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+     {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
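The template maps the `assistant` role to `model`, folds an optional system message into the first user turn, and collapses image/audio parts to single soft tokens. To inspect what it emits, a small sketch that renders a local copy of the file with plain jinja2 (transformers applies the same template via the processor):

```python
# Sketch: render chat_template.jinja with jinja2 to inspect the prompt format.
from jinja2 import Environment

def raise_exception(msg):
    # The template calls this helper when roles don't alternate
    # or a content part has an unknown type.
    raise ValueError(msg)

with open("chat_template.jinja") as f:
    template = Environment().from_string(f.read())

print(template.render(
    bos_token="<bos>",
    add_generation_prompt=True,
    raise_exception=raise_exception,
    messages=[
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "What is in this picture?"},
        ]},
    ],
))
# Expected shape of the output:
# <bos><start_of_turn>user
# <image_soft_token>What is in this picture?<end_of_turn>
# <start_of_turn>model
```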
config.json ADDED
@@ -0,0 +1,440 @@
+ {
+   "add_cross_attention": false,
+   "architectures": [
+     "Gemma3nForConditionalGeneration"
+   ],
+   "audio_config": {
+     "return_dict": true,
+     "output_hidden_states": false,
+     "torchscript": false,
+     "torch_dtype": "bfloat16",
+     "use_bfloat16": false,
+     "tf_legacy_loss": false,
+     "pruned_heads": {},
+     "tie_word_embeddings": true,
+     "chunk_size_feed_forward": 0,
+     "is_encoder_decoder": false,
+     "is_decoder": false,
+     "cross_attention_hidden_size": null,
+     "add_cross_attention": false,
+     "tie_encoder_decoder": false,
+     "max_length": 20,
+     "min_length": 0,
+     "do_sample": false,
+     "early_stopping": false,
+     "num_beams": 1,
+     "num_beam_groups": 1,
+     "diversity_penalty": 0.0,
+     "temperature": 1.0,
+     "top_k": 50,
+     "top_p": 1.0,
+     "typical_p": 1.0,
+     "repetition_penalty": 1.0,
+     "length_penalty": 1.0,
+     "no_repeat_ngram_size": 0,
+     "encoder_no_repeat_ngram_size": 0,
+     "bad_words_ids": null,
+     "num_return_sequences": 1,
+     "output_scores": false,
+     "return_dict_in_generate": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "remove_invalid_values": false,
+     "exponential_decay_length_penalty": null,
+     "suppress_tokens": null,
+     "begin_suppress_tokens": null,
+     "architectures": null,
+     "finetuning_task": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "tokenizer_class": null,
+     "prefix": null,
+     "bos_token_id": null,
+     "pad_token_id": null,
+     "eos_token_id": null,
+     "sep_token_id": null,
+     "decoder_start_token_id": null,
+     "task_specific_params": null,
+     "problem_type": null,
+     "_name_or_path": "",
+     "conf_positional_bias_size": 256,
+     "model_type": "gemma3n_audio",
+     "sscp_conv_eps": 0.001,
+     "input_feat_size": 128,
+     "hidden_size": 1536,
+     "rms_norm_eps": 1e-06,
+     "vocab_size": 128,
+     "vocab_offset": 262272,
+     "gradient_clipping": 10000000000.0,
+     "conf_attention_chunk_size": 12,
+     "conf_attention_context_left": 13,
+     "conf_attention_context_right": 0,
+     "conf_attention_logit_cap": 50.0,
+     "conf_num_attention_heads": 8,
+     "conf_num_hidden_layers": 12,
+     "conf_conv_kernel_size": 5,
+     "conf_reduction_factor": 4,
+     "conf_residual_weight": 0.5,
+     "sscp_conv_channel_size": [
+       128,
+       32
+     ],
+     "sscp_conv_group_norm_eps": 0.001,
+     "sscp_conv_kernel_size": [
+       [
+         3,
+         3
+       ],
+       [
+         3,
+         3
+       ]
+     ],
+     "sscp_conv_stride_size": [
+       [
+         2,
+         2
+       ],
+       [
+         2,
+         2
+       ]
+     ],
+     "output_attentions": false
+   },
+   "audio_soft_tokens_per_image": 188,
+   "audio_token_id": 262273,
+   "bad_words_ids": null,
+   "begin_suppress_tokens": null,
+   "boa_token_id": 256000,
+   "boi_token_id": 255999,
+   "bos_token_id": null,
+   "chunk_size_feed_forward": 0,
+   "cross_attention_hidden_size": null,
+   "decoder_start_token_id": null,
+   "diversity_penalty": 0.0,
+   "do_sample": false,
+   "early_stopping": false,
+   "encoder_no_repeat_ngram_size": 0,
+   "eoa_token_id": 262272,
+   "eoi_token_id": 262144,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "exponential_decay_length_penalty": null,
+   "finetuning_task": null,
+   "forced_bos_token_id": null,
+   "forced_eos_token_id": null,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1"
+   },
+   "image_token_id": 262145,
+   "initializer_range": 0.02,
+   "is_decoder": false,
+   "is_encoder_decoder": false,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1
+   },
+   "length_penalty": 1.0,
+   "max_length": 20,
+   "min_length": 0,
+   "model_type": "gemma3n",
+   "no_repeat_ngram_size": 0,
+   "num_beam_groups": 1,
+   "num_beams": 1,
+   "num_return_sequences": 1,
+   "output_attentions": false,
+   "output_hidden_states": false,
+   "output_scores": false,
+   "pad_token_id": null,
+   "prefix": null,
+   "problem_type": null,
+   "pruned_heads": {},
+   "quantization": {
+     "group_size": 64,
+     "bits": 8
+   },
+   "remove_invalid_values": false,
+   "repetition_penalty": 1.0,
+   "return_dict": true,
+   "return_dict_in_generate": false,
+   "sep_token_id": null,
+   "suppress_tokens": null,
+   "task_specific_params": null,
+   "temperature": 1.0,
+   "text_config": {
+     "return_dict": true,
+     "output_hidden_states": false,
+     "torchscript": false,
+     "torch_dtype": "bfloat16",
+     "use_bfloat16": false,
+     "tf_legacy_loss": false,
+     "pruned_heads": {},
+     "tie_word_embeddings": true,
+     "chunk_size_feed_forward": 0,
+     "is_encoder_decoder": false,
+     "is_decoder": false,
+     "cross_attention_hidden_size": null,
+     "add_cross_attention": false,
+     "tie_encoder_decoder": false,
+     "max_length": 20,
+     "min_length": 0,
+     "do_sample": false,
+     "early_stopping": false,
+     "num_beams": 1,
+     "num_beam_groups": 1,
+     "diversity_penalty": 0.0,
+     "temperature": 1.0,
+     "top_k": 50,
+     "top_p": 1.0,
+     "typical_p": 1.0,
+     "repetition_penalty": 1.0,
+     "length_penalty": 1.0,
+     "no_repeat_ngram_size": 0,
+     "encoder_no_repeat_ngram_size": 0,
+     "bad_words_ids": null,
+     "num_return_sequences": 1,
+     "output_scores": false,
+     "return_dict_in_generate": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "remove_invalid_values": false,
+     "exponential_decay_length_penalty": null,
+     "suppress_tokens": null,
+     "begin_suppress_tokens": null,
+     "architectures": null,
+     "finetuning_task": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "tokenizer_class": null,
+     "prefix": null,
+     "bos_token_id": 2,
+     "pad_token_id": 0,
+     "eos_token_id": 1,
+     "sep_token_id": null,
+     "decoder_start_token_id": null,
+     "task_specific_params": null,
+     "problem_type": null,
+     "_name_or_path": "",
+     "altup_lr_multiplier": 1.0,
+     "model_type": "gemma3n_text",
+     "query_pre_attn_scalar": 256,
+     "vocab_size": 262400,
+     "vocab_size_per_layer_input": 262144,
+     "max_position_embeddings": 32768,
+     "hidden_size": 2048,
+     "intermediate_size": [
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192,
+       8192
+     ],
+     "num_hidden_layers": 30,
+     "num_attention_heads": 8,
+     "head_dim": 256,
+     "num_key_value_heads": 2,
+     "initializer_range": 0.02,
+     "rms_norm_eps": 1e-06,
+     "use_cache": true,
+     "rope_theta": 1000000.0,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "sliding_window": 512,
+     "final_logit_softcapping": 30.0,
+     "layer_types": [
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention"
+     ],
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": null,
+     "hidden_size_per_layer_input": 256,
+     "num_kv_shared_layers": 10,
+     "altup_active_idx": 0,
+     "altup_coef_clip": 120.0,
+     "altup_correct_scale": true,
+     "altup_num_inputs": 4,
+     "laurel_rank": 64,
+     "activation_sparsity_pattern": [
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.95,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0,
+       0.0
+     ],
+     "output_attentions": false
+   },
+   "tf_legacy_loss": false,
+   "tie_encoder_decoder": false,
+   "tie_word_embeddings": true,
+   "tokenizer_class": null,
+   "top_k": 50,
+   "top_p": 1.0,
+   "torchscript": false,
+   "transformers_version": "4.53.0",
+   "typical_p": 1.0,
+   "use_bfloat16": false,
+   "vision_config": {
+     "return_dict": true,
+     "output_hidden_states": false,
+     "torchscript": false,
+     "torch_dtype": "bfloat16",
+     "use_bfloat16": false,
+     "tf_legacy_loss": false,
+     "pruned_heads": {},
+     "tie_word_embeddings": true,
+     "chunk_size_feed_forward": 0,
+     "is_encoder_decoder": false,
+     "is_decoder": false,
+     "cross_attention_hidden_size": null,
+     "add_cross_attention": false,
+     "tie_encoder_decoder": false,
+     "max_length": 20,
+     "min_length": 0,
+     "do_sample": false,
+     "early_stopping": false,
+     "num_beams": 1,
+     "num_beam_groups": 1,
+     "diversity_penalty": 0.0,
+     "temperature": 1.0,
+     "top_k": 50,
+     "top_p": 1.0,
+     "typical_p": 1.0,
+     "repetition_penalty": 1.0,
+     "length_penalty": 1.0,
+     "no_repeat_ngram_size": 0,
+     "encoder_no_repeat_ngram_size": 0,
+     "bad_words_ids": null,
+     "num_return_sequences": 1,
+     "output_scores": false,
+     "return_dict_in_generate": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "remove_invalid_values": false,
+     "exponential_decay_length_penalty": null,
+     "suppress_tokens": null,
+     "begin_suppress_tokens": null,
+     "architectures": null,
+     "finetuning_task": null,
+     "tokenizer_class": null,
+     "prefix": null,
+     "bos_token_id": null,
+     "pad_token_id": null,
+     "eos_token_id": null,
+     "sep_token_id": null,
+     "decoder_start_token_id": null,
+     "task_specific_params": null,
+     "problem_type": null,
+     "_name_or_path": "",
+     "label_names": [
+       "LABEL_0",
+       "LABEL_1"
+     ],
+     "model_type": "gemma3n_vision",
+     "num_classes": 2,
+     "initializer_range": 0.02,
+     "do_pooling": true,
+     "model_args": null,
+     "architecture": "mobilenetv5_300m_enc",
+     "hidden_size": 2048,
+     "vocab_size": 128,
+     "vocab_offset": 262144,
+     "rms_norm_eps": 1e-06,
+     "output_attentions": false
+   },
+   "vision_soft_tokens_per_image": 256
+ }
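Most of these keys mirror the original google/gemma-3n-E2B-it config; the part specific to this upload is the `quantization` block (`group_size: 64, bits: 8`), i.e. grouped affine quantization in the MLX style, where each run of 64 consecutive weights shares one scale and bias. A toy numpy round trip under that assumption (illustrative only, not MLX's actual kernels):

```python
# Toy sketch of grouped affine quantization with group_size=64, bits=8:
# each group of 64 consecutive weights stores uint8 codes plus a scale/bias.
import numpy as np

def quantize_group(w, bits=8):
    qmax = 2**bits - 1                       # 255 for 8-bit
    lo, hi = float(w.min()), float(w.max())
    scale = (hi - lo) / qmax or 1.0          # guard against a constant group
    q = np.round((w - lo) / scale).astype(np.uint8)
    return q, scale, lo                      # codes + per-group scale and bias

def dequantize_group(q, scale, bias):
    return q.astype(np.float32) * scale + bias

w = np.random.randn(64).astype(np.float32)   # one group of 64 weights
q, scale, bias = quantize_group(w)
err = np.abs(w - dequantize_group(q, scale, bias)).max()
print(f"max abs error: {err:.5f} (at most scale/2 = {scale / 2:.5f})")
```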
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "pad_token_id": 0,
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.53.0.dev0"
+ }
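These defaults turn sampling on (`do_sample: true`) with top-k 64 followed by nucleus top-p 0.95, and stop on token 1 (`<eos>`) or 106 (`<end_of_turn>`). A toy sketch of that filtering order over a logits vector (262,400 matches `vocab_size` in config.json):

```python
# Toy sketch of the sampling implied by generation_config.json:
# temperature-scale, keep the top 64 logits, then keep the smallest prefix
# of tokens whose cumulative probability reaches 0.95, and sample from it.
import numpy as np

def sample(logits, top_k=64, top_p=0.95, temperature=1.0):
    logits = logits / temperature
    kth = np.sort(logits)[-top_k]                      # top-k cutoff value
    logits = np.where(logits < kth, -np.inf, logits)   # mask everything else
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    order = np.argsort(probs)[::-1]                    # most likely first
    keep = order[: np.searchsorted(np.cumsum(probs[order]), top_p) + 1]
    return np.random.choice(keep, p=probs[keep] / probs[keep].sum())

token = sample(np.random.randn(262400))  # vocab_size from config.json
print(token)
```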
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1376e43bb0b05f36692b0696370f97541810cb3d59afe04c9cde2ee07146a1a
+ size 5367794792
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:312e771912ca9de1f7e5fe6c58e0da757adc3367630fe1b7191e52ebe20c1caf
+ size 688452198
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": false,
+   "device": null,
+   "dither": 0.0,
+   "do_center_crop": null,
+   "do_convert_rgb": null,
+   "do_normalize": false,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_size": 128,
+   "fft_length": 1024,
+   "fft_overdrive": true,
+   "frame_length": 512,
+   "hop_length": 160,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "SiglipImageProcessor",
+   "image_seq_length": 256,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "input_scale_factor": 1.0,
+   "max_frequency": 7600.0,
+   "mel_floor": 1e-05,
+   "min_frequency": 125.0,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "per_bin_mean": null,
+   "per_bin_stddev": null,
+   "preemphasis": 0.97,
+   "preemphasis_htk_flavor": true,
+   "processor_class": "Gemma3nProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "return_attention_mask": false,
+   "return_tensors": null,
+   "sampling_rate": 16000,
+   "size": {
+     "height": 768,
+     "width": 768
+   }
+ }
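A few of these values decode each other: `rescale_factor` is 1/255 (uint8 → [0, 1]), and since `do_normalize` is false the `image_mean`/`image_std` entries are not applied. On the audio side, the front end is a 128-bin mel pipeline at 16 kHz with a 512-sample frame (32 ms), 160-sample hop (10 ms), and a 125–7600 Hz band. A sketch of the image half, assuming only what the config states:

```python
# Sketch of the image path implied by this config: resize to 768x768 with
# bilinear resampling ("resample": 2 is PIL's BILINEAR code), rescale by
# 1/255; "do_normalize" is false, so image_mean/image_std go unused here.
import numpy as np
from PIL import Image

def preprocess(path, size=768, rescale_factor=1 / 255):
    img = Image.open(path).convert("RGB").resize((size, size), Image.BILINEAR)
    x = np.asarray(img, dtype=np.float32) * rescale_factor  # "do_rescale"
    return x.transpose(2, 0, 1)                             # "channels_first"

x = preprocess("cat.png")  # any local image
print(x.shape, x.min(), x.max())  # (3, 768, 768), values in [0.0, 1.0]
```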
processor_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "audio_seq_length": 188,
+   "image_seq_length": 256,
+   "processor_class": "Gemma3nProcessor"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "audio_token": "<audio_soft_token>",
+   "boa_token": "<start_of_audio>",
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoa_token": "<end_of_audio>",
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c35ee648c07754b44cd9e371c75d4caa05c4504910b7ad29b1847ee9d8ba5d
+ size 33442553
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea5f0cc48abfbfc04d14562270a32e02149a3e7035f368cc5a462786f4a59961
+ size 4696020
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff