sharpenb commited on
Commit
c1b5117
·
verified ·
1 Parent(s): 6efb17e

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -14,8 +14,8 @@ tags:
14
  <!-- header start -->
15
  <!-- 200823 -->
16
  <div style="width: auto; margin-left: auto; margin-right: auto">
17
- <a href="https://docs.pruna.ai/en/latest/setup/pip.html" target="_blank" rel="noopener noreferrer">
18
- <img src="https://imgur.com/rVAgqMY.png" alt="PrunaAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
19
  </a>
20
  </div>
21
  <!-- header end -->
@@ -38,7 +38,7 @@ tags:
38
  ![image info](./plots.png)
39
 
40
  **Frequently Asked Questions**
41
- - ***How does the compression work?*** The model is compressed with llm-int8.
42
  - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
43
  - ***How is the model efficiency evaluated?*** These results were obtained with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you.
44
  - ***What is the model format?*** We use safetensors.
@@ -73,13 +73,13 @@ You can run the smashed model with these steps:
73
 
74
  ## Configurations
75
 
76
- The configuration info are in `smash_config.json`.
77
 
78
  ## Credits & License
79
 
80
- The license of the smashed model follows the license of the original model. Please check the license of the original model HuggingFaceTB/SmolLM2-135M-Instruct before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
81
 
82
  ## Want to compress other models?
83
 
84
  - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
85
- - Do it by yourself [here](https://docs.pruna.ai/en/latest/setup/pip.html).
 
14
  <!-- header start -->
15
  <!-- 200823 -->
16
  <div style="width: auto; margin-left: auto; margin-right: auto">
17
+ <a href="https://www.pruna.ai/" target="_blank" rel="noopener noreferrer">
18
+ <img src="banner.png" alt="PrunaAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
19
  </a>
20
  </div>
21
  <!-- header end -->
 
38
  ![image info](./plots.png)
39
 
40
  **Frequently Asked Questions**
41
+ - ***How does the compression work?*** The model is compressed with llm_int8.
42
  - ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
43
  - ***How is the model efficiency evaluated?*** These results were obtained with the configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend running them directly in the use-case conditions to know if the smashed model can benefit you.
44
  - ***What is the model format?*** We use safetensors.
 
73
 
74
  ## Configurations
75
 
76
+ The configuration info is in `smash_config.json`. This model has been smashed with pruna in version 0.1.3.
77
 
78
  ## Credits & License
79
 
80
+ The license of the smashed model follows the license of the original model. Please check the license of the original model HuggingFaceTB/SmolLM2-135M-Instruct, which provided the base model, before using this model. The license of `pruna` is [here](https://github.com/PrunaAI/pruna/blob/main/LICENSE) on GitHub.
81
 
82
  ## Want to compress other models?
83
 
84
  - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
85
+ - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
banner.png ADDED
base_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "perplexity_y_gt": 38109.7109375,
3
+ "inference_elapsed_time_ms_@1": 400.4822006225586,
4
+ "inference_latency_ms_@1": 40.04822006225586,
5
+ "inference_throughput_batches_per_ms_@1": 0.02496989874819599,
6
+ "Loading model_emissions": 7.406049849317766e-06,
7
+ "Loading model_energy_consumed": 2.5467408914281957e-05,
8
+ "Inference_emissions": 1.7792599123990086e-05,
9
+ "Inference_energy_consumed": 6.118395187149493e-05,
10
+ "tracker_emissions": 2.894878941998153e-05,
11
+ "tracker_energy_consumed": 9.954708282175866e-05,
12
+ "disk_memory": 3158.1982421875
13
+ }
config.json CHANGED
@@ -1,57 +1,56 @@
1
  {
2
- "_name_or_path": "/covalent/.cache/models/tmprz_p8abo1bk3610p",
3
- "architectures": [
4
- "LlamaForCausalLM"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  ],
6
- "attention_bias": false,
7
- "attention_dropout": 0.0,
8
- "bos_token_id": 1,
9
- "eos_token_id": 2,
10
- "head_dim": 64,
11
- "hidden_act": "silu",
12
- "hidden_size": 576,
13
- "initializer_range": 0.041666666666666664,
14
- "intermediate_size": 1536,
15
- "is_llama_config": true,
16
- "max_position_embeddings": 8192,
17
- "mlp_bias": false,
18
- "model_type": "llama",
19
- "num_attention_heads": 9,
20
- "num_hidden_layers": 30,
21
- "num_key_value_heads": 3,
22
- "pad_token_id": 2,
23
- "pretraining_tp": 1,
24
- "quantization_config": {
25
- "_load_in_4bit": false,
26
- "_load_in_8bit": true,
27
- "bnb_4bit_compute_dtype": "bfloat16",
28
- "bnb_4bit_quant_storage": "uint8",
29
- "bnb_4bit_quant_type": "fp4",
30
- "bnb_4bit_use_double_quant": false,
31
- "llm_int8_enable_fp32_cpu_offload": false,
32
- "llm_int8_has_fp16_weight": false,
33
- "llm_int8_skip_modules": [
34
- "lm_head"
35
- ],
36
- "llm_int8_threshold": 6.0,
37
- "load_in_4bit": false,
38
- "load_in_8bit": true,
39
- "quant_method": "bitsandbytes"
40
- },
41
- "rms_norm_eps": 1e-05,
42
- "rope_interleaved": false,
43
- "rope_scaling": null,
44
- "rope_theta": 100000,
45
- "tie_word_embeddings": true,
46
- "torch_dtype": "float16",
47
- "transformers.js_config": {
48
- "kv_cache_dtype": {
49
- "fp16": "float16",
50
- "q4f16": "float16"
51
- }
52
- },
53
- "transformers_version": "4.46.2",
54
- "use_cache": true,
55
- "vocab_size": 49152,
56
- "api_key": null
57
- }
 
1
  {
2
+ "_name_or_path": "/tmp/models/tmphs3okmnxfnpy7s4b",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 1,
9
+ "eos_token_id": 2,
10
+ "head_dim": 64,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 576,
13
+ "initializer_range": 0.041666666666666664,
14
+ "intermediate_size": 1536,
15
+ "is_llama_config": true,
16
+ "max_position_embeddings": 8192,
17
+ "mlp_bias": false,
18
+ "model_type": "llama",
19
+ "num_attention_heads": 9,
20
+ "num_hidden_layers": 30,
21
+ "num_key_value_heads": 3,
22
+ "pad_token_id": 2,
23
+ "pretraining_tp": 1,
24
+ "quantization_config": {
25
+ "_load_in_4bit": false,
26
+ "_load_in_8bit": true,
27
+ "bnb_4bit_compute_dtype": "bfloat16",
28
+ "bnb_4bit_quant_storage": "uint8",
29
+ "bnb_4bit_quant_type": "fp4",
30
+ "bnb_4bit_use_double_quant": false,
31
+ "llm_int8_enable_fp32_cpu_offload": false,
32
+ "llm_int8_has_fp16_weight": false,
33
+ "llm_int8_skip_modules": [
34
+ "lm_head"
35
  ],
36
+ "llm_int8_threshold": 6.0,
37
+ "load_in_4bit": false,
38
+ "load_in_8bit": true,
39
+ "quant_method": "bitsandbytes"
40
+ },
41
+ "rms_norm_eps": 1e-05,
42
+ "rope_interleaved": false,
43
+ "rope_scaling": null,
44
+ "rope_theta": 100000,
45
+ "tie_word_embeddings": true,
46
+ "torch_dtype": "bfloat16",
47
+ "transformers.js_config": {
48
+ "kv_cache_dtype": {
49
+ "fp16": "float16",
50
+ "q4f16": "float16"
51
+ }
52
+ },
53
+ "transformers_version": "4.48.2",
54
+ "use_cache": true,
55
+ "vocab_size": 49152
56
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
generation_config.json CHANGED
@@ -3,5 +3,5 @@
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
  "pad_token_id": 2,
6
- "transformers_version": "4.46.2"
7
  }
 
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
  "pad_token_id": 2,
6
+ "transformers_version": "4.48.2"
7
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:08e20c7986b075b7b5138d33f7a1fe65214219b2a03192d6bd368b79ee9d3707
3
- size 163557570
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fac425922e1361ea1b659664f3c73cfda40fba46c55501c226a804c21376260
3
+ size 163557634
smash_config.json CHANGED
@@ -1,35 +1,32 @@
1
  {
2
- "comp_cgenerate_active": false,
3
- "comp_ctranslate_active": false,
4
- "comp_cwhisper_active": false,
5
- "comp_diffusers2_active": false,
6
- "comp_ifw_active": false,
7
- "comp_onediff_active": false,
8
- "comp_step_caching_active": false,
9
- "comp_torch_compile_active": false,
10
- "comp_ws2t_active": false,
11
- "comp_x-fast_active": false,
12
- "prune_torch-structured_active": false,
13
- "quant_aqlm_active": false,
14
- "quant_awq_active": false,
15
- "quant_gptq_active": false,
16
- "quant_half_active": false,
17
- "quant_hqq_active": false,
18
- "quant_llm-int8_active": true,
19
- "quant_quanto_active": false,
20
- "quant_torch_dynamic_active": false,
21
- "quant_torch_static_active": false,
22
- "quant_llm-int8_compute_dtype": "bfloat16",
23
- "quant_llm-int8_double_quant": false,
24
- "quant_llm-int8_enable_fp32_cpu_offload": false,
25
- "quant_llm-int8_has_fp16_weight": false,
26
- "quant_llm-int8_quant_type": "fp4",
27
- "quant_llm-int8_threshold": 6.0,
28
- "quant_llm-int8_weight_bits": 8,
29
- "max_batch_size": 1,
30
  "device": "cuda",
31
- "cache_dir": "/covalent/.cache/models/tmprz_p8abo",
32
- "task": "",
33
- "save_load_fn": "bitsandbytes",
34
- "save_load_fn_args": {}
 
 
 
 
 
 
 
 
 
 
 
35
  }
 
1
  {
2
+ "batcher": null,
3
+ "cacher": null,
4
+ "compiler": null,
5
+ "factorizer": null,
6
+ "pruner": null,
7
+ "quantizer": "llm_int8",
8
+ "llm_int8_compute_dtype": "bfloat16",
9
+ "llm_int8_double_quant": false,
10
+ "llm_int8_enable_fp32_cpu_offload": false,
11
+ "llm_int8_has_fp16_weight": false,
12
+ "llm_int8_quant_type": "fp4",
13
+ "llm_int8_threshold": 6.0,
14
+ "llm_int8_weight_bits": 8,
15
+ "batch_size": 1,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  "device": "cuda",
17
+ "save_fns": [],
18
+ "load_fns": [
19
+ "transformers",
20
+ "transformers",
21
+ "transformers"
22
+ ],
23
+ "reapply_after_load": {
24
+ "factorizer": null,
25
+ "pruner": null,
26
+ "quantizer": null,
27
+ "cacher": null,
28
+ "compiler": null,
29
+ "batcher": null
30
+ },
31
+ "api_key": null
32
  }
smashed_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "perplexity_y_gt": 22252.1484375,
3
+ "inference_elapsed_time_ms_@1": 1410.4688110351562,
4
+ "inference_latency_ms_@1": 141.04688110351563,
5
+ "inference_throughput_batches_per_ms_@1": 0.0070898412795536445,
6
+ "Loading model_emissions": 1.2611034407058323e-05,
7
+ "Loading model_energy_consumed": 4.336594765240736e-05,
8
+ "Inference_emissions": 3.463975955665669e-05,
9
+ "Inference_energy_consumed": 0.00011911679495420192,
10
+ "tracker_emissions": 5.0698589660335453e-05,
11
+ "tracker_energy_consumed": 0.00017433878255303535,
12
+ "disk_memory": 3150.1982421875
13
+ }