woodchen7 commited on
Commit
3306ae6
·
verified ·
1 Parent(s): 8ccbba4

Upload angelslim_config.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. angelslim_config.json +75 -0
angelslim_config.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "model_config": {
    "name": "QwenVL",
    "model_path": "Qwen/Qwen2.5-VL-72B-Instruct",
    "trust_remote_code": true,
    "torch_dtype": "auto",
    "device_map": "auto",
    "low_cpu_mem_usage": true,
    "use_cache": false,
    "cache_dir": null
  },
  "compression_config": {
    "name": "PTQ",
    "quantization": {
      "name": "fp8_static",
      "bits": 8,
      "quant_method": {
        "weight": "per-tensor",
        "activation": "per-tensor"
      },
      "quant_helpers": [],
      "smooth_alpha": 0.5,
      "low_memory": false,
      "modules_to_quantize": [],
      "zero_point": true,
      "mse_range": false,
      "ignore_layers": [
        "model.visual.patch_embed.proj",
        "model.lm_head",
        "model.language_model.embed_tokens",
        "model.visual.merger.mlp.0",
        "model.visual.merger.mlp.2",
        "lm_head"
      ],
      "quant_analyse": false,
      "quant_vit": true
    },
    "cache": null
  },
  "dataset_config": {
    "name": "MultiModalDataset",
    "data_path": "HuggingFaceM4/ChartQA",
    "max_seq_length": 4096,
    "num_samples": 512,
    "batch_size": 1,
    "shuffle": false
  },
  "global_config": {
    "save_path": "./qwen2_5_vl-72b_fp8_static",
    "max_seq_length": 4096,
    "hidden_size": 8192,
    "model_arch_type": "qwen2_5_vl",
    "deploy_backend": "vllm"
  },
  "infer_config": null,
  "debug_info": {
    "python": "3.12.11 (main, Jun 4 2025, 08:56:18) [GCC 11.4.0]",
    "angelslim": {
      "name": "angelslim",
      "version": "677193e9ad36197c3a8ff1caa82dd230abf2447e",
      "source": "git"
    },
    "torch": {
      "name": "torch",
      "version": "2.7.1",
      "source": "pip"
    },
    "transformers": {
      "name": "transformers",
      "version": "4.53.3",
      "source": "pip"
    },
    "torch_cuda_version": "12.6"
  }
}