woodchen7 committed on
Commit
65a9ffd
·
verified ·
1 Parent(s): d0004de

Upload angelslim_config.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. angelslim_config.json +72 -0
angelslim_config.json ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_config": {
3
+ "name": "QwenVL",
4
+ "model_path": "Qwen/Qwen2.5-VL-7B-Instruct",
5
+ "trust_remote_code": true,
6
+ "torch_dtype": "auto",
7
+ "device_map": "cpu",
8
+ "low_cpu_mem_usage": true,
9
+ "use_cache": false,
10
+ "cache_dir": null
11
+ },
12
+ "compression_config": {
13
+ "name": "PTQ",
14
+ "quantization": {
15
+ "name": "int4_awq",
16
+ "bits": 4,
17
+ "quant_method": {
18
+ "weight": "per-group",
19
+ "group_size": 128,
20
+ "zero_point": true,
21
+ "mse_range": false
22
+ },
23
+ "quant_helpers": [],
24
+ "smooth_alpha": 0.5,
25
+ "low_memory": false,
26
+ "modules_to_quantize": [],
27
+ "zero_point": true,
28
+ "mse_range": false,
29
+ "ignore_layers": [
30
+ "lm_head",
31
+ "model.language_model.embed_tokens"
32
+ ],
33
+ "quant_analyse": false
34
+ },
35
+ "cache": null
36
+ },
37
+ "dataset_config": {
38
+ "name": "MultiModalDataset",
39
+ "data_path": "HuggingFaceM4/ChartQA",
40
+ "max_seq_length": 4096,
41
+ "num_samples": 128,
42
+ "batch_size": 1,
43
+ "shuffle": false
44
+ },
45
+ "global_config": {
46
+ "save_path": "./qwen2_5_vl-7b_int4_awq",
47
+ "max_seq_length": 4096,
48
+ "hidden_size": 3584,
49
+ "model_arch_type": "qwen2_5_vl",
50
+ "deploy_backend": "vllm"
51
+ },
52
+ "infer_config": null,
53
+ "debug_info": {
54
+ "python": "3.12.11 | packaged by Anaconda, Inc. | (main, Jun 5 2025, 13:09:17) [GCC 11.2.0]",
55
+ "angelslim": {
56
+ "name": "angelslim",
57
+ "version": "1c1de9bcf64058b7ba1f6f5836a98a8b7c529a16",
58
+ "source": "git"
59
+ },
60
+ "torch": {
61
+ "name": "torch",
62
+ "version": "2.6.0+cu124",
63
+ "source": "pip"
64
+ },
65
+ "transformers": {
66
+ "name": "transformers",
67
+ "version": "4.55.0.dev0",
68
+ "source": "pip"
69
+ },
70
+ "torch_cuda_version": "12.4"
71
+ }
72
+ }