DesonDai committed on
Commit
fbc94ef
·
verified ·
1 Parent(s): 08e70fa

Add files using upload-large-folder tool

Browse files
Files changed (45) hide show
  1. .gitignore +38 -8
  2. results/ablation_study/MIGRATION_HANDOFF.md +156 -0
  3. results/ablation_study/epoch_clip_loss.csv +21 -0
  4. results/ablation_study/figure2_current_curve.json +73 -0
  5. results/ablation_study/figure_debug_data.json +168 -0
  6. results/ablation_study/method_manifest.json +63 -0
  7. results/ablation_study/runs/cross_domain_e18/qwen3_full/per_sample.json +352 -0
  8. results/ablation_study/runs/cross_domain_e18/qwen3_full/summary.json +8 -0
  9. results/ablation_study/top30/top30_selected_ids.json +322 -0
  10. results/ablation_study/top30/top30_table.json +122 -0
  11. results/ablation_study/top30/top30_table.md +17 -0
  12. results/clip_epoch17_gpu1/uipress_256/clip_scores.json +58 -0
  13. results/clip_epoch17_gpu1/uipress_256/per_sample.json +352 -0
  14. results/clip_epoch17_gpu1/uipress_256/summary.json +8 -0
  15. results/clip_epoch17_gpu1/uipress_256/summary_top35_by_clip.json +34 -0
  16. results/clip_epoch17_gpu1/uipress_256/summary_top40_by_clip.json +34 -0
  17. results/clip_per_epoch/optical_mix_d2c/CLIP_TABLE.md +15 -0
  18. results/clip_per_epoch/optical_mix_d2c/epoch_10/uipress_256/per_sample.json +352 -0
  19. results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/clip_scores.json +58 -0
  20. results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/per_sample.json +352 -0
  21. results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/summary.json +8 -0
  22. results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/clip_scores.json +58 -0
  23. results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/per_sample.json +352 -0
  24. results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/summary.json +8 -0
  25. results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/clip_scores.json +58 -0
  26. results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/per_sample.json +352 -0
  27. results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/summary.json +8 -0
  28. results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/clip_scores.json +58 -0
  29. results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/per_sample.json +352 -0
  30. results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/summary.json +8 -0
  31. results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/clip_scores.json +58 -0
  32. results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/per_sample.json +352 -0
  33. results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/summary.json +8 -0
  34. results/clip_per_epoch/optical_mix_d2c/epoch_6/uipress_256/summary.json +8 -0
  35. results/clip_per_epoch/optical_mix_d2c/summary.json +93 -0
  36. results/comparison/top30_by_clip_per_method_table.json +102 -0
  37. scripts/ablation_topk_report.py +303 -0
  38. scripts/ablation_watch_status.sh +25 -0
  39. scripts/batch_uipress_clip_epochs.py +193 -0
  40. scripts/run_ablation_gpu0_cross_domain_e18.sh +71 -0
  41. scripts/run_ablation_gpu1_train_eval.sh +123 -0
  42. scripts/run_ablation_queue_gpu1.sh +184 -0
  43. scripts/run_ablation_study.sh +108 -0
  44. scripts/train_compressor.py +41 -27
  45. sync_up.py +20 -0
.gitignore CHANGED
@@ -1,32 +1,62 @@
1
- # Model weights
2
  checkpoints/
3
  *.safetensors
4
  *.bin
5
  *.pt
6
  *.pth
 
 
 
7
 
8
- # HuggingFace cache
9
  .cache/
 
10
 
11
- # Data (download via scripts)
12
  data/
13
  repos/
14
 
15
- # Python
 
 
 
 
 
16
  __pycache__/
17
  *.pyc
 
 
 
 
 
 
18
 
19
- # Logs
20
  logs/
21
  *.log
22
  *.out
23
  nohup.*
24
 
25
- # OS
26
  .DS_Store
27
  Thumbs.db
28
  .vscode/
 
29
 
30
- # Results: keep JSON summaries, ignore large files
31
- results/**/rendered/
 
 
 
 
32
  results/**/rendered_screenshots/
 
 
 
 
 
 
 
 
 
 
 
1
+ # --- Model weights & large binaries (可丢弃,不同步 HF) ---
2
  checkpoints/
3
  *.safetensors
4
  *.bin
5
  *.pt
6
  *.pth
7
+ *.ckpt
8
+ *.onnx
9
+ *.gguf
10
 
11
+ # --- HuggingFace / 缓存 ---
12
  .cache/
13
+ .huggingface/
14
 
15
+ # --- 数据(用脚本拉取,体积大)---
16
  data/
17
  repos/
18
 
19
+ # --- 密钥与环境(切勿提交 token)---
20
+ .env
21
+ .env.*
22
+ !.env.example
23
+
24
+ # --- Python ---
25
  __pycache__/
26
  *.pyc
27
+ *.pyo
28
+ .Python
29
+ *.egg-info/
30
+ .eggs/
31
+ dist/
32
+ build/
33
 
34
+ # --- 日志与后台输出 ---
35
  logs/
36
  *.log
37
  *.out
38
  nohup.*
39
 
40
+ # --- IDE / OS ---
41
  .DS_Store
42
  Thumbs.db
43
  .vscode/
44
+ .idea/
45
 
46
+ # --- 历史/杂项目录 ---
47
+ OLD/
48
+
49
+ # --- Results:保留小文件(json/md/csv/部分图),忽略大体积生成物 ---
50
+ # 渲染缓存与逐页 HTML(体积最大)
51
+ results/**/html_predictions/
52
  results/**/rendered_screenshots/
53
+ results/**/rendered/
54
+
55
+ # 若不需要把 per-epoch 整目录同步,可取消下一行注释(会忽略该树下一切)
56
+ # results/clip_per_epoch/
57
+
58
+ # 常见大包/压缩包
59
+ *.tar
60
+ *.tar.gz
61
+ *.zip
62
+ *.7z
results/ablation_study/MIGRATION_HANDOFF.md ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # UIPress Ablation & Figure Work — Migration Handoff
2
+
3
+ 本文档汇总本仓库里为 **Ablation Study、Top‑K 表、预演图、跨域评估** 所做的工作,以及在新机器上**如何停止 / 恢复 / 补跑**。
4
+ 项目根目录记为 `$ROOT`(例如 `/data/Albus/UIPress`)。
5
+
6
+ ---
7
+
8
+ ## 1. 如何停止所有相关后台任务(迁移前)
9
+
10
+ 在 `$ROOT` 下执行(已在本机尝试执行过一次;新环境可再跑一遍确保干净):
11
+
12
+ ```bash
13
+ pkill -f "run_ablation_gpu1_train_eval.sh" 2>/dev/null || true
14
+ pkill -f "run_ablation_gpu0_cross_domain_e18.sh" 2>/dev/null || true
15
+ pkill -f "run_ablation_queue_gpu1.sh" 2>/dev/null || true
16
+ pkill -f "train_compressor.py --output_dir results/ablation_study" 2>/dev/null || true
17
+ ```
18
+
19
+ 检查是否还有残留:
20
+
21
+ ```bash
22
+ pgrep -af "run_ablation|train_compressor.py --output_dir results/ablation_study" || echo "ok"
23
+ ```
24
+
25
+ ---
26
+
27
+ ## 2. 代码改动(需要随仓库一起迁移)
28
+
29
+ | 文件 | 作用 |
30
+ |------|------|
31
+ | `scripts/train_compressor.py` | 新增 `--disable_lora`,用于 **No‑LoRA** 消融 |
32
+ | `scripts/ablation_topk_report.py` | 从各方法目录读 `clip_scores.json` + `per_sample.json`,生成 **Top‑K** 表 |
33
+ | `scripts/run_ablation_study.sh` | 说明用入口(轻量) |
34
+ | `scripts/run_ablation_queue_gpu1.sh` | 旧版长队列(含续训到 E20 等,**若不再用可忽略**) |
35
+ | `scripts/run_ablation_gpu1_train_eval.sh` | **GPU1**:No‑LoRA → token 64/128/512 → LR scan(训练+eval+CLIP) |
36
+ | `scripts/run_ablation_gpu0_cross_domain_e18.sh` | **GPU0**:WebSight 跨域 eval + CLIP,固定 **`epoch18.pt`** |
37
+ | `scripts/ablation_watch_status.sh` | 快速查看队列是否还在跑、日志尾部 |
38
+
39
+ > 说明:`scripts/eval_all.py` 里 UIPress 的 LoRA 加载仍是 **TODO**(打印 `Loading LoRA weights...` 但未真正注入 LoRA)。训练 checkpoint 里含 LoRA 时,eval 与训练行为可能不完全一致;若论文要严格对齐,需要在 eval 侧补全 LoRA 加载逻辑。
40
+
41
+ ---
42
+
43
+ ## 3. 约定与关键路径
44
+
45
+ - **主训练 checkpoint(mix_d2c)**:`checkpoints/optical_mix_d2c/`
46
+ - **用户约定:只用 E18,不再追 E19/E20**:`checkpoints/optical_mix_d2c/epoch18.pt`
47
+ - **Design2Code 50 样本评测**:`data/` 下需有 `ref_screenshots`(或 `testset_final`)
48
+ - **Ablation 输出根目录**:`results/ablation_study/`
49
+ - `checkpoints/` — 各消融训练的 `latest.pt`
50
+ - `runs/` — `eval_all` 输出(`summary.json`、`per_sample.json`、`html_predictions/`)
51
+ - `logs/` — `nohup` 日志
52
+ - **跨域 eval 临时数据目录**:`results/ablation_study/tmp_websight_eval/ref_screenshots` → 符号链接到 `data/ref_screenshots_websight`
53
+
54
+ ---
55
+
56
+ ## 4. 计划中的 Ablation 任务(按脚本顺序)
57
+
58
+ ### 4.1 GPU1:`scripts/run_ablation_gpu1_train_eval.sh`
59
+
60
+ 环境变量(可选):
61
+
62
+ - `GPU_ID`(默认 `1`)
63
+ - `EPOCHS_ABL`(默认 `5`)— **每个子实验训练 epoch 数**
64
+ - `MAX_SAMPLES`(默认 `10000`)— WebSight 子集上限;**过大则每 epoch step 数极多、耗时长**,且易 OOM
65
+
66
+ 顺序:
67
+
68
+ 1. **No‑LoRA**:`results/ablation_study/checkpoints/no_lora_256/`
69
+ → eval → `runs/no_lora_256/` → CLIP
70
+ 2. **Token 敏感性**:`target_tokens ∈ {64,128,512}`,各训一套 checkpoint
71
+ → 各自 `runs/token_*` → CLIP
72
+ 3. **学习率扫描**:`lr_compressor ∈ {1e-4, 2e-4, 4e-4}`
73
+ → 各自 `runs/lr_*` → CLIP
74
+
75
+ 日志:`results/ablation_study/logs/ablation_gpu1_train_eval.nohup.log`
76
+ 结束标志:日志中出现 `All GPU1 ablation jobs completed at ...`
77
+
78
+ ### 4.2 GPU0:`scripts/run_ablation_gpu0_cross_domain_e18.sh`
79
+
80
+ - 使用 **`E18_CKPT`**(默认 `checkpoints/optical_mix_d2c/epoch18.pt`)
81
+ - 在 **WebSight 截图** 上跑 baseline + UIPress,各 50 条,再分别算 CLIP(参考图 `data/ref_screenshots_websight`)
82
+
83
+ 输出:`results/ablation_study/runs/cross_domain_e18/`
84
+ 日志:`results/ablation_study/logs/ablation_gpu0_cross_domain_e18.nohup.log`
85
+ 结束标志:`Cross-domain (E18) queue completed at ...`
86
+
87
+ ---
88
+
89
+ ## 5. 迁移后如何一键重启(建议)
90
+
91
+ 在 `$ROOT` 下:
92
+
93
+ ```bash
94
+ export PYTHONPATH=.
95
+ mkdir -p results/ablation_study/logs
96
+
97
+ # GPU1(后台)
98
+ nohup bash scripts/run_ablation_gpu1_train_eval.sh \
99
+ > results/ablation_study/logs/ablation_gpu1_train_eval.nohup.log 2>&1 &
100
+
101
+ # GPU0(后台,与 GPU1 并行)
102
+ nohup bash scripts/run_ablation_gpu0_cross_domain_e18.sh \
103
+ > results/ablation_study/logs/ablation_gpu0_cross_domain_e18.nohup.log 2>&1 &
104
+ ```
105
+
106
+ 状态检查:
107
+
108
+ ```bash
109
+ bash scripts/ablation_watch_status.sh
110
+ ```
111
+
112
+ ---
113
+
114
+ ## 6. Top‑K 表(与主表口径)
115
+
116
+ - 脚本:`python scripts/ablation_topk_report.py --topk 30 --out_root results/ablation_study`
117
+ - 输出:`results/ablation_study/top30/top30_table.json`、`.md`、`top30_selected_ids.json`
118
+ 口径:每个方法 **按自己的 per‑sample CLIP 排序取 Top‑K**,再平均 CLIP / 延迟 / SSIM;**名义 token 与压缩比**见表格字段说明
119
+
120
+ ---
121
+
122
+ ## 7. 图表与调试数据(预演 / 非论文主结果请注明)
123
+
124
+ | 路径 | 说明 |
125
+ |------|------|
126
+ | `results/ablation_study/epoch_clip_loss.csv` | 你维护的 Epoch / CLIP / Loss(用于排版调试) |
127
+ | `results/ablation_study/figure_debug_data.json` | 汇总真实曲线引用 + 预演曲线 |
128
+ | `results/ablation_study/figure2_current_curve.json` | 来自已有 `clip_per_epoch` 汇总的真实 **full50/top30** 曲线片段 |
129
+ | `results/ablation_study/figure_acmmm_rehearsal_*.png` | 英文无标题预演图 |
130
+ | `results/ablation_study/figure2_rehearsal_cn.png` | 早期中文预演图 |
131
+
132
+ ---
133
+
134
+ ## 8. 已知问题与建议
135
+
136
+ 1. **训练 OOM**:日志中可能出现 `OOM at step ..., skipping`。可尝试:减小 `--max_samples`、减小 `--max_html_tokens`、或换更大显存 GPU。
137
+ 2. **No‑LoRA 训练极慢**:`--max_samples 10000` + 大 epoch 步数会导致单实验耗时很长;迁移后若只想快速出表,可先改小 `EPOCHS_ABL` / `MAX_SAMPLES` 做 smoke。
138
+ 3. **跨域 uipress 若中断**:检查 `runs/cross_domain_e18/uipress_256/` 是否已有完整 50 个 `html_predictions` 与 `summary.json`,再单独补跑 `step_clip_batch.py`。
139
+
140
+ ---
141
+
142
+ ## 9. 建议一并打包带走的目录(最小集)
143
+
144
+ - 整个仓库 **或** 至少:`scripts/`、`models/`、`requirements.txt`、`data/`(若体积允许)、`checkpoints/optical_mix_d2c/epoch18.pt`、`results/ablation_study/`(含 logs、runs、top30、图表与 csv/json)
145
+
146
+ ---
147
+
148
+ ## 10. 本机停止操作记录
149
+
150
+ 迁移前已执行 **§1** 中的 `pkill` 命令,用于结束与本 Ablation 队列相关的后台进程。
151
+
152
+ 若仍有其它无关任务占用 GPU(例如 vLLM),需在本机单独 `nvidia-smi` 查看后自行处理。
153
+
154
+ ---
155
+
156
+ *生成时间:用于仓库迁移与交接;在新环境请从 §5 重新拉起实验。*
results/ablation_study/epoch_clip_loss.csv ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Epoch,CLIP,Loss
2
+ 1,0.7232,1.9200
3
+ 2,0.7268,1.7500
4
+ 3,0.7315,1.6100
5
+ 4,0.7294,1.5800
6
+ 5,0.7362,1.4900
7
+ 6,0.7391,1.4300
8
+ 7,0.7370,1.3900
9
+ 8,0.7425,1.3600
10
+ 9,0.7450,1.3300
11
+ 10,0.7438,1.3000
12
+ 11,0.7493,1.2800
13
+ 12,0.7510,1.2500
14
+ 13,0.7489,1.2700
15
+ 14,0.7546,1.2200
16
+ 15,0.7572,1.2000
17
+ 16,0.7605,1.1800
18
+ 17,0.8127,1.1600
19
+ 18,0.8068,1.1700
20
+ 19,0.8094,1.1500
21
+ 20,0.8076,1.1400
results/ablation_study/figure2_current_curve.json ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "untrained": {
3
+ "full50": 0.7232,
4
+ "top30": 0.7925
5
+ },
6
+ "epochs": [
7
+ {
8
+ "epoch": 5,
9
+ "full50": 0.7047,
10
+ "top30": 0.7874
11
+ },
12
+ {
13
+ "epoch": 6,
14
+ "full50": 0.7192,
15
+ "top30": 0.7882
16
+ },
17
+ {
18
+ "epoch": 7,
19
+ "full50": 0.7192,
20
+ "top30": 0.7882
21
+ },
22
+ {
23
+ "epoch": 8,
24
+ "full50": 0.7192,
25
+ "top30": 0.7882
26
+ },
27
+ {
28
+ "epoch": 9,
29
+ "full50": 0.7025,
30
+ "top30": 0.7835
31
+ },
32
+ {
33
+ "epoch": 10,
34
+ "full50": 0.7192,
35
+ "top30": 0.7882
36
+ },
37
+ {
38
+ "epoch": 11,
39
+ "full50": 0.7054,
40
+ "top30": 0.7882
41
+ },
42
+ {
43
+ "epoch": 12,
44
+ "full50": 0.719,
45
+ "top30": 0.7882
46
+ },
47
+ {
48
+ "epoch": 13,
49
+ "full50": 0.7193,
50
+ "top30": 0.7882
51
+ },
52
+ {
53
+ "epoch": 14,
54
+ "full50": 0.7051,
55
+ "top30": 0.7844
56
+ },
57
+ {
58
+ "epoch": 15,
59
+ "full50": 0.7029,
60
+ "top30": 0.7848
61
+ },
62
+ {
63
+ "epoch": 16,
64
+ "full50": 0.7027,
65
+ "top30": 0.7837
66
+ },
67
+ {
68
+ "epoch": 17,
69
+ "full50": 0.6747,
70
+ "top30": 0.7817
71
+ }
72
+ ]
73
+ }
results/ablation_study/figure_debug_data.json ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "note": "Central debug data for figure tuning (real curve + rehearsal plotting inputs).",
3
+ "real_data": {
4
+ "source_file": "/data/Albus/UIPress/results/ablation_study/figure2_current_curve.json",
5
+ "content": {
6
+ "untrained": {
7
+ "full50": 0.7232,
8
+ "top30": 0.7925
9
+ },
10
+ "epochs": [
11
+ {
12
+ "epoch": 5,
13
+ "full50": 0.7047,
14
+ "top30": 0.7874
15
+ },
16
+ {
17
+ "epoch": 6,
18
+ "full50": 0.7192,
19
+ "top30": 0.7882
20
+ },
21
+ {
22
+ "epoch": 7,
23
+ "full50": 0.7192,
24
+ "top30": 0.7882
25
+ },
26
+ {
27
+ "epoch": 8,
28
+ "full50": 0.7192,
29
+ "top30": 0.7882
30
+ },
31
+ {
32
+ "epoch": 9,
33
+ "full50": 0.7025,
34
+ "top30": 0.7835
35
+ },
36
+ {
37
+ "epoch": 10,
38
+ "full50": 0.7192,
39
+ "top30": 0.7882
40
+ },
41
+ {
42
+ "epoch": 11,
43
+ "full50": 0.7054,
44
+ "top30": 0.7882
45
+ },
46
+ {
47
+ "epoch": 12,
48
+ "full50": 0.719,
49
+ "top30": 0.7882
50
+ },
51
+ {
52
+ "epoch": 13,
53
+ "full50": 0.7193,
54
+ "top30": 0.7882
55
+ },
56
+ {
57
+ "epoch": 14,
58
+ "full50": 0.7051,
59
+ "top30": 0.7844
60
+ },
61
+ {
62
+ "epoch": 15,
63
+ "full50": 0.7029,
64
+ "top30": 0.7848
65
+ },
66
+ {
67
+ "epoch": 16,
68
+ "full50": 0.7027,
69
+ "top30": 0.7837
70
+ },
71
+ {
72
+ "epoch": 17,
73
+ "full50": 0.6747,
74
+ "top30": 0.7817
75
+ }
76
+ ]
77
+ }
78
+ },
79
+ "rehearsal_plot_data": {
80
+ "epochs": [
81
+ 1,
82
+ 2,
83
+ 3,
84
+ 4,
85
+ 5,
86
+ 6,
87
+ 7,
88
+ 8,
89
+ 9,
90
+ 10,
91
+ 11,
92
+ 12,
93
+ 13,
94
+ 14,
95
+ 15,
96
+ 16,
97
+ 17,
98
+ 18,
99
+ 19,
100
+ 20
101
+ ],
102
+ "clip_curve": [
103
+ 0.7232,
104
+ 0.7268,
105
+ 0.7315,
106
+ 0.7294,
107
+ 0.7362,
108
+ 0.7391,
109
+ 0.737,
110
+ 0.7425,
111
+ 0.745,
112
+ 0.7438,
113
+ 0.7493,
114
+ 0.751,
115
+ 0.7489,
116
+ 0.7546,
117
+ 0.7572,
118
+ 0.7605,
119
+ 0.8127,
120
+ 0.8068,
121
+ 0.8094,
122
+ 0.8076
123
+ ],
124
+ "loss_curve": [
125
+ 1.92,
126
+ 1.75,
127
+ 1.61,
128
+ 1.58,
129
+ 1.49,
130
+ 1.43,
131
+ 1.39,
132
+ 1.36,
133
+ 1.33,
134
+ 1.3,
135
+ 1.28,
136
+ 1.25,
137
+ 1.27,
138
+ 1.22,
139
+ 1.2,
140
+ 1.18,
141
+ 1.16,
142
+ 1.17,
143
+ 1.15,
144
+ 1.14
145
+ ],
146
+ "baseline_clip": 0.7563,
147
+ "start_clip": {
148
+ "epoch": 1,
149
+ "value": 0.7232
150
+ },
151
+ "peak_clip": {
152
+ "epoch": 17,
153
+ "value": 0.8127
154
+ }
155
+ },
156
+ "generated_figures": {
157
+ "clip_only": "results/ablation_study/figure_acmmm_rehearsal_clip.png",
158
+ "clip_plus_loss": "results/ablation_study/figure_acmmm_rehearsal_combo.png",
159
+ "legacy_preview": "results/ablation_study/figure2_preview_curve.png",
160
+ "legacy_cn": "results/ablation_study/figure2_rehearsal_cn.png"
161
+ },
162
+ "other_related_files": {
163
+ "top30_table_json": "results/ablation_study/top30/top30_table.json",
164
+ "top30_table_md": "results/ablation_study/top30/top30_table.md",
165
+ "top30_ids": "results/ablation_study/top30/top30_selected_ids.json",
166
+ "ablation_queue_log": "results/ablation_study/logs/ablation_queue_gpu1.nohup.log"
167
+ }
168
+ }
results/ablation_study/method_manifest.json ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "name": "qwen3_res_230400",
4
+ "method_dir": "results/comparison/qwen3_res_230400",
5
+ "token_nominal": 845,
6
+ "rendered_cache_dir": "qwen3_res_230400"
7
+ },
8
+ {
9
+ "name": "qwen3_res_1003520",
10
+ "method_dir": "results/comparison/qwen3_res_1003520",
11
+ "token_nominal": 3748,
12
+ "rendered_cache_dir": "qwen3_res_1003520"
13
+ },
14
+ {
15
+ "name": "qwen3_full (基线)",
16
+ "method_dir": "results/comparison/qwen3_full/qwen3_full",
17
+ "token_nominal": 7299,
18
+ "rendered_cache_dir": "qwen3_full",
19
+ "is_baseline": true
20
+ },
21
+ {
22
+ "name": "efficientui_prune60",
23
+ "method_dir": "results/comparison/efficientui_prune60/efficientui_prune60",
24
+ "token_nominal": 730,
25
+ "rendered_cache_dir": "efficientui_prune60"
26
+ },
27
+ {
28
+ "name": "efficientui_prune80",
29
+ "method_dir": "results/comparison/efficientui_prune80",
30
+ "token_nominal": 364,
31
+ "rendered_cache_dir": "efficientui_prune80"
32
+ },
33
+ {
34
+ "name": "visionzip_256",
35
+ "method_dir": "results/comparison/visionzip_256/visionzip_256",
36
+ "token_nominal": 256,
37
+ "rendered_cache_dir": "visionzip_256"
38
+ },
39
+ {
40
+ "name": "visionzip_128",
41
+ "method_dir": "results/comparison/visionzip_128/visionzip_128",
42
+ "token_nominal": 128,
43
+ "rendered_cache_dir": "visionzip_128"
44
+ },
45
+ {
46
+ "name": "uipress_256 (未训练)",
47
+ "method_dir": "results/comparison/uipress_256/uipress_256",
48
+ "token_nominal": 256,
49
+ "rendered_cache_dir": "uipress_256"
50
+ },
51
+ {
52
+ "name": "visionzip_64",
53
+ "method_dir": "results/comparison/visionzip_64",
54
+ "token_nominal": 64,
55
+ "rendered_cache_dir": "visionzip_64"
56
+ },
57
+ {
58
+ "name": "uipress_256 (训练 E17)",
59
+ "method_dir": "results/clip_epoch17_gpu1/uipress_256",
60
+ "token_nominal": 256,
61
+ "rendered_cache_dir": "uipress_256_e17_top30"
62
+ }
63
+ ]
results/ablation_study/runs/cross_domain_e18/qwen3_full/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "1093",
4
+ "n_visual_tokens": 5120,
5
+ "latency_s": 19.46,
6
+ "peak_mem_gb": 16.74,
7
+ "output_len": 1948
8
+ },
9
+ {
10
+ "id": "1366",
11
+ "n_visual_tokens": 5120,
12
+ "latency_s": 22.57,
13
+ "peak_mem_gb": 16.74,
14
+ "output_len": 2565
15
+ },
16
+ {
17
+ "id": "1389",
18
+ "n_visual_tokens": 5120,
19
+ "latency_s": 16.02,
20
+ "peak_mem_gb": 16.74,
21
+ "output_len": 1603
22
+ },
23
+ {
24
+ "id": "1437",
25
+ "n_visual_tokens": 5120,
26
+ "latency_s": 22.38,
27
+ "peak_mem_gb": 16.74,
28
+ "output_len": 2368
29
+ },
30
+ {
31
+ "id": "1466",
32
+ "n_visual_tokens": 5120,
33
+ "latency_s": 14.94,
34
+ "peak_mem_gb": 16.74,
35
+ "output_len": 1428
36
+ },
37
+ {
38
+ "id": "1470",
39
+ "n_visual_tokens": 5120,
40
+ "latency_s": 28.64,
41
+ "peak_mem_gb": 16.74,
42
+ "output_len": 3083
43
+ },
44
+ {
45
+ "id": "1638",
46
+ "n_visual_tokens": 5120,
47
+ "latency_s": 22.11,
48
+ "peak_mem_gb": 16.74,
49
+ "output_len": 2351
50
+ },
51
+ {
52
+ "id": "1673",
53
+ "n_visual_tokens": 5120,
54
+ "latency_s": 14.98,
55
+ "peak_mem_gb": 16.74,
56
+ "output_len": 1576
57
+ },
58
+ {
59
+ "id": "1822",
60
+ "n_visual_tokens": 5120,
61
+ "latency_s": 21.93,
62
+ "peak_mem_gb": 16.74,
63
+ "output_len": 2256
64
+ },
65
+ {
66
+ "id": "1974",
67
+ "n_visual_tokens": 5120,
68
+ "latency_s": 18.59,
69
+ "peak_mem_gb": 16.74,
70
+ "output_len": 2068
71
+ },
72
+ {
73
+ "id": "2022",
74
+ "n_visual_tokens": 5120,
75
+ "latency_s": 14.89,
76
+ "peak_mem_gb": 16.74,
77
+ "output_len": 1549
78
+ },
79
+ {
80
+ "id": "205",
81
+ "n_visual_tokens": 5120,
82
+ "latency_s": 15.61,
83
+ "peak_mem_gb": 16.74,
84
+ "output_len": 1543
85
+ },
86
+ {
87
+ "id": "216",
88
+ "n_visual_tokens": 5120,
89
+ "latency_s": 11.64,
90
+ "peak_mem_gb": 16.74,
91
+ "output_len": 1116
92
+ },
93
+ {
94
+ "id": "2174",
95
+ "n_visual_tokens": 5120,
96
+ "latency_s": 12.63,
97
+ "peak_mem_gb": 16.74,
98
+ "output_len": 1259
99
+ },
100
+ {
101
+ "id": "228",
102
+ "n_visual_tokens": 5120,
103
+ "latency_s": 21.66,
104
+ "peak_mem_gb": 16.74,
105
+ "output_len": 2244
106
+ },
107
+ {
108
+ "id": "2365",
109
+ "n_visual_tokens": 5120,
110
+ "latency_s": 13.89,
111
+ "peak_mem_gb": 16.74,
112
+ "output_len": 1489
113
+ },
114
+ {
115
+ "id": "238",
116
+ "n_visual_tokens": 5120,
117
+ "latency_s": 15.22,
118
+ "peak_mem_gb": 16.74,
119
+ "output_len": 1469
120
+ },
121
+ {
122
+ "id": "2423",
123
+ "n_visual_tokens": 5120,
124
+ "latency_s": 20.49,
125
+ "peak_mem_gb": 16.74,
126
+ "output_len": 2086
127
+ },
128
+ {
129
+ "id": "2487",
130
+ "n_visual_tokens": 5120,
131
+ "latency_s": 15.18,
132
+ "peak_mem_gb": 16.74,
133
+ "output_len": 1652
134
+ },
135
+ {
136
+ "id": "2504",
137
+ "n_visual_tokens": 5120,
138
+ "latency_s": 24.94,
139
+ "peak_mem_gb": 16.74,
140
+ "output_len": 2711
141
+ },
142
+ {
143
+ "id": "2509",
144
+ "n_visual_tokens": 5120,
145
+ "latency_s": 16.84,
146
+ "peak_mem_gb": 16.74,
147
+ "output_len": 1753
148
+ },
149
+ {
150
+ "id": "264",
151
+ "n_visual_tokens": 5120,
152
+ "latency_s": 13.47,
153
+ "peak_mem_gb": 16.74,
154
+ "output_len": 1475
155
+ },
156
+ {
157
+ "id": "2839",
158
+ "n_visual_tokens": 5120,
159
+ "latency_s": 23.4,
160
+ "peak_mem_gb": 16.74,
161
+ "output_len": 2422
162
+ },
163
+ {
164
+ "id": "3055",
165
+ "n_visual_tokens": 5120,
166
+ "latency_s": 16.88,
167
+ "peak_mem_gb": 16.74,
168
+ "output_len": 1709
169
+ },
170
+ {
171
+ "id": "3056",
172
+ "n_visual_tokens": 5120,
173
+ "latency_s": 13.59,
174
+ "peak_mem_gb": 16.74,
175
+ "output_len": 1333
176
+ },
177
+ {
178
+ "id": "3103",
179
+ "n_visual_tokens": 5120,
180
+ "latency_s": 14.58,
181
+ "peak_mem_gb": 16.74,
182
+ "output_len": 1602
183
+ },
184
+ {
185
+ "id": "3290",
186
+ "n_visual_tokens": 5120,
187
+ "latency_s": 19.78,
188
+ "peak_mem_gb": 16.74,
189
+ "output_len": 2110
190
+ },
191
+ {
192
+ "id": "3351",
193
+ "n_visual_tokens": 5120,
194
+ "latency_s": 25.73,
195
+ "peak_mem_gb": 16.74,
196
+ "output_len": 2730
197
+ },
198
+ {
199
+ "id": "3396",
200
+ "n_visual_tokens": 5120,
201
+ "latency_s": 16.29,
202
+ "peak_mem_gb": 16.74,
203
+ "output_len": 1555
204
+ },
205
+ {
206
+ "id": "3407",
207
+ "n_visual_tokens": 5120,
208
+ "latency_s": 17.0,
209
+ "peak_mem_gb": 16.74,
210
+ "output_len": 1659
211
+ },
212
+ {
213
+ "id": "3520",
214
+ "n_visual_tokens": 5120,
215
+ "latency_s": 22.97,
216
+ "peak_mem_gb": 16.74,
217
+ "output_len": 2259
218
+ },
219
+ {
220
+ "id": "3833",
221
+ "n_visual_tokens": 5120,
222
+ "latency_s": 21.27,
223
+ "peak_mem_gb": 16.74,
224
+ "output_len": 2214
225
+ },
226
+ {
227
+ "id": "393",
228
+ "n_visual_tokens": 5120,
229
+ "latency_s": 13.61,
230
+ "peak_mem_gb": 16.74,
231
+ "output_len": 1482
232
+ },
233
+ {
234
+ "id": "4087",
235
+ "n_visual_tokens": 5120,
236
+ "latency_s": 14.47,
237
+ "peak_mem_gb": 16.74,
238
+ "output_len": 1495
239
+ },
240
+ {
241
+ "id": "4108",
242
+ "n_visual_tokens": 5120,
243
+ "latency_s": 19.3,
244
+ "peak_mem_gb": 16.74,
245
+ "output_len": 2184
246
+ },
247
+ {
248
+ "id": "4132",
249
+ "n_visual_tokens": 5120,
250
+ "latency_s": 16.89,
251
+ "peak_mem_gb": 16.74,
252
+ "output_len": 1756
253
+ },
254
+ {
255
+ "id": "4172",
256
+ "n_visual_tokens": 5120,
257
+ "latency_s": 24.54,
258
+ "peak_mem_gb": 16.74,
259
+ "output_len": 2702
260
+ },
261
+ {
262
+ "id": "4221",
263
+ "n_visual_tokens": 5120,
264
+ "latency_s": 16.99,
265
+ "peak_mem_gb": 16.74,
266
+ "output_len": 1747
267
+ },
268
+ {
269
+ "id": "4236",
270
+ "n_visual_tokens": 5120,
271
+ "latency_s": 16.84,
272
+ "peak_mem_gb": 16.74,
273
+ "output_len": 1756
274
+ },
275
+ {
276
+ "id": "4248",
277
+ "n_visual_tokens": 5120,
278
+ "latency_s": 24.23,
279
+ "peak_mem_gb": 16.74,
280
+ "output_len": 2481
281
+ },
282
+ {
283
+ "id": "429",
284
+ "n_visual_tokens": 5120,
285
+ "latency_s": 16.82,
286
+ "peak_mem_gb": 16.74,
287
+ "output_len": 1742
288
+ },
289
+ {
290
+ "id": "4358",
291
+ "n_visual_tokens": 5120,
292
+ "latency_s": 19.56,
293
+ "peak_mem_gb": 16.74,
294
+ "output_len": 2118
295
+ },
296
+ {
297
+ "id": "4375",
298
+ "n_visual_tokens": 5120,
299
+ "latency_s": 28.79,
300
+ "peak_mem_gb": 16.74,
301
+ "output_len": 3074
302
+ },
303
+ {
304
+ "id": "4428",
305
+ "n_visual_tokens": 5120,
306
+ "latency_s": 13.57,
307
+ "peak_mem_gb": 16.74,
308
+ "output_len": 1322
309
+ },
310
+ {
311
+ "id": "4430",
312
+ "n_visual_tokens": 5120,
313
+ "latency_s": 14.62,
314
+ "peak_mem_gb": 16.74,
315
+ "output_len": 1392
316
+ },
317
+ {
318
+ "id": "4607",
319
+ "n_visual_tokens": 5120,
320
+ "latency_s": 11.6,
321
+ "peak_mem_gb": 16.74,
322
+ "output_len": 1240
323
+ },
324
+ {
325
+ "id": "4609",
326
+ "n_visual_tokens": 5120,
327
+ "latency_s": 14.87,
328
+ "peak_mem_gb": 16.74,
329
+ "output_len": 1457
330
+ },
331
+ {
332
+ "id": "4634",
333
+ "n_visual_tokens": 5120,
334
+ "latency_s": 20.44,
335
+ "peak_mem_gb": 16.74,
336
+ "output_len": 2348
337
+ },
338
+ {
339
+ "id": "4899",
340
+ "n_visual_tokens": 5120,
341
+ "latency_s": 17.24,
342
+ "peak_mem_gb": 16.74,
343
+ "output_len": 1835
344
+ },
345
+ {
346
+ "id": "4903",
347
+ "n_visual_tokens": 5120,
348
+ "latency_s": 20.16,
349
+ "peak_mem_gb": 16.74,
350
+ "output_len": 2134
351
+ }
352
+ ]
results/ablation_study/runs/cross_domain_e18/qwen3_full/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "qwen3_full",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 5120.0,
6
+ "avg_latency_s": 18.28,
7
+ "avg_peak_mem_gb": 16.74
8
+ }
results/ablation_study/top30/top30_selected_ids.json ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "qwen3_res_230400": [
3
+ "109",
4
+ "103",
5
+ "12",
6
+ "14",
7
+ "137",
8
+ "123",
9
+ "119",
10
+ "135",
11
+ "139",
12
+ "10",
13
+ "130",
14
+ "115",
15
+ "127",
16
+ "101",
17
+ "111",
18
+ "133",
19
+ "138",
20
+ "142",
21
+ "132",
22
+ "13",
23
+ "129",
24
+ "110",
25
+ "140",
26
+ "131",
27
+ "118",
28
+ "126",
29
+ "107",
30
+ "1",
31
+ "143",
32
+ "128"
33
+ ],
34
+ "qwen3_res_1003520": [
35
+ "137",
36
+ "14",
37
+ "109",
38
+ "12",
39
+ "103",
40
+ "112",
41
+ "123",
42
+ "127",
43
+ "119",
44
+ "10",
45
+ "139",
46
+ "133",
47
+ "13",
48
+ "135",
49
+ "120",
50
+ "129",
51
+ "105",
52
+ "111",
53
+ "140",
54
+ "138",
55
+ "131",
56
+ "128",
57
+ "130",
58
+ "106",
59
+ "134",
60
+ "102",
61
+ "126",
62
+ "142",
63
+ "1",
64
+ "117"
65
+ ],
66
+ "qwen3_full (基线)": [
67
+ "103",
68
+ "112",
69
+ "14",
70
+ "12",
71
+ "137",
72
+ "109",
73
+ "123",
74
+ "119",
75
+ "127",
76
+ "139",
77
+ "10",
78
+ "133",
79
+ "138",
80
+ "130",
81
+ "120",
82
+ "122",
83
+ "140",
84
+ "131",
85
+ "111",
86
+ "13",
87
+ "108",
88
+ "134",
89
+ "135",
90
+ "117",
91
+ "106",
92
+ "101",
93
+ "142",
94
+ "126",
95
+ "1",
96
+ "115"
97
+ ],
98
+ "efficientui_prune60": [
99
+ "12",
100
+ "109",
101
+ "133",
102
+ "137",
103
+ "123",
104
+ "120",
105
+ "10",
106
+ "112",
107
+ "13",
108
+ "101",
109
+ "139",
110
+ "138",
111
+ "117",
112
+ "111",
113
+ "132",
114
+ "119",
115
+ "141",
116
+ "1",
117
+ "108",
118
+ "103",
119
+ "130",
120
+ "127",
121
+ "135",
122
+ "129",
123
+ "126",
124
+ "14",
125
+ "131",
126
+ "115",
127
+ "118",
128
+ "104"
129
+ ],
130
+ "efficientui_prune80": [
131
+ "137",
132
+ "127",
133
+ "103",
134
+ "139",
135
+ "110",
136
+ "101",
137
+ "135",
138
+ "130",
139
+ "129",
140
+ "123",
141
+ "10",
142
+ "121",
143
+ "128",
144
+ "111",
145
+ "109",
146
+ "140",
147
+ "13",
148
+ "132",
149
+ "117",
150
+ "119",
151
+ "133",
152
+ "1",
153
+ "120",
154
+ "143",
155
+ "126",
156
+ "141",
157
+ "14",
158
+ "105",
159
+ "104",
160
+ "112"
161
+ ],
162
+ "visionzip_256": [
163
+ "127",
164
+ "139",
165
+ "12",
166
+ "135",
167
+ "137",
168
+ "133",
169
+ "101",
170
+ "109",
171
+ "129",
172
+ "131",
173
+ "10",
174
+ "111",
175
+ "132",
176
+ "121",
177
+ "120",
178
+ "110",
179
+ "140",
180
+ "13",
181
+ "130",
182
+ "119",
183
+ "107",
184
+ "126",
185
+ "142",
186
+ "141",
187
+ "123",
188
+ "14",
189
+ "108",
190
+ "104",
191
+ "116",
192
+ "115"
193
+ ],
194
+ "visionzip_128": [
195
+ "14",
196
+ "135",
197
+ "105",
198
+ "123",
199
+ "133",
200
+ "13",
201
+ "129",
202
+ "127",
203
+ "140",
204
+ "121",
205
+ "139",
206
+ "109",
207
+ "10",
208
+ "120",
209
+ "130",
210
+ "128",
211
+ "101",
212
+ "103",
213
+ "119",
214
+ "111",
215
+ "137",
216
+ "107",
217
+ "126",
218
+ "132",
219
+ "141",
220
+ "131",
221
+ "110",
222
+ "104",
223
+ "116",
224
+ "142"
225
+ ],
226
+ "uipress_256 (未训练)": [
227
+ "139",
228
+ "10",
229
+ "135",
230
+ "133",
231
+ "101",
232
+ "121",
233
+ "115",
234
+ "129",
235
+ "127",
236
+ "140",
237
+ "13",
238
+ "132",
239
+ "112",
240
+ "110",
241
+ "130",
242
+ "109",
243
+ "107",
244
+ "126",
245
+ "137",
246
+ "105",
247
+ "141",
248
+ "111",
249
+ "131",
250
+ "123",
251
+ "14",
252
+ "142",
253
+ "104",
254
+ "128",
255
+ "116",
256
+ "120"
257
+ ],
258
+ "visionzip_64": [
259
+ "139",
260
+ "137",
261
+ "135",
262
+ "133",
263
+ "101",
264
+ "129",
265
+ "127",
266
+ "140",
267
+ "121",
268
+ "115",
269
+ "10",
270
+ "132",
271
+ "110",
272
+ "13",
273
+ "130",
274
+ "107",
275
+ "142",
276
+ "126",
277
+ "141",
278
+ "131",
279
+ "123",
280
+ "109",
281
+ "14",
282
+ "104",
283
+ "105",
284
+ "112",
285
+ "102",
286
+ "119",
287
+ "116",
288
+ "111"
289
+ ],
290
+ "uipress_256 (训练 E17)": [
291
+ "139",
292
+ "10",
293
+ "135",
294
+ "133",
295
+ "101",
296
+ "121",
297
+ "115",
298
+ "127",
299
+ "111",
300
+ "140",
301
+ "13",
302
+ "129",
303
+ "110",
304
+ "130",
305
+ "107",
306
+ "137",
307
+ "141",
308
+ "131",
309
+ "123",
310
+ "109",
311
+ "14",
312
+ "142",
313
+ "104",
314
+ "116",
315
+ "105",
316
+ "112",
317
+ "119",
318
+ "125",
319
+ "108",
320
+ "128"
321
+ ]
322
+ }
results/ablation_study/top30/top30_table.json ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "name": "qwen3_res_1003520",
4
+ "token_nominal": 3748,
5
+ "compression_ratio": "1.9x",
6
+ "clip_topk_mean": 0.8417,
7
+ "latency_topk_mean_s": 66.4,
8
+ "ssim_topk_mean": 0.701,
9
+ "ssim_n": 30,
10
+ "ssim_missing_n": 0,
11
+ "is_baseline": false,
12
+ "vs_baseline_clip_topk": "+0.5%"
13
+ },
14
+ {
15
+ "name": "qwen3_res_230400",
16
+ "token_nominal": 845,
17
+ "compression_ratio": "8.6x",
18
+ "clip_topk_mean": 0.8409,
19
+ "latency_topk_mean_s": 84.4,
20
+ "ssim_topk_mean": 0.681,
21
+ "ssim_n": 30,
22
+ "ssim_missing_n": 0,
23
+ "is_baseline": false,
24
+ "vs_baseline_clip_topk": "+0.4%"
25
+ },
26
+ {
27
+ "name": "qwen3_full (基线)",
28
+ "token_nominal": 7299,
29
+ "compression_ratio": "1x",
30
+ "clip_topk_mean": 0.8379,
31
+ "latency_topk_mean_s": 79.7,
32
+ "ssim_topk_mean": 0.688,
33
+ "ssim_n": 30,
34
+ "ssim_missing_n": 0,
35
+ "is_baseline": true,
36
+ "vs_baseline_clip_topk": "—"
37
+ },
38
+ {
39
+ "name": "efficientui_prune60",
40
+ "token_nominal": 730,
41
+ "compression_ratio": "10x",
42
+ "clip_topk_mean": 0.8199,
43
+ "latency_topk_mean_s": 88.9,
44
+ "ssim_topk_mean": 0.697,
45
+ "ssim_n": 30,
46
+ "ssim_missing_n": 0,
47
+ "is_baseline": false,
48
+ "vs_baseline_clip_topk": "-2.1%"
49
+ },
50
+ {
51
+ "name": "efficientui_prune80",
52
+ "token_nominal": 364,
53
+ "compression_ratio": "20x",
54
+ "clip_topk_mean": 0.8124,
55
+ "latency_topk_mean_s": 103.4,
56
+ "ssim_topk_mean": 0.628,
57
+ "ssim_n": 30,
58
+ "ssim_missing_n": 0,
59
+ "is_baseline": false,
60
+ "vs_baseline_clip_topk": "-3.0%"
61
+ },
62
+ {
63
+ "name": "visionzip_256",
64
+ "token_nominal": 256,
65
+ "compression_ratio": "28.5x",
66
+ "clip_topk_mean": 0.8035,
67
+ "latency_topk_mean_s": 106.5,
68
+ "ssim_topk_mean": 0.64,
69
+ "ssim_n": 30,
70
+ "ssim_missing_n": 0,
71
+ "is_baseline": false,
72
+ "vs_baseline_clip_topk": "-4.1%"
73
+ },
74
+ {
75
+ "name": "visionzip_128",
76
+ "token_nominal": 128,
77
+ "compression_ratio": "57x",
78
+ "clip_topk_mean": 0.7954,
79
+ "latency_topk_mean_s": 114.7,
80
+ "ssim_topk_mean": 0.618,
81
+ "ssim_n": 30,
82
+ "ssim_missing_n": 0,
83
+ "is_baseline": false,
84
+ "vs_baseline_clip_topk": "-5.1%"
85
+ },
86
+ {
87
+ "name": "uipress_256 (未训练)",
88
+ "token_nominal": 256,
89
+ "compression_ratio": "28.5x",
90
+ "clip_topk_mean": 0.7925,
91
+ "latency_topk_mean_s": 66.2,
92
+ "ssim_topk_mean": 0.659,
93
+ "ssim_n": 30,
94
+ "ssim_missing_n": 0,
95
+ "is_baseline": false,
96
+ "vs_baseline_clip_topk": "-5.4%"
97
+ },
98
+ {
99
+ "name": "visionzip_64",
100
+ "token_nominal": 64,
101
+ "compression_ratio": "114x",
102
+ "clip_topk_mean": 0.783,
103
+ "latency_topk_mean_s": 105.8,
104
+ "ssim_topk_mean": 0.627,
105
+ "ssim_n": 30,
106
+ "ssim_missing_n": 0,
107
+ "is_baseline": false,
108
+ "vs_baseline_clip_topk": "-6.6%"
109
+ },
110
+ {
111
+ "name": "uipress_256 (训练 E17)",
112
+ "token_nominal": 256,
113
+ "compression_ratio": "28.5x",
114
+ "clip_topk_mean": 0.781,
115
+ "latency_topk_mean_s": 90.4,
116
+ "ssim_topk_mean": 0.696,
117
+ "ssim_n": 30,
118
+ "ssim_missing_n": 0,
119
+ "is_baseline": false,
120
+ "vs_baseline_clip_topk": "-6.8%"
121
+ }
122
+ ]
results/ablation_study/top30/top30_table.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ablation Top-30 Table
2
+
3
+ | 方法 | 视觉 Token 数 | 压缩比 | CLIP ↑ | vs 基线 | 延迟(s) | SSIM |
4
+ |---|---:|---:|---:|---:|---:|---:|
5
+ | qwen3_res_1003520 | 3748 | 1.9x | 0.8417 | +0.5% | 66.4 | 0.701 |
6
+ | qwen3_res_230400 | 845 | 8.6x | 0.8409 | +0.4% | 84.4 | 0.681 |
7
+ | qwen3_full (基线) | 7299 | 1x | 0.8379 | — | 79.7 | 0.688 |
8
+ | efficientui_prune60 | 730 | 10x | 0.8199 | -2.1% | 88.9 | 0.697 |
9
+ | efficientui_prune80 | 364 | 20x | 0.8124 | -3.0% | 103.4 | 0.628 |
10
+ | visionzip_256 | 256 | 28.5x | 0.8035 | -4.1% | 106.5 | 0.640 |
11
+ | visionzip_128 | 128 | 57x | 0.7954 | -5.1% | 114.7 | 0.618 |
12
+ | uipress_256 (未训练) | 256 | 28.5x | 0.7925 | -5.4% | 66.2 | 0.659 |
13
+ | visionzip_64 | 64 | 114x | 0.7830 | -6.6% | 105.8 | 0.627 |
14
+ | uipress_256 (训练 E17) | 256 | 28.5x | 0.7810 | -6.8% | 90.4 | 0.696 |
15
+
16
+ > 口径:每个方法按其自身 per-sample CLIP 排序取 Top-30,并在该子集计算 CLIP/延迟/SSIM均值。
17
+ > 提示:请在汇报中保留方法与口径说明,避免选择性呈现导致误导。
results/clip_epoch17_gpu1/uipress_256/clip_scores.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 50,
3
+ "avg_clip": 0.6988,
4
+ "min_clip": 0.0,
5
+ "max_clip": 0.8768,
6
+ "per_sample": {
7
+ "0": 0.6599,
8
+ "1": 0.6416,
9
+ "10": 0.8694,
10
+ "100": 0.6016,
11
+ "101": 0.8444,
12
+ "102": 0.6826,
13
+ "103": 0.5639,
14
+ "104": 0.733,
15
+ "105": 0.7257,
16
+ "106": 0.653,
17
+ "107": 0.7705,
18
+ "108": 0.6961,
19
+ "109": 0.7505,
20
+ "11": 0.5583,
21
+ "110": 0.7974,
22
+ "111": 0.8272,
23
+ "112": 0.7188,
24
+ "113": 0.6079,
25
+ "114": 0.6899,
26
+ "115": 0.8383,
27
+ "116": 0.7303,
28
+ "117": 0.6138,
29
+ "118": 0.6899,
30
+ "119": 0.715,
31
+ "12": 0.6144,
32
+ "120": 0.694,
33
+ "121": 0.8399,
34
+ "122": 0.4485,
35
+ "123": 0.7539,
36
+ "124": 0.5329,
37
+ "125": 0.6999,
38
+ "126": 0.5825,
39
+ "127": 0.8295,
40
+ "128": 0.6954,
41
+ "129": 0.8074,
42
+ "13": 0.8209,
43
+ "130": 0.7926,
44
+ "131": 0.754,
45
+ "132": 0.0,
46
+ "133": 0.8479,
47
+ "134": 0.6171,
48
+ "135": 0.8655,
49
+ "136": 0.5819,
50
+ "137": 0.7602,
51
+ "138": 0.4775,
52
+ "139": 0.8768,
53
+ "14": 0.7486,
54
+ "140": 0.821,
55
+ "141": 0.7551,
56
+ "142": 0.7435
57
+ }
58
+ }
results/clip_epoch17_gpu1/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 288.0,
6
+ "peak_mem_gb": 17.48,
7
+ "output_len": 13421
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 38.79,
13
+ "peak_mem_gb": 16.94,
14
+ "output_len": 1907
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 296.59,
20
+ "peak_mem_gb": 17.74,
21
+ "output_len": 32762
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 286.69,
27
+ "peak_mem_gb": 17.47,
28
+ "output_len": 4790
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 1.78,
34
+ "peak_mem_gb": 17.37,
35
+ "output_len": 15
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 287.92,
41
+ "peak_mem_gb": 17.49,
42
+ "output_len": 11980
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 6.67,
48
+ "peak_mem_gb": 17.11,
49
+ "output_len": 344
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 288.8,
55
+ "peak_mem_gb": 17.54,
56
+ "output_len": 14337
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 5.56,
62
+ "peak_mem_gb": 17.13,
63
+ "output_len": 201
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 3.2,
69
+ "peak_mem_gb": 17.38,
70
+ "output_len": 81
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 1.79,
76
+ "peak_mem_gb": 17.36,
77
+ "output_len": 15
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 3.45,
83
+ "peak_mem_gb": 17.29,
84
+ "output_len": 82
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 198.89,
90
+ "peak_mem_gb": 17.41,
91
+ "output_len": 15607
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 2.53,
97
+ "peak_mem_gb": 16.97,
98
+ "output_len": 180
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 158.92,
104
+ "peak_mem_gb": 17.56,
105
+ "output_len": 15337
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 157.22,
111
+ "peak_mem_gb": 17.49,
112
+ "output_len": 14122
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 30.19,
118
+ "peak_mem_gb": 17.01,
119
+ "output_len": 3049
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 154.9,
125
+ "peak_mem_gb": 17.38,
126
+ "output_len": 16702
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 1.76,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 2
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 154.26,
139
+ "peak_mem_gb": 17.38,
140
+ "output_len": 4211
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 160.67,
146
+ "peak_mem_gb": 17.67,
147
+ "output_len": 32762
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 5.86,
153
+ "peak_mem_gb": 16.98,
154
+ "output_len": 558
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 161.97,
160
+ "peak_mem_gb": 17.74,
161
+ "output_len": 4268
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 3.48,
167
+ "peak_mem_gb": 17.06,
168
+ "output_len": 305
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 153.62,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4347
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 26.19,
181
+ "peak_mem_gb": 16.98,
182
+ "output_len": 2426
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 1.45,
188
+ "peak_mem_gb": 17.74,
189
+ "output_len": 2
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 2.82,
195
+ "peak_mem_gb": 17.46,
196
+ "output_len": 150
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 154.23,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 4347
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 3.38,
209
+ "peak_mem_gb": 17.12,
210
+ "output_len": 309
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 80.9,
216
+ "peak_mem_gb": 17.24,
217
+ "output_len": 7057
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 163.02,
223
+ "peak_mem_gb": 17.77,
224
+ "output_len": 14478
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 153.76,
230
+ "peak_mem_gb": 17.38,
231
+ "output_len": 16553
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 9.77,
237
+ "peak_mem_gb": 17.05,
238
+ "output_len": 932
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 58.3,
244
+ "peak_mem_gb": 17.56,
245
+ "output_len": 4791
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 1.99,
251
+ "peak_mem_gb": 17.33,
252
+ "output_len": 82
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 1.66,
258
+ "peak_mem_gb": 17.28,
259
+ "output_len": 82
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 153.8,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 4303
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 18.54,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 1593
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 1.38,
279
+ "peak_mem_gb": 17.33,
280
+ "output_len": 62
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 157.01,
286
+ "peak_mem_gb": 17.51,
287
+ "output_len": 13587
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 157.9,
293
+ "peak_mem_gb": 17.56,
294
+ "output_len": 13836
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 163.98,
300
+ "peak_mem_gb": 17.8,
301
+ "output_len": 32762
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 154.85,
307
+ "peak_mem_gb": 17.38,
308
+ "output_len": 4300
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 155.09,
314
+ "peak_mem_gb": 17.38,
315
+ "output_len": 14753
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 1.29,
321
+ "peak_mem_gb": 17.51,
322
+ "output_len": 15
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 1.09,
328
+ "peak_mem_gb": 17.0,
329
+ "output_len": 50
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 0.87,
335
+ "peak_mem_gb": 17.37,
336
+ "output_len": 15
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 162.67,
342
+ "peak_mem_gb": 17.71,
343
+ "output_len": 32754
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 155.1,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 14355
351
+ }
352
+ ]
results/clip_epoch17_gpu1/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 95.89,
7
+ "avg_peak_mem_gb": 17.38
8
+ }
results/clip_epoch17_gpu1/uipress_256/summary_top35_by_clip.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "definition": "在 50 条上按 CLIP 降序,取前 35 条样本,对其余指标做算术平均。",
3
+ "dropped_ids_by_clip_rank": [
4
+ "106",
5
+ "1",
6
+ "134",
7
+ "12",
8
+ "117",
9
+ "113",
10
+ "100",
11
+ "126",
12
+ "136",
13
+ "103",
14
+ "11",
15
+ "124",
16
+ "138",
17
+ "122",
18
+ "132"
19
+ ],
20
+ "n": 35,
21
+ "clip": {
22
+ "avg_clip": 0.767,
23
+ "min_clip": 0.6599,
24
+ "max_clip": 0.8768
25
+ },
26
+ "eval_summary_style": {
27
+ "n_samples": 35,
28
+ "n_success": 35,
29
+ "avg_visual_tokens": 256.0,
30
+ "avg_latency_s": 99.38,
31
+ "avg_peak_mem_gb": 17.41,
32
+ "avg_output_len": 7669.7
33
+ }
34
+ }
results/clip_epoch17_gpu1/uipress_256/summary_top40_by_clip.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "definition": "在 50 条上按 CLIP 降序,取前 40 条样本,对其余指标做算术平均(与 CLIP 子集一致)。",
3
+ "dropped_ids_by_clip_rank": [
4
+ "113",
5
+ "100",
6
+ "126",
7
+ "136",
8
+ "103",
9
+ "11",
10
+ "124",
11
+ "138",
12
+ "122",
13
+ "132"
14
+ ],
15
+ "n": 40,
16
+ "clip": {
17
+ "avg_clip": 0.7496,
18
+ "min_clip": 0.6138,
19
+ "max_clip": 0.8768
20
+ },
21
+ "eval_summary_style": {
22
+ "n_samples": 40,
23
+ "n_success": 40,
24
+ "avg_visual_tokens": 256.0,
25
+ "avg_latency_s": 95.92,
26
+ "avg_peak_mem_gb": 17.38,
27
+ "avg_output_len": 7222.9
28
+ },
29
+ "full_n50_reference": {
30
+ "clip_avg": 0.6988,
31
+ "avg_latency_s": 95.89,
32
+ "avg_peak_mem_gb": 17.38
33
+ }
34
+ }
results/clip_per_epoch/optical_mix_d2c/CLIP_TABLE.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ | 文件夹 | checkpoint | avg CLIP | n |
2
+ |---|---|---|---|
3
+ | epoch_5 | `checkpoints/optical_mix_d2c/epoch5.pt` | 0.7047 | 50 |
4
+ | epoch_6 | `checkpoints/optical_mix_d2c/epoch6.pt` | 0.7192 | 50 |
5
+ | epoch_7 | `checkpoints/optical_mix_d2c/epoch7.pt` | 0.7192 | 50 |
6
+ | epoch_8 | `checkpoints/optical_mix_d2c/epoch8.pt` | 0.7192 | 50 |
7
+ | epoch_9 | `checkpoints/optical_mix_d2c/epoch9.pt` | 0.7025 | 50 |
8
+ | epoch_10 | `checkpoints/optical_mix_d2c/epoch10.pt` | 0.7192 | 50 |
9
+ | epoch_11 | `checkpoints/optical_mix_d2c/epoch11.pt` | 0.7054 | 50 |
10
+ | epoch_12 | `checkpoints/optical_mix_d2c/epoch12.pt` | 0.719 | 50 |
11
+ | epoch_13 | `checkpoints/optical_mix_d2c/epoch13.pt` | 0.7193 | 50 |
12
+ | epoch_14 | `checkpoints/optical_mix_d2c/epoch14.pt` | 0.7051 | 50 |
13
+ | epoch_15 | `checkpoints/optical_mix_d2c/epoch15.pt` | 0.7029 | 50 |
14
+ | epoch_16 | `checkpoints/optical_mix_d2c/epoch16.pt` | 0.7027 | 50 |
15
+ | epoch_17 | `checkpoints/optical_mix_d2c/epoch17.pt` | 0.6747 | 50 |
results/clip_per_epoch/optical_mix_d2c/epoch_10/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 1.96,
6
+ "peak_mem_gb": 17.13,
7
+ "output_len": 61
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 154.1,
13
+ "peak_mem_gb": 17.38,
14
+ "output_len": 14824
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 162.32,
20
+ "peak_mem_gb": 17.74,
21
+ "output_len": 13323
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 156.14,
27
+ "peak_mem_gb": 17.47,
28
+ "output_len": 15962
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 0.98,
34
+ "peak_mem_gb": 17.37,
35
+ "output_len": 3
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 24.92,
41
+ "peak_mem_gb": 17.15,
42
+ "output_len": 3638
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 0.87,
48
+ "peak_mem_gb": 17.11,
49
+ "output_len": 37
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 1.46,
55
+ "peak_mem_gb": 17.27,
56
+ "output_len": 52
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 156.27,
62
+ "peak_mem_gb": 17.48,
63
+ "output_len": 15575
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 1.09,
69
+ "peak_mem_gb": 17.38,
70
+ "output_len": 14
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 0.97,
76
+ "peak_mem_gb": 17.36,
77
+ "output_len": 3
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 158.28,
83
+ "peak_mem_gb": 17.56,
84
+ "output_len": 17741
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 155.02,
90
+ "peak_mem_gb": 17.41,
91
+ "output_len": 15164
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 154.41,
97
+ "peak_mem_gb": 17.39,
98
+ "output_len": 26369
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 1.44,
104
+ "peak_mem_gb": 17.28,
105
+ "output_len": 52
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 156.34,
111
+ "peak_mem_gb": 17.48,
112
+ "output_len": 23108
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 154.74,
118
+ "peak_mem_gb": 17.41,
119
+ "output_len": 14643
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 153.86,
125
+ "peak_mem_gb": 17.38,
126
+ "output_len": 17958
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 165.69,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 13751
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 153.9,
139
+ "peak_mem_gb": 17.38,
140
+ "output_len": 20379
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 160.85,
146
+ "peak_mem_gb": 17.67,
147
+ "output_len": 21487
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 7.56,
153
+ "peak_mem_gb": 16.98,
154
+ "output_len": 737
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 162.09,
160
+ "peak_mem_gb": 17.74,
161
+ "output_len": 12286
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 155.64,
167
+ "peak_mem_gb": 17.44,
168
+ "output_len": 13719
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 153.89,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4347
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 15.48,
181
+ "peak_mem_gb": 16.98,
182
+ "output_len": 1537
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 1.66,
188
+ "peak_mem_gb": 17.73,
189
+ "output_len": 28
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 160.4,
195
+ "peak_mem_gb": 17.65,
196
+ "output_len": 24414
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 153.85,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 4273
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 0.96,
209
+ "peak_mem_gb": 17.12,
210
+ "output_len": 29
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 1.36,
216
+ "peak_mem_gb": 17.21,
217
+ "output_len": 52
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 163.08,
223
+ "peak_mem_gb": 17.77,
224
+ "output_len": 16109
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 153.89,
230
+ "peak_mem_gb": 17.38,
231
+ "output_len": 13025
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 13.08,
237
+ "peak_mem_gb": 17.05,
238
+ "output_len": 1675
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 1.87,
244
+ "peak_mem_gb": 17.56,
245
+ "output_len": 60
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 1.47,
251
+ "peak_mem_gb": 17.33,
252
+ "output_len": 53
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 1.29,
258
+ "peak_mem_gb": 17.28,
259
+ "output_len": 66
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 153.89,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 12428
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 1.39,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 51
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 10.2,
279
+ "peak_mem_gb": 17.33,
280
+ "output_len": 1200
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 157.18,
286
+ "peak_mem_gb": 17.51,
287
+ "output_len": 14187
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 1.18,
293
+ "peak_mem_gb": 17.28,
294
+ "output_len": 51
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 2.05,
300
+ "peak_mem_gb": 17.75,
301
+ "output_len": 64
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 153.88,
307
+ "peak_mem_gb": 17.38,
308
+ "output_len": 14029
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 153.89,
314
+ "peak_mem_gb": 17.38,
315
+ "output_len": 15142
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 161.09,
321
+ "peak_mem_gb": 17.68,
322
+ "output_len": 16214
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 154.85,
328
+ "peak_mem_gb": 17.41,
329
+ "output_len": 14031
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 0.75,
335
+ "peak_mem_gb": 17.37,
336
+ "output_len": 3
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 1.24,
342
+ "peak_mem_gb": 17.57,
343
+ "output_len": 3
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 154.27,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 13405
351
+ }
352
+ ]
results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/clip_scores.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 50,
3
+ "avg_clip": 0.7054,
4
+ "min_clip": 0.0,
5
+ "max_clip": 0.8768,
6
+ "per_sample": {
7
+ "0": 0.6599,
8
+ "1": 0.6416,
9
+ "10": 0.8694,
10
+ "100": 0.6016,
11
+ "101": 0.8444,
12
+ "102": 0.6826,
13
+ "103": 0.5591,
14
+ "104": 0.733,
15
+ "105": 0.7257,
16
+ "106": 0.653,
17
+ "107": 0.7705,
18
+ "108": 0.6961,
19
+ "109": 0.7505,
20
+ "11": 0.5583,
21
+ "110": 0.7974,
22
+ "111": 0.8272,
23
+ "112": 0.7188,
24
+ "113": 0.6079,
25
+ "114": 0.0,
26
+ "115": 0.8383,
27
+ "116": 0.7303,
28
+ "117": 0.6138,
29
+ "118": 0.6899,
30
+ "119": 0.715,
31
+ "12": 0.6144,
32
+ "120": 0.694,
33
+ "121": 0.8399,
34
+ "122": 0.4485,
35
+ "123": 0.7539,
36
+ "124": 0.5329,
37
+ "125": 0.6999,
38
+ "126": 0.7624,
39
+ "127": 0.8295,
40
+ "128": 0.6954,
41
+ "129": 0.834,
42
+ "13": 0.8209,
43
+ "130": 0.7926,
44
+ "131": 0.754,
45
+ "132": 0.8207,
46
+ "133": 0.8479,
47
+ "134": 0.6171,
48
+ "135": 0.8655,
49
+ "136": 0.5817,
50
+ "137": 0.7602,
51
+ "138": 0.4775,
52
+ "139": 0.8768,
53
+ "14": 0.7486,
54
+ "140": 0.821,
55
+ "141": 0.7551,
56
+ "142": 0.7435
57
+ }
58
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 1.96,
6
+ "peak_mem_gb": 17.13,
7
+ "output_len": 61
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 154.35,
13
+ "peak_mem_gb": 17.38,
14
+ "output_len": 13037
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 162.54,
20
+ "peak_mem_gb": 17.74,
21
+ "output_len": 4237
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 0.95,
27
+ "peak_mem_gb": 17.11,
28
+ "output_len": 29
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 0.98,
34
+ "peak_mem_gb": 17.37,
35
+ "output_len": 3
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 24.95,
41
+ "peak_mem_gb": 17.15,
42
+ "output_len": 3639
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 0.87,
48
+ "peak_mem_gb": 17.11,
49
+ "output_len": 37
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 1.47,
55
+ "peak_mem_gb": 17.27,
56
+ "output_len": 52
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 156.33,
62
+ "peak_mem_gb": 17.48,
63
+ "output_len": 15575
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 1.1,
69
+ "peak_mem_gb": 17.38,
70
+ "output_len": 14
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 0.96,
76
+ "peak_mem_gb": 17.36,
77
+ "output_len": 3
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 158.49,
83
+ "peak_mem_gb": 17.56,
84
+ "output_len": 15709
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 155.18,
90
+ "peak_mem_gb": 17.41,
91
+ "output_len": 15169
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 154.55,
97
+ "peak_mem_gb": 17.39,
98
+ "output_len": 18039
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 1.43,
104
+ "peak_mem_gb": 17.28,
105
+ "output_len": 52
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 156.59,
111
+ "peak_mem_gb": 17.48,
112
+ "output_len": 12352
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 5.82,
118
+ "peak_mem_gb": 17.01,
119
+ "output_len": 518
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 154.02,
125
+ "peak_mem_gb": 17.38,
126
+ "output_len": 14846
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 165.84,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 12113
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 154.04,
139
+ "peak_mem_gb": 17.38,
140
+ "output_len": 15150
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 161.05,
146
+ "peak_mem_gb": 17.67,
147
+ "output_len": 21969
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 154.78,
153
+ "peak_mem_gb": 17.39,
154
+ "output_len": 22211
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 162.29,
160
+ "peak_mem_gb": 17.74,
161
+ "output_len": 16366
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 155.79,
167
+ "peak_mem_gb": 17.44,
168
+ "output_len": 14275
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 153.99,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4347
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 154.54,
181
+ "peak_mem_gb": 17.39,
182
+ "output_len": 15202
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 1.66,
188
+ "peak_mem_gb": 17.73,
189
+ "output_len": 28
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 64.64,
195
+ "peak_mem_gb": 17.46,
196
+ "output_len": 5684
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 154.12,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 4275
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 0.97,
209
+ "peak_mem_gb": 17.12,
210
+ "output_len": 29
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 1.36,
216
+ "peak_mem_gb": 17.21,
217
+ "output_len": 52
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 70.6,
223
+ "peak_mem_gb": 17.69,
224
+ "output_len": 5979
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 154.05,
230
+ "peak_mem_gb": 17.38,
231
+ "output_len": 13031
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 17.8,
237
+ "peak_mem_gb": 17.05,
238
+ "output_len": 1748
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 1.87,
244
+ "peak_mem_gb": 17.56,
245
+ "output_len": 60
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 1.47,
251
+ "peak_mem_gb": 17.33,
252
+ "output_len": 53
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 1.29,
258
+ "peak_mem_gb": 17.28,
259
+ "output_len": 66
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 154.15,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 18842
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 1.39,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 51
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 158.7,
279
+ "peak_mem_gb": 17.58,
280
+ "output_len": 16374
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 157.4,
286
+ "peak_mem_gb": 17.51,
287
+ "output_len": 12399
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 1.18,
293
+ "peak_mem_gb": 17.28,
294
+ "output_len": 51
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 2.05,
300
+ "peak_mem_gb": 17.75,
301
+ "output_len": 64
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 155.68,
307
+ "peak_mem_gb": 17.38,
308
+ "output_len": 17032
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 22.03,
314
+ "peak_mem_gb": 16.94,
315
+ "output_len": 2203
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 163.52,
321
+ "peak_mem_gb": 17.68,
322
+ "output_len": 15565
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 157.6,
328
+ "peak_mem_gb": 17.41,
329
+ "output_len": 14031
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 0.75,
335
+ "peak_mem_gb": 17.37,
336
+ "output_len": 3
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 1.24,
342
+ "peak_mem_gb": 17.57,
343
+ "output_len": 3
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 155.41,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 14037
351
+ }
352
+ ]
results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 83.12,
7
+ "avg_peak_mem_gb": 17.4
8
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/clip_scores.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 50,
3
+ "avg_clip": 0.719,
4
+ "min_clip": 0.4485,
5
+ "max_clip": 0.8768,
6
+ "per_sample": {
7
+ "0": 0.6599,
8
+ "1": 0.6416,
9
+ "10": 0.8694,
10
+ "100": 0.6016,
11
+ "101": 0.8444,
12
+ "102": 0.6826,
13
+ "103": 0.5591,
14
+ "104": 0.733,
15
+ "105": 0.7257,
16
+ "106": 0.653,
17
+ "107": 0.7705,
18
+ "108": 0.6961,
19
+ "109": 0.7505,
20
+ "11": 0.5583,
21
+ "110": 0.7974,
22
+ "111": 0.8272,
23
+ "112": 0.7188,
24
+ "113": 0.6079,
25
+ "114": 0.6899,
26
+ "115": 0.8383,
27
+ "116": 0.7303,
28
+ "117": 0.6138,
29
+ "118": 0.6808,
30
+ "119": 0.7131,
31
+ "12": 0.6144,
32
+ "120": 0.694,
33
+ "121": 0.8399,
34
+ "122": 0.4485,
35
+ "123": 0.7539,
36
+ "124": 0.5329,
37
+ "125": 0.6999,
38
+ "126": 0.7624,
39
+ "127": 0.8295,
40
+ "128": 0.6954,
41
+ "129": 0.834,
42
+ "13": 0.8209,
43
+ "130": 0.7926,
44
+ "131": 0.754,
45
+ "132": 0.8207,
46
+ "133": 0.8479,
47
+ "134": 0.6171,
48
+ "135": 0.8655,
49
+ "136": 0.5819,
50
+ "137": 0.7602,
51
+ "138": 0.4775,
52
+ "139": 0.8768,
53
+ "14": 0.7486,
54
+ "140": 0.821,
55
+ "141": 0.7551,
56
+ "142": 0.7435
57
+ }
58
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 157.45,
6
+ "peak_mem_gb": 17.48,
7
+ "output_len": 10251
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 1.65,
13
+ "peak_mem_gb": 16.94,
14
+ "output_len": 121
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 1.3,
20
+ "peak_mem_gb": 17.62,
21
+ "output_len": 3
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 156.4,
27
+ "peak_mem_gb": 17.47,
28
+ "output_len": 14759
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 1.61,
34
+ "peak_mem_gb": 17.37,
35
+ "output_len": 61
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 1.25,
41
+ "peak_mem_gb": 17.15,
42
+ "output_len": 53
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 156.12,
48
+ "peak_mem_gb": 17.47,
49
+ "output_len": 16338
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 1.42,
55
+ "peak_mem_gb": 17.27,
56
+ "output_len": 52
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 1.02,
62
+ "peak_mem_gb": 17.13,
63
+ "output_len": 53
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 1.09,
69
+ "peak_mem_gb": 17.38,
70
+ "output_len": 14
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 1.05,
76
+ "peak_mem_gb": 17.36,
77
+ "output_len": 7
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 1.02,
83
+ "peak_mem_gb": 17.29,
84
+ "output_len": 15
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 1.14,
90
+ "peak_mem_gb": 17.01,
91
+ "output_len": 51
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 154.61,
97
+ "peak_mem_gb": 17.39,
98
+ "output_len": 4345
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 1.42,
104
+ "peak_mem_gb": 17.28,
105
+ "output_len": 53
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 1.03,
111
+ "peak_mem_gb": 17.15,
112
+ "output_len": 53
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 155.0,
118
+ "peak_mem_gb": 17.41,
119
+ "output_len": 18377
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 154.12,
125
+ "peak_mem_gb": 17.38,
126
+ "output_len": 13183
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 1.74,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 3
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 154.15,
139
+ "peak_mem_gb": 17.38,
140
+ "output_len": 23244
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 1.8,
146
+ "peak_mem_gb": 17.5,
147
+ "output_len": 61
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 154.71,
153
+ "peak_mem_gb": 17.39,
154
+ "output_len": 16107
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 138.44,
160
+ "peak_mem_gb": 17.66,
161
+ "output_len": 10638
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 155.74,
167
+ "peak_mem_gb": 17.44,
168
+ "output_len": 16008
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 154.03,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4267
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 154.53,
181
+ "peak_mem_gb": 17.39,
182
+ "output_len": 18362
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 163.86,
188
+ "peak_mem_gb": 17.8,
189
+ "output_len": 15027
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 1.16,
195
+ "peak_mem_gb": 17.46,
196
+ "output_len": 7
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 154.14,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 5339
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 1.26,
209
+ "peak_mem_gb": 17.12,
210
+ "output_len": 51
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 1.32,
216
+ "peak_mem_gb": 17.21,
217
+ "output_len": 53
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 13.6,
223
+ "peak_mem_gb": 17.69,
224
+ "output_len": 2434
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 2.51,
230
+ "peak_mem_gb": 16.94,
231
+ "output_len": 214
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 85.96,
237
+ "peak_mem_gb": 17.18,
238
+ "output_len": 11421
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 1.88,
244
+ "peak_mem_gb": 17.56,
245
+ "output_len": 60
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 1.47,
251
+ "peak_mem_gb": 17.33,
252
+ "output_len": 53
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 1.21,
258
+ "peak_mem_gb": 17.28,
259
+ "output_len": 52
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 154.11,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 4279
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 1.54,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 60
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 1.24,
279
+ "peak_mem_gb": 17.33,
280
+ "output_len": 53
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 1.33,
286
+ "peak_mem_gb": 17.19,
287
+ "output_len": 52
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 0.77,
293
+ "peak_mem_gb": 17.28,
294
+ "output_len": 15
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 2.1,
300
+ "peak_mem_gb": 17.75,
301
+ "output_len": 60
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 154.09,
307
+ "peak_mem_gb": 17.38,
308
+ "output_len": 11136
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 153.95,
314
+ "peak_mem_gb": 17.38,
315
+ "output_len": 15909
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 1.27,
321
+ "peak_mem_gb": 17.51,
322
+ "output_len": 15
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 154.24,
328
+ "peak_mem_gb": 17.41,
329
+ "output_len": 15689
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 1.38,
335
+ "peak_mem_gb": 17.37,
336
+ "output_len": 61
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 1.31,
342
+ "peak_mem_gb": 17.57,
343
+ "output_len": 7
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 153.33,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 4278
351
+ }
352
+ ]
results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 61.46,
7
+ "avg_peak_mem_gb": 17.38
8
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/clip_scores.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 50,
3
+ "avg_clip": 0.7051,
4
+ "min_clip": 0.0,
5
+ "max_clip": 0.8768,
6
+ "per_sample": {
7
+ "0": 0.6599,
8
+ "1": 0.6654,
9
+ "10": 0.8694,
10
+ "100": 0.6016,
11
+ "101": 0.8444,
12
+ "102": 0.6853,
13
+ "103": 0.5639,
14
+ "104": 0.733,
15
+ "105": 0.7257,
16
+ "106": 0.653,
17
+ "107": 0.7705,
18
+ "108": 0.6961,
19
+ "109": 0.7735,
20
+ "11": 0.0,
21
+ "110": 0.7974,
22
+ "111": 0.8272,
23
+ "112": 0.7188,
24
+ "113": 0.6079,
25
+ "114": 0.6899,
26
+ "115": 0.643,
27
+ "116": 0.7303,
28
+ "117": 0.6138,
29
+ "118": 0.6899,
30
+ "119": 0.6907,
31
+ "12": 0.6144,
32
+ "120": 0.694,
33
+ "121": 0.8399,
34
+ "122": 0.4485,
35
+ "123": 0.7539,
36
+ "124": 0.5257,
37
+ "125": 0.6999,
38
+ "126": 0.7624,
39
+ "127": 0.8534,
40
+ "128": 0.6954,
41
+ "129": 0.834,
42
+ "13": 0.8209,
43
+ "130": 0.7926,
44
+ "131": 0.754,
45
+ "132": 0.8207,
46
+ "133": 0.8479,
47
+ "134": 0.6171,
48
+ "135": 0.8655,
49
+ "136": 0.5819,
50
+ "137": 0.7602,
51
+ "138": 0.4775,
52
+ "139": 0.8768,
53
+ "14": 0.7486,
54
+ "140": 0.821,
55
+ "141": 0.7551,
56
+ "142": 0.7435
57
+ }
58
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 2.68,
6
+ "peak_mem_gb": 17.13,
7
+ "output_len": 116
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 153.05,
13
+ "peak_mem_gb": 17.38,
14
+ "output_len": 17102
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 1.3,
20
+ "peak_mem_gb": 17.62,
21
+ "output_len": 2
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 155.05,
27
+ "peak_mem_gb": 17.47,
28
+ "output_len": 15227
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 1.0,
34
+ "peak_mem_gb": 17.37,
35
+ "output_len": 9
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 68.05,
41
+ "peak_mem_gb": 17.17,
42
+ "output_len": 4767
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 3.33,
48
+ "peak_mem_gb": 17.11,
49
+ "output_len": 312
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 28.51,
55
+ "peak_mem_gb": 17.27,
56
+ "output_len": 2648
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 155.16,
62
+ "peak_mem_gb": 17.48,
63
+ "output_len": 12318
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 1.01,
69
+ "peak_mem_gb": 17.38,
70
+ "output_len": 9
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 0.99,
76
+ "peak_mem_gb": 17.36,
77
+ "output_len": 9
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 0.93,
83
+ "peak_mem_gb": 17.29,
84
+ "output_len": 9
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 154.02,
90
+ "peak_mem_gb": 17.41,
91
+ "output_len": 14349
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 153.33,
97
+ "peak_mem_gb": 17.39,
98
+ "output_len": 6769
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 0.91,
104
+ "peak_mem_gb": 17.28,
105
+ "output_len": 9
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 155.27,
111
+ "peak_mem_gb": 17.48,
112
+ "output_len": 14334
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 153.71,
118
+ "peak_mem_gb": 17.41,
119
+ "output_len": 14881
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 152.82,
125
+ "peak_mem_gb": 17.38,
126
+ "output_len": 12370
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 1.72,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 2
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 48.12,
139
+ "peak_mem_gb": 16.99,
140
+ "output_len": 4474
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 1.14,
146
+ "peak_mem_gb": 17.5,
147
+ "output_len": 2
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 153.48,
153
+ "peak_mem_gb": 17.39,
154
+ "output_len": 14513
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 1.08,
160
+ "peak_mem_gb": 17.62,
161
+ "output_len": 2
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 154.53,
167
+ "peak_mem_gb": 17.44,
168
+ "output_len": 8312
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 152.8,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4347
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 153.27,
181
+ "peak_mem_gb": 17.39,
182
+ "output_len": 14509
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 1.43,
188
+ "peak_mem_gb": 17.73,
189
+ "output_len": 2
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 1.07,
195
+ "peak_mem_gb": 17.46,
196
+ "output_len": 2
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 152.84,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 4347
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 155.12,
209
+ "peak_mem_gb": 17.47,
210
+ "output_len": 14542
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 156.14,
216
+ "peak_mem_gb": 17.52,
217
+ "output_len": 14175
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 1.37,
223
+ "peak_mem_gb": 17.69,
224
+ "output_len": 2
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 152.77,
230
+ "peak_mem_gb": 17.38,
231
+ "output_len": 11963
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 154.3,
237
+ "peak_mem_gb": 17.44,
238
+ "output_len": 20445
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 1.23,
244
+ "peak_mem_gb": 17.56,
245
+ "output_len": 2
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 0.97,
251
+ "peak_mem_gb": 17.33,
252
+ "output_len": 9
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 0.7,
258
+ "peak_mem_gb": 17.28,
259
+ "output_len": 9
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 152.77,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 13936
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 0.9,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 9
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 0.76,
279
+ "peak_mem_gb": 17.33,
280
+ "output_len": 9
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 156.04,
286
+ "peak_mem_gb": 17.51,
287
+ "output_len": 5339
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 0.7,
293
+ "peak_mem_gb": 17.28,
294
+ "output_len": 9
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 1.44,
300
+ "peak_mem_gb": 17.75,
301
+ "output_len": 2
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 152.79,
307
+ "peak_mem_gb": 17.38,
308
+ "output_len": 15632
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 152.75,
314
+ "peak_mem_gb": 17.38,
315
+ "output_len": 15204
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 1.15,
321
+ "peak_mem_gb": 17.51,
322
+ "output_len": 2
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 153.69,
328
+ "peak_mem_gb": 17.41,
329
+ "output_len": 11150
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 0.79,
335
+ "peak_mem_gb": 17.37,
336
+ "output_len": 9
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 1.26,
342
+ "peak_mem_gb": 17.57,
343
+ "output_len": 7
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 153.08,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 15480
351
+ }
352
+ ]
results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 74.27,
7
+ "avg_peak_mem_gb": 17.42
8
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/clip_scores.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 50,
3
+ "avg_clip": 0.6747,
4
+ "min_clip": 0.0,
5
+ "max_clip": 0.8768,
6
+ "per_sample": {
7
+ "0": 0.6599,
8
+ "1": 0.6416,
9
+ "10": 0.8694,
10
+ "100": 0.6016,
11
+ "101": 0.8444,
12
+ "102": 0.0,
13
+ "103": 0.5639,
14
+ "104": 0.733,
15
+ "105": 0.7257,
16
+ "106": 0.653,
17
+ "107": 0.7705,
18
+ "108": 0.6961,
19
+ "109": 0.7505,
20
+ "11": 0.5583,
21
+ "110": 0.7974,
22
+ "111": 0.8272,
23
+ "112": 0.7188,
24
+ "113": 0.6079,
25
+ "114": 0.6899,
26
+ "115": 0.8383,
27
+ "116": 0.7303,
28
+ "117": 0.6138,
29
+ "118": 0.6899,
30
+ "119": 0.715,
31
+ "12": 0.6144,
32
+ "120": 0.694,
33
+ "121": 0.8399,
34
+ "122": 0.4485,
35
+ "123": 0.7539,
36
+ "124": 0.5329,
37
+ "125": 0.6999,
38
+ "126": 0.6516,
39
+ "127": 0.8295,
40
+ "128": 0.6954,
41
+ "129": 0.834,
42
+ "13": 0.0,
43
+ "130": 0.7926,
44
+ "131": 0.754,
45
+ "132": 0.8207,
46
+ "133": 0.8479,
47
+ "134": 0.0,
48
+ "135": 0.8655,
49
+ "136": 0.5819,
50
+ "137": 0.7573,
51
+ "138": 0.4775,
52
+ "139": 0.8768,
53
+ "14": 0.7486,
54
+ "140": 0.821,
55
+ "141": 0.7551,
56
+ "142": 0.7435
57
+ }
58
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 8.0,
6
+ "peak_mem_gb": 17.13,
7
+ "output_len": 678
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 152.89,
13
+ "peak_mem_gb": 17.38,
14
+ "output_len": 4434
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 161.16,
20
+ "peak_mem_gb": 17.74,
21
+ "output_len": 32762
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 4.44,
27
+ "peak_mem_gb": 17.11,
28
+ "output_len": 399
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 1.09,
34
+ "peak_mem_gb": 17.37,
35
+ "output_len": 15
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 155.36,
41
+ "peak_mem_gb": 17.48,
42
+ "output_len": 13186
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 3.61,
48
+ "peak_mem_gb": 17.11,
49
+ "output_len": 337
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 156.69,
55
+ "peak_mem_gb": 17.54,
56
+ "output_len": 13709
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 2.96,
62
+ "peak_mem_gb": 17.13,
63
+ "output_len": 201
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 1.23,
69
+ "peak_mem_gb": 17.38,
70
+ "output_len": 32
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 1.06,
76
+ "peak_mem_gb": 17.36,
77
+ "output_len": 15
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 1.92,
83
+ "peak_mem_gb": 17.29,
84
+ "output_len": 82
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 6.18,
90
+ "peak_mem_gb": 17.01,
91
+ "output_len": 569
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 2.47,
97
+ "peak_mem_gb": 16.97,
98
+ "output_len": 180
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 156.96,
104
+ "peak_mem_gb": 17.56,
105
+ "output_len": 15312
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 155.17,
111
+ "peak_mem_gb": 17.48,
112
+ "output_len": 14052
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 153.63,
118
+ "peak_mem_gb": 17.41,
119
+ "output_len": 15607
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 4.44,
125
+ "peak_mem_gb": 16.94,
126
+ "output_len": 384
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 1.73,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 2
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 21.54,
139
+ "peak_mem_gb": 16.94,
140
+ "output_len": 1378
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 159.64,
146
+ "peak_mem_gb": 17.67,
147
+ "output_len": 32762
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 153.39,
153
+ "peak_mem_gb": 17.39,
154
+ "output_len": 4149
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 160.92,
160
+ "peak_mem_gb": 17.74,
161
+ "output_len": 4268
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 3.47,
167
+ "peak_mem_gb": 17.06,
168
+ "output_len": 305
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 152.73,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4347
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 153.17,
181
+ "peak_mem_gb": 17.39,
182
+ "output_len": 13351
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 1.43,
188
+ "peak_mem_gb": 17.73,
189
+ "output_len": 2
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 1.63,
195
+ "peak_mem_gb": 17.46,
196
+ "output_len": 60
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 152.7,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 4347
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 3.47,
209
+ "peak_mem_gb": 17.12,
210
+ "output_len": 305
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 2.91,
216
+ "peak_mem_gb": 17.21,
217
+ "output_len": 191
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 161.58,
223
+ "peak_mem_gb": 17.77,
224
+ "output_len": 14321
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 152.73,
230
+ "peak_mem_gb": 17.38,
231
+ "output_len": 12333
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 9.68,
237
+ "peak_mem_gb": 17.05,
238
+ "output_len": 932
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 160.35,
244
+ "peak_mem_gb": 17.7,
245
+ "output_len": 4296
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 157.63,
251
+ "peak_mem_gb": 17.58,
252
+ "output_len": 15740
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 1.65,
258
+ "peak_mem_gb": 17.28,
259
+ "output_len": 82
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 152.76,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 4362
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 2.92,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 189
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 1.38,
279
+ "peak_mem_gb": 17.33,
280
+ "output_len": 62
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 156.47,
286
+ "peak_mem_gb": 17.51,
287
+ "output_len": 13105
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 7.03,
293
+ "peak_mem_gb": 17.28,
294
+ "output_len": 694
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 163.53,
300
+ "peak_mem_gb": 17.8,
301
+ "output_len": 32762
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 153.62,
307
+ "peak_mem_gb": 17.38,
308
+ "output_len": 13971
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 153.4,
314
+ "peak_mem_gb": 17.38,
315
+ "output_len": 12184
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 1.27,
321
+ "peak_mem_gb": 17.51,
322
+ "output_len": 15
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 1.09,
328
+ "peak_mem_gb": 17.0,
329
+ "output_len": 51
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 0.86,
335
+ "peak_mem_gb": 17.37,
336
+ "output_len": 15
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 161.26,
342
+ "peak_mem_gb": 17.71,
343
+ "output_len": 32754
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 153.86,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 22232
351
+ }
352
+ ]
results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 77.02,
7
+ "avg_peak_mem_gb": 17.38
8
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/clip_scores.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 50,
3
+ "avg_clip": 0.7047,
4
+ "min_clip": 0.0,
5
+ "max_clip": 0.8768,
6
+ "per_sample": {
7
+ "0": 0.6599,
8
+ "1": 0.6416,
9
+ "10": 0.8694,
10
+ "100": 0.6016,
11
+ "101": 0.8444,
12
+ "102": 0.6826,
13
+ "103": 0.5591,
14
+ "104": 0.733,
15
+ "105": 0.7257,
16
+ "106": 0.653,
17
+ "107": 0.7705,
18
+ "108": 0.6961,
19
+ "109": 0.7505,
20
+ "11": 0.5583,
21
+ "110": 0.7974,
22
+ "111": 0.8272,
23
+ "112": 0.7293,
24
+ "113": 0.6079,
25
+ "114": 0.6899,
26
+ "115": 0.8383,
27
+ "116": 0.6843,
28
+ "117": 0.6138,
29
+ "118": 0.0,
30
+ "119": 0.715,
31
+ "12": 0.6144,
32
+ "120": 0.694,
33
+ "121": 0.8399,
34
+ "122": 0.4485,
35
+ "123": 0.7539,
36
+ "124": 0.5329,
37
+ "125": 0.6999,
38
+ "126": 0.7624,
39
+ "127": 0.8295,
40
+ "128": 0.6954,
41
+ "129": 0.834,
42
+ "13": 0.8209,
43
+ "130": 0.7926,
44
+ "131": 0.754,
45
+ "132": 0.8207,
46
+ "133": 0.8479,
47
+ "134": 0.6171,
48
+ "135": 0.8655,
49
+ "136": 0.5819,
50
+ "137": 0.7602,
51
+ "138": 0.4775,
52
+ "139": 0.8768,
53
+ "14": 0.7486,
54
+ "140": 0.821,
55
+ "141": 0.7551,
56
+ "142": 0.7435
57
+ }
58
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/per_sample.json ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "n_visual_tokens": 256,
5
+ "latency_s": 2.02,
6
+ "peak_mem_gb": 17.13,
7
+ "output_len": 71
8
+ },
9
+ {
10
+ "id": "1",
11
+ "n_visual_tokens": 256,
12
+ "latency_s": 153.01,
13
+ "peak_mem_gb": 17.38,
14
+ "output_len": 22029
15
+ },
16
+ {
17
+ "id": "10",
18
+ "n_visual_tokens": 256,
19
+ "latency_s": 161.34,
20
+ "peak_mem_gb": 17.74,
21
+ "output_len": 20413
22
+ },
23
+ {
24
+ "id": "100",
25
+ "n_visual_tokens": 256,
26
+ "latency_s": 155.26,
27
+ "peak_mem_gb": 17.47,
28
+ "output_len": 14271
29
+ },
30
+ {
31
+ "id": "101",
32
+ "n_visual_tokens": 256,
33
+ "latency_s": 158.29,
34
+ "peak_mem_gb": 17.6,
35
+ "output_len": 15495
36
+ },
37
+ {
38
+ "id": "102",
39
+ "n_visual_tokens": 256,
40
+ "latency_s": 1.9,
41
+ "peak_mem_gb": 17.15,
42
+ "output_len": 119
43
+ },
44
+ {
45
+ "id": "103",
46
+ "n_visual_tokens": 256,
47
+ "latency_s": 0.58,
48
+ "peak_mem_gb": 17.11,
49
+ "output_len": 15
50
+ },
51
+ {
52
+ "id": "104",
53
+ "n_visual_tokens": 256,
54
+ "latency_s": 1.74,
55
+ "peak_mem_gb": 17.27,
56
+ "output_len": 97
57
+ },
58
+ {
59
+ "id": "105",
60
+ "n_visual_tokens": 256,
61
+ "latency_s": 2.57,
62
+ "peak_mem_gb": 17.13,
63
+ "output_len": 186
64
+ },
65
+ {
66
+ "id": "106",
67
+ "n_visual_tokens": 256,
68
+ "latency_s": 158.5,
69
+ "peak_mem_gb": 17.61,
70
+ "output_len": 17065
71
+ },
72
+ {
73
+ "id": "107",
74
+ "n_visual_tokens": 256,
75
+ "latency_s": 158.46,
76
+ "peak_mem_gb": 17.59,
77
+ "output_len": 20375
78
+ },
79
+ {
80
+ "id": "108",
81
+ "n_visual_tokens": 256,
82
+ "latency_s": 157.75,
83
+ "peak_mem_gb": 17.56,
84
+ "output_len": 20382
85
+ },
86
+ {
87
+ "id": "109",
88
+ "n_visual_tokens": 256,
89
+ "latency_s": 154.67,
90
+ "peak_mem_gb": 17.41,
91
+ "output_len": 13548
92
+ },
93
+ {
94
+ "id": "11",
95
+ "n_visual_tokens": 256,
96
+ "latency_s": 25.06,
97
+ "peak_mem_gb": 16.97,
98
+ "output_len": 2878
99
+ },
100
+ {
101
+ "id": "110",
102
+ "n_visual_tokens": 256,
103
+ "latency_s": 1.4,
104
+ "peak_mem_gb": 17.28,
105
+ "output_len": 53
106
+ },
107
+ {
108
+ "id": "111",
109
+ "n_visual_tokens": 256,
110
+ "latency_s": 156.93,
111
+ "peak_mem_gb": 17.49,
112
+ "output_len": 23357
113
+ },
114
+ {
115
+ "id": "112",
116
+ "n_visual_tokens": 256,
117
+ "latency_s": 154.53,
118
+ "peak_mem_gb": 17.41,
119
+ "output_len": 12645
120
+ },
121
+ {
122
+ "id": "113",
123
+ "n_visual_tokens": 256,
124
+ "latency_s": 153.63,
125
+ "peak_mem_gb": 17.38,
126
+ "output_len": 4347
127
+ },
128
+ {
129
+ "id": "114",
130
+ "n_visual_tokens": 256,
131
+ "latency_s": 2.45,
132
+ "peak_mem_gb": 17.89,
133
+ "output_len": 68
134
+ },
135
+ {
136
+ "id": "115",
137
+ "n_visual_tokens": 256,
138
+ "latency_s": 153.6,
139
+ "peak_mem_gb": 17.38,
140
+ "output_len": 4198
141
+ },
142
+ {
143
+ "id": "116",
144
+ "n_visual_tokens": 256,
145
+ "latency_s": 4.79,
146
+ "peak_mem_gb": 17.5,
147
+ "output_len": 301
148
+ },
149
+ {
150
+ "id": "117",
151
+ "n_visual_tokens": 256,
152
+ "latency_s": 154.29,
153
+ "peak_mem_gb": 17.39,
154
+ "output_len": 14571
155
+ },
156
+ {
157
+ "id": "118",
158
+ "n_visual_tokens": 256,
159
+ "latency_s": 161.83,
160
+ "peak_mem_gb": 17.74,
161
+ "output_len": 12925
162
+ },
163
+ {
164
+ "id": "119",
165
+ "n_visual_tokens": 256,
166
+ "latency_s": 155.37,
167
+ "peak_mem_gb": 17.44,
168
+ "output_len": 15279
169
+ },
170
+ {
171
+ "id": "12",
172
+ "n_visual_tokens": 256,
173
+ "latency_s": 153.57,
174
+ "peak_mem_gb": 17.38,
175
+ "output_len": 4347
176
+ },
177
+ {
178
+ "id": "120",
179
+ "n_visual_tokens": 256,
180
+ "latency_s": 154.07,
181
+ "peak_mem_gb": 17.39,
182
+ "output_len": 21095
183
+ },
184
+ {
185
+ "id": "121",
186
+ "n_visual_tokens": 256,
187
+ "latency_s": 1.56,
188
+ "peak_mem_gb": 17.74,
189
+ "output_len": 15
190
+ },
191
+ {
192
+ "id": "122",
193
+ "n_visual_tokens": 256,
194
+ "latency_s": 1.19,
195
+ "peak_mem_gb": 17.46,
196
+ "output_len": 15
197
+ },
198
+ {
199
+ "id": "123",
200
+ "n_visual_tokens": 256,
201
+ "latency_s": 153.64,
202
+ "peak_mem_gb": 17.38,
203
+ "output_len": 4347
204
+ },
205
+ {
206
+ "id": "124",
207
+ "n_visual_tokens": 256,
208
+ "latency_s": 156.07,
209
+ "peak_mem_gb": 17.47,
210
+ "output_len": 27488
211
+ },
212
+ {
213
+ "id": "125",
214
+ "n_visual_tokens": 256,
215
+ "latency_s": 1.33,
216
+ "peak_mem_gb": 17.21,
217
+ "output_len": 53
218
+ },
219
+ {
220
+ "id": "126",
221
+ "n_visual_tokens": 256,
222
+ "latency_s": 10.06,
223
+ "peak_mem_gb": 17.69,
224
+ "output_len": 892
225
+ },
226
+ {
227
+ "id": "127",
228
+ "n_visual_tokens": 256,
229
+ "latency_s": 153.65,
230
+ "peak_mem_gb": 17.38,
231
+ "output_len": 4347
232
+ },
233
+ {
234
+ "id": "128",
235
+ "n_visual_tokens": 256,
236
+ "latency_s": 155.15,
237
+ "peak_mem_gb": 17.44,
238
+ "output_len": 22210
239
+ },
240
+ {
241
+ "id": "129",
242
+ "n_visual_tokens": 256,
243
+ "latency_s": 1.76,
244
+ "peak_mem_gb": 17.56,
245
+ "output_len": 53
246
+ },
247
+ {
248
+ "id": "13",
249
+ "n_visual_tokens": 256,
250
+ "latency_s": 1.45,
251
+ "peak_mem_gb": 17.33,
252
+ "output_len": 53
253
+ },
254
+ {
255
+ "id": "130",
256
+ "n_visual_tokens": 256,
257
+ "latency_s": 157.68,
258
+ "peak_mem_gb": 17.56,
259
+ "output_len": 5880
260
+ },
261
+ {
262
+ "id": "131",
263
+ "n_visual_tokens": 256,
264
+ "latency_s": 153.6,
265
+ "peak_mem_gb": 17.38,
266
+ "output_len": 24115
267
+ },
268
+ {
269
+ "id": "132",
270
+ "n_visual_tokens": 256,
271
+ "latency_s": 1.83,
272
+ "peak_mem_gb": 17.28,
273
+ "output_len": 97
274
+ },
275
+ {
276
+ "id": "133",
277
+ "n_visual_tokens": 256,
278
+ "latency_s": 161.93,
279
+ "peak_mem_gb": 17.58,
280
+ "output_len": 20967
281
+ },
282
+ {
283
+ "id": "134",
284
+ "n_visual_tokens": 256,
285
+ "latency_s": 157.17,
286
+ "peak_mem_gb": 17.51,
287
+ "output_len": 21330
288
+ },
289
+ {
290
+ "id": "135",
291
+ "n_visual_tokens": 256,
292
+ "latency_s": 1.65,
293
+ "peak_mem_gb": 17.28,
294
+ "output_len": 108
295
+ },
296
+ {
297
+ "id": "136",
298
+ "n_visual_tokens": 256,
299
+ "latency_s": 163.89,
300
+ "peak_mem_gb": 17.8,
301
+ "output_len": 12317
302
+ },
303
+ {
304
+ "id": "137",
305
+ "n_visual_tokens": 256,
306
+ "latency_s": 21.69,
307
+ "peak_mem_gb": 16.94,
308
+ "output_len": 3049
309
+ },
310
+ {
311
+ "id": "138",
312
+ "n_visual_tokens": 256,
313
+ "latency_s": 21.44,
314
+ "peak_mem_gb": 16.94,
315
+ "output_len": 2863
316
+ },
317
+ {
318
+ "id": "139",
319
+ "n_visual_tokens": 256,
320
+ "latency_s": 15.56,
321
+ "peak_mem_gb": 17.51,
322
+ "output_len": 2265
323
+ },
324
+ {
325
+ "id": "14",
326
+ "n_visual_tokens": 256,
327
+ "latency_s": 29.44,
328
+ "peak_mem_gb": 17.0,
329
+ "output_len": 3982
330
+ },
331
+ {
332
+ "id": "140",
333
+ "n_visual_tokens": 256,
334
+ "latency_s": 160.5,
335
+ "peak_mem_gb": 17.6,
336
+ "output_len": 15503
337
+ },
338
+ {
339
+ "id": "141",
340
+ "n_visual_tokens": 256,
341
+ "latency_s": 161.91,
342
+ "peak_mem_gb": 17.71,
343
+ "output_len": 4350
344
+ },
345
+ {
346
+ "id": "142",
347
+ "n_visual_tokens": 256,
348
+ "latency_s": 156.4,
349
+ "peak_mem_gb": 17.39,
350
+ "output_len": 15550
351
+ }
352
+ ]
results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 93.96,
7
+ "avg_peak_mem_gb": 17.42
8
+ }
results/clip_per_epoch/optical_mix_d2c/epoch_6/uipress_256/summary.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "method": "uipress_256",
3
+ "n_samples": 50,
4
+ "n_success": 50,
5
+ "avg_visual_tokens": 256.0,
6
+ "avg_latency_s": 84.97,
7
+ "avg_peak_mem_gb": 17.41
8
+ }
results/clip_per_epoch/optical_mix_d2c/summary.json ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "folder": "epoch_5",
4
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch5.pt",
5
+ "avg_clip": 0.7047,
6
+ "n": 50,
7
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_5/uipress_256/clip_scores.json"
8
+ },
9
+ {
10
+ "folder": "epoch_6",
11
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch6.pt",
12
+ "avg_clip": 0.7192,
13
+ "n": 50,
14
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_6/uipress_256/clip_scores.json"
15
+ },
16
+ {
17
+ "folder": "epoch_7",
18
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch7.pt",
19
+ "avg_clip": 0.7192,
20
+ "n": 50,
21
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_7/uipress_256/clip_scores.json"
22
+ },
23
+ {
24
+ "folder": "epoch_8",
25
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch8.pt",
26
+ "avg_clip": 0.7192,
27
+ "n": 50,
28
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_8/uipress_256/clip_scores.json"
29
+ },
30
+ {
31
+ "folder": "epoch_9",
32
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch9.pt",
33
+ "avg_clip": 0.7025,
34
+ "n": 50,
35
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_9/uipress_256/clip_scores.json"
36
+ },
37
+ {
38
+ "folder": "epoch_10",
39
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch10.pt",
40
+ "avg_clip": 0.7192,
41
+ "n": 50,
42
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_10/uipress_256/clip_scores.json"
43
+ },
44
+ {
45
+ "folder": "epoch_11",
46
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch11.pt",
47
+ "avg_clip": 0.7054,
48
+ "n": 50,
49
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_11/uipress_256/clip_scores.json"
50
+ },
51
+ {
52
+ "folder": "epoch_12",
53
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch12.pt",
54
+ "avg_clip": 0.719,
55
+ "n": 50,
56
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_12/uipress_256/clip_scores.json"
57
+ },
58
+ {
59
+ "folder": "epoch_13",
60
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch13.pt",
61
+ "avg_clip": 0.7193,
62
+ "n": 50,
63
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_13/uipress_256/clip_scores.json"
64
+ },
65
+ {
66
+ "folder": "epoch_14",
67
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch14.pt",
68
+ "avg_clip": 0.7051,
69
+ "n": 50,
70
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_14/uipress_256/clip_scores.json"
71
+ },
72
+ {
73
+ "folder": "epoch_15",
74
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch15.pt",
75
+ "avg_clip": 0.7029,
76
+ "n": 50,
77
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_15/uipress_256/clip_scores.json"
78
+ },
79
+ {
80
+ "folder": "epoch_16",
81
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch16.pt",
82
+ "avg_clip": 0.7027,
83
+ "n": 50,
84
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_16/uipress_256/clip_scores.json"
85
+ },
86
+ {
87
+ "folder": "epoch_17",
88
+ "checkpoint": "checkpoints/optical_mix_d2c/epoch17.pt",
89
+ "avg_clip": 0.6747,
90
+ "n": 50,
91
+ "clip_path": "results/clip_per_epoch/optical_mix_d2c/epoch_17/uipress_256/clip_scores.json"
92
+ }
93
+ ]
results/comparison/top30_by_clip_per_method_table.json ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "name": "qwen3_res_1003520",
4
+ "tok_nominal": 3748,
5
+ "compression_ratio": "1.9×",
6
+ "clip_top30_mean": 0.8417,
7
+ "vs_baseline_clip_top30": "+0.5%",
8
+ "latency_top30_mean_s": 66.4,
9
+ "ssim_top30_mean": 0.701,
10
+ "ssim_n": 30
11
+ },
12
+ {
13
+ "name": "qwen3_res_230400",
14
+ "tok_nominal": 845,
15
+ "compression_ratio": "8.6×",
16
+ "clip_top30_mean": 0.8409,
17
+ "vs_baseline_clip_top30": "+0.4%",
18
+ "latency_top30_mean_s": 84.4,
19
+ "ssim_top30_mean": 0.681,
20
+ "ssim_n": 30
21
+ },
22
+ {
23
+ "name": "qwen3_full (基线)",
24
+ "tok_nominal": 7299,
25
+ "compression_ratio": "1×",
26
+ "clip_top30_mean": 0.8379,
27
+ "vs_baseline_clip_top30": "—",
28
+ "latency_top30_mean_s": 79.7,
29
+ "ssim_top30_mean": 0.688,
30
+ "ssim_n": 30
31
+ },
32
+ {
33
+ "name": "efficientui_prune60",
34
+ "tok_nominal": 730,
35
+ "compression_ratio": "10×",
36
+ "clip_top30_mean": 0.8199,
37
+ "vs_baseline_clip_top30": "-2.1%",
38
+ "latency_top30_mean_s": 88.9,
39
+ "ssim_top30_mean": 0.697,
40
+ "ssim_n": 30
41
+ },
42
+ {
43
+ "name": "efficientui_prune80",
44
+ "tok_nominal": 364,
45
+ "compression_ratio": "20×",
46
+ "clip_top30_mean": 0.8124,
47
+ "vs_baseline_clip_top30": "-3.1%",
48
+ "latency_top30_mean_s": 103.4,
49
+ "ssim_top30_mean": 0.628,
50
+ "ssim_n": 30
51
+ },
52
+ {
53
+ "name": "visionzip_256",
54
+ "tok_nominal": 256,
55
+ "compression_ratio": "28.5×",
56
+ "clip_top30_mean": 0.8035,
57
+ "vs_baseline_clip_top30": "-4.1%",
58
+ "latency_top30_mean_s": 106.5,
59
+ "ssim_top30_mean": 0.64,
60
+ "ssim_n": 30
61
+ },
62
+ {
63
+ "name": "visionzip_128",
64
+ "tok_nominal": 128,
65
+ "compression_ratio": "57×",
66
+ "clip_top30_mean": 0.7954,
67
+ "vs_baseline_clip_top30": "-5.1%",
68
+ "latency_top30_mean_s": 114.7,
69
+ "ssim_top30_mean": 0.618,
70
+ "ssim_n": 30
71
+ },
72
+ {
73
+ "name": "uipress_256 (未训练)",
74
+ "tok_nominal": 256,
75
+ "compression_ratio": "28.5×",
76
+ "clip_top30_mean": 0.7925,
77
+ "vs_baseline_clip_top30": "-5.4%",
78
+ "latency_top30_mean_s": 66.2,
79
+ "ssim_top30_mean": 0.659,
80
+ "ssim_n": 30
81
+ },
82
+ {
83
+ "name": "visionzip_64",
84
+ "tok_nominal": 64,
85
+ "compression_ratio": "114×",
86
+ "clip_top30_mean": 0.783,
87
+ "vs_baseline_clip_top30": "-6.5%",
88
+ "latency_top30_mean_s": 105.8,
89
+ "ssim_top30_mean": 0.627,
90
+ "ssim_n": 30
91
+ },
92
+ {
93
+ "name": "uipress_256 (训练 E17)",
94
+ "tok_nominal": 256,
95
+ "compression_ratio": "28.5×",
96
+ "clip_top30_mean": 0.781,
97
+ "vs_baseline_clip_top30": "-6.8%",
98
+ "latency_top30_mean_s": 90.4,
99
+ "ssim_top30_mean": 0.696,
100
+ "ssim_n": 30
101
+ }
102
+ ]
scripts/ablation_topk_report.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Build Top-K ablation tables from existing method outputs.
3
+
4
+ By default this script uses the current comparison runs and writes:
5
+ results/ablation_study/top30/top30_table.json
6
+ results/ablation_study/top30/top30_table.md
7
+
8
+ It keeps full reproducibility by also saving selected sample ids per method.
9
+ """
10
+
11
+ import argparse
12
+ import json
13
+ from pathlib import Path
14
+ from typing import Any
15
+
16
+ import numpy as np
17
+ from PIL import Image
18
+
19
+ from step_ssim_bootstrap import compute_ssim_pil, render_html_to_screenshot
20
+
21
+ PROJECT_ROOT = Path(__file__).resolve().parent.parent
22
+
23
+
24
+ def _default_manifest() -> list[dict[str, Any]]:
25
+ return [
26
+ {
27
+ "name": "qwen3_res_230400",
28
+ "method_dir": "results/comparison/qwen3_res_230400",
29
+ "token_nominal": 845,
30
+ "rendered_cache_dir": "qwen3_res_230400",
31
+ },
32
+ {
33
+ "name": "qwen3_res_1003520",
34
+ "method_dir": "results/comparison/qwen3_res_1003520",
35
+ "token_nominal": 3748,
36
+ "rendered_cache_dir": "qwen3_res_1003520",
37
+ },
38
+ {
39
+ "name": "qwen3_full (基线)",
40
+ "method_dir": "results/comparison/qwen3_full/qwen3_full",
41
+ "token_nominal": 7299,
42
+ "rendered_cache_dir": "qwen3_full",
43
+ "is_baseline": True,
44
+ },
45
+ {
46
+ "name": "efficientui_prune60",
47
+ "method_dir": "results/comparison/efficientui_prune60/efficientui_prune60",
48
+ "token_nominal": 730,
49
+ "rendered_cache_dir": "efficientui_prune60",
50
+ },
51
+ {
52
+ "name": "efficientui_prune80",
53
+ "method_dir": "results/comparison/efficientui_prune80",
54
+ "token_nominal": 364,
55
+ "rendered_cache_dir": "efficientui_prune80",
56
+ },
57
+ {
58
+ "name": "visionzip_256",
59
+ "method_dir": "results/comparison/visionzip_256/visionzip_256",
60
+ "token_nominal": 256,
61
+ "rendered_cache_dir": "visionzip_256",
62
+ },
63
+ {
64
+ "name": "visionzip_128",
65
+ "method_dir": "results/comparison/visionzip_128/visionzip_128",
66
+ "token_nominal": 128,
67
+ "rendered_cache_dir": "visionzip_128",
68
+ },
69
+ {
70
+ "name": "uipress_256 (未训练)",
71
+ "method_dir": "results/comparison/uipress_256/uipress_256",
72
+ "token_nominal": 256,
73
+ "rendered_cache_dir": "uipress_256",
74
+ },
75
+ {
76
+ "name": "visionzip_64",
77
+ "method_dir": "results/comparison/visionzip_64",
78
+ "token_nominal": 64,
79
+ "rendered_cache_dir": "visionzip_64",
80
+ },
81
+ {
82
+ "name": "uipress_256 (训练 E17)",
83
+ "method_dir": "results/clip_epoch17_gpu1/uipress_256",
84
+ "token_nominal": 256,
85
+ "rendered_cache_dir": "uipress_256_e17_top30",
86
+ },
87
+ ]
88
+
89
+
90
+ def _load_json(path: Path):
91
+ with open(path, "r", encoding="utf-8") as f:
92
+ return json.load(f)
93
+
94
+
95
+ def _format_compression_ratio(base_tokens: int, tok: int) -> str:
96
+ ratio = base_tokens / max(tok, 1)
97
+ if tok == 256:
98
+ return "28.5x"
99
+ if abs(ratio - 1.0) < 0.02:
100
+ return "1x"
101
+ if abs(ratio - round(ratio)) < 0.08 and round(ratio) >= 3:
102
+ return f"{round(ratio)}x"
103
+ return f"{ratio:.1f}x"
104
+
105
+
106
def _load_clip_map(method_dir: Path) -> dict[str, float]:
    """Load per-sample CLIP scores from <method_dir>/clip_scores.json.

    Values may be stored either as bare numbers or as dicts carrying a
    "clip_score" field; both forms are normalised to ``float`` keyed by
    stringified sample id.
    """
    blob = _load_json(method_dir / "clip_scores.json")
    scores: dict[str, float] = {}
    for sample_id, raw in blob.get("per_sample", {}).items():
        score = raw.get("clip_score", 0.0) if isinstance(raw, dict) else raw
        scores[str(sample_id)] = float(score)
    return scores
115
+
116
+
117
+ def _topk_ids(clip_map: dict[str, float], k: int) -> list[str]:
118
+ pairs = sorted(clip_map.items(), key=lambda x: -x[1])
119
+ return [sid for sid, _ in pairs[:k]]
120
+
121
+
122
def _load_per_sample(method_dir: Path) -> dict[str, dict[str, Any]]:
    """Index <method_dir>/per_sample.json rows by sample id, dropping failures."""
    indexed: dict[str, dict[str, Any]] = {}
    for rec in _load_json(method_dir / "per_sample.json"):
        if "error" in rec:
            # Failed samples carry an "error" key; exclude them.
            continue
        indexed[str(rec["id"])] = rec
    return indexed
125
+
126
+
127
def _get_ssim_scores_for_ids(
    method_dir: Path,
    top_ids: list[str],
    ref_dir: Path,
    rendered_cache_root: Path,
    rendered_cache_dir: str,
) -> tuple[list[float], list[str]]:
    """Compute SSIM between reference screenshots and rendered predictions.

    Rendered screenshots are cached under
    ``rendered_cache_root/rendered_cache_dir`` and reused on later runs.

    Returns:
        (ssim_values, failed_ids) — a sample fails when its reference PNG is
        missing, its HTML prediction is absent or cannot be rendered, or
        image loading/SSIM computation raises.
    """
    cache_dir = rendered_cache_root / rendered_cache_dir
    cache_dir.mkdir(parents=True, exist_ok=True)
    html_dir = method_dir / "html_predictions"
    scores: list[float] = []
    failed: list[str] = []

    for sid in top_ids:
        ref_png = ref_dir / f"{sid}.png"
        rendered_png = cache_dir / f"{sid}.png"
        if not ref_png.exists():
            failed.append(sid)
            continue
        if not rendered_png.exists():
            # Render on demand; skip the sample when the HTML prediction is
            # absent or the renderer reports failure.
            html_path = html_dir / f"{sid}.html"
            if not html_path.exists() or not render_html_to_screenshot(
                str(html_path.resolve()), str(rendered_png.resolve())
            ):
                failed.append(sid)
                continue
        try:
            ref_img = Image.open(ref_png).convert("RGB")
            pred_img = Image.open(rendered_png).convert("RGB")
            scores.append(compute_ssim_pil(ref_img, pred_img))
        except Exception:
            # Corrupt/unreadable image or SSIM failure: count as missing.
            failed.append(sid)
    return scores, failed
162
+
163
+
164
def _build_table_rows(
    manifest: list[dict[str, Any]],
    topk: int,
    ref_dir: Path,
    rendered_cache_root: Path,
    baseline_tokens: int,
) -> tuple[list[dict[str, Any]], dict[str, list[str]]]:
    """Build per-method Top-K summary rows plus the selected sample ids.

    For each manifest entry, that method's own per-sample CLIP scores pick its
    Top-K subset; CLIP / latency / SSIM means are computed on that subset.
    After all rows are built, each row gets a relative-to-baseline CLIP delta
    and rows are sorted by descending CLIP mean.

    Args:
        manifest: Method descriptors (see ``_default_manifest``); exactly one
            entry must set ``is_baseline: true``.
        topk: Number of best samples (by CLIP) to keep per method.
        ref_dir: Directory of reference screenshots (``<id>.png``).
        rendered_cache_root: Root directory for cached rendered screenshots.
        baseline_tokens: Nominal token count used for compression ratios.

    Returns:
        ``(rows, selected_ids)`` where ``selected_ids`` maps method name to
        its Top-K sample ids (kept for reproducibility/auditing).

    Raises:
        ValueError: if the manifest has no baseline entry, or the baseline has
            no usable CLIP scores (a zero mean would make deltas undefined).
    """
    rows: list[dict[str, Any]] = []
    selected_ids: dict[str, list[str]] = {}

    for item in manifest:
        method_dir = PROJECT_ROOT / item["method_dir"]
        clip_map = _load_clip_map(method_dir)
        top_ids = _topk_ids(clip_map, topk)
        selected_ids[item["name"]] = top_ids
        per_sample = _load_per_sample(method_dir)
        clips = [clip_map[sid] for sid in top_ids if sid in clip_map]
        lats = [float(per_sample[sid]["latency_s"]) for sid in top_ids if sid in per_sample]
        ssim_vals, ssim_missing = _get_ssim_scores_for_ids(
            method_dir=method_dir,
            top_ids=top_ids,
            ref_dir=ref_dir,
            rendered_cache_root=rendered_cache_root,
            rendered_cache_dir=item.get("rendered_cache_dir", method_dir.name),
        )

        token_nominal = int(item["token_nominal"])
        row = {
            "name": item["name"],
            "token_nominal": token_nominal,
            "compression_ratio": _format_compression_ratio(baseline_tokens, token_nominal),
            # Means default to 0.0 when the subset is empty so the table still renders.
            "clip_topk_mean": round(float(np.mean(clips)) if clips else 0.0, 4),
            "latency_topk_mean_s": round(float(np.mean(lats)) if lats else 0.0, 1),
            "ssim_topk_mean": round(float(np.mean(ssim_vals)) if ssim_vals else 0.0, 3),
            "ssim_n": len(ssim_vals),
            "ssim_missing_n": len(ssim_missing),
            "is_baseline": bool(item.get("is_baseline", False)),
        }
        rows.append(row)

    baseline = next((r for r in rows if r["is_baseline"]), None)
    if baseline is None:
        raise ValueError("Manifest must include one baseline method (`is_baseline: true`).")
    base_clip = baseline["clip_topk_mean"]
    if base_clip == 0:
        # Fix: a zero baseline mean (no CLIP scores found on disk) previously
        # caused a ZeroDivisionError below; fail with an actionable message.
        raise ValueError("Baseline method has no CLIP scores; cannot compute relative deltas.")
    for row in rows:
        if row["is_baseline"]:
            row["vs_baseline_clip_topk"] = "—"
        else:
            row["vs_baseline_clip_topk"] = f"{(row['clip_topk_mean'] / base_clip - 1) * 100:+.1f}%"

    rows.sort(key=lambda r: -r["clip_topk_mean"])
    return rows, selected_ids
216
+
217
+
218
+ def _to_markdown(rows: list[dict[str, Any]], topk: int) -> str:
219
+ lines = [
220
+ f"# Ablation Top-{topk} Table",
221
+ "",
222
+ "| 方法 | 视觉 Token 数 | 压缩比 | CLIP ↑ | vs 基线 | 延迟(s) | SSIM |",
223
+ "|---|---:|---:|---:|---:|---:|---:|",
224
+ ]
225
+ for r in rows:
226
+ lines.append(
227
+ f"| {r['name']} | {r['token_nominal']} | {r['compression_ratio']} | "
228
+ f"{r['clip_topk_mean']:.4f} | {r['vs_baseline_clip_topk']} | "
229
+ f"{r['latency_topk_mean_s']:.1f} | {r['ssim_topk_mean']:.3f} |"
230
+ )
231
+ lines += [
232
+ "",
233
+ f"> 口径:每个方法按其自身 per-sample CLIP 排序取 Top-{topk},并在该子集计算 CLIP/延迟/SSIM均值。",
234
+ "> 提示:请在汇报中保留方法与口径说明,避免选择性呈现导致误导。",
235
+ ]
236
+ return "\n".join(lines) + "\n"
237
+
238
+
239
def parse_args():
    """Parse CLI options for the Top-K ablation report builder."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--topk", type=int, default=30)
    # Plain string options: flag -> default value.
    string_opts = {
        "--manifest": None,
        "--out_root": "results/ablation_study",
        "--ref_dir": "data/ref_screenshots",
        "--rendered_cache_root": "results/rendered_screenshots",
    }
    for flag, default in string_opts.items():
        parser.add_argument(flag, type=str, default=default)
    parser.add_argument("--baseline_tokens", type=int, default=7299)
    parser.add_argument(
        "--write_default_manifest",
        action="store_true",
        help="Only write default manifest to <out_root>/method_manifest.json and exit.",
    )
    return parser.parse_args()
253
+
254
+
255
def main():
    """CLI entry point: build and write the Top-K ablation table artifacts.

    Writes, under <out_root>/top<K>/: the table as JSON and Markdown, plus the
    per-method selected sample ids for reproducibility.
    """
    args = parse_args()
    out_root = PROJECT_ROOT / args.out_root
    out_root.mkdir(parents=True, exist_ok=True)

    default_manifest_path = out_root / "method_manifest.json"
    if args.write_default_manifest:
        # Utility mode: dump the built-in manifest for manual editing, then exit.
        default_manifest_path.write_text(
            json.dumps(_default_manifest(), indent=2, ensure_ascii=False),
            encoding="utf-8",
        )
        print(f"Wrote default manifest: {default_manifest_path}")
        return

    # Manifest resolution order: explicit --manifest, then an existing on-disk
    # default, then the built-in default — which is persisted so the exact
    # method set used for this run stays reproducible.
    if args.manifest:
        manifest = _load_json(PROJECT_ROOT / args.manifest)
    elif default_manifest_path.exists():
        manifest = _load_json(default_manifest_path)
    else:
        manifest = _default_manifest()
        default_manifest_path.write_text(
            json.dumps(manifest, indent=2, ensure_ascii=False),
            encoding="utf-8",
        )

    rows, selected_ids = _build_table_rows(
        manifest=manifest,
        topk=args.topk,
        ref_dir=PROJECT_ROOT / args.ref_dir,
        rendered_cache_root=PROJECT_ROOT / args.rendered_cache_root,
        baseline_tokens=args.baseline_tokens,
    )

    # Emit JSON + Markdown tables and the per-method selected ids (the ids
    # make the Top-K selection auditable later).
    out_dir = out_root / f"top{args.topk}"
    out_dir.mkdir(parents=True, exist_ok=True)
    json_path = out_dir / f"top{args.topk}_table.json"
    md_path = out_dir / f"top{args.topk}_table.md"
    ids_path = out_dir / f"top{args.topk}_selected_ids.json"
    json_path.write_text(json.dumps(rows, indent=2, ensure_ascii=False), encoding="utf-8")
    md_path.write_text(_to_markdown(rows, args.topk), encoding="utf-8")
    ids_path.write_text(json.dumps(selected_ids, indent=2, ensure_ascii=False), encoding="utf-8")

    print(f"Wrote {json_path}")
    print(f"Wrote {md_path}")
    print(f"Wrote {ids_path}")
300
+
301
+
302
+ if __name__ == "__main__":
303
+ main()
scripts/ablation_watch_status.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Quick status: are ablation queues still running? Any completion markers in logs?
set -euo pipefail
# Resolve the project root relative to this script so it works from any CWD.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
LOG1="$ROOT/results/ablation_study/logs/ablation_gpu1_train_eval.nohup.log"
LOG0="$ROOT/results/ablation_study/logs/ablation_gpu0_cross_domain_e18.nohup.log"

echo "=== Processes (ablation) ==="
# `grep -v grep` drops this pipeline's own grep; the trailing `|| echo` keeps
# `set -e` from aborting when no ablation process matches.
ps -ef | grep -E 'run_ablation_gpu1_train_eval|run_ablation_gpu0_cross_domain_e18' | grep -v grep || echo "(none)"

echo
echo "=== GPU1 log tail (last 5 lines) ==="
# 2>/dev/null hides the tail error when the log has not been created yet.
tail -n 5 "$LOG1" 2>/dev/null || echo "missing $LOG1"

echo
echo "=== GPU0 log: completion markers ==="
# `|| true` because a clean log with no markers yet is not an error.
grep -E 'DONE:|completed at|All GPU1|CLIP avg=' "$LOG0" 2>/dev/null | tail -n 8 || true

echo
echo "=== GPU1 log: completion markers ==="
grep -E 'DONE:|completed at|All GPU1|Epoch [0-9]+: avg_loss' "$LOG1" 2>/dev/null | tail -n 8 || true

echo
echo "When GPU0 script finishes, log should contain: Cross-domain (E18) queue completed"
echo "When GPU1 script finishes, log should contain: All GPU1 ablation jobs completed"
scripts/batch_uipress_clip_epochs.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ For each checkpoints/optical_mix_d2c/epoch{N}.pt (and optionally latest.pt), run
3
+ eval_all (UIPress) + step_clip_batch, write under results/clip_per_epoch/<name>/.
4
+
5
+ Usage:
6
+ # After GPU 1 is free (or use --wait_gpu to block until VRAM drops):
7
+ CUDA_VISIBLE_DEVICES=1 PYTHONPATH=. python scripts/batch_uipress_clip_epochs.py \\
8
+ --checkpoint_dir checkpoints/optical_mix_d2c \\
9
+ --tag optical_mix_d2c
10
+
11
+ # Skip epochs that already have clip_scores.json:
12
+ ... --skip_existing
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ import argparse
18
+ import json
19
+ import os
20
+ import re
21
+ import subprocess
22
+ import sys
23
+ import time
24
+ from pathlib import Path
25
+
26
+ PROJECT_ROOT = Path(__file__).resolve().parent.parent
27
+
28
+
29
def _wait_for_gpu(gpu_index: int, free_below_mib: int, poll_s: int) -> None:
    """Block until GPU ``gpu_index`` reports memory.used below ``free_below_mib`` MiB.

    Polls ``nvidia-smi`` every ``poll_s`` seconds and prints the current usage
    so long waits remain visible in the log.

    Args:
        gpu_index: Physical GPU index passed to ``nvidia-smi -i``.
        free_below_mib: Threshold in MiB; returns once usage drops below it.
        poll_s: Seconds to sleep between polls.

    Raises:
        subprocess.CalledProcessError: if ``nvidia-smi`` exits non-zero.
        FileNotFoundError: if ``nvidia-smi`` is not on PATH.
    """
    print(f"Waiting until GPU {gpu_index} memory.used < {free_below_mib} MiB ...", flush=True)
    # Fix: drop the redundant function-local `import subprocess as sp` (the
    # module is already imported at file level) and hoist the loop-invariant
    # query command out of the polling loop.
    query = [
        "nvidia-smi",
        "-i",
        str(gpu_index),
        "--query-gpu=memory.used",
        "--format=csv,noheader,nounits",
    ]
    while True:
        out = subprocess.check_output(query, text=True)
        # Only the first line matters since -i selects a single GPU.
        used = int(out.strip().split("\n")[0].strip())
        print(f" GPU{gpu_index} used={used} MiB", flush=True)
        if used < free_below_mib:
            break
        time.sleep(poll_s)
49
+
50
+
51
def main() -> int:
    """Evaluate every epoch checkpoint (UIPress eval + CLIP scoring) and summarize.

    For each ``epoch{N}.pt`` under --checkpoint_dir (plus ``latest.pt`` when
    --include_latest), runs scripts/eval_all.py followed by
    scripts/step_clip_batch.py, then collects avg CLIP per epoch into
    <output_root>/summary.json and a Markdown CLIP_TABLE.md.

    Returns:
        Process exit code: 0 on success, 1 if --checkpoint_dir is missing.
        Per-epoch subprocess failures are logged and skipped, not fatal.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--checkpoint_dir", type=Path, default=PROJECT_ROOT / "checkpoints" / "optical_mix_d2c")
    ap.add_argument(
        "--output_root",
        type=Path,
        default=None,
        help="Defaults to results/clip_per_epoch/<tag>",
    )
    ap.add_argument("--tag", default="optical_mix_d2c", help="Subfolder under results/clip_per_epoch/")
    ap.add_argument("--max_samples", type=int, default=50)
    ap.add_argument("--target_tokens", type=int, default=256)
    ap.add_argument("--data_dir", default="data")
    ap.add_argument("--ref_dir", default="data/ref_screenshots")
    ap.add_argument("--skip_existing", action="store_true")
    ap.add_argument("--include_latest", action="store_true", help="Also eval latest.pt into folder latest/")
    ap.add_argument("--clip_device", default="cuda", choices=["cuda", "cpu"])
    ap.add_argument("--force_cpu_eval", action="store_true", help="Pass --force_cpu to eval_all (8B on CPU; very slow).")
    ap.add_argument("--wait_gpu", type=int, default=None, help="Poll this GPU index until memory drops.")
    ap.add_argument("--wait_free_mib", type=int, default=12000, help="Start when memory.used < this (MiB).")
    ap.add_argument("--wait_poll_s", type=int, default=60)
    args = ap.parse_args()

    # Default to the HF mirror unless the caller already set an endpoint.
    os.environ.setdefault("HF_ENDPOINT", os.environ.get("HF_ENDPOINT", "https://hf-mirror.com"))

    ckpt_dir = args.checkpoint_dir.resolve()
    if not ckpt_dir.is_dir():
        print(f"checkpoint_dir not found: {ckpt_dir}", file=sys.stderr)
        return 1

    out_root = (args.output_root or (PROJECT_ROOT / "results" / "clip_per_epoch" / args.tag)).resolve()
    out_root.mkdir(parents=True, exist_ok=True)

    # Optionally wait for another job to release GPU memory before starting.
    if args.wait_gpu is not None:
        _wait_for_gpu(args.wait_gpu, args.wait_free_mib, args.wait_poll_s)

    # Build the job queue sorted numerically by epoch number.
    # NOTE(review): re.search assumes every "epoch*.pt" filename carries
    # digits (e.g. epoch12.pt); a non-matching name would raise
    # AttributeError on `.group` — confirm the checkpoint naming convention.
    jobs: list[tuple[str, Path]] = []
    for p in sorted(ckpt_dir.glob("epoch*.pt"), key=lambda x: int(re.search(r"epoch(\d+)", x.name).group(1))):
        n = int(re.search(r"epoch(\d+)", p.name).group(1))
        jobs.append((f"epoch_{n}", p))
    if args.include_latest and (ckpt_dir / "latest.pt").exists():
        jobs.append(("latest", ckpt_dir / "latest.pt"))

    manifest: list[dict] = []
    run_name = f"uipress_{args.target_tokens}"

    for folder_name, ckpt_path in jobs:
        eval_out = out_root / folder_name
        method_dir = eval_out / run_name
        clip_path = method_dir / "clip_scores.json"
        if args.skip_existing and clip_path.is_file():
            # Resume mode: reuse the existing CLIP result for the manifest.
            print(f"Skip (exists): {clip_path}", flush=True)
            data = json.loads(clip_path.read_text(encoding="utf-8"))
            # NOTE(review): `ep` is computed but never used below.
            ep = folder_name.replace("epoch_", "") if folder_name.startswith("epoch_") else folder_name
            manifest.append(
                {
                    "folder": folder_name,
                    "checkpoint": str(ckpt_path.relative_to(PROJECT_ROOT)),
                    "avg_clip": data.get("avg_clip"),
                    "n": data.get("n"),
                    "clip_path": str(clip_path.relative_to(PROJECT_ROOT)),
                }
            )
            continue

        # Step 1: run the UIPress evaluation for this checkpoint.
        eval_out.mkdir(parents=True, exist_ok=True)
        cmd_eval = [
            sys.executable,
            str(PROJECT_ROOT / "scripts" / "eval_all.py"),
            "--method",
            "uipress",
            "--checkpoint",
            str(ckpt_path),
            "--max_samples",
            str(args.max_samples),
            "--data_dir",
            args.data_dir,
            "--output_dir",
            str(eval_out),
            "--target_tokens",
            str(args.target_tokens),
        ]
        if args.force_cpu_eval:
            cmd_eval.append("--force_cpu")

        print(f"\n=== eval_all: {folder_name} <- {ckpt_path.name} ===", flush=True)
        r1 = subprocess.run(cmd_eval, cwd=str(PROJECT_ROOT))
        if r1.returncode != 0:
            # Log and move on; one bad epoch should not kill the whole batch.
            print(f"[error] eval_all failed rc={r1.returncode} for {folder_name}", flush=True)
            continue

        # Step 2: score the eval outputs with CLIP.
        cmd_clip = [
            sys.executable,
            str(PROJECT_ROOT / "scripts" / "step_clip_batch.py"),
            "--method_dir",
            str(method_dir),
            "--ref_dir",
            str(PROJECT_ROOT / args.ref_dir),
            "--clip_device",
            args.clip_device,
        ]
        print(f"=== CLIP: {method_dir} ===", flush=True)
        r2 = subprocess.run(cmd_clip, cwd=str(PROJECT_ROOT))
        if r2.returncode != 0:
            print(f"[error] step_clip_batch failed rc={r2.returncode} for {folder_name}", flush=True)
            continue

        if clip_path.is_file():
            data = json.loads(clip_path.read_text(encoding="utf-8"))
            manifest.append(
                {
                    "folder": folder_name,
                    "checkpoint": str(ckpt_path.relative_to(PROJECT_ROOT)),
                    "avg_clip": data.get("avg_clip"),
                    "n": data.get("n"),
                    "clip_path": str(clip_path.relative_to(PROJECT_ROOT)),
                }
            )

    def _sort_key(row: dict) -> tuple:
        # Order: unknown folders first, then epoch_N numerically, then "latest".
        f = row["folder"]
        if f == "latest":
            return (2, 10**9)
        m = re.match(r"epoch_(\d+)", f)
        return (1, int(m.group(1))) if m else (0, 0)

    manifest.sort(key=_sort_key)
    summary_path = out_root / "summary.json"
    summary_path.write_text(json.dumps(manifest, indent=2, ensure_ascii=False), encoding="utf-8")
    print(f"\nWrote {summary_path}", flush=True)

    # Also render a small Markdown table for quick reading.
    lines = ["| 文件夹 | checkpoint | avg CLIP | n |", "|---|---|---|---|"]
    for row in manifest:
        lines.append(
            f"| {row['folder']} | `{row['checkpoint']}` | {row.get('avg_clip')} | {row.get('n')} |"
        )
    (out_root / "CLIP_TABLE.md").write_text("\n".join(lines) + "\n", encoding="utf-8")
    print(f"Wrote {out_root / 'CLIP_TABLE.md'}", flush=True)
    return 0
190
+
191
+
192
+ if __name__ == "__main__":
193
+ raise SystemExit(main())
scripts/run_ablation_gpu0_cross_domain_e18.sh ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+
4
+ # GPU0 queue: cross-domain evaluation only, using E18 checkpoint.
5
+ # No E19/E20 operations.
6
+
7
+ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
8
+ cd "$ROOT_DIR"
9
+
10
+ GPU_ID="${GPU_ID:-0}"
11
+ E18_CKPT="${E18_CKPT:-checkpoints/optical_mix_d2c/epoch18.pt}"
12
+
13
+ ABL_ROOT="results/ablation_study"
14
+ RUN_DIR="$ABL_ROOT/runs"
15
+ LOG_DIR="$ABL_ROOT/logs"
16
+ TMP_WEBSIGHT_DIR="$ABL_ROOT/tmp_websight_eval"
17
+
18
+ mkdir -p "$RUN_DIR" "$LOG_DIR" "$TMP_WEBSIGHT_DIR"
19
+ if [[ ! -e "$TMP_WEBSIGHT_DIR/ref_screenshots" ]]; then
20
+ ln -s "$(realpath data/ref_screenshots_websight)" "$TMP_WEBSIGHT_DIR/ref_screenshots"
21
+ fi
22
+
23
+ if [[ ! -f "$E18_CKPT" ]]; then
24
+ echo "Missing checkpoint: $E18_CKPT" >&2
25
+ exit 1
26
+ fi
27
+
28
+ export PYTHONPATH=.
29
+
30
+ run() {
31
+ local name="$1"
32
+ shift
33
+ echo
34
+ echo "============================================================"
35
+ echo "[$(date '+%F %T')] START: $name"
36
+ echo "CMD: $*"
37
+ echo "============================================================"
38
+ "$@"
39
+ echo "[$(date '+%F %T')] DONE: $name"
40
+ }
41
+
42
+ run "cross_domain_qwen3_full" \
43
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
44
+ --method baseline \
45
+ --max_samples 50 \
46
+ --data_dir "$TMP_WEBSIGHT_DIR" \
47
+ --output_dir "$RUN_DIR/cross_domain_e18"
48
+
49
+ run "cross_domain_uipress_e18" \
50
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
51
+ --method uipress \
52
+ --checkpoint "$E18_CKPT" \
53
+ --target_tokens 256 \
54
+ --max_samples 50 \
55
+ --data_dir "$TMP_WEBSIGHT_DIR" \
56
+ --output_dir "$RUN_DIR/cross_domain_e18"
57
+
58
+ run "cross_domain_clip_qwen3_full" \
59
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
60
+ --method_dir "$RUN_DIR/cross_domain_e18/qwen3_full" \
61
+ --ref_dir data/ref_screenshots_websight \
62
+ --clip_device cuda
63
+
64
+ run "cross_domain_clip_uipress_e18" \
65
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
66
+ --method_dir "$RUN_DIR/cross_domain_e18/uipress_256" \
67
+ --ref_dir data/ref_screenshots_websight \
68
+ --clip_device cuda
69
+
70
+ echo
71
+ echo "Cross-domain (E18) queue completed at $(date '+%F %T')."
scripts/run_ablation_gpu1_train_eval.sh ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+
4
+ # GPU1 queue: No-LoRA / token sensitivity / LR scan
5
+ # All outputs are saved under results/ablation_study/{checkpoints,runs}
6
+
7
+ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
8
+ cd "$ROOT_DIR"
9
+
10
+ GPU_ID="${GPU_ID:-1}"
11
+ EPOCHS_ABL="${EPOCHS_ABL:-5}"
12
+ MAX_SAMPLES="${MAX_SAMPLES:-10000}"
13
+
14
+ ABL_ROOT="results/ablation_study"
15
+ CKPT_DIR="$ABL_ROOT/checkpoints"
16
+ RUN_DIR="$ABL_ROOT/runs"
17
+ LOG_DIR="$ABL_ROOT/logs"
18
+ mkdir -p "$CKPT_DIR" "$RUN_DIR" "$LOG_DIR"
19
+
20
+ export PYTHONPATH=.
21
+
22
+ run() {
23
+ local name="$1"
24
+ shift
25
+ echo
26
+ echo "============================================================"
27
+ echo "[$(date '+%F %T')] START: $name"
28
+ echo "CMD: $*"
29
+ echo "============================================================"
30
+ "$@"
31
+ echo "[$(date '+%F %T')] DONE: $name"
32
+ }
33
+
34
+ # 1) Remove LoRA
35
+ run "train_no_lora_256" \
36
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
37
+ --output_dir "$CKPT_DIR/no_lora_256" \
38
+ --disable_lora \
39
+ --target_tokens 256 \
40
+ --epochs "$EPOCHS_ABL" \
41
+ --max_samples "$MAX_SAMPLES" \
42
+ --mix_root data \
43
+ --mix_images_subdir ref_screenshots \
44
+ --mix_gt_subdir gt_html \
45
+ --max_html_tokens 8192
46
+
47
+ run "eval_no_lora_256" \
48
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
49
+ --method uipress \
50
+ --checkpoint "$CKPT_DIR/no_lora_256/latest.pt" \
51
+ --target_tokens 256 \
52
+ --max_samples 50 \
53
+ --data_dir data \
54
+ --output_dir "$RUN_DIR/no_lora_256"
55
+
56
+ run "clip_no_lora_256" \
57
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
58
+ --method_dir "$RUN_DIR/no_lora_256/uipress_256" \
59
+ --ref_dir data/ref_screenshots \
60
+ --clip_device cuda
61
+
62
+ # 2) Token sensitivity: 64 / 128 / 512
63
+ for tok in 64 128 512; do
64
+ run "train_token_${tok}" \
65
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
66
+ --output_dir "$CKPT_DIR/token_${tok}" \
67
+ --target_tokens "$tok" \
68
+ --epochs "$EPOCHS_ABL" \
69
+ --max_samples "$MAX_SAMPLES" \
70
+ --mix_root data \
71
+ --mix_images_subdir ref_screenshots \
72
+ --mix_gt_subdir gt_html \
73
+ --max_html_tokens 8192
74
+
75
+ run "eval_token_${tok}" \
76
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
77
+ --method uipress \
78
+ --checkpoint "$CKPT_DIR/token_${tok}/latest.pt" \
79
+ --target_tokens "$tok" \
80
+ --max_samples 50 \
81
+ --data_dir data \
82
+ --output_dir "$RUN_DIR/token_${tok}"
83
+
84
+ run "clip_token_${tok}" \
85
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
86
+ --method_dir "$RUN_DIR/token_${tok}/uipress_${tok}" \
87
+ --ref_dir data/ref_screenshots \
88
+ --clip_device cuda
89
+ done
90
+
91
+ # 3) Learning-rate scan (compressor LR)
92
+ for lr in 1e-4 2e-4 4e-4; do
93
+ safe_lr="${lr//./p}"
94
+ run "train_lr_${safe_lr}" \
95
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
96
+ --output_dir "$CKPT_DIR/lr_${safe_lr}" \
97
+ --target_tokens 256 \
98
+ --lr_compressor "$lr" \
99
+ --epochs "$EPOCHS_ABL" \
100
+ --max_samples "$MAX_SAMPLES" \
101
+ --mix_root data \
102
+ --mix_images_subdir ref_screenshots \
103
+ --mix_gt_subdir gt_html \
104
+ --max_html_tokens 8192
105
+
106
+ run "eval_lr_${safe_lr}" \
107
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
108
+ --method uipress \
109
+ --checkpoint "$CKPT_DIR/lr_${safe_lr}/latest.pt" \
110
+ --target_tokens 256 \
111
+ --max_samples 50 \
112
+ --data_dir data \
113
+ --output_dir "$RUN_DIR/lr_${safe_lr}"
114
+
115
+ run "clip_lr_${safe_lr}" \
116
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
117
+ --method_dir "$RUN_DIR/lr_${safe_lr}/uipress_256" \
118
+ --ref_dir data/ref_screenshots \
119
+ --clip_device cuda
120
+ done
121
+
122
+ echo
123
+ echo "All GPU1 ablation jobs completed at $(date '+%F %T')."
scripts/run_ablation_queue_gpu1.sh ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+
4
+ # One-GPU ablation queue (default GPU1).
5
+ # This script runs experiments sequentially and stores all artifacts under:
6
+ # results/ablation_study/
7
+
8
+ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
9
+ cd "$ROOT_DIR"
10
+
11
+ GPU_ID="${GPU_ID:-1}"
12
+ EPOCHS_ABL="${EPOCHS_ABL:-5}"
13
+ MAX_SAMPLES="${MAX_SAMPLES:-10000}"
14
+ TOPK="${TOPK:-30}"
15
+
16
+ ABL_ROOT="results/ablation_study"
17
+ CKPT_DIR="$ABL_ROOT/checkpoints"
18
+ RUN_DIR="$ABL_ROOT/runs"
19
+ LOG_DIR="$ABL_ROOT/logs"
20
+
21
+ mkdir -p "$CKPT_DIR" "$RUN_DIR" "$LOG_DIR"
22
+
23
+ run() {
24
+ local name="$1"
25
+ shift
26
+ echo
27
+ echo "============================================================"
28
+ echo "[$(date '+%F %T')] START: $name"
29
+ echo "CMD: $*"
30
+ echo "============================================================"
31
+ "$@"
32
+ echo "[$(date '+%F %T')] DONE: $name"
33
+ }
34
+
35
+ export PYTHONPATH=.
36
+
37
+ # 0) Figure-2 continuation: resume mix_d2c to epoch20 (if possible)
38
+ if [[ -f checkpoints/optical_mix_d2c/latest.pt ]]; then
39
+ run "figure2_resume_to_e20" \
40
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
41
+ --output_dir checkpoints/optical_mix_d2c \
42
+ --resume checkpoints/optical_mix_d2c/latest.pt \
43
+ --epochs 20 \
44
+ --max_samples "$MAX_SAMPLES" \
45
+ --mix_root data \
46
+ --mix_images_subdir ref_screenshots \
47
+ --mix_gt_subdir gt_html \
48
+ --max_html_tokens 8192 \
49
+ --eval_after_epoch \
50
+ --eval_output_dir results/clip_per_epoch/optical_mix_d2c \
51
+ --eval_clip_device cuda
52
+ fi
53
+
54
+ # 1) Remove LoRA ablation
55
+ run "train_no_lora_256" \
56
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
57
+ --output_dir "$CKPT_DIR/no_lora_256" \
58
+ --disable_lora \
59
+ --target_tokens 256 \
60
+ --epochs "$EPOCHS_ABL" \
61
+ --max_samples "$MAX_SAMPLES" \
62
+ --mix_root data \
63
+ --mix_images_subdir ref_screenshots \
64
+ --mix_gt_subdir gt_html \
65
+ --max_html_tokens 8192
66
+
67
+ run "eval_no_lora_256" \
68
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
69
+ --method uipress \
70
+ --checkpoint "$CKPT_DIR/no_lora_256/latest.pt" \
71
+ --target_tokens 256 \
72
+ --max_samples 50 \
73
+ --data_dir data \
74
+ --output_dir "$RUN_DIR/no_lora_256"
75
+
76
+ run "clip_no_lora_256" \
77
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
78
+ --method_dir "$RUN_DIR/no_lora_256/uipress_256" \
79
+ --ref_dir data/ref_screenshots \
80
+ --clip_device cuda
81
+
82
+ # 2) Token sensitivity ablation
83
+ for tok in 64 128 512; do
84
+ run "train_token_${tok}" \
85
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
86
+ --output_dir "$CKPT_DIR/token_${tok}" \
87
+ --target_tokens "$tok" \
88
+ --epochs "$EPOCHS_ABL" \
89
+ --max_samples "$MAX_SAMPLES" \
90
+ --mix_root data \
91
+ --mix_images_subdir ref_screenshots \
92
+ --mix_gt_subdir gt_html \
93
+ --max_html_tokens 8192
94
+
95
+ run "eval_token_${tok}" \
96
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
97
+ --method uipress \
98
+ --checkpoint "$CKPT_DIR/token_${tok}/latest.pt" \
99
+ --target_tokens "$tok" \
100
+ --max_samples 50 \
101
+ --data_dir data \
102
+ --output_dir "$RUN_DIR/token_${tok}"
103
+
104
+ run "clip_token_${tok}" \
105
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
106
+ --method_dir "$RUN_DIR/token_${tok}/uipress_${tok}" \
107
+ --ref_dir data/ref_screenshots \
108
+ --clip_device cuda
109
+ done
110
+
111
+ # 3) Learning-rate scan (compressor lr)
112
+ for lr in 1e-4 2e-4 4e-4; do
113
+ safe_lr="${lr//./p}"
114
+ run "train_lr_${safe_lr}" \
115
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/train_compressor.py \
116
+ --output_dir "$CKPT_DIR/lr_${safe_lr}" \
117
+ --target_tokens 256 \
118
+ --lr_compressor "$lr" \
119
+ --epochs "$EPOCHS_ABL" \
120
+ --max_samples "$MAX_SAMPLES" \
121
+ --mix_root data \
122
+ --mix_images_subdir ref_screenshots \
123
+ --mix_gt_subdir gt_html \
124
+ --max_html_tokens 8192
125
+
126
+ run "eval_lr_${safe_lr}" \
127
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
128
+ --method uipress \
129
+ --checkpoint "$CKPT_DIR/lr_${safe_lr}/latest.pt" \
130
+ --target_tokens 256 \
131
+ --max_samples 50 \
132
+ --data_dir data \
133
+ --output_dir "$RUN_DIR/lr_${safe_lr}"
134
+
135
+ run "clip_lr_${safe_lr}" \
136
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
137
+ --method_dir "$RUN_DIR/lr_${safe_lr}/uipress_256" \
138
+ --ref_dir data/ref_screenshots \
139
+ --clip_device cuda
140
+ done
141
+
142
+ # 4) Cross-domain validation (WebSight screenshots as eval set)
143
+ TMP_WEBSIGHT_DIR="$ABL_ROOT/tmp_websight_eval"
144
+ mkdir -p "$TMP_WEBSIGHT_DIR"
145
+ if [[ ! -e "$TMP_WEBSIGHT_DIR/ref_screenshots" ]]; then
146
+ ln -s "$(realpath data/ref_screenshots_websight)" "$TMP_WEBSIGHT_DIR/ref_screenshots"
147
+ fi
148
+
149
+ run "cross_domain_qwen3_full" \
150
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
151
+ --method baseline \
152
+ --max_samples 50 \
153
+ --data_dir "$TMP_WEBSIGHT_DIR" \
154
+ --output_dir "$RUN_DIR/cross_domain"
155
+
156
+ run "cross_domain_uipress_latest" \
157
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/eval_all.py \
158
+ --method uipress \
159
+ --checkpoint checkpoints/optical_mix_d2c/latest.pt \
160
+ --target_tokens 256 \
161
+ --max_samples 50 \
162
+ --data_dir "$TMP_WEBSIGHT_DIR" \
163
+ --output_dir "$RUN_DIR/cross_domain"
164
+
165
+ run "cross_domain_clip_qwen3_full" \
166
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
167
+ --method_dir "$RUN_DIR/cross_domain/qwen3_full" \
168
+ --ref_dir data/ref_screenshots_websight \
169
+ --clip_device cuda
170
+
171
+ run "cross_domain_clip_uipress_256" \
172
+ env CUDA_VISIBLE_DEVICES="$GPU_ID" python scripts/step_clip_batch.py \
173
+ --method_dir "$RUN_DIR/cross_domain/uipress_256" \
174
+ --ref_dir data/ref_screenshots_websight \
175
+ --clip_device cuda
176
+
177
+ # 5) Build Top-K report from current available methods
178
+ run "build_topk_report" \
179
+ python scripts/ablation_topk_report.py \
180
+ --topk "$TOPK" \
181
+ --out_root "$ABL_ROOT"
182
+
183
+ echo
184
+ echo "All queue steps completed at $(date '+%F %T')."
scripts/run_ablation_study.sh ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+
4
+ # Usage:
5
+ # bash scripts/run_ablation_study.sh
6
+ #
7
+ # This script prepares a dedicated ablation workspace and prints/executes
8
+ # reproducible commands for the Top-30-focused ablation study.
9
+
10
+ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
11
+ cd "$ROOT_DIR"
12
+
13
+ ABL_ROOT="results/ablation_study"
14
+ LOG_DIR="$ABL_ROOT/logs"
15
+ CKPT_DIR="$ABL_ROOT/checkpoints"
16
+ RUN_DIR="$ABL_ROOT/runs"
17
+
18
+ mkdir -p "$LOG_DIR" "$CKPT_DIR" "$RUN_DIR"
19
+
20
+ echo "Ablation workspace:"
21
+ echo " $ABL_ROOT"
22
+ echo
23
+
24
+ # -----------------------
25
+ # 1) No-LoRA ablation
26
+ # -----------------------
27
+ echo "[1/5] No-LoRA ablation"
28
+ echo "Train compressor-only checkpoint into: $CKPT_DIR/no_lora"
29
+ cat <<'CMD'
30
+ # Example:
31
+ # CUDA_VISIBLE_DEVICES=1 PYTHONPATH=. python scripts/train_compressor.py \
32
+ # --output_dir results/ablation_study/checkpoints/no_lora \
33
+ # --disable_lora --target_tokens 256 \
34
+ # --epochs 5 --max_samples 10000 \
35
+ # --mix_root data --mix_images_subdir ref_screenshots --mix_gt_subdir gt_html \
36
+ # --max_html_tokens 8192
37
+ CMD
38
+ echo
39
+
40
+ # -----------------------
41
+ # 2) Token sensitivity
42
+ # -----------------------
43
+ echo "[2/5] Token sensitivity (64/128/512)"
44
+ cat <<'CMD'
45
+ # For each token in {64,128,512}, train and eval:
46
+ # CUDA_VISIBLE_DEVICES=1 PYTHONPATH=. python scripts/train_compressor.py \
47
+ # --output_dir results/ablation_study/checkpoints/token_64 \
48
+ # --target_tokens 64 --epochs 5 --max_samples 10000 \
49
+ # --mix_root data --mix_images_subdir ref_screenshots --mix_gt_subdir gt_html \
50
+ # --max_html_tokens 8192
51
+ #
52
+ # CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python scripts/eval_all.py \
53
+ # --method uipress --checkpoint results/ablation_study/checkpoints/token_64/latest.pt \
54
+ # --target_tokens 64 --max_samples 50 --output_dir results/ablation_study/runs/token_64
55
+ #
56
+ # CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python scripts/step_clip_batch.py \
57
+ # --method_dir results/ablation_study/runs/token_64/uipress_64 \
58
+ # --ref_dir data/ref_screenshots
59
+ CMD
60
+ echo
61
+
62
+ # -----------------------
63
+ # 3) Cross-domain check
64
+ # -----------------------
65
+ echo "[3/5] Cross-domain (WebSight eval split)"
66
+ cat <<'CMD'
67
+ # Run eval with the same methods on WebSight-side eval set directory:
68
+ # CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python scripts/eval_all.py \
69
+ # --method uipress --checkpoint checkpoints/optical_mix_d2c/latest.pt \
70
+ # --target_tokens 256 --data_dir data --max_samples 50 \
71
+ # --output_dir results/ablation_study/runs/websight_eval
72
+ CMD
73
+ echo
74
+
75
+ # -----------------------
76
+ # 4) LR scan
77
+ # -----------------------
78
+ echo "[4/5] Learning-rate scan"
79
+ cat <<'CMD'
80
+ # Suggested compressor LR scan:
81
+ # 1e-4 / 2e-4 / 4e-4 with fixed other settings.
82
+ # Save each run under:
83
+ # results/ablation_study/checkpoints/lr_1e-4
84
+ # results/ablation_study/checkpoints/lr_2e-4
85
+ # results/ablation_study/checkpoints/lr_4e-4
86
+ CMD
87
+ echo
88
+
89
+ # -----------------------
90
+ # 5) Page-type analysis
91
+ # -----------------------
92
+ echo "[5/5] Page-type analysis"
93
+ cat <<'CMD'
94
+ # Put page-type id mapping as:
95
+ # results/ablation_study/page_types.json
96
+ # Then post-process top-k IDs by category from:
97
+ # results/ablation_study/top30/top30_selected_ids.json
98
+ CMD
99
+ echo
100
+
101
+ # Build Top-30 report from available runs (safe to run repeatedly).
102
+ PYTHONPATH=. python scripts/ablation_topk_report.py --topk 30 --out_root "$ABL_ROOT"
103
+
104
+ echo
105
+ echo "Done. Generated:"
106
+ echo " $ABL_ROOT/top30/top30_table.json"
107
+ echo " $ABL_ROOT/top30/top30_table.md"
108
+ echo " $ABL_ROOT/top30/top30_selected_ids.json"
scripts/train_compressor.py CHANGED
@@ -93,7 +93,7 @@ def _run_subprocess_eval_and_clip(args, out_dir: Path, epoch: int) -> Path | Non
93
  eval_root = (
94
  Path(args.eval_output_dir).resolve()
95
  if args.eval_output_dir
96
- else (PROJECT_ROOT / "results" / "train_eval" / out_dir.name)
97
  )
98
  eval_epoch_dir = eval_root / f"epoch_{epoch}"
99
  eval_epoch_dir.mkdir(parents=True, exist_ok=True)
@@ -130,6 +130,8 @@ def _run_subprocess_eval_and_clip(args, out_dir: Path, epoch: int) -> Path | Non
130
  str(method_dir),
131
  "--ref_dir",
132
  args.eval_ref_dir,
 
 
133
  ]
134
  r2 = subprocess.run(cmd_clip, cwd=str(PROJECT_ROOT))
135
  if r2.returncode != 0:
@@ -157,20 +159,15 @@ def _torch_load_compat(path, map_location):
157
 
158
 
159
  def _rebuild_optimizer_scheduler(args, model, device, total_steps, ckpt_path: Path):
 
160
  trainable_params = (
161
  list(model.compressor.parameters())
162
- + [p for p in model.lora_modules.parameters() if p.requires_grad]
163
- )
164
- optimizer = torch.optim.AdamW(
165
- [
166
- {"params": list(model.compressor.parameters()), "lr": args.lr_compressor},
167
- {
168
- "params": [p for p in model.lora_modules.parameters() if p.requires_grad],
169
- "lr": args.lr_lora,
170
- },
171
- ],
172
- weight_decay=0.01,
173
  )
 
 
 
 
174
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
175
  optimizer, T_max=max(total_steps, 1), eta_min=1e-6,
176
  )
@@ -397,6 +394,7 @@ class CompressedQwen3VL(nn.Module):
397
  lora_r=16,
398
  lora_alpha=32,
399
  max_html_tokens=2048,
 
400
  ):
401
  super().__init__()
402
  from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
@@ -423,15 +421,17 @@ class CompressedQwen3VL(nn.Module):
423
  # Monkey-patch get_image_features so it auto-compresses visual tokens.
424
  self._patch_vision(compressor=self.compressor, target_tokens=target_tokens)
425
 
426
- # Add LoRA to LLM decoder
427
- self._add_lora(lora_r, lora_alpha)
428
 
429
  self.target_tokens = target_tokens
430
  self.max_html_tokens = max_html_tokens
431
 
432
- def _add_lora(self, r, alpha):
433
  """Manually inject LoRA into q_proj, v_proj of each LLM layer."""
434
  self.lora_modules = nn.ModuleDict()
 
 
435
  lm = self.base_model.model
436
  if hasattr(lm, "language_model"):
437
  layers = lm.language_model.layers
@@ -703,7 +703,10 @@ def train(args):
703
  if is_main:
704
  print(f"=== UIPress Optical Compressor Training ===", flush=True)
705
  print(f" GPUs: {world_size}, target_tokens: {args.target_tokens}", flush=True)
706
- print(f" LoRA r={args.lora_r}, alpha={args.lora_alpha}", flush=True)
 
 
 
707
  print(f" Batch: {args.batch_size} x {args.grad_accum} x {world_size} "
708
  f"= {args.batch_size * args.grad_accum * world_size}", flush=True)
709
  if args.eval_after_epoch and is_distributed:
@@ -721,17 +724,18 @@ def train(args):
721
  lora_r=args.lora_r,
722
  lora_alpha=args.lora_alpha,
723
  max_html_tokens=args.max_html_tokens,
 
724
  )
725
  model.base_model.to(device)
726
  model.compressor.to(device)
727
  log_all(f"Model loaded in {time.time() - t0:.1f}s")
728
 
 
 
729
  # Count trainable params
730
  if is_main:
731
  comp_params = model.compressor.count_parameters()
732
- lora_params = sum(
733
- p.numel() for p in model.lora_modules.parameters() if p.requires_grad
734
- )
735
  print(f" Compressor params: {comp_params['trainable']:,}", flush=True)
736
  print(f" LoRA params: {lora_params:,}", flush=True)
737
  print(f" Total trainable: {comp_params['trainable'] + lora_params:,}", flush=True)
@@ -739,7 +743,7 @@ def train(args):
739
  # Collect all trainable parameters
740
  trainable_params = (
741
  list(model.compressor.parameters())
742
- + [p for p in model.lora_modules.parameters() if p.requires_grad]
743
  )
744
 
745
  # No DDP wrapper — LoRA is injected via setattr into base_model,
@@ -781,11 +785,10 @@ def train(args):
781
  )
782
 
783
  # Optimizer — use trainable_params collected before DDP
784
- optimizer = torch.optim.AdamW([
785
- {"params": list(model.compressor.parameters()), "lr": args.lr_compressor},
786
- {"params": [p for p in model.lora_modules.parameters() if p.requires_grad],
787
- "lr": args.lr_lora},
788
- ], weight_decay=0.01)
789
 
790
  total_steps = len(loader) * args.epochs // args.grad_accum
791
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
@@ -801,7 +804,7 @@ def train(args):
801
  for k, v in comp_state.items():
802
  new_state[k.replace("module.", "")] = v
803
  model.compressor.load_state_dict(new_state)
804
- if "lora" in ckpt:
805
  model.lora_modules.load_state_dict(ckpt["lora"])
806
  if "epoch" in ckpt:
807
  start_epoch = ckpt["epoch"] + 1
@@ -950,6 +953,11 @@ def parse_args():
950
  p.add_argument("--target_tokens", type=int, default=256)
951
  p.add_argument("--lora_r", type=int, default=16)
952
  p.add_argument("--lora_alpha", type=int, default=32)
 
 
 
 
 
953
  p.add_argument("--resume", type=str, default=None)
954
  p.add_argument(
955
  "--mix_root",
@@ -977,10 +985,16 @@ def parse_args():
977
  p.add_argument(
978
  "--eval_output_dir",
979
  default=None,
980
- help="Defaults to results/train_eval/<output_dir name>.",
981
  )
982
  p.add_argument("--eval_data_dir", default="data")
983
  p.add_argument("--eval_ref_dir", default="data/ref_screenshots")
 
 
 
 
 
 
984
  return p.parse_args()
985
 
986
 
 
93
  eval_root = (
94
  Path(args.eval_output_dir).resolve()
95
  if args.eval_output_dir
96
+ else (PROJECT_ROOT / "results" / "clip_per_epoch" / out_dir.name)
97
  )
98
  eval_epoch_dir = eval_root / f"epoch_{epoch}"
99
  eval_epoch_dir.mkdir(parents=True, exist_ok=True)
 
130
  str(method_dir),
131
  "--ref_dir",
132
  args.eval_ref_dir,
133
+ "--clip_device",
134
+ args.eval_clip_device,
135
  ]
136
  r2 = subprocess.run(cmd_clip, cwd=str(PROJECT_ROOT))
137
  if r2.returncode != 0:
 
159
 
160
 
161
  def _rebuild_optimizer_scheduler(args, model, device, total_steps, ckpt_path: Path):
162
+ lora_trainable = [p for p in model.lora_modules.parameters() if p.requires_grad]
163
  trainable_params = (
164
  list(model.compressor.parameters())
165
+ + lora_trainable
 
 
 
 
 
 
 
 
 
 
166
  )
167
+ optim_groups = [{"params": list(model.compressor.parameters()), "lr": args.lr_compressor}]
168
+ if lora_trainable:
169
+ optim_groups.append({"params": lora_trainable, "lr": args.lr_lora})
170
+ optimizer = torch.optim.AdamW(optim_groups, weight_decay=0.01)
171
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
172
  optimizer, T_max=max(total_steps, 1), eta_min=1e-6,
173
  )
 
394
  lora_r=16,
395
  lora_alpha=32,
396
  max_html_tokens=2048,
397
+ enable_lora=True,
398
  ):
399
  super().__init__()
400
  from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
 
421
  # Monkey-patch get_image_features so it auto-compresses visual tokens.
422
  self._patch_vision(compressor=self.compressor, target_tokens=target_tokens)
423
 
424
+ # Add LoRA to LLM decoder (can be disabled for ablation).
425
+ self._add_lora(lora_r, lora_alpha, enable_lora=enable_lora)
426
 
427
  self.target_tokens = target_tokens
428
  self.max_html_tokens = max_html_tokens
429
 
430
+ def _add_lora(self, r, alpha, enable_lora=True):
431
  """Manually inject LoRA into q_proj, v_proj of each LLM layer."""
432
  self.lora_modules = nn.ModuleDict()
433
+ if (not enable_lora) or r <= 0:
434
+ return
435
  lm = self.base_model.model
436
  if hasattr(lm, "language_model"):
437
  layers = lm.language_model.layers
 
703
  if is_main:
704
  print(f"=== UIPress Optical Compressor Training ===", flush=True)
705
  print(f" GPUs: {world_size}, target_tokens: {args.target_tokens}", flush=True)
706
+ if args.disable_lora:
707
+ print(" LoRA: disabled", flush=True)
708
+ else:
709
+ print(f" LoRA r={args.lora_r}, alpha={args.lora_alpha}", flush=True)
710
  print(f" Batch: {args.batch_size} x {args.grad_accum} x {world_size} "
711
  f"= {args.batch_size * args.grad_accum * world_size}", flush=True)
712
  if args.eval_after_epoch and is_distributed:
 
724
  lora_r=args.lora_r,
725
  lora_alpha=args.lora_alpha,
726
  max_html_tokens=args.max_html_tokens,
727
+ enable_lora=not args.disable_lora,
728
  )
729
  model.base_model.to(device)
730
  model.compressor.to(device)
731
  log_all(f"Model loaded in {time.time() - t0:.1f}s")
732
 
733
+ lora_trainable = [p for p in model.lora_modules.parameters() if p.requires_grad]
734
+
735
  # Count trainable params
736
  if is_main:
737
  comp_params = model.compressor.count_parameters()
738
+ lora_params = sum(p.numel() for p in lora_trainable)
 
 
739
  print(f" Compressor params: {comp_params['trainable']:,}", flush=True)
740
  print(f" LoRA params: {lora_params:,}", flush=True)
741
  print(f" Total trainable: {comp_params['trainable'] + lora_params:,}", flush=True)
 
743
  # Collect all trainable parameters
744
  trainable_params = (
745
  list(model.compressor.parameters())
746
+ + lora_trainable
747
  )
748
 
749
  # No DDP wrapper — LoRA is injected via setattr into base_model,
 
785
  )
786
 
787
  # Optimizer — use trainable_params collected before DDP
788
+ optim_groups = [{"params": list(model.compressor.parameters()), "lr": args.lr_compressor}]
789
+ if lora_trainable:
790
+ optim_groups.append({"params": lora_trainable, "lr": args.lr_lora})
791
+ optimizer = torch.optim.AdamW(optim_groups, weight_decay=0.01)
 
792
 
793
  total_steps = len(loader) * args.epochs // args.grad_accum
794
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
 
804
  for k, v in comp_state.items():
805
  new_state[k.replace("module.", "")] = v
806
  model.compressor.load_state_dict(new_state)
807
+ if "lora" in ckpt and len(model.lora_modules) > 0:
808
  model.lora_modules.load_state_dict(ckpt["lora"])
809
  if "epoch" in ckpt:
810
  start_epoch = ckpt["epoch"] + 1
 
953
  p.add_argument("--target_tokens", type=int, default=256)
954
  p.add_argument("--lora_r", type=int, default=16)
955
  p.add_argument("--lora_alpha", type=int, default=32)
956
+ p.add_argument(
957
+ "--disable_lora",
958
+ action="store_true",
959
+ help="Disable LoRA adapters; train compressor-only for ablation.",
960
+ )
961
  p.add_argument("--resume", type=str, default=None)
962
  p.add_argument(
963
  "--mix_root",
 
985
  p.add_argument(
986
  "--eval_output_dir",
987
  default=None,
988
+ help="Defaults to results/clip_per_epoch/<output_dir name>.",
989
  )
990
  p.add_argument("--eval_data_dir", default="data")
991
  p.add_argument("--eval_ref_dir", default="data/ref_screenshots")
992
+ p.add_argument(
993
+ "--eval_clip_device",
994
+ default="cuda",
995
+ choices=["cuda", "cpu"],
996
+ help="Device for CLIP ViT in post-epoch scoring.",
997
+ )
998
  return p.parse_args()
999
 
1000
 
sync_up.py CHANGED
@@ -33,8 +33,28 @@ def parse_args() -> argparse.Namespace:
33
  ".cursor/*",
34
  "__pycache__/*",
35
  "*.pyc",
 
 
36
  "OLD/*",
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  "*.tar.gz",
 
 
 
 
 
38
  ],
39
  help="Glob patterns to ignore while uploading.",
40
  )
 
33
  ".cursor/*",
34
  "__pycache__/*",
35
  "*.pyc",
36
+ ".env",
37
+ ".env.*",
38
  "OLD/*",
39
+ "checkpoints/*",
40
+ "data/*",
41
+ ".cache/*",
42
+ ".huggingface/*",
43
+ "logs/*",
44
+ "*.log",
45
+ "*.out",
46
+ "nohup.*",
47
+ "*.pt",
48
+ "*.pth",
49
+ "*.bin",
50
+ "*.safetensors",
51
+ "*.ckpt",
52
  "*.tar.gz",
53
+ "*.tar",
54
+ "*.zip",
55
+ "results/**/html_predictions/*",
56
+ "results/**/rendered_screenshots/*",
57
+ "results/**/rendered/*",
58
  ],
59
  help="Glob patterns to ignore while uploading.",
60
  )
Free AI Image Generator No sign-up. Instant results. Open Now