File size: 3,414 Bytes
fbc94ef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/usr/bin/env bash
set -euo pipefail

# Usage:
#   bash scripts/run_ablation_study.sh
#
# Prepares a dedicated ablation workspace and prints (as copy-pasteable
# command templates) / executes the reproducible steps of the
# Top-30-focused ablation study.

# Always operate from the repository root so relative paths resolve the same
# way regardless of where the script is invoked from.
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$repo_root"

readonly abl_root="results/ablation_study"
readonly ckpt_dir="$abl_root/checkpoints"

# Create the full workspace layout up front; mkdir -p makes this idempotent.
mkdir -p "$abl_root/logs" "$ckpt_dir" "$abl_root/runs"

printf '%s\n' "Ablation workspace:" "  $abl_root" ""

# -----------------------
# 1) No-LoRA ablation
# -----------------------
printf '%s\n' "[1/5] No-LoRA ablation"
printf '%s\n' "Train compressor-only checkpoint into: $ckpt_dir/no_lora"
# Quoted heredoc delimiter ('CMD') keeps the template literal — no expansion.
cat <<'CMD'
# Example:
# CUDA_VISIBLE_DEVICES=1 PYTHONPATH=. python scripts/train_compressor.py \
#   --output_dir results/ablation_study/checkpoints/no_lora \
#   --disable_lora --target_tokens 256 \
#   --epochs 5 --max_samples 10000 \
#   --mix_root data --mix_images_subdir ref_screenshots --mix_gt_subdir gt_html \
#   --max_html_tokens 8192
CMD
printf '\n'

# -----------------------
# 2) Token sensitivity
# -----------------------
printf '%s\n' "[2/5] Token sensitivity (64/128/512)"
cat <<'CMD'
# For each token in {64,128,512}, train and eval:
# CUDA_VISIBLE_DEVICES=1 PYTHONPATH=. python scripts/train_compressor.py \
#   --output_dir results/ablation_study/checkpoints/token_64 \
#   --target_tokens 64 --epochs 5 --max_samples 10000 \
#   --mix_root data --mix_images_subdir ref_screenshots --mix_gt_subdir gt_html \
#   --max_html_tokens 8192
#
# CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python scripts/eval_all.py \
#   --method uipress --checkpoint results/ablation_study/checkpoints/token_64/latest.pt \
#   --target_tokens 64 --max_samples 50 --output_dir results/ablation_study/runs/token_64
#
# CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python scripts/step_clip_batch.py \
#   --method_dir results/ablation_study/runs/token_64/uipress_64 \
#   --ref_dir data/ref_screenshots
CMD
printf '\n'

# -----------------------
# 3) Cross-domain check
# -----------------------
printf '%s\n' "[3/5] Cross-domain (WebSight eval split)"
cat <<'CMD'
# Run eval with the same methods on WebSight-side eval set directory:
# CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python scripts/eval_all.py \
#   --method uipress --checkpoint checkpoints/optical_mix_d2c/latest.pt \
#   --target_tokens 256 --data_dir data --max_samples 50 \
#   --output_dir results/ablation_study/runs/websight_eval
CMD
printf '\n'

# -----------------------
# 4) LR scan
# -----------------------
printf '%s\n' "[4/5] Learning-rate scan"
cat <<'CMD'
# Suggested compressor LR scan:
# 1e-4 / 2e-4 / 4e-4 with fixed other settings.
# Save each run under:
# results/ablation_study/checkpoints/lr_1e-4
# results/ablation_study/checkpoints/lr_2e-4
# results/ablation_study/checkpoints/lr_4e-4
CMD
printf '\n'

# -----------------------
# 5) Page-type analysis
# -----------------------
printf '%s\n' "[5/5] Page-type analysis"
cat <<'CMD'
# Put page-type id mapping as:
# results/ablation_study/page_types.json
# Then post-process top-k IDs by category from:
# results/ablation_study/top30/top30_selected_ids.json
CMD
printf '\n'

# Build Top-30 report from available runs (safe to run repeatedly).
PYTHONPATH=. python scripts/ablation_topk_report.py --topk 30 --out_root "$abl_root"

printf '\n'
printf '%s\n' \
  "Done. Generated:" \
  "  $abl_root/top30/top30_table.json" \
  "  $abl_root/top30/top30_table.md" \
  "  $abl_root/top30/top30_selected_ids.json"