Meilong023 commited on
Commit
f04ca4a
·
verified ·
1 Parent(s): 6ac097b

Upload rft_v2

Browse files
rft_v2/__pycache__/topo_reward_hungarian.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
rft_v2/topo_config_v2.yaml ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ============================================================================
2
+ # Topological Error Detection RL Training Config v2
3
+ # ============================================================================
4
+ #
5
+ # Key changes vs. topo_config_extended.yaml (v4):
6
+ # - Hungarian optimal matching replaces window-based matching
7
+ # - F1-based detection reward replaces decoupled type/position + count penalty
8
+ # - Removed: Adaptive Count Penalty, Dynamic Window Size,
9
+ # Complexity-Aware Weighting, scoring_method, window_size, etc.
10
+ # - Reward function: topo_reward_hungarian.py:compute_score
11
+ # ============================================================================
12
+
13
+ data:
14
+ train_files: /data/meilong/projects/topoagent/data_v2_fixed/final_json/rl_train_all_w_skeletons_cleaned_cov80.json
15
+ val_files: /data/meilong/projects/topoagent/data_v2/RL_data/rl_val_all.json
16
+ prompt_key: problem
17
+ answer_key: answer
18
+ image_key: images
19
+ video_key: videos
20
+ image_dir: null
21
+ video_fps: 2.0
22
+ max_prompt_length: 2048
23
+ max_response_length: 2048
24
+ rollout_batch_size: 256
25
+ mini_rollout_batch_size: null
26
+ val_batch_size: 512
27
+ format_prompt: ./topoagent_rl_scripts/extended_dataset_scripts/rft_v2/topo_prompt.jinja
28
+ override_chat_template: null
29
+ shuffle: true
30
+ seed: 42
31
+ min_pixels: 65536
32
+ max_pixels: 524288
33
+ filter_overlong_prompts: true
34
+
35
+ algorithm:
36
+ adv_estimator: grpo
37
+ disable_kl: false
38
+ use_kl_loss: true
39
+ kl_penalty: low_var_kl
40
+ kl_coef: 0.05
41
+ online_filtering: false
42
+ filter_key: overall
43
+ filter_low: 0.01
44
+ filter_high: 0.99
45
+
46
+ worker:
47
+ actor:
48
+ global_batch_size: 64
49
+ micro_batch_size_per_device_for_update: 1
50
+ micro_batch_size_per_device_for_experience: 2
51
+ max_grad_norm: 1.0
52
+ padding_free: true
53
+ dynamic_batching: true
54
+ ulysses_size: 1
55
+ model:
56
+ model_path: /data/meilong/projects/topoagent/trained_models/sft/roads/qwen3-vl-4b-instruct/roads_sft_4b_20260201_015911
57
+ enable_gradient_checkpointing: true
58
+ trust_remote_code: false
59
+ freeze_vision_tower: false
60
+ optim:
61
+ lr: 5.0e-7
62
+ weight_decay: 1.0e-2
63
+ strategy: adamw
64
+ lr_warmup_ratio: 0.1
65
+ fsdp:
66
+ enable_full_shard: true
67
+ enable_cpu_offload: false
68
+ enable_rank0_init: true
69
+ offload:
70
+ offload_params: true
71
+ offload_optimizer: true
72
+
73
+ rollout:
74
+ n: 4
75
+ temperature: 0.8
76
+ top_p: 0.95
77
+ limit_images: 0
78
+ gpu_memory_utilization: 0.7
79
+ enforce_eager: true
80
+ enable_chunked_prefill: false
81
+ tensor_parallel_size: 1
82
+ disable_tqdm: false
83
+ val_override_config:
84
+ temperature: 0.6
85
+ top_p: 0.95
86
+ n: 1
87
+
88
+ ref:
89
+ fsdp:
90
+ enable_full_shard: true
91
+ enable_cpu_offload: true
92
+ enable_rank0_init: true
93
+ offload:
94
+ offload_params: false
95
+
96
+ reward:
97
+ reward_function: /data/meilong/projects/topoagent/src/EasyR1/topoagent_rl_scripts/extended_dataset_scripts/rft_v2/topo_reward_hungarian.py:compute_score
98
+ reward_function_kwargs:
99
+ # ==================================================================
100
+ # v2 Reward Weights (Hungarian + F1)
101
+ # ==================================================================
102
+ # Top-level: format / accuracy / cldice
103
+ format_weight: 0.10
104
+ accuracy_weight: 0.85
105
+ cldice_weight: 0.05
106
+
107
+ # Accuracy sub-weights (inside R_accuracy)
108
+ detection_weight: 0.60 # F1-based detection score
109
+ localization_weight: 0.25 # IoU quality of matched pairs
110
+ type_bonus_weight: 0.15 # match count / max(n_gt, n_pred)
111
+
112
+ # IoU → score mapping (smooth tiered)
113
+ iou_thresholds: [0.3, 0.5, 0.7, 0.9]
114
+ iou_rewards: [0.25, 0.55, 0.8, 1.0]
115
+ smooth_power: 1.5
116
+
117
+ # Hungarian matching IoU threshold
118
+ match_iou_threshold: 0.1
119
+
120
+ # clDice parameters
121
+ cldice_size_threshold: 0.3
122
+ cldice_penalty_scale: 0.8
123
+
124
+ trainer:
125
+ total_epochs: 10
126
+ max_steps: null
127
+ project_name: topoagent_rl
128
+ experiment_name: qwen3_vl_4b_rft_v2
129
+ logger: ["file", "wandb", "tensorboard"]
130
+ nnodes: 1
131
+ n_gpus_per_node: 8
132
+ max_try_make_batch: 20
133
+ val_freq: 2
134
+ val_before_train: true
135
+ val_only: false
136
+ val_generations_to_log: 5
137
+ save_freq: 2
138
+ save_limit: 5
139
+ save_model_only: false
140
+ save_checkpoint_path: /data/meilong/projects/topoagent/trained_models/rft/data_v2
141
+ load_checkpoint_path: null
142
+ find_last_checkpoint: true
rft_v2/topo_prompt.jinja ADDED
@@ -0,0 +1 @@
 
 
1
+ {{ content | trim }}
rft_v2/topo_reward_hungarian.py ADDED
@@ -0,0 +1,508 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Topological Error Detection Reward Function v2 — Hungarian Matching
3
+
4
+ Key improvements over v1 (topo_reward_with_cldice.py):
5
+ 1. Hungarian optimal matching replaces window-based matching
6
+ - Each GT error matched at most once (no duplicate matching)
7
+ - Global optimum, order-independent
8
+ 2. F1-based detection reward replaces averaging over predictions
9
+ - Naturally penalises both false positives and false negatives
10
+ - Aligned with evaluation metrics (evaluate_hungarian.py)
11
+ 3. Soft IoU scoring preserves smooth gradients for RL
12
+ 4. Removed: Adaptive Count Penalty, Dynamic Window Size,
13
+ Complexity-Aware Weighting (all subsumed by Hungarian + F1)
14
+
15
+ Combined Reward:
16
+ Total = 0.10 * R_format
17
+ + 0.85 * R_accuracy
18
+ + 0.05 * R_cldice
19
+
20
+ R_accuracy = 0.60 * R_detection (F1-based)
21
+ + 0.25 * R_localization (IoU quality of matched pairs)
22
+ + 0.15 * R_type_bonus (type accuracy among matched pairs)
23
+ """
24
+
25
+ import re
26
+ import ast
27
+ import math
28
+ import os
29
+ import numpy as np
30
+ from PIL import Image
31
+ from typing import Any, List, Dict, Tuple, Optional
32
+
33
+ from scipy.optimize import linear_sum_assignment
34
+
35
+ # clDice (optional — graceful fallback if unavailable)
36
+ try:
37
+ import sys
38
+ sys.path.insert(0, "/data/meilong/projects/topoagent/src/cldice")
39
+ from cldice import cldice_score as _cldice_score
40
+ HAS_CLDICE = True
41
+ except ImportError:
42
+ HAS_CLDICE = False
43
+
44
+ # ============================================================================
45
+ # Constants
46
+ # ============================================================================
47
+
48
+ REWARD_NAME = "topo_reward_hungarian_v2"
49
+ REWARD_TYPE = "batch"
50
+
51
+ VALID_ERROR_TYPES = [
52
+ "broken_connection",
53
+ "spurious_connection",
54
+ "missing_branch",
55
+ "extra_branch",
56
+ "erroneous_hole",
57
+ ]
58
+
59
+
60
+ # ============================================================================
61
+ # Parsing helpers
62
+ # ============================================================================
63
+
64
def extract_answer_content(response: str) -> str:
    """Return the text between the first <answer>...</answer> pair, stripped; "" if absent."""
    match = re.search(r"<answer>(.*?)</answer>", response, re.DOTALL)
    if match is None:
        return ""
    return match.group(1).strip()
68
+
69
+
70
def parse_error_list(answer_str: str) -> Optional[List[Dict[str, Any]]]:
    """Parse a Python-literal error list from a string.

    Each entry must be a dict with a 4-element "Position" list (coerced to
    ints, in place) and an "ErrorType" drawn from VALID_ERROR_TYPES.
    Returns the (mutated) list, [] for an empty answer, or None on any
    malformed input.
    """
    try:
        text = answer_str.strip()
        if text == "[]":
            return []
        parsed = ast.literal_eval(text)
        if not isinstance(parsed, list):
            return None
        for entry in parsed:
            if not isinstance(entry, dict):
                return None
            if "Position" not in entry or "ErrorType" not in entry:
                return None
            position = entry["Position"]
            if not (isinstance(position, list) and len(position) == 4):
                return None
            try:
                # Normalise coordinates to ints in place.
                entry["Position"] = [int(coord) for coord in position]
            except (TypeError, ValueError):
                return None
            if entry["ErrorType"] not in VALID_ERROR_TYPES:
                return None
        return parsed
    except Exception:
        return None
96
+
97
+
98
+ # ============================================================================
99
+ # IoU helpers
100
+ # ============================================================================
101
+
102
def calculate_iou(b1: List[int], b2: List[int]) -> float:
    """Intersection-over-union of two [x1, y1, x2, y2] boxes (0.0 when disjoint)."""
    ix1 = max(b1[0], b2[0])
    iy1 = max(b1[1], b2[1])
    ix2 = min(b1[2], b2[2])
    iy2 = min(b1[3], b2[3])
    if ix2 < ix1 or iy2 < iy1:
        return 0.0
    intersection = (ix2 - ix1) * (iy2 - iy1)
    # Degenerate (inverted) boxes contribute zero area rather than negative.
    area_a = max(0, (b1[2] - b1[0]) * (b1[3] - b1[1]))
    area_b = max(0, (b2[2] - b2[0]) * (b2[3] - b2[1]))
    union = area_a + area_b - intersection
    if union <= 0:
        return 0.0
    return intersection / union
114
+
115
+
116
def iou_to_score(iou: float,
                 thresholds: Tuple[float, ...] = (0.3, 0.5, 0.7, 0.9),
                 rewards: Tuple[float, ...] = (0.25, 0.55, 0.8, 1.0),
                 power: float = 1.5) -> float:
    """Smooth tiered IoU -> score mapping (keeps continuous gradients for RL).

    Within the tier [thresholds[i-1], thresholds[i]) the score is interpolated
    between rewards[i-1] and rewards[i] with exponent `power`; below the first
    threshold it rises from 0 as rewards[0] * (iou / thresholds[0]) ** power;
    at or above the last threshold it saturates at rewards[-1].

    Defaults are immutable tuples (a shared mutable list default is a classic
    Python pitfall); list arguments are still accepted.

    Parameters
    ----------
    iou : IoU value, expected in [0, 1].
    thresholds : ascending tier boundaries.
    rewards : ascending score per tier; same length as `thresholds`.
    power : interpolation exponent (>1 rewards the upper part of each tier).
    """
    for i, t in enumerate(thresholds):
        if iou < t:
            if i == 0:
                return rewards[0] * math.pow(iou / t, power)
            lo_t, hi_t = thresholds[i - 1], t
            lo_r, hi_r = rewards[i - 1], rewards[i]
            ratio = math.pow((iou - lo_t) / (hi_t - lo_t), power)
            return lo_r + ratio * (hi_r - lo_r)
    return rewards[-1]
130
+
131
+
132
+ # ============================================================================
133
+ # Hungarian matching (type-aware)
134
+ # ============================================================================
135
+
136
def hungarian_match_by_type(
    gt_errors: List[Dict],
    pred_errors: List[Dict],
    iou_threshold: float = 0.1,
) -> Tuple[List[Tuple[int, int, float, str]], List[int], List[int]]:
    """Type-aware one-to-one matching of GT and predicted errors.

    Errors are grouped by "ErrorType"; within each group an IoU matrix is
    built and the globally optimal assignment is solved (Hungarian algorithm,
    maximising total IoU). Assigned pairs with IoU below `iou_threshold` are
    discarded.

    Returns
    -------
    matched : list of (gt_idx, pred_idx, iou, error_type)
    unmatched_gt : GT indices left without a partner
    unmatched_pred : prediction indices left without a partner
    """
    from collections import defaultdict

    # Trivial cases: one or both sides empty.
    if not gt_errors and not pred_errors:
        return [], [], []
    if not gt_errors:
        return [], [], list(range(len(pred_errors)))
    if not pred_errors:
        return [], list(range(len(gt_errors))), []

    gt_groups: Dict[str, List[Tuple[int, Dict]]] = defaultdict(list)
    pred_groups: Dict[str, List[Tuple[int, Dict]]] = defaultdict(list)
    for idx, err in enumerate(gt_errors):
        gt_groups[err["ErrorType"]].append((idx, err))
    for idx, err in enumerate(pred_errors):
        pred_groups[err["ErrorType"]].append((idx, err))

    matched: List[Tuple[int, int, float, str]] = []
    used_gt = set()
    used_pred = set()

    for etype in set(gt_groups) | set(pred_groups):
        type_gts = gt_groups.get(etype, [])
        type_preds = pred_groups.get(etype, [])
        if not type_gts or not type_preds:
            continue
        iou_matrix = np.zeros((len(type_gts), len(type_preds)), dtype=np.float64)
        for r, (_, gt_err) in enumerate(type_gts):
            for c, (_, pred_err) in enumerate(type_preds):
                iou_matrix[r, c] = calculate_iou(gt_err["Position"], pred_err["Position"])
        # Negate: linear_sum_assignment minimises cost, we maximise IoU.
        rows, cols = linear_sum_assignment(-iou_matrix)
        for r, c in zip(rows, cols):
            pair_iou = float(iou_matrix[r, c])
            if pair_iou < iou_threshold:
                continue
            gt_idx = type_gts[r][0]
            pred_idx = type_preds[c][0]
            matched.append((gt_idx, pred_idx, pair_iou, etype))
            used_gt.add(gt_idx)
            used_pred.add(pred_idx)

    unmatched_gt = [i for i in range(len(gt_errors)) if i not in used_gt]
    unmatched_pred = [i for i in range(len(pred_errors)) if i not in used_pred]
    return matched, unmatched_gt, unmatched_pred
192
+
193
+
194
+ # ============================================================================
195
+ # Reward components
196
+ # ============================================================================
197
+
198
def format_reward(response: str) -> float:
    """Return 1.0 when the response carries a non-empty, parseable <answer> block, else 0.0."""
    has_both_tags = "<answer>" in response and "</answer>" in response
    if not has_both_tags:
        return 0.0
    content = extract_answer_content(response)
    if content and parse_error_list(content) is not None:
        return 1.0
    return 0.0
208
+
209
+
210
def accuracy_reward(
    response: str,
    ground_truth: str,
    # F1 sub-weights
    detection_weight: float = 0.60,
    localization_weight: float = 0.25,
    type_bonus_weight: float = 0.15,
    # IoU scoring params (immutable tuples avoid the shared-mutable-default pitfall)
    iou_thresholds: Tuple[float, ...] = (0.3, 0.5, 0.7, 0.9),
    iou_rewards: Tuple[float, ...] = (0.25, 0.55, 0.8, 1.0),
    smooth_power: float = 1.5,
    # Hungarian matching
    match_iou_threshold: float = 0.1,
) -> Tuple[float, Dict[str, Any]]:
    """Accuracy reward based on type-aware Hungarian matching and a soft F1.

    R_accuracy = detection_weight    * R_detection    (soft-F1 over matched pairs)
               + localization_weight * R_localization (mean IoU score of matched pairs)
               + type_bonus_weight   * R_type_bonus   (match count / max(n_gt, n_pred))

    Parameters
    ----------
    response : model output; predictions are read from its <answer> block.
    ground_truth : GT error list, either raw or wrapped in <answer> tags.
    (remaining parameters are forwarded to iou_to_score / hungarian_match_by_type)

    Returns
    -------
    (reward in [0, 1], diagnostics dict). On parse failure of either side the
    reward is 0.0 and the dict carries only a "reason" key.
    """
    # --- parse prediction ---
    pred_content = extract_answer_content(response)
    pred_errors = parse_error_list(pred_content)
    if pred_errors is None:
        return 0.0, {"reason": "parse_failure"}

    # --- parse ground truth (tolerate both tagged and raw forms) ---
    gt_str = extract_answer_content(ground_truth) if "<answer>" in ground_truth else ground_truth
    gt_errors = parse_error_list(gt_str)
    if gt_errors is None:
        return 0.0, {"reason": "gt_parse_failure"}

    n_gt = len(gt_errors)
    n_pred = len(pred_errors)

    # --- both empty: correct negative gets full reward ---
    if n_gt == 0 and n_pred == 0:
        return 1.0, {"reason": "correct_negative"}

    # --- one side empty: pure FP or pure FN ---
    if n_gt == 0:
        return 0.0, {"reason": "false_positives", "n_fp": n_pred}
    if n_pred == 0:
        return 0.0, {"reason": "false_negatives", "n_fn": n_gt}

    # --- Hungarian matching ---
    matched, unmatched_gt, unmatched_pred = hungarian_match_by_type(
        gt_errors, pred_errors, match_iou_threshold
    )

    # Score each matched pair once; the scores feed both R_detection (soft TP)
    # and R_localization (mean pair quality). The original computed
    # iou_to_score twice per pair.
    pair_scores: List[float] = []
    matched_ious: List[float] = []
    for _, _, iou_val, _ in matched:
        pair_scores.append(iou_to_score(iou_val, iou_thresholds, iou_rewards, smooth_power))
        matched_ious.append(iou_val)

    soft_tp = sum(pair_scores)
    fp = len(unmatched_pred)
    fn = len(unmatched_gt)

    # --- R_detection: soft F1 = 2*TP / (2*TP + FP + FN) with fractional TP ---
    denom = 2.0 * soft_tp + fp + fn
    r_detection = (2.0 * soft_tp / denom) if denom > 0 else 0.0

    # --- R_localization: mean IoU score over matched pairs ---
    r_localization = sum(pair_scores) / len(pair_scores) if pair_scores else 0.0

    # --- R_type_bonus: matched fraction; types always agree under type-aware
    # matching, so this rewards coverage. Both counts are >= 1 here. ---
    r_type_bonus = len(matched) / max(n_gt, n_pred)

    # --- weighted combination, clamped to [0, 1] ---
    r_accuracy = (
        detection_weight * r_detection
        + localization_weight * r_localization
        + type_bonus_weight * r_type_bonus
    )
    r_accuracy = max(0.0, min(1.0, r_accuracy))

    return r_accuracy, {
        "r_detection": r_detection,
        "r_localization": r_localization,
        "r_type_bonus": r_type_bonus,
        "soft_tp": soft_tp,
        "fp": fp,
        "fn": fn,
        "n_matched": len(matched),
        "n_gt": n_gt,
        "n_pred": n_pred,
        "mean_iou": sum(matched_ious) / len(matched_ious) if matched_ious else 0.0,
    }
309
+
310
+
311
+ # ============================================================================
312
+ # clDice reward (Hungarian-aligned)
313
+ # ============================================================================
314
+
315
+ def _crop(img: np.ndarray, bbox: List[int]) -> np.ndarray:
316
+ x1, y1, x2, y2 = bbox
317
+ x1, x2 = max(0, x1), min(img.shape[1], x2)
318
+ y1, y2 = max(0, y1), min(img.shape[0], y2)
319
+ return img[y1:y2, x1:x2] if len(img.shape) == 2 else img[y1:y2, x1:x2, :]
320
+
321
+
322
+ def _bbox_size_ratio(bbox: List[int], img_size: int = 1000) -> float:
323
+ return max(0, (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])) / (img_size * img_size)
324
+
325
+
326
def _loc_penalty(bbox: List[int], thresh: float = 0.5, scale: float = 0.5) -> float:
    """Size penalty: 1.0 for boxes up to `thresh` of the frame, then a linear
    falloff (slope set by `scale`) toward the full-frame box, floored at 0."""
    ratio = _bbox_size_ratio(bbox)
    if ratio <= thresh:
        return 1.0
    penalty = 1.0 - scale * (ratio - thresh) / (1.0 - thresh)
    return max(0.0, penalty)
331
+
332
+
333
def cldice_reward(
    response: str,
    ground_truth: str,
    image_paths: List[str],
    skeleton_paths: List[str],
    match_iou_threshold: float = 0.1,
    size_threshold: float = 0.5,
    penalty_scale: float = 0.5,
) -> Tuple[float, Dict[str, Any]]:
    """clDice reward over Hungarian-matched (type-aware) pairs.

    For each matched pair, the corrupted segmentation mask and the GT mask are
    cropped to the *predicted* bbox; the pair contributes
    (1 - clDice) * size_penalty, so boxes over genuinely broken topology score
    high and oversized boxes are damped by _loc_penalty.

    Returns (mean pair reward, info dict); (0.0, {"reason": ...}) whenever the
    inputs are unusable (no clDice backend, unparseable answers, missing
    paths/files, no matched pairs).

    NOTE(review): assumes image_paths[1] is the corrupted mask and
    skeleton_paths == [seg_skeleton, gt_skeleton] — confirm against the
    dataset builder.
    """
    if not HAS_CLDICE:
        return 0.0, {"reason": "cldice_unavailable"}

    pred_content = extract_answer_content(response)
    pred_errors = parse_error_list(pred_content)
    if not pred_errors:
        return 0.0, {"reason": "no_predictions"}

    gt_str = extract_answer_content(ground_truth) if "<answer>" in ground_truth else ground_truth
    gt_errors = parse_error_list(gt_str)
    if not gt_errors:
        return 0.0, {"reason": "no_gt_errors"}

    # A corrupted sample carries exactly [seg_skeleton, gt_skeleton].
    if len(skeleton_paths) != 2:
        return 0.0, {"reason": "no_corrupted_sample"}
    # Guard added: the original indexed image_paths[1] unchecked (IndexError).
    if len(image_paths) < 2:
        return 0.0, {"reason": "missing_image_paths"}

    seg_mask_path = image_paths[1]
    seg_skeleton_path = skeleton_paths[0]
    gt_skeleton_path = skeleton_paths[1]

    # Find the GT mask path by probing the known dataset layouts.
    seg_dir = os.path.dirname(seg_mask_path)
    parent_dir = os.path.dirname(seg_dir)
    base_name = os.path.basename(seg_mask_path).replace("_corrupted.png", "").replace("_corrupted", "")
    gt_mask_path = None
    for p in [
        os.path.join(parent_dir, "gt", f"{base_name}_gt.png"),
        os.path.join(parent_dir, "label", f"{base_name}.png"),
        seg_mask_path.replace("/corrupted/", "/gt/").replace("_corrupted", "_gt"),
    ]:
        if os.path.exists(p):
            gt_mask_path = p
            break
    if gt_mask_path is None:
        return 0.0, {"reason": "gt_mask_not_found"}

    # Hungarian matching (same threshold as the accuracy reward).
    matched, _, _ = hungarian_match_by_type(gt_errors, pred_errors, match_iou_threshold)
    if not matched:
        return 0.0, {"reason": "no_matched_pairs"}

    try:
        seg_mask = np.array(Image.open(seg_mask_path).convert("L"))
        gt_mask = np.array(Image.open(gt_mask_path).convert("L"))
    except Exception:
        return 0.0, {"reason": "image_load_failure"}

    # Load the skeleton images ONCE; the original re-opened both files on
    # every matched pair. Failure degrades to None (clDice recomputes skeletons).
    try:
        seg_skel_full = np.array(Image.open(seg_skeleton_path).convert("L"))
        gt_skel_full = np.array(Image.open(gt_skeleton_path).convert("L"))
    except Exception:
        seg_skel_full, gt_skel_full = None, None

    rewards = []
    for _gt_i, pred_i, _iou_val, _etype in matched:
        bbox = pred_errors[pred_i]["Position"]
        seg_crop = _crop(seg_mask, bbox)
        gt_crop = _crop(gt_mask, bbox)
        if seg_crop.size == 0 or gt_crop.size == 0:
            rewards.append(0.0)
            continue
        if seg_skel_full is not None and gt_skel_full is not None:
            seg_skel_crop = _crop(seg_skel_full, bbox)
            gt_skel_crop = _crop(gt_skel_full, bbox)
        else:
            seg_skel_crop, gt_skel_crop = None, None

        try:
            cld, _, _ = _cldice_score(
                seg_crop, gt_crop,
                pred_skeleton=seg_skel_crop,
                gt_skeleton=gt_skel_crop,
            )
        except Exception:
            cld = 0.0  # treat scorer failure as maximal topological damage

        # High reward where topology is broken (low clDice), damped for huge boxes.
        r = (1.0 - cld) * _loc_penalty(bbox, size_threshold, penalty_scale)
        rewards.append(r)

    avg = sum(rewards) / len(rewards) if rewards else 0.0
    return avg, {"n_matched": len(matched), "avg_cldice_reward": avg}
424
+
425
+
426
+ # ============================================================================
427
+ # Main entry point: compute_score (called by verl)
428
+ # ============================================================================
429
+
430
def compute_score(
    reward_inputs: List[Dict[str, Any]],
    # Top-level weights
    format_weight: float = 0.10,
    accuracy_weight: float = 0.85,
    cldice_weight: float = 0.05,
    # Accuracy sub-weights
    detection_weight: float = 0.60,
    localization_weight: float = 0.25,
    type_bonus_weight: float = 0.15,
    # IoU scoring (immutable tuples — shared mutable list defaults are a pitfall;
    # list values from the YAML config are accepted unchanged)
    iou_thresholds: Tuple[float, ...] = (0.3, 0.5, 0.7, 0.9),
    iou_rewards: Tuple[float, ...] = (0.25, 0.55, 0.8, 1.0),
    smooth_power: float = 1.5,
    # Matching
    match_iou_threshold: float = 0.1,
    # clDice
    cldice_size_threshold: float = 0.3,
    cldice_penalty_scale: float = 0.8,
) -> List[Dict[str, float]]:
    """Compute reward scores for a batch of samples.

    Called by the verl trainer as:
        reward_function: .../topo_reward_hungarian.py:compute_score

    Each element of `reward_inputs` must carry "response" and "ground_truth";
    "image_paths" and "skeleton_paths" are optional (clDice is skipped when
    skeleton paths are absent).

    Returns one dict per sample with keys "overall", "format", "accuracy",
    "cldice". Raises ValueError if the three top-level weights sum to <= 0
    (the original would fail with a bare ZeroDivisionError).
    """
    # Normalise the top-level weights so they sum to 1.
    tw = format_weight + accuracy_weight + cldice_weight
    if tw <= 0:
        raise ValueError(
            "format_weight + accuracy_weight + cldice_weight must be positive"
        )
    w_fmt = format_weight / tw
    w_acc = accuracy_weight / tw
    w_cld = cldice_weight / tw

    scores = []
    for inp in reward_inputs:
        response = inp["response"]
        ground_truth = inp["ground_truth"]
        image_paths = inp.get("image_paths", [])
        skeleton_paths = inp.get("skeleton_paths", [])

        # 1) Format: valid <answer> tags with parseable content.
        s_fmt = format_reward(response)

        # 2) Accuracy: Hungarian matching + soft-F1.
        s_acc, _ = accuracy_reward(
            response,
            ground_truth,
            detection_weight=detection_weight,
            localization_weight=localization_weight,
            type_bonus_weight=type_bonus_weight,
            iou_thresholds=iou_thresholds,
            iou_rewards=iou_rewards,
            smooth_power=smooth_power,
            match_iou_threshold=match_iou_threshold,
        )

        # 3) clDice: only meaningful when skeleton paths were provided.
        if skeleton_paths:
            s_cld, _ = cldice_reward(
                response,
                ground_truth,
                image_paths,
                skeleton_paths,
                match_iou_threshold=match_iou_threshold,
                size_threshold=cldice_size_threshold,
                penalty_scale=cldice_penalty_scale,
            )
        else:
            s_cld = 0.0

        overall = w_fmt * s_fmt + w_acc * s_acc + w_cld * s_cld

        scores.append({
            "overall": overall,
            "format": s_fmt,
            "accuracy": s_acc,
            "cldice": s_cld,
        })

    return scores
rft_v2/train_qwen2.5_vl_3b.sh ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# ============================================================================
# RFT v2 — Qwen2.5-VL-3B (Hungarian Matching + F1 Reward)
# ============================================================================
#
# Key changes vs v4:
#   - Hungarian optimal matching (no more window-based matching)
#   - F1-based detection reward (no more count penalty / complexity hacks)
#   - Aligned with evaluate_hungarian.py evaluation pipeline
# ============================================================================

# Abort on the first failing command; echo each command for the log.
set -e
set -x

# ANSI color codes used by the status messages below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

echo -e "${GREEN}============================================================================${NC}"
echo -e "${GREEN}RFT v2 Training — Qwen2.5-VL-3B (Hungarian + F1)${NC}"
echo -e "${GREEN}============================================================================${NC}"

# ============================================================================
# Environment setup
# ============================================================================

# Keep Weights & Biases fully offline; runs can be synced manually later.
export WANDB_MODE=offline
export WANDB_SILENT=true

# Work from two directories above this script (the EasyR1 repo root).
cd "$(dirname "$0")/../.." || exit 1
echo "Working directory: $(pwd)"

# Activate the project virtualenv when present.
if [ -f "/data/meilong/projects/topoagent/.venv/bin/activate" ]; then
    source /data/meilong/projects/topoagent/.venv/bin/activate
fi

# ============================================================================
# Configuration
# ============================================================================

PROJECT_ROOT="/data/meilong/projects/topoagent"
MODEL_PATH="${PROJECT_ROOT}/trained_models/sft/data_v2/qwen2.5-vl-3b-instruct/data_v2_qwen2.5_sft_3b_20260207_192324"
TRAIN_DATA="${PROJECT_ROOT}/data_v2_fixed/final_json/rl_train_all_w_skeletons_cleaned_cov80.json"
VAL_DATA="${PROJECT_ROOT}/data_v2/RL_data/rl_val_all.json"
CONFIG_FILE="${PROJECT_ROOT}/src/EasyR1/topoagent_rl_scripts/extended_dataset_scripts/rft_v2/topo_config_v2.yaml"

# Each run is saved under a timestamped subdirectory.
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
BASE_SAVE_PATH="${PROJECT_ROOT}/trained_models/rft/data_v2/qwen2.5_vl_3b_v2"
SAVE_PATH="${BASE_SAVE_PATH}/${TIMESTAMP}"
EXPERIMENT_NAME="qwen2.5_vl_3b_rft_v2_${TIMESTAMP}"

N_GPUS=8
LOG_DIR="${SAVE_PATH}/log"
mkdir -p "${LOG_DIR}"
LOG_FILE="${LOG_DIR}/training.log"

# ============================================================================
# Pre-flight checks
# ============================================================================

echo -e "${YELLOW}检查配置...${NC}"

# All three input files must exist before launching.
for CHECK_FILE in "$CONFIG_FILE" "$TRAIN_DATA" "$VAL_DATA"; do
    if [ ! -f "$CHECK_FILE" ]; then
        echo -e "${RED}错误: 文件不存在: $CHECK_FILE${NC}"
        exit 1
    fi
done

if [ ! -d "$MODEL_PATH" ]; then
    echo -e "${RED}错误: 模型路径不存在: $MODEL_PATH${NC}"
    exit 1
fi

# Shrink the GPU count automatically when fewer GPUs are visible.
AVAILABLE_GPUS=$(nvidia-smi --query-gpu=index --format=csv,noheader | wc -l)
echo -e "${GREEN}可用 GPU: $AVAILABLE_GPUS${NC}"

if [ "$AVAILABLE_GPUS" -lt "$N_GPUS" ]; then
    echo -e "${YELLOW}警告: 可用 GPU ($AVAILABLE_GPUS) < 配置 GPU ($N_GPUS),自动调整${NC}"
    N_GPUS=$AVAILABLE_GPUS
fi

mkdir -p "$SAVE_PATH"
export TENSORBOARD_DIR="${SAVE_PATH}"

# ============================================================================
# Configuration summary
# ============================================================================

echo ""
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BLUE}Training Configuration (RFT v2)${NC}"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "Model:           ${GREEN}Qwen2.5-VL-3B-Instruct (SFT)${NC}"
echo -e "Base Model:      ${MODEL_PATH}"
echo -e "Config:          ${CONFIG_FILE}"
echo -e ""
echo -e "Reward Design (v2):"
echo -e "  Matching:      ${YELLOW}Hungarian optimal matching${NC}"
echo -e "  Detection:     ${YELLOW}F1-based (soft TP)${NC}"
echo -e "  Localization:  ${YELLOW}Smooth tiered IoU${NC}"
echo -e "  Weights:       ${YELLOW}format=0.10, accuracy=0.85, cldice=0.05${NC}"
echo -e ""
echo -e "GPU Config:      ${N_GPUS} GPUs"
echo -e "Save Path:       ${SAVE_PATH}"
echo -e "TensorBoard:     ${SAVE_PATH}/tensorboard"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""

# ============================================================================
# Launch training
# ============================================================================

echo -e "${GREEN}启动训练... $(date)${NC}"

python3 -m verl.trainer.main \
    config=${CONFIG_FILE} \
    data.train_files=${TRAIN_DATA} \
    data.val_files=${VAL_DATA} \
    worker.actor.model.model_path=${MODEL_PATH} \
    trainer.experiment_name=${EXPERIMENT_NAME} \
    trainer.n_gpus_per_node=${N_GPUS} \
    trainer.save_checkpoint_path=${SAVE_PATH} \
    2>&1 | tee "${LOG_FILE}"

# `tee` masks the trainer's exit status; PIPESTATUS[0] recovers it.
TRAIN_EXIT_CODE=${PIPESTATUS[0]}

# ============================================================================
# Training finished
# ============================================================================

echo ""
if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}训练成功完成! $(date)${NC}"
else
    echo -e "${RED}训练失败 (退出代码: $TRAIN_EXIT_CODE) $(date)${NC}"
fi

echo -e "保存路径: ${SAVE_PATH}"
echo -e "日志文件: ${LOG_FILE}"

# Refresh the "latest" symlink so it points at this run (relative link).
LATEST_LINK="${BASE_SAVE_PATH}/latest"
rm -f "${LATEST_LINK}" 2>/dev/null
ln -s "${TIMESTAMP}" "${LATEST_LINK}"

if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}TensorBoard: tensorboard --logdir=${SAVE_PATH}/tensorboard${NC}"
    exit 0
else
    echo -e "查看完整日志: cat ${LOG_FILE}"
    exit 1
fi
rft_v2/train_qwen2_vl_2b.sh ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# ============================================================================
# RFT v2 — Qwen2-VL-2B (Hungarian Matching + F1 Reward)
# ============================================================================
#
# Key changes vs v4:
#   - Hungarian optimal matching (no more window-based matching)
#   - F1-based detection reward (no more count penalty / complexity hacks)
#   - Aligned with evaluate_hungarian.py evaluation pipeline
# ============================================================================

set -e
set -x

# ANSI color codes for console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

echo -e "${GREEN}============================================================================${NC}"
echo -e "${GREEN}RFT v2 Training — Qwen2-VL-2B (Hungarian + F1)${NC}"
echo -e "${GREEN}============================================================================${NC}"

# ============================================================================
# Environment setup
# ============================================================================

export WANDB_MODE=offline
export WANDB_SILENT=true

# Run from the EasyR1 repository root (two directories above this script).
cd "$(dirname "$0")/../.." || exit 1
echo "Working directory: $(pwd)"

# Activate the project virtualenv when it exists.
if [ -f "/data/meilong/projects/topoagent/.venv/bin/activate" ]; then
    source /data/meilong/projects/topoagent/.venv/bin/activate
fi

# ============================================================================
# Configuration
# ============================================================================

PROJECT_ROOT="/data/meilong/projects/topoagent"
MODEL_PATH="${PROJECT_ROOT}/trained_models/sft/data_v2/qwen2-vl-2b-instruct/data_v2_qwen2_sft_2b_20260210_151807"
TRAIN_DATA="${PROJECT_ROOT}/data_v2_fixed/final_json/rl_train_all_w_skeletons_cleaned_cov80.json"
VAL_DATA="${PROJECT_ROOT}/data_v2/RL_data/rl_val_all.json"
CONFIG_FILE="${PROJECT_ROOT}/src/EasyR1/topoagent_rl_scripts/extended_dataset_scripts/rft_v2/topo_config_v2.yaml"

TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
BASE_SAVE_PATH="${PROJECT_ROOT}/trained_models/rft/data_v2/qwen2_vl_2b_v2"
SAVE_PATH="${BASE_SAVE_PATH}/${TIMESTAMP}"
EXPERIMENT_NAME="qwen2_vl_2b_rft_v2_${TIMESTAMP}"

N_GPUS=8
LOG_DIR="${SAVE_PATH}/log"
mkdir -p "${LOG_DIR}"
LOG_FILE="${LOG_DIR}/training.log"

# ============================================================================
# Pre-flight checks
# ============================================================================

echo -e "${YELLOW}检查配置...${NC}"

# All required input files must exist before we launch anything.
for CHECK_FILE in "$CONFIG_FILE" "$TRAIN_DATA" "$VAL_DATA"; do
    if [ ! -f "$CHECK_FILE" ]; then
        echo -e "${RED}错误: 文件不存在: $CHECK_FILE${NC}"
        exit 1
    fi
done

if [ ! -d "$MODEL_PATH" ]; then
    echo -e "${RED}错误: 模型路径不存在: $MODEL_PATH${NC}"
    exit 1
fi

AVAILABLE_GPUS=$(nvidia-smi --query-gpu=index --format=csv,noheader | wc -l)
echo -e "${GREEN}可用 GPU: $AVAILABLE_GPUS${NC}"

# Fall back to however many GPUs are actually present.
if [ "$AVAILABLE_GPUS" -lt "$N_GPUS" ]; then
    echo -e "${YELLOW}警告: 可用 GPU ($AVAILABLE_GPUS) < 配置 GPU ($N_GPUS),自动调整${NC}"
    N_GPUS=$AVAILABLE_GPUS
fi

mkdir -p "$SAVE_PATH"
# FIX: point TENSORBOARD_DIR at the same "tensorboard" subdirectory that the
# summary below and the final hint advertise (it previously pointed at
# SAVE_PATH itself, so `tensorboard --logdir=${SAVE_PATH}/tensorboard` would
# find nothing).
export TENSORBOARD_DIR="${SAVE_PATH}/tensorboard"

# ============================================================================
# Configuration summary
# ============================================================================

echo ""
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BLUE}Training Configuration (RFT v2)${NC}"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "Model:          ${GREEN}Qwen2-VL-2B-Instruct (SFT)${NC}"
echo -e "Base Model:     ${MODEL_PATH}"
echo -e "Config:         ${CONFIG_FILE}"
echo -e ""
echo -e "Reward Design (v2):"
echo -e "  Matching:     ${YELLOW}Hungarian optimal matching${NC}"
echo -e "  Detection:    ${YELLOW}F1-based (soft TP)${NC}"
echo -e "  Localization: ${YELLOW}Smooth tiered IoU${NC}"
echo -e "  Weights:      ${YELLOW}format=0.10, accuracy=0.85, cldice=0.05${NC}"
echo -e ""
echo -e "GPU Config:     ${N_GPUS} GPUs"
echo -e "Save Path:      ${SAVE_PATH}"
echo -e "TensorBoard:    ${SAVE_PATH}/tensorboard"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""

# ============================================================================
# Launch training
# ============================================================================

echo -e "${GREEN}启动训练... $(date)${NC}"

# Quote every override so paths can never be word-split or glob-expanded.
python3 -m verl.trainer.main \
    config="${CONFIG_FILE}" \
    data.train_files="${TRAIN_DATA}" \
    data.val_files="${VAL_DATA}" \
    worker.actor.model.model_path="${MODEL_PATH}" \
    trainer.experiment_name="${EXPERIMENT_NAME}" \
    trainer.n_gpus_per_node="${N_GPUS}" \
    trainer.save_checkpoint_path="${SAVE_PATH}" \
    2>&1 | tee "${LOG_FILE}"

# tee masks python's exit status (and keeps `set -e` from aborting the
# pipeline); PIPESTATUS[0] recovers the trainer's real status.
TRAIN_EXIT_CODE=${PIPESTATUS[0]}

# ============================================================================
# Training finished
# ============================================================================

echo ""
if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}训练成功完成! $(date)${NC}"
else
    echo -e "${RED}训练失败 (退出代码: $TRAIN_EXIT_CODE) $(date)${NC}"
fi

echo -e "保存路径: ${SAVE_PATH}"
echo -e "日志文件: ${LOG_FILE}"

LATEST_LINK="${BASE_SAVE_PATH}/latest"
# -sfn replaces any existing "latest" link in one command (the previous
# rm + ln pair was non-atomic). The relative target keeps the link valid
# if BASE_SAVE_PATH is ever moved.
ln -sfn "${TIMESTAMP}" "${LATEST_LINK}"

if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}TensorBoard: tensorboard --logdir=${SAVE_PATH}/tensorboard${NC}"
    exit 0
else
    echo -e "查看完整日志: cat ${LOG_FILE}"
    exit 1
fi
rft_v2/train_qwen3_vl_4b.sh ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# ============================================================================
# RFT v2 — Qwen3-VL-4B (Hungarian Matching + F1 Reward)
# ============================================================================
#
# Key changes vs v4:
#   - Hungarian optimal matching (no more window-based matching)
#   - F1-based detection reward (no more count penalty / complexity hacks)
#   - Aligned with evaluate_hungarian.py evaluation pipeline
# ============================================================================

set -e
set -x

# ANSI color codes for console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

echo -e "${GREEN}============================================================================${NC}"
echo -e "${GREEN}RFT v2 Training — Qwen3-VL-4B (Hungarian + F1)${NC}"
echo -e "${GREEN}============================================================================${NC}"

# ============================================================================
# Environment setup
# ============================================================================

export WANDB_MODE=offline
export WANDB_SILENT=true

# Run from the EasyR1 repository root (two directories above this script).
cd "$(dirname "$0")/../.." || exit 1
echo "Working directory: $(pwd)"

# Activate the project virtualenv when it exists.
if [ -f "/data/meilong/projects/topoagent/.venv/bin/activate" ]; then
    source /data/meilong/projects/topoagent/.venv/bin/activate
fi

# ============================================================================
# Configuration
# ============================================================================

PROJECT_ROOT="/data/meilong/projects/topoagent"
MODEL_PATH="${PROJECT_ROOT}/trained_models/sft/roads/qwen3-vl-4b-instruct/roads_sft_4b_20260201_015911"
TRAIN_DATA="${PROJECT_ROOT}/data_v2_fixed/final_json/rl_train_all_w_skeletons_cleaned_cov80.json"
VAL_DATA="${PROJECT_ROOT}/data_v2/RL_data/rl_val_all.json"
CONFIG_FILE="${PROJECT_ROOT}/src/EasyR1/topoagent_rl_scripts/extended_dataset_scripts/rft_v2/topo_config_v2.yaml"

TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
BASE_SAVE_PATH="${PROJECT_ROOT}/trained_models/rft/data_v2/qwen3_vl_4b_v2"
SAVE_PATH="${BASE_SAVE_PATH}/${TIMESTAMP}"
EXPERIMENT_NAME="qwen3_vl_4b_rft_v2_${TIMESTAMP}"

N_GPUS=8
LOG_DIR="${SAVE_PATH}/log"
mkdir -p "${LOG_DIR}"
LOG_FILE="${LOG_DIR}/training.log"

# ============================================================================
# Pre-flight checks
# ============================================================================

echo -e "${YELLOW}检查配置...${NC}"

# All required input files must exist before we launch anything.
for CHECK_FILE in "$CONFIG_FILE" "$TRAIN_DATA" "$VAL_DATA"; do
    if [ ! -f "$CHECK_FILE" ]; then
        echo -e "${RED}错误: 文件不存在: $CHECK_FILE${NC}"
        exit 1
    fi
done

if [ ! -d "$MODEL_PATH" ]; then
    echo -e "${RED}错误: 模型路径不存在: $MODEL_PATH${NC}"
    exit 1
fi

AVAILABLE_GPUS=$(nvidia-smi --query-gpu=index --format=csv,noheader | wc -l)
echo -e "${GREEN}可用 GPU: $AVAILABLE_GPUS${NC}"

# Fall back to however many GPUs are actually present.
if [ "$AVAILABLE_GPUS" -lt "$N_GPUS" ]; then
    echo -e "${YELLOW}警告: 可用 GPU ($AVAILABLE_GPUS) < 配置 GPU ($N_GPUS),自动调整${NC}"
    N_GPUS=$AVAILABLE_GPUS
fi

mkdir -p "$SAVE_PATH"
# FIX: point TENSORBOARD_DIR at the same "tensorboard" subdirectory that the
# summary below and the final hint advertise (it previously pointed at
# SAVE_PATH itself, so `tensorboard --logdir=${SAVE_PATH}/tensorboard` would
# find nothing).
export TENSORBOARD_DIR="${SAVE_PATH}/tensorboard"

# ============================================================================
# Configuration summary
# ============================================================================

echo ""
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BLUE}Training Configuration (RFT v2)${NC}"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "Model:          ${GREEN}Qwen3-VL-4B-Instruct (SFT)${NC}"
echo -e "Base Model:     ${MODEL_PATH}"
echo -e "Config:         ${CONFIG_FILE}"
echo -e ""
echo -e "Reward Design (v2):"
echo -e "  Matching:     ${YELLOW}Hungarian optimal matching${NC}"
echo -e "  Detection:    ${YELLOW}F1-based (soft TP)${NC}"
echo -e "  Localization: ${YELLOW}Smooth tiered IoU${NC}"
echo -e "  Weights:      ${YELLOW}format=0.10, accuracy=0.85, cldice=0.05${NC}"
echo -e ""
echo -e "GPU Config:     ${N_GPUS} GPUs"
echo -e "Save Path:      ${SAVE_PATH}"
echo -e "TensorBoard:    ${SAVE_PATH}/tensorboard"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""

# ============================================================================
# Launch training
# ============================================================================

echo -e "${GREEN}启动训练... $(date)${NC}"

# Quote every override so paths can never be word-split or glob-expanded.
python3 -m verl.trainer.main \
    config="${CONFIG_FILE}" \
    data.train_files="${TRAIN_DATA}" \
    data.val_files="${VAL_DATA}" \
    worker.actor.model.model_path="${MODEL_PATH}" \
    trainer.experiment_name="${EXPERIMENT_NAME}" \
    trainer.n_gpus_per_node="${N_GPUS}" \
    trainer.save_checkpoint_path="${SAVE_PATH}" \
    2>&1 | tee "${LOG_FILE}"

# tee masks python's exit status (and keeps `set -e` from aborting the
# pipeline); PIPESTATUS[0] recovers the trainer's real status.
TRAIN_EXIT_CODE=${PIPESTATUS[0]}

# ============================================================================
# Training finished
# ============================================================================

echo ""
if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}训练成功完成! $(date)${NC}"
else
    echo -e "${RED}训练失败 (退出代码: $TRAIN_EXIT_CODE) $(date)${NC}"
fi

echo -e "保存路径: ${SAVE_PATH}"
echo -e "日志文件: ${LOG_FILE}"

LATEST_LINK="${BASE_SAVE_PATH}/latest"
# -sfn replaces any existing "latest" link in one command (the previous
# rm + ln pair was non-atomic). The relative target keeps the link valid
# if BASE_SAVE_PATH is ever moved.
ln -sfn "${TIMESTAMP}" "${LATEST_LINK}"

if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}TensorBoard: tensorboard --logdir=${SAVE_PATH}/tensorboard${NC}"
    exit 0
else
    echo -e "查看完整日志: cat ${LOG_FILE}"
    exit 1
fi
rft_v2/train_qwen3_vl_8b.sh ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# ============================================================================
# RFT v2 — Qwen3-VL-8B (Hungarian Matching + F1 Reward)
# ============================================================================
#
# Key changes vs v4:
#   - Hungarian optimal matching (no more window-based matching)
#   - F1-based detection reward (no more count penalty / complexity hacks)
#   - Aligned with evaluate_hungarian.py evaluation pipeline
# ============================================================================

set -e
set -x

# ANSI color codes for console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

echo -e "${GREEN}============================================================================${NC}"
echo -e "${GREEN}RFT v2 Training — Qwen3-VL-8B (Hungarian + F1)${NC}"
echo -e "${GREEN}============================================================================${NC}"

# ============================================================================
# Environment setup
# ============================================================================

export WANDB_MODE=offline
export WANDB_SILENT=true

# Run from the EasyR1 repository root (two directories above this script).
cd "$(dirname "$0")/../.." || exit 1
echo "Working directory: $(pwd)"

# Activate the project virtualenv when it exists.
if [ -f "/data/meilong/projects/topoagent/.venv/bin/activate" ]; then
    source /data/meilong/projects/topoagent/.venv/bin/activate
fi

# ============================================================================
# Configuration
# ============================================================================

PROJECT_ROOT="/data/meilong/projects/topoagent"
MODEL_PATH="${PROJECT_ROOT}/trained_models/sft/data_v2/qwen3-vl-8b-instruct/data_v2_qwen3_sft_8b_20260210_232056"
TRAIN_DATA="${PROJECT_ROOT}/data_v2_fixed/final_json/rl_train_all_w_skeletons_cleaned_cov80.json"
VAL_DATA="${PROJECT_ROOT}/data_v2/RL_data/rl_val_all.json"
CONFIG_FILE="${PROJECT_ROOT}/src/EasyR1/topoagent_rl_scripts/extended_dataset_scripts/rft_v2/topo_config_v2.yaml"

TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
BASE_SAVE_PATH="${PROJECT_ROOT}/trained_models/rft/data_v2/qwen3_vl_8b_v2"
SAVE_PATH="${BASE_SAVE_PATH}/${TIMESTAMP}"
EXPERIMENT_NAME="qwen3_vl_8b_rft_v2_${TIMESTAMP}"

N_GPUS=8
LOG_DIR="${SAVE_PATH}/log"
mkdir -p "${LOG_DIR}"
LOG_FILE="${LOG_DIR}/training.log"

# ============================================================================
# Pre-flight checks
# ============================================================================

echo -e "${YELLOW}检查配置...${NC}"

# All required input files must exist before we launch anything.
for CHECK_FILE in "$CONFIG_FILE" "$TRAIN_DATA" "$VAL_DATA"; do
    if [ ! -f "$CHECK_FILE" ]; then
        echo -e "${RED}错误: 文件不存在: $CHECK_FILE${NC}"
        exit 1
    fi
done

if [ ! -d "$MODEL_PATH" ]; then
    echo -e "${RED}错误: 模型路径不存在: $MODEL_PATH${NC}"
    exit 1
fi

AVAILABLE_GPUS=$(nvidia-smi --query-gpu=index --format=csv,noheader | wc -l)
echo -e "${GREEN}可用 GPU: $AVAILABLE_GPUS${NC}"

# Fall back to however many GPUs are actually present.
if [ "$AVAILABLE_GPUS" -lt "$N_GPUS" ]; then
    echo -e "${YELLOW}警告: 可用 GPU ($AVAILABLE_GPUS) < 配置 GPU ($N_GPUS),自动调整${NC}"
    N_GPUS=$AVAILABLE_GPUS
fi

mkdir -p "$SAVE_PATH"
# FIX: point TENSORBOARD_DIR at the same "tensorboard" subdirectory that the
# summary below and the final hint advertise (it previously pointed at
# SAVE_PATH itself, so `tensorboard --logdir=${SAVE_PATH}/tensorboard` would
# find nothing).
export TENSORBOARD_DIR="${SAVE_PATH}/tensorboard"

# ============================================================================
# Configuration summary
# ============================================================================

echo ""
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BLUE}Training Configuration (RFT v2)${NC}"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "Model:          ${GREEN}Qwen3-VL-8B-Instruct (SFT)${NC}"
echo -e "Base Model:     ${MODEL_PATH}"
echo -e "Config:         ${CONFIG_FILE}"
echo -e ""
echo -e "Reward Design (v2):"
echo -e "  Matching:     ${YELLOW}Hungarian optimal matching${NC}"
echo -e "  Detection:    ${YELLOW}F1-based (soft TP)${NC}"
echo -e "  Localization: ${YELLOW}Smooth tiered IoU${NC}"
echo -e "  Weights:      ${YELLOW}format=0.10, accuracy=0.85, cldice=0.05${NC}"
echo -e ""
echo -e "GPU Config:     ${N_GPUS} GPUs"
echo -e "Save Path:      ${SAVE_PATH}"
echo -e "TensorBoard:    ${SAVE_PATH}/tensorboard"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""

# ============================================================================
# Launch training
# ============================================================================

echo -e "${GREEN}启动训练... $(date)${NC}"

# Quote every override so paths can never be word-split or glob-expanded.
python3 -m verl.trainer.main \
    config="${CONFIG_FILE}" \
    data.train_files="${TRAIN_DATA}" \
    data.val_files="${VAL_DATA}" \
    worker.actor.model.model_path="${MODEL_PATH}" \
    trainer.experiment_name="${EXPERIMENT_NAME}" \
    trainer.n_gpus_per_node="${N_GPUS}" \
    trainer.save_checkpoint_path="${SAVE_PATH}" \
    2>&1 | tee "${LOG_FILE}"

# tee masks python's exit status (and keeps `set -e` from aborting the
# pipeline); PIPESTATUS[0] recovers the trainer's real status.
TRAIN_EXIT_CODE=${PIPESTATUS[0]}

# ============================================================================
# Training finished
# ============================================================================

echo ""
if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}训练成功完成! $(date)${NC}"
else
    echo -e "${RED}训练失败 (退出代码: $TRAIN_EXIT_CODE) $(date)${NC}"
fi

echo -e "保存路径: ${SAVE_PATH}"
echo -e "日志文件: ${LOG_FILE}"

LATEST_LINK="${BASE_SAVE_PATH}/latest"
# -sfn replaces any existing "latest" link in one command (the previous
# rm + ln pair was non-atomic). The relative target keeps the link valid
# if BASE_SAVE_PATH is ever moved.
ln -sfn "${TIMESTAMP}" "${LATEST_LINK}"

if [ $TRAIN_EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}TensorBoard: tensorboard --logdir=${SAVE_PATH}/tensorboard${NC}"
    exit 0
else
    echo -e "查看完整日志: cat ${LOG_FILE}"
    exit 1
fi