Reality123b committed
Commit 28674b7 · verified · Parent: dd926e6

Add end-to-end SADC training script (download subset + train FSD model)

Files changed (1)
  1. train_sadc_e2e.py +612 -0
train_sadc_e2e.py ADDED
@@ -0,0 +1,612 @@
#!/usr/bin/env python3
"""
End-to-end training script for FSD-Level5-CoT on SADC driving data.

This script:
1. Downloads a subset of the SADC dataset (streaming → disk)
2. Builds the FSD model from fsd_model/
3. Trains end-to-end with gradient accumulation, warmup, eval, and logging
4. Pushes the trained model to the Hugging Face Hub

Dataset: jHaselberger/SADC-Situation-Awareness-for-Driver-Centric-Driving-Style-Adaptation
Model: Reality123b/FSD-Level5-CoT

Usage:
    # Default (5000 train, 1000 val, 5 epochs)
    python train_sadc_e2e.py

    # Custom
    python train_sadc_e2e.py --train_samples 10000 --val_samples 2000 --epochs 10 --batch_size 4

    # Quick test run
    python train_sadc_e2e.py --train_samples 100 --val_samples 50 --epochs 1
"""

import os
import sys
import time
import json
import math
import argparse

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Config defaults
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

DATASET_NAME = "jHaselberger/SADC-Situation-Awareness-for-Driver-Centric-Driving-Style-Adaptation"
HUB_MODEL_ID = "Reality123b/FSD-Level5-CoT"

# Model architecture
BEV_SIZE = 100
BEV_FEATURE_DIM = 128
PLANNING_D_MODEL = 128
IMG_H, IMG_W = 120, 160
NUM_WAYPOINTS = 20
COT_ACTOR_QUERIES = 32
COT_ROAD_QUERIES = 16

# Speed constant
MAX_SPEED_MS = 20.0 * 0.44704  # 20 mph → m/s

ROAD_TYPE_MAP = {
    "misc": 0, "rural": 1, "federal": 2, "highway": 3,
    "city": 4, "parking": 5, "intersection": 6,
}

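# Unit check: 20 mph × 0.44704 (m/s per mph) ≈ 8.94 m/s; this is the cap
# applied to the waypoint target speeds built in SADCDrivingDataset below.
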
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Step 1: Download SADC Subset
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

def download_sadc_subset(train_samples, val_samples, output_dir, train_split, val_split):
    """Download a manageable subset of SADC via streaming."""
    from datasets import load_dataset, load_from_disk, Dataset as HFDataset

    os.makedirs(output_dir, exist_ok=True)
    train_path = os.path.join(output_dir, "train")
    val_path = os.path.join(output_dir, "val")

    # Check if already downloaded
    if os.path.exists(train_path) and os.path.exists(val_path):
        print(f"[Download] Found existing subset at {output_dir}, skipping download.")
        return load_from_disk(train_path), load_from_disk(val_path)

    # Train
    print(f"[Download] Streaming {train_samples} train samples from '{train_split}'...")
    ds_stream = load_dataset(DATASET_NAME, split=train_split, streaming=True)
    train_rows = []
    for i, row in enumerate(ds_stream):
        if i >= train_samples:
            break
        train_rows.append(row)
        if (i + 1) % 1000 == 0:
            print(f"    ... {i + 1}/{train_samples}")
    train_ds = HFDataset.from_list(train_rows)
    train_ds.save_to_disk(train_path)
    print(f"    Saved {len(train_ds)} train samples.")

    # Val
    print(f"[Download] Streaming {val_samples} val samples from '{val_split}'...")
    ds_stream = load_dataset(DATASET_NAME, split=val_split, streaming=True)
    val_rows = []
    for i, row in enumerate(ds_stream):
        if i >= val_samples:
            break
        val_rows.append(row)
        if (i + 1) % 500 == 0:
            print(f"    ... {i + 1}/{val_samples}")
    val_ds = HFDataset.from_list(val_rows)
    val_ds.save_to_disk(val_path)
    print(f"    Saved {len(val_ds)} val samples.")

    return train_ds, val_ds
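
# The streaming loops above can be written more compactly with itertools.islice
# (an equivalent sketch, not used by this script):
#
#   from itertools import islice
#   stream = load_dataset(DATASET_NAME, split=train_split, streaming=True)
#   subset = HFDataset.from_list(list(islice(stream, train_samples)))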

# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Step 2: Dataset wrapper
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class SADCDrivingDataset(Dataset):
    """Wraps the SADC HF dataset → FSD model inputs + targets."""

    def __init__(self, hf_dataset, img_size=(IMG_H, IMG_W)):
        self.ds = hf_dataset
        self.img_h, self.img_w = img_size

    def __len__(self):
        return len(self.ds)

    def __getitem__(self, idx):
        row = self.ds[idx]

        # ── Image ──
        img = row.get("frame", None)
        if img is None:
            img_tensor = torch.zeros(3, self.img_h, self.img_w)
        else:
            from torchvision import transforms
            transform = transforms.Compose([
                transforms.Resize((self.img_h, self.img_w)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ])
            try:
                if hasattr(img, "convert"):
                    img = img.convert("RGB")
                img_tensor = transform(img)
            except Exception:
                img_tensor = torch.zeros(3, self.img_h, self.img_w)

        # Replicate to 6 virtual cameras with slight noise
        camera_images = img_tensor.unsqueeze(0).expand(6, -1, -1, -1).clone()
        for i in range(1, 6):
            camera_images[i] += torch.randn_like(camera_images[i]) * 0.01

        # ── Ego state ──
        speed_ms = float(row.get("v_kmph", 0.0)) / 3.6
        ax = float(row.get("ax_mpss", 0.0))
        steering = float(row.get("steering_rack_pos_m", 0.0))
        yaw_rate = float(row.get("yaw_rate_radps", 0.0))
        lane_center = float(row.get("d_lanecenter_m", 0.0))
        curvature = float(row.get("lane_curvature_radpm", 0.0))

        ego_state = torch.tensor([
            speed_ms, ax, steering, yaw_rate, 0.0, lane_center,
        ], dtype=torch.float32)

        # ── Navigation command ──
        road_type = str(row.get("road_type", "misc"))
        nav_cmd = ROAD_TYPE_MAP.get(road_type, 0)

        # ── Camera intrinsics / extrinsics (synthetic) ──
        K = torch.zeros(6, 3, 3)
        K[:, 0, 0] = 200.0
        K[:, 1, 1] = 200.0
        K[:, 0, 2] = self.img_w / 2
        K[:, 1, 2] = self.img_h / 2
        K[:, 2, 2] = 1.0

        E = torch.eye(4).unsqueeze(0).expand(6, -1, -1).clone()
        yaw_offsets = [-45, 45, -135, 135, -90, 90]
        for i, yaw_deg in enumerate(yaw_offsets):
            yaw_r = math.radians(yaw_deg)
            E[i, 0, 0] = math.cos(yaw_r)
            E[i, 0, 1] = -math.sin(yaw_r)
            E[i, 1, 0] = math.sin(yaw_r)
            E[i, 1, 1] = math.cos(yaw_r)

        # ── Ultrasonic (simulated) ──
        base_dist = max(0.5, abs(lane_center))
        us_distances = torch.ones(20, 1) * base_dist
        us_distances[:7] = torch.clamp(torch.randn(7, 1) * 0.5 + 3.0, 0.3, 5.0)
        us_distances[7:14] = torch.clamp(torch.randn(7, 1) * 0.5 + 3.5, 0.3, 5.0)
        us_distances[14:17] = torch.clamp(torch.tensor([[base_dist]] * 3) + torch.randn(3, 1) * 0.2, 0.3, 5.0)
        us_distances[17:20] = torch.clamp(torch.tensor([[base_dist]] * 3) + torch.randn(3, 1) * 0.2, 0.3, 5.0)

        us_placements = torch.zeros(20, 6)
        for i in range(7):
            us_placements[i] = torch.tensor([2.25, (i - 3) * 0.3, 0.4, (i - 3) * 10, 0, 0])
        for i in range(7):
            us_placements[7 + i] = torch.tensor([-2.25, (i - 3) * 0.3, 0.4, 180 + (i - 3) * 10, 0, 0])
        for i in range(3):
            us_placements[14 + i] = torch.tensor([(1 - i) * 1.0, 0.9, 0.6, -90, 0, 0])
            us_placements[17 + i] = torch.tensor([(1 - i) * 1.0, -0.9, 0.6, 90, 0, 0])

        # ── Ground truth targets ──
        gt_steering = torch.tensor(steering * 20.0)
        gt_throttle = torch.tensor(max(0.0, ax / 3.0)).clamp(0, 1)
        gt_brake = torch.tensor(max(0.0, -ax / 8.0)).clamp(0, 1)

        gt_waypoints = torch.zeros(NUM_WAYPOINTS, 4)
        for t in range(NUM_WAYPOINTS):
            dt = (t + 1) * 0.5
            gt_waypoints[t, 0] = speed_ms * dt
            gt_waypoints[t, 1] = -lane_center * min(1.0, dt / 3.0)
            gt_waypoints[t, 2] = curvature * speed_ms * dt
            gt_waypoints[t, 3] = min(speed_ms, MAX_SPEED_MS)

        if abs(steering) > 0.3:
            gt_behavior = 1 if steering > 0 else 2
        elif abs(ax) < 0.1 and speed_ms < 0.5:
            gt_behavior = 5
        else:
            gt_behavior = 0

        bev = BEV_SIZE
        gt_seg = torch.zeros(bev, bev, dtype=torch.long)
        gt_seg[bev // 4 : 3 * bev // 4, :] = 1

        gt_heatmap = torch.zeros(10, bev, bev)

        gt_occ = torch.zeros(1, bev, bev)
        gt_occ[:, : bev // 4, :] = 1.0
        gt_occ[:, 3 * bev // 4 :, :] = 1.0

        inputs = {
            "camera_images": camera_images,
            "camera_intrinsics": K,
            "camera_extrinsics": E,
            "ultrasonic_distances": us_distances,
            "ultrasonic_placements": us_placements,
            "ego_state": ego_state,
            "nav_command": torch.tensor(nav_cmd, dtype=torch.long),
        }

        targets = {
            "gt_steering": gt_steering,
            "gt_throttle": gt_throttle,
            "gt_brake": gt_brake,
            "gt_waypoints": gt_waypoints,
            "gt_behavior": torch.tensor(gt_behavior, dtype=torch.long),
            "gt_segmentation": gt_seg,
            "gt_heatmap": gt_heatmap,
            "gt_occupancy": gt_occ,
        }

        return inputs, targets
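
# Note on the targets above: gt_waypoints is a kinematic sketch rather than a
# recorded trajectory: x advances at constant speed, y blends back to the lane
# center over the first ~3 s, heading follows curvature × distance, and target
# speed is capped at MAX_SPEED_MS. For example, at 10 m/s the waypoint at t=3
# (dt = 2.0 s) sits at x = 20.0 m with y = -lane_center * 2/3.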

def collate_fn(batch):
    inputs_list, targets_list = zip(*batch)
    inputs = {k: torch.stack([d[k] for d in inputs_list]) for k in inputs_list[0]}
    targets = {k: torch.stack([d[k] for d in targets_list]) for k in targets_list[0]}
    return inputs, targets
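
# Quick shape check for one collated batch (a sketch, assuming a 2-sample
# batch; shapes follow the module constants):
#
#   ds = SADCDrivingDataset(train_ds)
#   inputs, targets = collate_fn([ds[0], ds[1]])
#   assert inputs["camera_images"].shape == (2, 6, 3, IMG_H, IMG_W)
#   assert targets["gt_waypoints"].shape == (2, NUM_WAYPOINTS, 4)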

# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Step 3: Training
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

@torch.no_grad()
def evaluate(model, loss_fn, val_loader, device, max_batches=50):
    model.eval()
    losses = []
    for i, (inputs, targets) in enumerate(val_loader):
        if i >= max_batches:
            break
        inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}
        targets = {k: v.to(device, non_blocking=True) for k, v in targets.items()}
        try:
            output = model(**inputs)
            l = loss_fn(output, targets)
            losses.append(l["total"].item())
        except RuntimeError:
            continue
    return np.mean(losses) if losses else float("inf")

def train(args, train_ds, val_ds):
    """Build the model and run the training loop."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"\n[Train] Device: {device}")
    if device.type == "cuda":
        print(f"  GPU: {torch.cuda.get_device_name()}")
        print(f"  VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

    # ── Tracking ──
    HAS_TRACKIO = False
    try:
        import trackio
        trackio.init(project="fsd-level5-cot", name="sadc-e2e-training")
        HAS_TRACKIO = True
        print("  Trackio initialized ✓")
    except Exception as e:
        print(f"  Trackio not available: {e}")

    # ── Datasets + Loaders ──
    train_dataset = SADCDrivingDataset(train_ds)
    val_dataset = SADCDrivingDataset(val_ds)

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
        drop_last=True,
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
        drop_last=True,
    )
    print(f"  Train batches/epoch: {len(train_loader)}")
    print(f"  Val batches: {len(val_loader)}")

    # ── Build model ──
    print("\n[Train] Building FSD model...")
    script_dir = os.path.dirname(os.path.abspath(__file__))
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)

    from fsd_model.config import VehicleConfig
    from fsd_model.model import FullSelfDrivingModel, FSDLoss

    config = VehicleConfig()
    model = FullSelfDrivingModel(
        vehicle_config=config,
        bev_size=BEV_SIZE,
        bev_resolution=0.5,
        bev_feature_dim=BEV_FEATURE_DIM,
        num_object_classes=10,
        num_seg_classes=7,
        num_waypoints=NUM_WAYPOINTS,
        planning_d_model=PLANNING_D_MODEL,
        future_steps=6,
        num_forecast_modes=6,
        forecast_steps=12,
        num_behaviors=10,
        enable_cot=True,
        cot_num_actor_queries=COT_ACTOR_QUERIES,
        cot_num_road_queries=COT_ROAD_QUERIES,
    ).to(device)

    param_info = model.count_parameters()
    total_params = param_info["total"]
    print(f"  Total parameters: {total_params:,}")

    # ── Loss ──
    loss_fn = FSDLoss(
        learnable_weights=True,
        w_detection=0.5,
        w_segmentation=1.0,
        w_occupancy=1.0,
        w_motion=0.5,
        w_behavior=1.0,
        w_trajectory=3.0,
        w_control=3.0,
        w_safety=2.0,
    ).to(device)

    # ── Optimizer + Scheduler ──
    all_params = list(model.parameters()) + list(loss_fn.parameters())
    optimizer = torch.optim.AdamW(all_params, lr=args.lr, weight_decay=args.weight_decay)

    total_steps = len(train_loader) * args.epochs // args.grad_accum
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=args.lr,
        total_steps=total_steps + 10,  # small buffer so the final steps don't overrun
        pct_start=0.1,
        anneal_strategy="cos",
    )

    if hasattr(model, "gradient_checkpointing_enable"):
        model.gradient_checkpointing_enable()

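    # OneCycleLR above warms up over the first 10% of optimizer steps
    # (pct_start=0.1) to max_lr, then cosine-anneals toward zero. A standalone
    # sketch for inspecting the schedule (dummy parameter, not part of training):
    #
    #   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=3e-4)
    #   sched = torch.optim.lr_scheduler.OneCycleLR(
    #       opt, max_lr=3e-4, total_steps=100, pct_start=0.1, anneal_strategy="cos")
    #   for _ in range(100):
    #       opt.step()   # a real run would call loss.backward() first
    #       sched.step()
    #       print(sched.get_last_lr()[0])
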
    # ── Training loop ──
    effective_batch = args.batch_size * args.grad_accum
    print(f"\n[Train] Starting: {args.epochs} epochs, effective batch={effective_batch}")
    print(f"  Total optimizer steps: ~{total_steps}")

    global_step = 0
    best_val_loss = float("inf")
    t0 = time.time()

    for epoch in range(args.epochs):
        model.train()
        epoch_losses = []
        optimizer.zero_grad()

        for batch_idx, (inputs, targets) in enumerate(train_loader):
            inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}
            targets = {k: v.to(device, non_blocking=True) for k, v in targets.items()}

            try:
                output = model(**inputs)
                losses = loss_fn(output, targets)
                loss = losses["total"] / args.grad_accum
            except RuntimeError as e:
                if "out of memory" in str(e):
                    torch.cuda.empty_cache()
                    print(f"  OOM at batch {batch_idx}, skipping")
                    continue
                raise

            loss.backward()

            if (batch_idx + 1) % args.grad_accum == 0:
                torch.nn.utils.clip_grad_norm_(all_params, args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                global_step += 1

            total_loss_val = losses["total"].item()
            epoch_losses.append(total_loss_val)

            # Logging
            if (batch_idx + 1) % args.log_every == 0:
                elapsed = time.time() - t0
                lr = scheduler.get_last_lr()[0]
                avg_loss = np.mean(epoch_losses[-args.log_every:])
                ctrl = losses.get("control", torch.tensor(0.0)).item()
                traj = losses.get("trajectory", torch.tensor(0.0)).item()
                seg = losses.get("segmentation", torch.tensor(0.0)).item()
                safety = losses.get("safety", torch.tensor(0.0)).item()

                print(
                    f"  [E{epoch+1}/{args.epochs}][{batch_idx+1}/{len(train_loader)}] "
                    f"loss={avg_loss:.4f} ctrl={ctrl:.4f} traj={traj:.4f} "
                    f"seg={seg:.4f} safety={safety:.4f} lr={lr:.2e} t={elapsed:.0f}s"
                )

                if HAS_TRACKIO:
                    trackio.log({
                        "train/loss": avg_loss,
                        "train/control_loss": ctrl,
                        "train/trajectory_loss": traj,
                        "train/segmentation_loss": seg,
                        "train/safety_loss": safety,
                        "train/lr": lr,
                        "train/epoch": epoch + batch_idx / len(train_loader),
                    })

            # Periodic eval
            if global_step > 0 and global_step % args.eval_every == 0:
                val_loss = evaluate(model, loss_fn, val_loader, device)
                print(f"  ── EVAL step {global_step}: val_loss={val_loss:.4f} (best={best_val_loss:.4f})")
                if HAS_TRACKIO:
                    trackio.log({"val/loss": val_loss, "val/step": global_step})
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    save_checkpoint(model, args.save_dir, "best")
                    print(f"  ── Saved best model (val_loss={val_loss:.4f})")
                model.train()

        # End-of-epoch eval
        val_loss = evaluate(model, loss_fn, val_loader, device)
        avg_epoch_loss = np.mean(epoch_losses) if epoch_losses else float("inf")
        print(
            f"\n  Epoch {epoch+1}/{args.epochs}: "
            f"train_loss={avg_epoch_loss:.4f} val_loss={val_loss:.4f}"
        )
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            save_checkpoint(model, args.save_dir, "best")
            print(f"  ── New best model (val_loss={val_loss:.4f})")

    # ── Final save ──
    total_time = time.time() - t0
    print(f"\n{'='*60}")
    print(f"Training complete in {total_time/60:.1f} min")
    print(f"Best val loss: {best_val_loss:.4f}")
    save_checkpoint(model, args.save_dir, "final")

    # ── Push to Hub ──
    if args.push_to_hub:
        print(f"\n[Hub] Pushing model to {args.hub_model_id}...")
        try:
            from huggingface_hub import HfApi
            api = HfApi()
            api.upload_folder(
                folder_path=os.path.join(args.save_dir, "best"),
                repo_id=args.hub_model_id,
                path_in_repo="trained_model",
                commit_message=f"Trained model (best val_loss={best_val_loss:.4f})",
            )
            print(f"  ✓ Pushed to {args.hub_model_id}/trained_model")
        except Exception as e:
            print(f"  Push failed: {e}")

    # ── Save metadata ──
    meta = {
        "dataset": DATASET_NAME,
        "train_samples": len(train_ds),
        "val_samples": len(val_ds),
        "epochs": args.epochs,
        "batch_size": args.batch_size,
        "grad_accum": args.grad_accum,
        "lr": args.lr,
        "best_val_loss": best_val_loss,
        "total_params": total_params,
        "training_time_min": total_time / 60,
        "device": str(device),
    }
    meta_path = os.path.join(args.save_dir, "training_meta.json")
    with open(meta_path, "w") as f:
        json.dump(meta, f, indent=2)
    print(f"  Metadata saved to {meta_path}")

    if args.push_to_hub:
        try:
            api.upload_file(
                path_or_fileobj=meta_path,
                path_in_repo="trained_model/training_meta.json",
                repo_id=args.hub_model_id,
            )
        except Exception:
            pass

    print("\nDone! ✓")
    return best_val_loss

def save_checkpoint(model, save_dir, tag):
    path = os.path.join(save_dir, tag)
    os.makedirs(path, exist_ok=True)
    if hasattr(model, "save_pretrained"):
        model.save_pretrained(path)
    else:
        torch.save(model.state_dict(), os.path.join(path, "model.pt"))
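
# Reloading a saved checkpoint (a sketch; assumes the plain state_dict branch
# was taken and the model is rebuilt with the same constructor arguments):
#
#   state = torch.load(os.path.join("./checkpoints", "best", "model.pt"), map_location="cpu")
#   model.load_state_dict(state)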

# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Main
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

def parse_args():
    p = argparse.ArgumentParser(description="End-to-end FSD-Level5-CoT training on SADC")

    # Data
    p.add_argument("--train_samples", type=int, default=5000)
    p.add_argument("--val_samples", type=int, default=1000)
    p.add_argument("--train_split", type=str, default="pretrain_train")
    p.add_argument("--val_split", type=str, default="pretrain_val")
    p.add_argument("--data_dir", type=str, default="./sadc_subset")

    # Training
    p.add_argument("--epochs", type=int, default=5)
    p.add_argument("--batch_size", type=int, default=8)
    p.add_argument("--grad_accum", type=int, default=4)
    p.add_argument("--lr", type=float, default=3e-4)
    p.add_argument("--weight_decay", type=float, default=1e-4)
    p.add_argument("--max_grad_norm", type=float, default=5.0)
    p.add_argument("--num_workers", type=int, default=4)

    # Logging / eval
    p.add_argument("--log_every", type=int, default=10)
    p.add_argument("--eval_every", type=int, default=500)

    # Saving
    p.add_argument("--save_dir", type=str, default="./checkpoints")
    p.add_argument("--push_to_hub", action="store_true", default=True)
    p.add_argument("--no_push_to_hub", action="store_false", dest="push_to_hub")
    p.add_argument("--hub_model_id", type=str, default=HUB_MODEL_ID)

    return p.parse_args()
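
# Pushing to the Hub is on by default; a local-only run looks like:
#
#   python train_sadc_e2e.py --train_samples 100 --val_samples 50 --no_push_to_hub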

def main():
    args = parse_args()

    print("=" * 60)
    print("  FSD-Level5-CoT · End-to-End Training on SADC")
    print("=" * 60)
    print(f"  Train samples: {args.train_samples}")
    print(f"  Val samples:   {args.val_samples}")
    print(f"  Epochs:        {args.epochs}")
    print(f"  Batch size:    {args.batch_size} × {args.grad_accum} accum = {args.batch_size * args.grad_accum}")
    print(f"  LR:            {args.lr}")
    print(f"  Push to Hub:   {args.push_to_hub} → {args.hub_model_id}")
    print("=" * 60)

    # Step 1: Download
    train_ds, val_ds = download_sadc_subset(
        train_samples=args.train_samples,
        val_samples=args.val_samples,
        output_dir=args.data_dir,
        train_split=args.train_split,
        val_split=args.val_split,
    )

    # Steps 2+3: Train
    best_val = train(args, train_ds, val_ds)

    return best_val


if __name__ == "__main__":
    main()