| |
| """ |
| Training script for FSD-Level5-CoT model on SADC real driving dataset. |
| |
| Dataset: jHaselberger/SADC-Situation-Awareness-for-Driver-Centric-Driving-Style-Adaptation |
| - 100K+ real driving frames with camera images |
| - Speed, acceleration, steering, yaw rate, lane position |
| - Multiple road types: rural, federal, highway |
| |
| Maps SADC columns β FSD model inputs: |
| frame β camera_images (replicated across 6 virtual cameras) |
| v_kmph β ego_state[0] (converted to m/s) |
| ax_mpss β ego_state[1] |
| steering_rack_pos_m β ego_state[2], gt_steering |
| yaw_rate_radps β ego_state[3] |
| d_lanecenter_m β used for waypoint GT |
| lane_curvature_radpm β used for trajectory generation |
| road_type β nav_command mapping |
| """ |
|
|
| import os |
| import sys |
| import time |
| import json |
| import math |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from torch.utils.data import Dataset, DataLoader |
| import numpy as np |
|
|
| |
| DATASET_NAME = "jHaselberger/SADC-Situation-Awareness-for-Driver-Centric-Driving-Style-Adaptation" |
| SPLIT = "pretrain_train" |
| VAL_SPLIT = "pretrain_val" |
| HUB_MODEL_ID = "Reality123b/FSD-Level5-CoT" |
|
|
| |
| BEV_SIZE = 100 |
| BEV_FEATURE_DIM = 128 |
| PLANNING_D_MODEL = 128 |
| IMG_H, IMG_W = 120, 160 |
| NUM_WAYPOINTS = 20 |
| COT_ACTOR_QUERIES = 32 |
| COT_ROAD_QUERIES = 16 |
|
|
| |
| BATCH_SIZE = 8 |
| LEARNING_RATE = 3e-4 |
| WEIGHT_DECAY = 1e-4 |
| NUM_EPOCHS = 5 |
| GRAD_ACCUM = 4 |
| MAX_GRAD_NORM = 5.0 |
| WARMUP_STEPS = 200 |
| LOG_EVERY = 10 |
| EVAL_EVERY = 500 |
| MAX_TRAIN_SAMPLES = 50000 |
| MAX_VAL_SAMPLES = 2000 |
| NUM_WORKERS = 4 |
|
|
| |
| EFFECTIVE_BATCH = BATCH_SIZE * GRAD_ACCUM |
| MAX_SPEED_MS = 20.0 * 0.44704 |
|
|
|
|
| |
| |
| |
|
|
| ROAD_TYPE_MAP = { |
| "misc": 0, "rural": 1, "federal": 2, "highway": 3, |
| "city": 4, "parking": 5, "intersection": 6, |
| } |
|
|
|
|
class SADCDrivingDataset(Dataset):
    """Wraps the SADC dataset for FSD model training.

    Each item converts one SADC row (single forward camera frame plus
    vehicle telemetry) into the (inputs, targets) dicts the FSD model and
    FSDLoss expect.  The single frame is replicated across 6 virtual
    cameras; ultrasonic readings, BEV segmentation/occupancy, waypoints
    and behavior labels are synthesized from the telemetry since SADC has
    no such ground truth.
    """

    def __init__(self, hf_dataset, max_samples=None, img_size=(IMG_H, IMG_W)):
        """
        Args:
            hf_dataset: HF `datasets.Dataset` with SADC columns
                (frame, v_kmph, ax_mpss, steering_rack_pos_m, ...).
            max_samples: optional cap; keeps only the first `max_samples` rows.
            img_size: (H, W) the camera frame is resized to.
        """
        self.ds = hf_dataset
        self.img_h, self.img_w = img_size
        if max_samples and len(self.ds) > max_samples:
            self.ds = self.ds.select(range(max_samples))
        # Built lazily on first use (and only once, instead of per item)
        # so torchvision is only required when rows actually carry frames.
        self._transform = None

    def __len__(self):
        return len(self.ds)

    def _image_transform(self):
        """Build (once) and return the resize + normalize transform."""
        if self._transform is None:
            from torchvision import transforms
            self._transform = transforms.Compose([
                transforms.Resize((self.img_h, self.img_w)),
                transforms.ToTensor(),
                # ImageNet statistics, matching standard pretrained backbones.
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
        return self._transform

    @staticmethod
    def _field(row, key):
        """Read a numeric column, treating a missing or None value as 0.0.

        HF image/tabular rows can contain explicit None entries, for which
        the original `float(row.get(key, 0.0))` raised TypeError.
        """
        val = row.get(key, 0.0)
        return float(val) if val is not None else 0.0

    def __getitem__(self, idx):
        row = self.ds[idx]

        # --- camera frame -> normalized tensor ---------------------------
        img = row["frame"]
        if img is None:
            # Missing frame: black image keeps the sample usable.
            img_tensor = torch.zeros(3, self.img_h, self.img_w)
        else:
            transform = self._image_transform()
            try:
                if hasattr(img, 'convert'):
                    img = img.convert('RGB')
                img_tensor = transform(img)
            except Exception:
                # Corrupt/undecodable frame: fall back to a black image.
                img_tensor = torch.zeros(3, self.img_h, self.img_w)

        # Replicate the single frame across 6 virtual cameras; tiny noise
        # on cameras 1..5 keeps them from being byte-identical views.
        camera_images = img_tensor.unsqueeze(0).expand(6, -1, -1, -1).clone()
        for i in range(1, 6):
            camera_images[i] = camera_images[i] + torch.randn_like(camera_images[i]) * 0.01

        # --- telemetry -> ego state --------------------------------------
        speed_ms = self._field(row, "v_kmph") / 3.6  # km/h -> m/s
        ax = self._field(row, "ax_mpss")
        steering = self._field(row, "steering_rack_pos_m")
        yaw_rate = self._field(row, "yaw_rate_radps")
        lane_center = self._field(row, "d_lanecenter_m")
        curvature = self._field(row, "lane_curvature_radpm")

        ego_state = torch.tensor([
            speed_ms,
            ax,
            steering,
            yaw_rate,
            0.0,          # reserved slot (unused by SADC telemetry)
            lane_center,
        ], dtype=torch.float32)

        # --- road type -> nav command ------------------------------------
        road_type = str(row.get("road_type", "misc"))
        nav_cmd = ROAD_TYPE_MAP.get(road_type, 0)

        # --- nominal pinhole intrinsics for all 6 virtual cameras --------
        K = torch.zeros(6, 3, 3)
        K[:, 0, 0] = 200.0              # fx
        K[:, 1, 1] = 200.0              # fy
        K[:, 0, 2] = self.img_w / 2     # cx
        K[:, 1, 2] = self.img_h / 2     # cy
        K[:, 2, 2] = 1.0

        # Extrinsics: identity poses rotated by a per-camera yaw offset.
        E = torch.eye(4).unsqueeze(0).expand(6, -1, -1).clone()
        yaw_offsets = [-45, 45, -135, 135, -90, 90]
        for i, yaw_deg in enumerate(yaw_offsets):
            yaw_r = math.radians(yaw_deg)
            E[i, 0, 0] = math.cos(yaw_r)
            E[i, 0, 1] = -math.sin(yaw_r)
            E[i, 1, 0] = math.sin(yaw_r)
            E[i, 1, 1] = math.cos(yaw_r)

        # --- synthetic ultrasonic readings -------------------------------
        # SADC has no ultrasonic data; fabricate plausible distances, with
        # the side sensors loosely tied to the lane-center offset.
        base_dist = max(0.5, abs(lane_center))
        us_distances = torch.ones(20, 1) * base_dist
        us_distances[:7] = torch.clamp(torch.randn(7, 1) * 0.5 + 3.0, 0.3, 5.0)
        us_distances[7:14] = torch.clamp(torch.randn(7, 1) * 0.5 + 3.5, 0.3, 5.0)
        us_distances[14:17] = torch.clamp(torch.tensor([[base_dist]] * 3) + torch.randn(3, 1) * 0.2, 0.3, 5.0)
        us_distances[17:20] = torch.clamp(torch.tensor([[base_dist]] * 3) + torch.randn(3, 1) * 0.2, 0.3, 5.0)

        # Sensor placements: (x, y, z, yaw_deg, pitch, roll) per sensor —
        # 7 front, 7 rear, 3 left, 3 right.
        us_placements = torch.zeros(20, 6)
        for i in range(7):
            us_placements[i] = torch.tensor([2.25, (i-3)*0.3, 0.4, (i-3)*10, 0, 0])
        for i in range(7):
            us_placements[7+i] = torch.tensor([-2.25, (i-3)*0.3, 0.4, 180+(i-3)*10, 0, 0])
        for i in range(3):
            us_placements[14+i] = torch.tensor([(1-i)*1.0, 0.9, 0.6, -90, 0, 0])
            us_placements[17+i] = torch.tensor([(1-i)*1.0, -0.9, 0.6, 90, 0, 0])

        # --- control ground truth ----------------------------------------
        # Rack position (m) scaled into the model's steering range.
        gt_steering = torch.tensor(steering * 20.0)
        # Longitudinal accel split into throttle (÷3 m/s²) / brake (÷8 m/s²).
        gt_throttle = torch.tensor(max(0.0, ax / 3.0)).clamp(0, 1)
        gt_brake = torch.tensor(max(0.0, -ax / 8.0)).clamp(0, 1)

        # --- trajectory ground truth: (x, y, heading, speed) per step ----
        gt_waypoints = torch.zeros(NUM_WAYPOINTS, 4)
        for t in range(NUM_WAYPOINTS):
            dt = (t + 1) * 0.5  # 0.5 s per waypoint
            gt_waypoints[t, 0] = speed_ms * dt                        # forward
            gt_waypoints[t, 1] = -lane_center * min(1.0, dt / 3.0)    # recenter
            gt_waypoints[t, 2] = curvature * speed_ms * dt            # heading
            gt_waypoints[t, 3] = min(speed_ms, MAX_SPEED_MS)          # speed cap

        # --- behavior label from steering / motion heuristics ------------
        if abs(steering) > 0.3:
            gt_behavior = 1 if steering > 0 else 2   # turn right / left
        elif abs(ax) < 0.1 and speed_ms < 0.5:
            gt_behavior = 5                          # stopped
        else:
            gt_behavior = 0                          # keep lane

        # --- synthetic BEV targets ---------------------------------------
        bev = BEV_SIZE
        # Segmentation: the middle horizontal band is "road" (class 1).
        gt_seg = torch.zeros(bev, bev, dtype=torch.long)
        gt_seg[bev//4:3*bev//4, :] = 1

        # No object annotations available -> empty detection heatmaps.
        gt_heatmap = torch.zeros(10, bev, bev)

        # Occupancy: everything off-road (top/bottom quarters) is occupied.
        gt_occ = torch.zeros(1, bev, bev)
        gt_occ[:, :bev//4, :] = 1.0
        gt_occ[:, 3*bev//4:, :] = 1.0

        inputs = {
            "camera_images": camera_images,
            "camera_intrinsics": K,
            "camera_extrinsics": E,
            "ultrasonic_distances": us_distances,
            "ultrasonic_placements": us_placements,
            "ego_state": ego_state,
            "nav_command": torch.tensor(nav_cmd, dtype=torch.long),
        }

        targets = {
            "gt_steering": gt_steering,
            "gt_throttle": gt_throttle,
            "gt_brake": gt_brake,
            "gt_waypoints": gt_waypoints,
            "gt_behavior": torch.tensor(gt_behavior, dtype=torch.long),
            "gt_segmentation": gt_seg,
            "gt_heatmap": gt_heatmap,
            "gt_occupancy": gt_occ,
        }

        return inputs, targets
|
|
|
|
def collate_fn(batch):
    """Stack a list of (inputs, targets) dict pairs into batched tensors.

    Every sample must share the same keys; each key's tensors are stacked
    along a new leading batch dimension.
    """
    inputs_list, targets_list = zip(*batch)

    def _stack(dicts):
        # Keys are taken from the first sample; all samples are assumed
        # to provide the same set of keys with same-shaped tensors.
        return {key: torch.stack([sample[key] for sample in dicts])
                for key in dicts[0]}

    return _stack(inputs_list), _stack(targets_list)
|
|
|
|
| |
| |
| |
|
|
def main():
    """Run end-to-end training of the FSD-Level5-CoT model on SADC data.

    Steps:
      1. Pick device and (best-effort) initialize trackio experiment logging.
      2. Load the SADC train/val splits and wrap them in SADCDrivingDataset.
      3. Build the model, composite FSDLoss, AdamW optimizer and OneCycleLR.
      4. Train with gradient accumulation, periodic evaluation and
         best-checkpoint saving, then push the best model + metadata to
         the Hugging Face Hub.
    """
    print("=" * 70)
    print(" FSD-Level5-CoT Training on SADC Real Driving Data")
    print("=" * 70)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")
    if device.type == "cuda":
        print(f"GPU: {torch.cuda.get_device_name()}")
        # FIX: the attribute is `total_memory`; `total_mem` does not exist and
        # raised AttributeError on every CUDA run.
        print(f"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

    # Experiment tracking is optional: training proceeds without it.
    try:
        import trackio
        trackio.init(project="fsd-level5-cot", name="sadc-training")
        HAS_TRACKIO = True
        print("Trackio initialized")
    except Exception as e:
        print(f"Trackio not available: {e}")
        HAS_TRACKIO = False

    print(f"\nLoading dataset: {DATASET_NAME}")
    print(f" Train split: {SPLIT} (max {MAX_TRAIN_SAMPLES} samples)")
    print(f" Val split: {VAL_SPLIT} (max {MAX_VAL_SAMPLES} samples)")

    from datasets import load_dataset

    ds = load_dataset(DATASET_NAME, split=SPLIT, streaming=False)
    val_ds = load_dataset(DATASET_NAME, split=VAL_SPLIT, streaming=False)

    print(f" Loaded train: {len(ds)} rows")
    print(f" Loaded val: {len(val_ds)} rows")

    train_dataset = SADCDrivingDataset(ds, max_samples=MAX_TRAIN_SAMPLES)
    val_dataset = SADCDrivingDataset(val_ds, max_samples=MAX_VAL_SAMPLES)

    train_loader = DataLoader(
        train_dataset, batch_size=BATCH_SIZE, shuffle=True,
        num_workers=NUM_WORKERS, collate_fn=collate_fn, pin_memory=True,
        drop_last=True,
    )
    val_loader = DataLoader(
        val_dataset, batch_size=BATCH_SIZE, shuffle=False,
        num_workers=NUM_WORKERS, collate_fn=collate_fn, pin_memory=True,
        drop_last=True,
    )

    print(f" Train batches/epoch: {len(train_loader)}")
    print(f" Val batches: {len(val_loader)}")

    # --- model + loss -----------------------------------------------------
    print("\nBuilding model...")
    sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
    from fsd_model.config import VehicleConfig
    from fsd_model.model import FullSelfDrivingModel, FSDLoss

    config = VehicleConfig()
    model = FullSelfDrivingModel(
        vehicle_config=config,
        bev_size=BEV_SIZE,
        bev_resolution=0.5,
        bev_feature_dim=BEV_FEATURE_DIM,
        num_object_classes=10,
        num_seg_classes=7,
        num_waypoints=NUM_WAYPOINTS,
        planning_d_model=PLANNING_D_MODEL,
        future_steps=6,
        num_forecast_modes=6,
        forecast_steps=12,
        num_behaviors=10,
        enable_cot=True,
        cot_num_actor_queries=COT_ACTOR_QUERIES,
        cot_num_road_queries=COT_ROAD_QUERIES,
    ).to(device)

    param_counts = model.count_parameters()
    total_params = param_counts["total"]
    print(f" Total parameters: {total_params:,}")
    for k, v in param_counts.items():
        if k not in ["total", "total_trainable"]:
            print(f" {k}: {v:,}")

    # Composite multi-task loss; weights are learnable on top of these priors.
    loss_fn = FSDLoss(
        learnable_weights=True,
        w_detection=0.5,
        w_segmentation=1.0,
        w_occupancy=1.0,
        w_motion=0.5,
        w_behavior=1.0,
        w_trajectory=3.0,
        w_control=3.0,
        w_safety=2.0,
    ).to(device)

    # Optimize model AND the learnable loss weights together.
    all_params = list(model.parameters()) + list(loss_fn.parameters())
    optimizer = torch.optim.AdamW(all_params, lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)

    # One scheduler step per optimizer step (i.e. per GRAD_ACCUM batches);
    # +10 gives headroom so the final steps never overrun total_steps.
    total_steps = len(train_loader) * NUM_EPOCHS // GRAD_ACCUM
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, max_lr=LEARNING_RATE,
        total_steps=total_steps + 10,
        pct_start=0.1,
        anneal_strategy='cos',
    )

    # Trade compute for memory when the model supports it.
    if hasattr(model, 'gradient_checkpointing_enable'):
        model.gradient_checkpointing_enable()

    print(f"\nStarting training: {NUM_EPOCHS} epochs, effective batch={EFFECTIVE_BATCH}")
    print(f"Total steps: ~{total_steps}")

    global_step = 0
    best_val_loss = float('inf')
    t0 = time.time()

    for epoch in range(NUM_EPOCHS):
        model.train()
        epoch_losses = []
        optimizer.zero_grad()

        for batch_idx, (inputs, targets) in enumerate(train_loader):
            inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}
            targets = {k: v.to(device, non_blocking=True) for k, v in targets.items()}

            # Forward pass; skip only CUDA OOM batches, re-raise anything else.
            try:
                output = model(**inputs)
                losses = loss_fn(output, targets)
                loss = losses["total"] / GRAD_ACCUM
            except RuntimeError as e:
                if "out of memory" in str(e):
                    torch.cuda.empty_cache()
                    print(f" OOM at batch {batch_idx}, skipping")
                    continue
                raise

            loss.backward()

            # Optimizer step every GRAD_ACCUM batches (gradient accumulation).
            if (batch_idx + 1) % GRAD_ACCUM == 0:
                torch.nn.utils.clip_grad_norm_(all_params, MAX_GRAD_NORM)
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                global_step += 1

            total_loss_val = losses["total"].item()
            epoch_losses.append(total_loss_val)

            # --- periodic console / trackio logging -----------------------
            if (batch_idx + 1) % LOG_EVERY == 0:
                elapsed = time.time() - t0
                lr = scheduler.get_last_lr()[0]
                avg_loss = np.mean(epoch_losses[-LOG_EVERY:])

                ctrl_loss = losses.get("control", torch.tensor(0.0)).item()
                traj_loss = losses.get("trajectory", torch.tensor(0.0)).item()
                seg_loss = losses.get("segmentation", torch.tensor(0.0)).item()
                safety_loss = losses.get("safety", torch.tensor(0.0)).item()

                print(f" [E{epoch+1}/{NUM_EPOCHS}][{batch_idx+1}/{len(train_loader)}] "
                      f"loss={avg_loss:.4f} ctrl={ctrl_loss:.4f} traj={traj_loss:.4f} "
                      f"seg={seg_loss:.4f} safety={safety_loss:.4f} "
                      f"lr={lr:.2e} t={elapsed:.0f}s")

                if HAS_TRACKIO:
                    trackio.log({
                        "train/loss": avg_loss,
                        "train/control_loss": ctrl_loss,
                        "train/trajectory_loss": traj_loss,
                        "train/segmentation_loss": seg_loss,
                        "train/safety_loss": safety_loss,
                        "train/lr": lr,
                        "train/epoch": epoch + batch_idx / len(train_loader),
                    })

            # --- mid-epoch evaluation + best-checkpoint saving ------------
            if global_step > 0 and global_step % EVAL_EVERY == 0:
                val_loss = evaluate(model, loss_fn, val_loader, device)
                print(f" ββ EVAL step {global_step}: val_loss={val_loss:.4f} "
                      f"(best={best_val_loss:.4f})")

                if HAS_TRACKIO:
                    trackio.log({"val/loss": val_loss, "val/step": global_step})

                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    save_dir = "/app/best_model"
                    model.save_pretrained(save_dir)
                    print(f" ββ Saved best model (val_loss={val_loss:.4f})")

                # evaluate() left the model in eval mode; restore training mode.
                model.train()

        # --- end-of-epoch evaluation ------------------------------------
        val_loss = evaluate(model, loss_fn, val_loader, device)
        avg_epoch_loss = np.mean(epoch_losses)
        print(f"\n Epoch {epoch+1}/{NUM_EPOCHS} complete: "
              f"train_loss={avg_epoch_loss:.4f} val_loss={val_loss:.4f}")

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            model.save_pretrained("/app/best_model")
            print(f" ββ Saved best model (val_loss={val_loss:.4f})")

    total_time = time.time() - t0
    print(f"\nTraining complete in {total_time/60:.1f} min")
    print(f"Best val loss: {best_val_loss:.4f}")

    model.save_pretrained("/app/final_model")

    # --- push best model to the Hub (best-effort) -------------------------
    print("\nPushing model to Hub...")
    api = None  # FIX: stays None if the Hub client can't be created/used
    try:
        from huggingface_hub import HfApi
        api = HfApi()
        api.upload_folder(
            folder_path="/app/best_model",
            repo_id=HUB_MODEL_ID,
            path_in_repo="trained_model",
            commit_message=f"Upload trained model (best val_loss={best_val_loss:.4f})",
        )
        print(f" β Pushed to {HUB_MODEL_ID}/trained_model")
    except Exception as e:
        print(f" Push failed: {e}")

    # --- write + upload training metadata ---------------------------------
    meta = {
        "dataset": DATASET_NAME,
        "split": SPLIT,
        "num_epochs": NUM_EPOCHS,
        "best_val_loss": best_val_loss,
        "total_params": total_params,
        "training_time_min": total_time / 60,
        "device": str(device),
        "batch_size": BATCH_SIZE,
        "grad_accum": GRAD_ACCUM,
        "learning_rate": LEARNING_RATE,
    }
    with open("/app/training_meta.json", "w") as f:
        json.dump(meta, f, indent=2)

    # FIX: `api` was only bound inside the previous try-block; if the import
    # or upload failed, the old bare `except:` here silently hid a NameError.
    if api is not None:
        try:
            api.upload_file(
                path_or_fileobj="/app/training_meta.json",
                path_in_repo="trained_model/training_meta.json",
                repo_id=HUB_MODEL_ID,
            )
        except Exception as e:
            print(f" Metadata upload failed: {e}")

    print("\nDone!")
|
|
|
|
@torch.no_grad()
def evaluate(model, loss_fn, val_loader, device, max_batches=50):
    """Quick validation pass over at most `max_batches` batches.

    Args:
        model: module mapping input dict -> output dict (called as model(**inputs)).
        loss_fn: callable returning a dict with a scalar "total" loss tensor.
        val_loader: iterable of (inputs, targets) dict pairs.
        device: device tensors are moved to.
        max_batches: cap on how many batches are evaluated.

    Returns:
        Mean total loss, or +inf if no batch succeeded (so callers comparing
        against best_val_loss never save a checkpoint on a failed eval).

    Note: puts the model in eval mode and does NOT restore train mode;
    the caller is responsible for calling model.train() afterwards.
    """
    model.eval()
    val_losses = []
    for i, (inputs, targets) in enumerate(val_loader):
        if i >= max_batches:
            break
        inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}
        targets = {k: v.to(device, non_blocking=True) for k, v in targets.items()}
        try:
            output = model(**inputs)
            losses = loss_fn(output, targets)
            val_losses.append(losses["total"].item())
        except RuntimeError as e:
            # FIX: only skip CUDA OOM batches (consistent with the training
            # loop); the old blanket `continue` silently hid real errors.
            if "out of memory" in str(e):
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
                continue
            raise
    return np.mean(val_losses) if val_losses else float('inf')
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|