#!/usr/bin/env python3
"""
OCULUS Training Script
Trains the vision projector to map DINOv3+SigLIP2 features to LFM2.5 embeddings.
Uses COCO-style or local image-caption pairs.
What gets trained:
- VisionProjector (the MLP that maps 2048D → 64×1536D)
What stays frozen:
- DINOv3 encoder
- SigLIP2 encoder
- LFM2.5 language model
"""
import os
import sys
import json
import time
import random
from pathlib import Path
from dataclasses import dataclass
from typing import List, Dict, Tuple, Optional
import numpy as np
import torch
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
from mlx.utils import tree_flatten  # for flattening nested parameter dicts
from PIL import Image
# Add models path (the repo root, which contains src/models, is one level above training/)
OCULUS_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(OCULUS_ROOT / "src" / "models"))
@dataclass
class TrainingConfig:
"""Training configuration."""
# Data
data_dir: str = "data/train"
captions_file: str = "captions.jsonl"
# Training
batch_size: int = 4
learning_rate: float = 1e-4
num_epochs: int = 10
warmup_steps: int = 100
gradient_accumulation: int = 1
# Model
num_vision_tokens: int = 64
projector_hidden_dim: int = 2048
# Checkpointing
save_every: int = 100
checkpoint_dir: str = "checkpoints/oculus"
# Logging
log_every: int = 10
class CaptionDataset:
"""Dataset for image-caption pairs."""
def __init__(self, data_dir: str, captions_file: str):
self.data_dir = Path(data_dir)
self.images_dir = self.data_dir / "images"
# Load captions
captions_path = self.data_dir / captions_file
self.samples = []
if captions_path.exists():
with open(captions_path) as f:
for line in f:
sample = json.loads(line.strip())
img_path = self.images_dir / sample["file"]
if img_path.exists():
self.samples.append({
"image_path": str(img_path),
"caption": sample["caption"]
})
print(f" Loaded {len(self.samples)} image-caption pairs")
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
return self.samples[idx]
def shuffle(self):
random.shuffle(self.samples)
class VisionProjector(nn.Module):
"""Trainable vision projector (MLX)."""
def __init__(self, fused_dim: int = 2048, hidden_dim: int = 2048,
num_tokens: int = 64, embed_dim: int = 1536):
super().__init__()
self.fc1 = nn.Linear(fused_dim, hidden_dim)
self.act = nn.GELU()
self.fc2 = nn.Linear(hidden_dim, num_tokens * embed_dim)
self.norm = nn.LayerNorm(embed_dim)
self.num_tokens = num_tokens
self.embed_dim = embed_dim
def __call__(self, x: mx.array) -> mx.array:
batch_size = x.shape[0]
x = self.fc1(x)
x = self.act(x)
x = self.fc2(x)
x = x.reshape(batch_size, self.num_tokens, self.embed_dim)
x = self.norm(x)
return x
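# Shape sketch with the defaults above: a [batch, 2048] fused feature goes in,
# [batch, 64, 1536] vision tokens come out.
#   proj = VisionProjector(fused_dim=2048)
#   tokens = proj(mx.zeros((2, 2048)))  # -> (2, 64, 1536)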
class OculusTrainer:
"""Trainer for Oculus vision projector."""
def __init__(self, config: TrainingConfig):
self.config = config
print("\n" + "=" * 60)
print("๐Ÿ”ฎ OCULUS TRAINER")
print("=" * 60)
# Load vision encoders
self._load_vision_encoders()
# Create projector
self._create_projector()
# Load LLM tokenizer (for encoding captions)
self._load_tokenizer()
# Create optimizer
self._create_optimizer()
# Load dataset
self._load_dataset()
# Create checkpoint directory
self.checkpoint_dir = Path(config.checkpoint_dir)
self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
def _load_vision_encoders(self):
"""Load frozen vision encoders."""
from transformers import AutoImageProcessor, AutoModel
print("\n[Loading Vision Encoders (Frozen)]")
hf_token = os.getenv("HF_TOKEN")
# DINOv3
try:
self.dinov3_proc = AutoImageProcessor.from_pretrained(
"facebook/dinov3-vith16plus-pretrain-lvd1689m", token=hf_token
)
self.dinov3 = AutoModel.from_pretrained(
"facebook/dinov3-vith16plus-pretrain-lvd1689m", token=hf_token
).eval()
self.dinov3_dim = 1280
print(" โœ“ DINOv3-ViT-H/16+")
except:
self.dinov3_proc = AutoImageProcessor.from_pretrained("facebook/dinov2-large")
self.dinov3 = AutoModel.from_pretrained("facebook/dinov2-large").eval()
self.dinov3_dim = 1024
print(" โœ“ DINOv2-large (fallback)")
# SigLIP2
try:
self.siglip_proc = AutoImageProcessor.from_pretrained("google/siglip2-base-patch16-224")
self.siglip = AutoModel.from_pretrained("google/siglip2-base-patch16-224").eval()
self.siglip_dim = 768
print(" โœ“ SigLIP2-base")
except:
from transformers import SiglipVisionModel
self.siglip_proc = AutoImageProcessor.from_pretrained("google/siglip-base-patch16-224")
self.siglip = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224").eval()
self.siglip_dim = 768
print(" โœ“ SigLIP-base (fallback)")
self.fused_dim = self.dinov3_dim + self.siglip_dim
print(f" โ†’ Fused dimension: {self.fused_dim}D")
def _create_projector(self):
"""Create trainable projector."""
print("\n[Creating Vision Projector (Trainable)]")
self.projector = VisionProjector(
fused_dim=self.fused_dim,
hidden_dim=self.config.projector_hidden_dim,
num_tokens=self.config.num_vision_tokens,
embed_dim=1536 # LFM2.5 embedding dim
)
        # Count trainable parameters (parameters() returns a nested dict of arrays)
        param_count = sum(
            v.size for _, v in tree_flatten(self.projector.parameters())
        )
        print(f" ✓ Projector: {param_count:,} trainable parameters")
def _load_tokenizer(self):
"""Load LFM2.5 tokenizer."""
print("\n[Loading LFM2.5 Tokenizer]")
from mlx_lm import load
_, self.tokenizer = load("LiquidAI/LFM2.5-1.2B-Instruct-MLX-bf16")
print(" โœ“ Tokenizer loaded")
    def _create_optimizer(self):
        """Create AdamW optimizer with linear learning-rate warmup."""
        print("\n[Creating Optimizer]")
        # Warm up linearly from 0 to the target learning rate over
        # config.warmup_steps, then hold it constant
        schedule = optim.linear_schedule(
            0.0, self.config.learning_rate, self.config.warmup_steps
        )
        self.optimizer = optim.AdamW(
            learning_rate=schedule,
            weight_decay=0.01
        )
        print(f" ✓ AdamW (lr={self.config.learning_rate}, "
              f"warmup={self.config.warmup_steps} steps)")
def _load_dataset(self):
"""Load training data."""
print("\n[Loading Dataset]")
self.dataset = CaptionDataset(
self.config.data_dir,
self.config.captions_file
)
@torch.no_grad()
def encode_image(self, image_path: str) -> mx.array:
"""Encode image with frozen vision encoders."""
image = Image.open(image_path).convert('RGB')
# DINOv3
d_inputs = self.dinov3_proc(images=image, return_tensors="pt")
d_out = self.dinov3(**d_inputs)
        d_pooled = (
            d_out.pooler_output
            if getattr(d_out, "pooler_output", None) is not None
            else d_out.last_hidden_state[:, 0]
        )
        # SigLIP2: only the patch-embedding layer is run (not the full vision
        # tower); its patch embeddings are mean-pooled below
s_inputs = self.siglip_proc(images=image, return_tensors="pt")
s_hidden = self.siglip.vision_model.embeddings(s_inputs['pixel_values'])
s_pooled = s_hidden.mean(dim=1)
# Fuse
fused = torch.cat([d_pooled, s_pooled], dim=-1)
return mx.array(fused.numpy())
    def compute_loss(self, vision_tokens: mx.array,
                     caption_tokens: Optional[mx.array] = None) -> mx.array:
        """
        Compute a self-supervised coherence loss over the projected vision tokens.

        Note: caption_tokens is accepted for a future caption-alignment term
        but is not used yet. The current loss only encourages the vision
        tokens to (a) be mutually similar and (b) stay near unit norm.
        """
        # Self-consistency: maximize pairwise token similarity within an image
        token_sims = mx.matmul(
            vision_tokens, vision_tokens.transpose(0, 2, 1)
        )  # [batch, num_tokens, num_tokens]
        token_loss = -mx.mean(token_sims)
        # Regularization: keep tokens near unit norm so the similarity term
        # cannot be trivially inflated by scaling the tokens up
        norm_loss = mx.mean(mx.abs(mx.linalg.norm(vision_tokens, axis=-1) - 1.0))
        # Combined loss
        return token_loss * 0.1 + norm_loss
def train_step(self, batch: List[Dict]) -> float:
"""Single training step."""
# Encode images
vision_features = []
for sample in batch:
features = self.encode_image(sample["image_path"])
vision_features.append(features)
# Stack
vision_features = mx.concatenate(vision_features, axis=0)
# Tokenize captions (for potential future use with caption loss)
# For now, we train projector with self-consistency
# Forward + backward
        def loss_fn(model):
            vision_tokens = model(vision_features)
            return self.compute_loss(vision_tokens)
        # nn.value_and_grad differentiates w.r.t. the module's trainable
        # parameters; plain mx.value_and_grad would treat the module as an
        # ordinary positional argument
        loss_and_grad_fn = nn.value_and_grad(self.projector, loss_fn)
        loss, grads = loss_and_grad_fn(self.projector)
# Update
self.optimizer.update(self.projector, grads)
mx.eval(self.projector.parameters(), self.optimizer.state)
return float(loss)
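    # Freezing note: the torch encoders run under torch.no_grad() in
    # encode_image() and the MLX autograd graph starts at the fused features,
    # so DINOv3/SigLIP2 (and the LFM2.5 LM, never invoked here) get no gradients.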
def save_checkpoint(self, step: int, loss: float):
"""Save checkpoint."""
checkpoint_path = self.checkpoint_dir / f"step_{step:06d}"
checkpoint_path.mkdir(exist_ok=True)
        # Save projector weights (flatten the nested parameter dict into
        # dotted keys like "fc1.weight" so np.savez receives plain arrays)
        weights = {name: np.array(param)
                   for name, param in tree_flatten(self.projector.parameters())}
        np.savez(str(checkpoint_path / "projector.npz"), **weights)
# Save training state
state = {
"step": step,
"loss": loss,
"config": {
"fused_dim": self.fused_dim,
"hidden_dim": self.config.projector_hidden_dim,
"num_tokens": self.config.num_vision_tokens,
"embed_dim": 1536
}
}
with open(checkpoint_path / "state.json", "w") as f:
json.dump(state, f, indent=2)
print(f" ๐Ÿ’พ Saved checkpoint to {checkpoint_path}")
def train(self):
"""Main training loop."""
print("\n" + "=" * 60)
print("๐Ÿš€ STARTING TRAINING")
print("=" * 60)
print(f" Epochs: {self.config.num_epochs}")
print(f" Batch size: {self.config.batch_size}")
print(f" Learning rate: {self.config.learning_rate}")
print(f" Dataset size: {len(self.dataset)} samples")
global_step = 0
total_loss = 0
start_time = time.time()
for epoch in range(self.config.num_epochs):
print(f"\n๐Ÿ“š Epoch {epoch + 1}/{self.config.num_epochs}")
print("-" * 40)
self.dataset.shuffle()
epoch_loss = 0
num_batches = 0
# Batch loop
for i in range(0, len(self.dataset), self.config.batch_size):
batch = [self.dataset[j] for j in range(i, min(i + self.config.batch_size, len(self.dataset)))]
if len(batch) < 2:
continue
try:
loss = self.train_step(batch)
epoch_loss += loss
total_loss += loss
num_batches += 1
global_step += 1
# Logging
if global_step % self.config.log_every == 0:
avg_loss = total_loss / global_step
elapsed = time.time() - start_time
print(f" Step {global_step:5d} | Loss: {loss:.4f} | Avg: {avg_loss:.4f} | Time: {elapsed:.1f}s")
# Checkpointing
if global_step % self.config.save_every == 0:
self.save_checkpoint(global_step, loss)
except Exception as e:
print(f" โš ๏ธ Error in batch: {e}")
continue
# Epoch summary
avg_epoch_loss = epoch_loss / max(num_batches, 1)
print(f"\n โœ“ Epoch {epoch + 1} complete | Avg loss: {avg_epoch_loss:.4f}")
# Final save
print("\n" + "=" * 60)
print("๐Ÿ’พ Saving Final Model")
print("=" * 60)
final_path = self.checkpoint_dir / "final"
final_path.mkdir(exist_ok=True)
        weights = {name: np.array(param)
                   for name, param in tree_flatten(self.projector.parameters())}
        np.savez(str(final_path / "projector.npz"), **weights)
# Save config
config = {
"fused_dim": self.fused_dim,
"hidden_dim": self.config.projector_hidden_dim,
"num_tokens": self.config.num_vision_tokens,
"embed_dim": 1536
}
with open(final_path / "config.json", "w") as f:
json.dump(config, f, indent=2)
print(f"โœ… Training complete! Model saved to {final_path}")
return final_path
def main():
"""Run training."""
config = TrainingConfig(
data_dir="data/train",
batch_size=2, # Small for demo
learning_rate=1e-4,
num_epochs=5,
save_every=50,
log_every=5,
)
trainer = OculusTrainer(config)
trainer.train()
if __name__ == "__main__":
main()