# DynaTraj/models/base.py
"""
Base Model Classes for Neural Hybrid Systems
"""
import torch
import torch.nn as nn
import os


class BaseModel(nn.Module):
def __init__(self, config, device):
super().__init__()
self.config = config
self.device = device
self.optimizers = {}
# Support for external inputs
self.has_external_input = config.get('has_external_input', False)
self.external_input_dim = config.get('external_input_dim', 0)
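    # Subclasses are expected to register one optimizer per trainable component,
    # e.g. (hypothetical component names and hyperparameters):
    #   self.optimizers['encoder'] = torch.optim.Adam(self.encoder.parameters(), lr=1e-3)
    #   self.optimizers['decoder'] = torch.optim.Adam(self.decoder.parameters(), lr=1e-3)
    # zero_grad()/step() below fan out to every registered optimizer, while the
    # *_component() variants target a single entry.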
def zero_grad(self):
for opt in self.optimizers.values():
opt.zero_grad()
def step(self):
for opt in self.optimizers.values():
opt.step()
def step_component(self, component_name):
"""Step optimizer for a specific component only"""
if component_name in self.optimizers:
self.optimizers[component_name].step()
else:
raise ValueError(f"Component '{component_name}' not found in optimizers. Available: {list(self.optimizers.keys())}")
def zero_grad_component(self, component_name):
"""Zero gradients for a specific component only"""
if component_name in self.optimizers:
self.optimizers[component_name].zero_grad()
else:
raise ValueError(f"Component '{component_name}' not found in optimizers. Available: {list(self.optimizers.keys())}")
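    # Typical component-wise update (hypothetical component name and loss; a
    # sketch of how the per-component helpers are meant to be combined):
    #   model.zero_grad_component('encoder')
    #   encoder_loss.backward()
    #   model.step_component('encoder')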
def inference(self, xt_batch, ut_batch=None):
"""
Given xt_batch: [B, L, D+1] and optional ut_batch: [B, L, U+1], return dict with
- x0: [B, D]
- t_batch: [B, L]
- flow: [T, B, D] (ODE solution in x or z space)
- t_eval: [T]
- z_encoded_traj: [B, L, latent_dim] (if applicable)
"""
raise NotImplementedError
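    # Illustrative return value for a latent-ODE style subclass (hypothetical
    # tensor names; shapes follow the docstring above):
    #   return {
    #       'x0': x0,                      # [B, D] initial state
    #       't_batch': t_batch,            # [B, L] observation times
    #       'flow': flow,                  # [T, B, D] ODE solution
    #       't_eval': t_eval,              # [T] solver time grid
    #       'z_encoded_traj': z_encoded,   # [B, L, latent_dim], if applicable
    #   }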
def update(self, loss):
loss.backward()
self.step()
def save_checkpoint(self, filepath, epoch=None, step=None, best_loss=None, metadata=None):
"""Save model checkpoint with optimizers and metadata"""
checkpoint = {
'model_state_dict': self.state_dict(),
'optimizers': {name: opt.state_dict() for name, opt in self.optimizers.items()},
'config': self.config,
'epoch': epoch,
'step': step,
'best_loss': best_loss,
'metadata': metadata or {}
}
        # os.makedirs('') raises, so only create the directory when the path has one
        checkpoint_dir = os.path.dirname(filepath)
        if checkpoint_dir:
            os.makedirs(checkpoint_dir, exist_ok=True)
torch.save(checkpoint, filepath)
print(f"Checkpoint saved to: {filepath}")
def load_checkpoint(self, filepath, map_location=None, strict=True):
"""Load model checkpoint and restore optimizers"""
if not os.path.exists(filepath):
raise FileNotFoundError(f"Checkpoint not found: {filepath}")
if map_location is None:
map_location = self.device
checkpoint = torch.load(filepath, map_location=map_location)
# Handle different checkpoint formats
if 'model_state_dict' in checkpoint:
# New format: checkpoint saved with save_checkpoint()
try:
self.load_state_dict(checkpoint['model_state_dict'], strict=strict)
print(f"Model state loaded from: {filepath} (new format)")
        except RuntimeError as e:
            if "size mismatch" in str(e) and not strict:
                # strict=False alone does not skip shape-mismatched tensors, so
                # filter them out and keep the current config's dimensions.
                print("Warning: dimension mismatch detected, keeping current config dimensions")
                own_state = self.state_dict()
                filtered = {k: v for k, v in checkpoint['model_state_dict'].items()
                            if k in own_state and v.shape == own_state[k].shape}
                self.load_state_dict(filtered, strict=False)
                print("Model state loaded; shape-mismatched parameters were skipped")
            else:
                raise
# Load optimizer states if available and optimizers exist
if 'optimizers' in checkpoint and self.optimizers:
for name, opt_state in checkpoint['optimizers'].items():
if name in self.optimizers:
self.optimizers[name].load_state_dict(opt_state)
print(f"Optimizer '{name}' state loaded")
else:
print(f"Warning: Optimizer '{name}' not found in current model, skipping")
# Return metadata for trainer to use
return {
'epoch': checkpoint.get('epoch'),
'step': checkpoint.get('step'),
'best_loss': checkpoint.get('best_loss'),
'config': checkpoint.get('config'),
'metadata': checkpoint.get('metadata', {})
}
else:
# Old format: component states saved separately (e.g., 'encoder', 'vector_field', 'decoder')
print(f"Loading model from old format checkpoint: {filepath}")
            # Every component follows the same load pattern, so handle them
            # uniformly: try a strict load first, then fall back to skipping
            # shape-mismatched tensors (strict=False alone does not skip these).
            loaded_components = []

            def _load_component(module, state, name):
                try:
                    module.load_state_dict(state, strict=strict)
                    loaded_components.append(name)
                except RuntimeError as e:
                    if "size mismatch" in str(e):
                        print(f"Warning: {name} dimension mismatch, using config dimensions")
                        own_state = module.state_dict()
                        filtered = {k: v for k, v in state.items()
                                    if k in own_state and v.shape == own_state[k].shape}
                        module.load_state_dict(filtered, strict=False)
                        loaded_components.append(f"{name} (with mismatches)")
                    else:
                        print(f"Failed to load {name}: {e}")

            component_names = ['encoder', 'vector_field', 'decoder', 'ag_function', 'rnn',
                               'lstm', 'output_layer', 'event_function', 'state_reset']
            for name in component_names:
                if name in checkpoint and hasattr(self, name):
                    _load_component(getattr(self, name), checkpoint[name], name)
print(f"Loaded components: {loaded_components}")
            # Try to load optimizer states (old format key: <component>_optimizer).
            # Strip the "(with mismatches)" suffix so the lookup key stays clean.
            if self.optimizers:
                for component_name in loaded_components:
                    base_name = component_name.split(' ')[0]
                    opt_key = f"{base_name}_optimizer"
                    if opt_key in checkpoint and base_name in self.optimizers:
                        self.optimizers[base_name].load_state_dict(checkpoint[opt_key])
                        print(f"Optimizer '{base_name}' state loaded")
# Return metadata (old format may not have all fields)
return {
'epoch': checkpoint.get('epoch'),
'step': checkpoint.get('step'),
'best_loss': checkpoint.get('best_loss'),
'config': checkpoint.get('config'),
'metadata': checkpoint.get('experiment_info', {}) # Old format uses 'experiment_info'
}
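

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): `ToyModel`, its layer sizes, and
# the optimizer name 'net' are hypothetical stand-ins for a real subclass.
# Exercises the optimizer helpers and a save/load round trip.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import tempfile

    class ToyModel(BaseModel):
        def __init__(self, config, device):
            super().__init__(config, device)
            self.net = nn.Linear(4, 4)
            self.optimizers['net'] = torch.optim.Adam(self.net.parameters(), lr=1e-3)

    model = ToyModel({'has_external_input': False}, torch.device('cpu'))

    # One training step driven through the BaseModel helpers.
    loss = model.net(torch.randn(8, 4)).pow(2).mean()
    model.zero_grad()
    model.update(loss)  # backward() + step() on all registered optimizers

    # Round-trip through the new checkpoint format.
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'checkpoints', 'toy.pt')
        model.save_checkpoint(path, epoch=1, best_loss=float(loss))
        meta = model.load_checkpoint(path, map_location='cpu')
        print('Restored epoch:', meta['epoch'])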