"""
Base Model Classes for Neural Hybrid Systems
"""
import os

import torch
import torch.nn as nn


class BaseModel(nn.Module):
    def __init__(self, config, device):
        super().__init__()
        self.config = config
        self.device = device
        self.optimizers = {}
        # Support for external inputs
        self.has_external_input = config.get('has_external_input', False)
        self.external_input_dim = config.get('external_input_dim', 0)

    def zero_grad(self):
        # Overrides nn.Module.zero_grad(): gradients are cleared through the
        # registered optimizers rather than directly on the parameters.
        for opt in self.optimizers.values():
            opt.zero_grad()

    def step(self):
        for opt in self.optimizers.values():
            opt.step()

    def step_component(self, component_name):
        """Step optimizer for a specific component only"""
        if component_name in self.optimizers:
            self.optimizers[component_name].step()
        else:
            raise ValueError(
                f"Component '{component_name}' not found in optimizers. "
                f"Available: {list(self.optimizers.keys())}"
            )

    def zero_grad_component(self, component_name):
        """Zero gradients for a specific component only"""
        if component_name in self.optimizers:
            self.optimizers[component_name].zero_grad()
        else:
            raise ValueError(
                f"Component '{component_name}' not found in optimizers. "
                f"Available: {list(self.optimizers.keys())}"
            )
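
    # Usage sketch for per-component updates (the component names below are
    # illustrative -- whatever keys a subclass registers in self.optimizers):
    #
    #     model.zero_grad_component('decoder')
    #     loss.backward()
    #     model.step_component('decoder')   # other components stay untouched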

    def inference(self, xt_batch, ut_batch=None):
        """
        Given xt_batch: [B, L, D+1] and optional ut_batch: [B, L, U+1], return dict with
        - x0: [B, D]
        - t_batch: [B, L]
        - flow: [T, B, D] (ODE solution in x or z space)
        - t_eval: [T]
        - z_encoded_traj: [B, L, latent_dim] (if applicable)
        """
        raise NotImplementedError
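
    # Shape sketch for callers (dummy dimensions; that the trailing "+1"
    # channel carries observation times is an assumption inferred from the
    # t_batch output documented above):
    #
    #     B, L, D = 32, 50, 3
    #     xt_batch = torch.randn(B, L, D + 1)   # states plus time channel
    #     out = model.inference(xt_batch)       # out['flow']: [T, B, D]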

    def update(self, loss):
        """Backpropagate the loss and step all registered optimizers"""
        loss.backward()
        self.step()
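
    # Minimal training-step sketch ('criterion', 'target', and the batch are
    # placeholders, not part of this module):
    #
    #     model.zero_grad()
    #     loss = criterion(model.inference(xt_batch)['flow'], target)
    #     model.update(loss)   # backward() followed by step() on all optimizers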

    def save_checkpoint(self, filepath, epoch=None, step=None, best_loss=None, metadata=None):
        """Save model checkpoint with optimizers and metadata"""
        checkpoint = {
            'model_state_dict': self.state_dict(),
            'optimizers': {name: opt.state_dict() for name, opt in self.optimizers.items()},
            'config': self.config,
            'epoch': epoch,
            'step': step,
            'best_loss': best_loss,
            'metadata': metadata or {},
        }
        # Guard against a bare filename: os.makedirs('') raises.
        dirname = os.path.dirname(filepath)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        torch.save(checkpoint, filepath)
        print(f"Checkpoint saved to: {filepath}")

    def load_checkpoint(self, filepath, map_location=None, strict=True):
        """Load model checkpoint and restore optimizers"""
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"Checkpoint not found: {filepath}")
        if map_location is None:
            map_location = self.device
        # weights_only=False because the checkpoint stores non-tensor objects
        # (config, metadata); needed on PyTorch >= 2.6, where the default changed.
        checkpoint = torch.load(filepath, map_location=map_location, weights_only=False)
        # Handle different checkpoint formats
        if 'model_state_dict' in checkpoint:
            # New format: checkpoint saved with save_checkpoint()
            try:
                self.load_state_dict(checkpoint['model_state_dict'], strict=strict)
                print(f"Model state loaded from: {filepath} (new format)")
            except RuntimeError as e:
                if "size mismatch" in str(e) and not strict:
                    print("Warning: dimension mismatch detected, loading with strict=False")
                    print("Using current config dimensions as ground truth")
                    self.load_state_dict(checkpoint['model_state_dict'], strict=False)
                    print("Model state loaded with dimension mismatches ignored")
                else:
                    raise
            # Load optimizer states if available and optimizers exist
            if 'optimizers' in checkpoint and self.optimizers:
                for name, opt_state in checkpoint['optimizers'].items():
                    if name in self.optimizers:
                        self.optimizers[name].load_state_dict(opt_state)
                        print(f"Optimizer '{name}' state loaded")
                    else:
                        print(f"Warning: Optimizer '{name}' not found in current model, skipping")
            # Return metadata for trainer to use
            return {
                'epoch': checkpoint.get('epoch'),
                'step': checkpoint.get('step'),
                'best_loss': checkpoint.get('best_loss'),
                'config': checkpoint.get('config'),
                'metadata': checkpoint.get('metadata', {}),
            }
        else:
            # Old format: component states saved separately
            print(f"Loading model from old format checkpoint: {filepath}")
            loaded_components = []
            # Every component is loaded with the same dimension-mismatch
            # handling, so they all go through one map instead of a
            # copy-pasted block per component.
            component_map = {
                'encoder': 'encoder',
                'vector_field': 'vector_field',
                'decoder': 'decoder',
                'ag_function': 'ag_function',
                'rnn': 'rnn',
                'lstm': 'lstm',
                'output_layer': 'output_layer',
                'event_function': 'event_function',
                'state_reset': 'state_reset',
            }
            for checkpoint_key, attr_name in component_map.items():
                if checkpoint_key in checkpoint and hasattr(self, attr_name):
                    try:
                        getattr(self, attr_name).load_state_dict(checkpoint[checkpoint_key], strict=strict)
                        loaded_components.append(attr_name)
                    except RuntimeError as e:
                        if "size mismatch" in str(e):
                            print(f"Warning: {attr_name} dimension mismatch, using config dimensions")
                            getattr(self, attr_name).load_state_dict(checkpoint[checkpoint_key], strict=False)
                            loaded_components.append(f"{attr_name} (with mismatches)")
                        else:
                            print(f"Failed to load {attr_name}: {e}")
            print(f"Loaded components: {loaded_components}")
            # Try to load optimizer states (old format: component_name_optimizer).
            # Strip the ' (with mismatches)' suffix first, otherwise the key
            # lookup silently fails for components loaded with mismatches.
            if self.optimizers:
                for component_name in loaded_components:
                    base_name = component_name.split(' ')[0]
                    opt_key = f"{base_name}_optimizer"
                    if opt_key in checkpoint and base_name in self.optimizers:
                        self.optimizers[base_name].load_state_dict(checkpoint[opt_key])
                        print(f"Optimizer '{base_name}' state loaded")
            # Return metadata (old format may not have all fields)
            return {
                'epoch': checkpoint.get('epoch'),
                'step': checkpoint.get('step'),
                'best_loss': checkpoint.get('best_loss'),
                'config': checkpoint.get('config'),
                'metadata': checkpoint.get('experiment_info', {}),  # Old format uses 'experiment_info'
            }
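

if __name__ == "__main__":
    # Self-contained smoke test: a minimal sketch, not one of the real models
    # in this repo. TinyModel, its dimensions, and the optimizer choice are
    # illustrative assumptions.
    import tempfile

    class TinyModel(BaseModel):
        def __init__(self, config, device):
            super().__init__(config, device)
            self.encoder = nn.Linear(3, 3)
            self.optimizers['encoder'] = torch.optim.Adam(self.encoder.parameters(), lr=1e-3)

        def inference(self, xt_batch, ut_batch=None):
            # Trivial stand-in for a real encode / ODE-solve / decode pipeline.
            return {'x0': self.encoder(xt_batch[:, 0, :3])}

    model = TinyModel({'has_external_input': False}, device='cpu')
    xt = torch.randn(4, 10, 4)  # [B, L, D+1] with D = 3
    model.zero_grad()
    loss = model.inference(xt)['x0'].pow(2).mean()
    model.update(loss)

    ckpt_path = os.path.join(tempfile.gettempdir(), 'base_model_demo', 'ckpt.pt')
    model.save_checkpoint(ckpt_path, epoch=0, step=1)
    state = model.load_checkpoint(ckpt_path)
    print(f"Resumed at epoch={state['epoch']}, step={state['step']}")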