"""Base Model Classes for Neural Hybrid Systems."""

import os

import torch
import torch.nn as nn


class BaseModel(nn.Module):
    """Common base class for neural hybrid-system models.

    Subclasses register one optimizer per trainable component in
    ``self.optimizers`` (e.g. {'encoder': opt_e, 'decoder': opt_d}) so that
    components can be stepped jointly or one at a time.
    """

    def __init__(self, config, device):
        super().__init__()
        self.config = config
        self.device = device
        self.optimizers = {}

        # Optional external (control) input, taken from the config dict.
        self.has_external_input = config.get('has_external_input', False)
        self.external_input_dim = config.get('external_input_dim', 0)

    def zero_grad(self):
        """Zero gradients for all registered optimizers."""
        for opt in self.optimizers.values():
            opt.zero_grad()

    def step(self):
        """Step all registered optimizers."""
        for opt in self.optimizers.values():
            opt.step()

    def step_component(self, component_name):
        """Step the optimizer for a specific component only."""
        if component_name not in self.optimizers:
            raise ValueError(
                f"Component '{component_name}' not found in optimizers. "
                f"Available: {list(self.optimizers.keys())}"
            )
        self.optimizers[component_name].step()

    def zero_grad_component(self, component_name):
        """Zero gradients for a specific component only."""
        if component_name not in self.optimizers:
            raise ValueError(
                f"Component '{component_name}' not found in optimizers. "
                f"Available: {list(self.optimizers.keys())}"
            )
        self.optimizers[component_name].zero_grad()
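
    # Per-component stepping enables alternating-update training schemes,
    # e.g. (hypothetical component names, assuming a subclass registered
    # matching optimizers):
    #     model.zero_grad_component('encoder')
    #     encoder_loss.backward()
    #     model.step_component('encoder')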

    def inference(self, xt_batch, ut_batch=None):
        """Run the model over a batch of observed trajectories.

        Args:
            xt_batch: [B, L, D+1] states with a trailing time column.
            ut_batch: optional [B, L, U+1] external inputs with a trailing
                time column.

        Returns a dict with:
            - 'x0': [B, D] initial states
            - 't_batch': [B, L] observation times
            - 'flow': [T, B, D] ODE solution (in x or z space)
            - 't_eval': [T] solver evaluation times
            - 'z_encoded_traj': [B, L, latent_dim] (if applicable)
        """
        raise NotImplementedError
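
    # Shape sketch with hypothetical sizes: B=32 trajectories of length L=50
    # in a D=3 state space give an xt_batch of shape [32, 50, 4] (three state
    # dimensions plus the trailing time column) and a 'flow' of shape
    # [T, 32, 3] for T solver evaluation times.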

    def update(self, loss):
        """Backpropagate `loss` and step every registered optimizer."""
        loss.backward()
        self.step()

    def save_checkpoint(self, filepath, epoch=None, step=None, best_loss=None, metadata=None):
        """Save a checkpoint with model state, optimizer states, and metadata."""
        checkpoint = {
            'model_state_dict': self.state_dict(),
            'optimizers': {name: opt.state_dict() for name, opt in self.optimizers.items()},
            'config': self.config,
            'epoch': epoch,
            'step': step,
            'best_loss': best_loss,
            'metadata': metadata or {},
        }

        # dirname() is empty for bare filenames, and os.makedirs('') raises.
        dirname = os.path.dirname(filepath)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        torch.save(checkpoint, filepath)
        print(f"Checkpoint saved to: {filepath}")
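
    # Checkpoints are plain dicts, so they can also be inspected directly with
    # torch.load(filepath, map_location='cpu') outside this class (pass
    # weights_only=False on newer PyTorch if the config holds custom objects).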

    def load_checkpoint(self, filepath, map_location=None, strict=True):
        """Load a checkpoint and restore model and optimizer states.

        Supports both the current format (a single 'model_state_dict') and
        the old per-component format. Returns the checkpoint's bookkeeping
        fields (epoch, step, best_loss, config, metadata).
        """
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"Checkpoint not found: {filepath}")

        if map_location is None:
            map_location = self.device

        checkpoint = torch.load(filepath, map_location=map_location)

        if 'model_state_dict' in checkpoint:
            # New format: a single state dict for the whole model.
            try:
                self.load_state_dict(checkpoint['model_state_dict'], strict=strict)
                print(f"Model state loaded from: {filepath} (new format)")
            except RuntimeError as e:
                if "size mismatch" in str(e) and not strict:
                    print("Warning: dimension mismatch detected, loading with strict=False")
                    print("Using current config dimensions as ground truth")
                    self.load_state_dict(checkpoint['model_state_dict'], strict=False)
                    print("Model state loaded with dimension mismatches ignored")
                else:
                    raise

            # Restore optimizer states for components that still exist.
            if 'optimizers' in checkpoint and self.optimizers:
                for name, opt_state in checkpoint['optimizers'].items():
                    if name in self.optimizers:
                        self.optimizers[name].load_state_dict(opt_state)
                        print(f"Optimizer '{name}' state loaded")
                    else:
                        print(f"Warning: optimizer '{name}' not found in current model, skipping")

            return {
                'epoch': checkpoint.get('epoch'),
                'step': checkpoint.get('step'),
                'best_loss': checkpoint.get('best_loss'),
                'config': checkpoint.get('config'),
                'metadata': checkpoint.get('metadata', {}),
            }

        # Old format: one state dict per component.
        print(f"Loading model from old-format checkpoint: {filepath}")

        component_names = [
            'encoder', 'vector_field', 'decoder', 'ag_function', 'rnn',
            'lstm', 'output_layer', 'event_function', 'state_reset',
        ]
        loaded_components = []

        for name in component_names:
            if name not in checkpoint or not hasattr(self, name):
                continue
            module = getattr(self, name)
            try:
                module.load_state_dict(checkpoint[name], strict=strict)
                loaded_components.append(name)
            except RuntimeError as e:
                if "size mismatch" in str(e):
                    print(f"Warning: {name} dimension mismatch, using config dimensions")
                    module.load_state_dict(checkpoint[name], strict=False)
                    # Append the bare name so the optimizer lookup below
                    # still matches the '<name>_optimizer' checkpoint keys.
                    loaded_components.append(name)
                else:
                    print(f"Failed to load {name}: {e}")

        print(f"Loaded components: {loaded_components}")

        # Restore per-component optimizer states saved as '<name>_optimizer'.
        if self.optimizers:
            for name in loaded_components:
                opt_key = f"{name}_optimizer"
                if opt_key in checkpoint and name in self.optimizers:
                    self.optimizers[name].load_state_dict(checkpoint[opt_key])
                    print(f"Optimizer '{name}' state loaded")

        return {
            'epoch': checkpoint.get('epoch'),
            'step': checkpoint.get('step'),
            'best_loss': checkpoint.get('best_loss'),
            'config': checkpoint.get('config'),
            'metadata': checkpoint.get('experiment_info', {}),
        }
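

# Minimal usage sketch (hypothetical subclass, config, and sizes; not part of
# the library API). It exercises per-component optimizers, a training update,
# and a checkpoint round trip. The toy 'flow' is returned as [B, L, D] for
# simplicity rather than the [T, B, D] solver layout described in `inference`.
if __name__ == "__main__":
    import tempfile

    class ToyModel(BaseModel):
        def __init__(self, config, device):
            super().__init__(config, device)
            self.encoder = nn.Linear(3, 8)
            self.decoder = nn.Linear(8, 3)
            self.optimizers = {
                'encoder': torch.optim.Adam(self.encoder.parameters(), lr=1e-3),
                'decoder': torch.optim.Adam(self.decoder.parameters(), lr=1e-3),
            }

        def inference(self, xt_batch, ut_batch=None):
            x = xt_batch[..., :-1]  # strip the trailing time column
            return {
                'x0': x[:, 0],
                't_batch': xt_batch[..., -1],
                'flow': self.decoder(self.encoder(x)),
            }

    model = ToyModel({'has_external_input': False}, torch.device('cpu'))
    xt = torch.randn(4, 10, 4)  # [B=4, L=10, D+1=4]

    # Joint update of all components.
    model.zero_grad()
    loss = model.inference(xt)['flow'].pow(2).mean()
    model.update(loss)

    # Alternating update of a single component.
    model.zero_grad_component('encoder')
    model.inference(xt)['flow'].pow(2).mean().backward()
    model.step_component('encoder')

    # Checkpoint round trip.
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, 'ckpt.pt')
        model.save_checkpoint(path, epoch=1)
        info = model.load_checkpoint(path)
        print(f"Restored epoch: {info['epoch']}")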