# DynaTraj/models/architectures/augmented_ode.py
# (repository page header captured during extraction: author Hang917,
#  commit "UPDATE: init mppi file and infra, loaded model success", 4c61b7c)
"""
Augmented Neural ODE Model Architecture
Augmented ODE extends vanilla ODE by appending encoded features to the state space.
The vector field operates on [x; aug(x); u] where aug(x) = encoder(x) and u is external input.
Only the original x dimensions are used for reconstruction loss.
"""
import torch
import torch.optim as optim
import itertools
import torchdiffeq
from ..base import BaseModel
from ..components.mlp import MLPWithCustomInit
from ..components.initialization import init_autoencoder_network, init_dynamics_network
from ..utils import interpolate_external_input, create_vector_field_with_external_input, interpolate_trajectory
class AugmentedODEModel(BaseModel):
    """Augmented Neural ODE model.

    Integrates dynamics in an enlarged state space [x; aug(x)], where
    aug(x) = encoder(x). The vector field may additionally receive an
    external input u, i.e. it maps [x; aug(x); u] -> d/dt[x; aug(x)].
    Only the original x dimensions are extracted as the predicted
    trajectory (and, per the module docstring, used for the
    reconstruction loss).
    """

    def __init__(self, config, device):
        """Build the augmentation encoder, vector field, and optimizers.

        Args:
            config: Hyperparameter dict. Required keys: 'input_dim',
                'hidden_dims_vector_field', 'dynamics_init_scale',
                'learning_rate'. Optional keys: 'hidden_dims_enc',
                'aug_dim', 'external_input_dim', 'encoder_activations',
                'vector_field_activations', 'default_activation',
                'encoder_lr', 'vector_field_lr', 'ode_method'.
                NOTE: mutated in place — 'aug_dim', 'total_dim' and
                'vector_field_input_dim' are written back for later use
                (e.g. by _augment_state).
            device: Torch device the sub-networks are moved to.
        """
        super().__init__(config, device)
        input_dim = config['input_dim']
        hidden_dims_enc = config.get('hidden_dims_enc', [32, 64])
        hidden_dims_vector_field = config['hidden_dims_vector_field']
        # Default aug_dim = input_dim, i.e. the augmentation dimension
        # matches the original state dimension.
        aug_dim = config.get('aug_dim', input_dim)  # This is the key setting!
        # External input support
        external_input_dim = config.get('external_input_dim', 0)
        # Total dimension for vector field: original x + augmented features + external input
        total_dim = input_dim + aug_dim + external_input_dim
        vector_field_output_dim = input_dim + aug_dim  # Only predict derivatives for state + augmented
        # Get activation functions (class name looked up on torch.nn)
        encoder_activations = config.get('encoder_activations', None)
        vector_field_activations = config.get('vector_field_activations', None)
        default_activation = getattr(torch.nn, config.get('default_activation', 'ReLU'))
        # Create encoder for augmentation: x -> aug(x)
        self.encoder = MLPWithCustomInit(
            input_dim, hidden_dims_enc, aug_dim,
            activation=default_activation,
            activation_per_layer=encoder_activations
        ).to(device)
        # Create vector field for augmented space: [x; aug(x); u] -> d/dt[x; aug(x)]
        self.vector_field = MLPWithCustomInit(
            total_dim, hidden_dims_vector_field, vector_field_output_dim,
            activation=default_activation,
            activation_per_layer=vector_field_activations
        ).to(device)
        # Initialize networks (dynamics gets a configurable init scale)
        init_autoencoder_network(self.encoder)
        init_dynamics_network(self.vector_field, scale=config['dynamics_init_scale'])
        # Create optimizers with (optionally) different learning rates,
        # falling back to the shared 'learning_rate' when unset.
        self.optimizers = {
            'encoder': optim.Adam(self.encoder.parameters(),
                                  lr=config.get('encoder_lr', config['learning_rate'])),
            'vector_field': optim.Adam(self.vector_field.parameters(),
                                       lr=config.get('vector_field_lr', config['learning_rate']))
        }
        # Print architecture information
        self.encoder.print_architecture("Augmentation Encoder")
        self.vector_field.print_architecture("Augmented Vector Field")
        # Store derived dimensions back into the config so other methods
        # (e.g. _augment_state) can read them.
        config['aug_dim'] = aug_dim
        config['total_dim'] = input_dim + aug_dim  # State space dimension
        config['vector_field_input_dim'] = total_dim  # Including external input
        print(f"Augmented ODE Configuration:")
        print(f"  Original dimension: {input_dim}")
        print(f"  Augmentation dimension: {aug_dim}")
        print(f"  External input dimension: {external_input_dim}")
        print(f"  Total state dimension: {input_dim + aug_dim}")
        print(f"  Vector field input dimension: {total_dim}")
        total_params = sum(p.numel() for p in itertools.chain(
            self.encoder.parameters(), self.vector_field.parameters()))
        print(f"Total parameters: {total_params}")

    def _augment_state(self, x):
        """
        Augment state x with encoded features.

        Args:
            x: State tensor [B, input_dim] or [T, B, input_dim]
        Returns:
            x_aug: Augmented state [B, total_dim] or [T, B, total_dim],
                i.e. [x; encoder(x)] concatenated on the last axis.
        """
        original_shape = x.shape
        # Flatten to [N, input_dim] for encoder
        if len(original_shape) == 3:  # [T, B, input_dim]
            T, B, D = original_shape
            x_flat = x.reshape(T * B, D)
        else:  # [B, input_dim]
            x_flat = x
        # Encode to get augmentation features.
        # NOTE(review): the first argument ([]) appears to be an unused
        # time placeholder — confirm against MLPWithCustomInit.forward.
        aug_features = self.encoder([], x_flat)  # [N, aug_dim]
        # Concatenate original state with augmented features
        x_aug_flat = torch.cat([x_flat, aug_features], dim=1)  # [N, total_dim]
        # Reshape back to original structure ('total_dim' here is the
        # state-space dim input_dim + aug_dim written by __init__)
        if len(original_shape) == 3:
            x_aug = x_aug_flat.reshape(T, B, self.config['total_dim'])
        else:
            x_aug = x_aug_flat
        return x_aug

    def _extract_original_state(self, x_aug):
        """
        Extract original state from augmented state.

        Args:
            x_aug: Augmented state [..., total_dim]
        Returns:
            x: Original state [..., input_dim] (leading slice of the
                last axis; the augmented features are dropped).
        """
        return x_aug[..., :self.config['input_dim']]

    def inference(self, xt_batch, ut_batch=None):
        """Roll out the augmented ODE from each batch's initial state.

        Args:
            xt_batch: [B, L, D+1] tensor; the first D columns are the
                state, the last column is the timestamp.
            ut_batch: Optional external-input tensor; the last column is
                treated as its timestamp and the rest as the input
                channels — presumably [B, L_u, U+1], verify against
                callers. Ignored unless self.has_external_input is set
                (attribute assumed to come from BaseModel — TODO confirm).

        Returns:
            Dict with keys: 'x0' (initial state), 't_batch',
            't_batch_relative', 't_eval' (shared uniform time grid),
            'x_trajectory' [T, B, input_dim], 'x_aug_trajectory'
            [T, B, total_dim] (for debugging/analysis), and 'u_interp'
            (interpolated external input or None).
        """
        # xt_batch: [B, L, D+1]
        state_dim = self.config['input_dim']
        x_batch = xt_batch[:, :, :state_dim]
        t_batch = xt_batch[:, :, state_dim]
        B, L, D = x_batch.shape
        # Get initial state and augment it
        x0 = x_batch[:, 0, :]  # [B, input_dim]
        x0_aug = self._augment_state(x0)  # [B, total_dim]
        # Setup time: shift each sequence to start at 0, then build one
        # shared uniform grid spanning the longest sequence (the 1e-5
        # epsilon keeps the last sample inside the grid).
        t_batch_relative = t_batch - t_batch[:, 0:1]
        max_time = torch.max(t_batch_relative[:, -1])
        t_eval = torch.linspace(0, max_time.item() + 1e-5, steps=L, device=self.device)
        # Handle external input using utility function
        u_interp = None
        if ut_batch is not None and self.has_external_input:
            # u_interp = interpolate_external_input(t_eval, t_batch_relative, ut_batch, self.external_input_dim)
            t_batch_u = ut_batch[:, :, -1]
            # NOTE(review): the input timestamps are shifted by the STATE
            # sequence's start time (t_batch[:, 0:1]), not their own first
            # entry — presumably to align u to the state clock; confirm.
            t_batch_u = t_batch_u - t_batch[:, 0:1]
            u_interp = interpolate_trajectory(
                t_eval,  # [T]
                t_batch_u,  # [B, L_u] (already relative)
                ut_batch[:, :, :-1].permute(1, 0, 2)  # traj: [L_u, B, U]
            ).permute(1, 0, 2)  # -> [T, B, U]

        # Create enhanced vector field - need special handling for augmented state
        def augmented_vector_field_with_input(t, x_aug):
            # No external input: vector field sees only [x; aug(x)].
            if u_interp is None:
                return self.vector_field(t, x_aug)
            # Nearest-grid lookup of the interpolated input at solver
            # time t (zero-order hold between t_eval points, clamped to
            # the grid ends).
            t_idx = torch.searchsorted(t_eval, t, right=False)
            t_idx = torch.clamp(t_idx, 0, len(t_eval) - 1)
            # Get interpolated external input at time t
            u_t = u_interp[t_idx, :, :]  # [B, U]
            # Concatenate augmented state with external input
            x_aug_u = torch.cat([x_aug, u_t], dim=-1)  # [B, total_dim + external_input_dim]
            return self.vector_field(t, x_aug_u)

        vector_field_func = augmented_vector_field_with_input
        # Solve ODE in augmented space (fixed-step RK4 by default)
        x_aug_trajectory = torchdiffeq.odeint(
            vector_field_func, x0_aug, t_eval,
            method=self.config.get('ode_method', 'rk4')
        )  # [T, B, total_dim]
        # Extract original state trajectory
        x_trajectory = self._extract_original_state(x_aug_trajectory)  # [T, B, input_dim]
        return {
            'x0': x0,
            't_batch': t_batch,
            't_batch_relative': t_batch_relative,
            't_eval': t_eval,
            'x_trajectory': x_trajectory,
            'x_aug_trajectory': x_aug_trajectory,  # For debugging/analysis
            'u_interp': u_interp,
        }