# DynaTraj/models/components/initialization.py
# Provenance: Hang917, commit 4c61b7c — "UPDATE: init mppi file and infra, loaded model success"
"""
Network Initialization Functions
"""
import torch.nn as nn
def init_dynamics_network(model, scale=0.01):
    """Apply a small zero-mean Gaussian init to every Linear layer in *model*.

    Args:
        model: Network whose ``nn.Linear`` submodules are (re)initialized.
        scale: Standard deviation of the weight distribution (default: 0.01;
            small weights keep the dynamics near the identity at the start).
    """
    linear_layers = (m for m in model.modules() if isinstance(m, nn.Linear))
    for layer in linear_layers:
        nn.init.normal_(layer.weight, mean=0.0, std=scale)
        if layer.bias is not None:
            nn.init.zeros_(layer.bias)
def init_autoencoder_network(model):
    """Xavier-uniform initialization (tanh gain) for all Linear layers.

    Args:
        model: Autoencoder whose ``nn.Linear`` submodules are (re)initialized;
            biases, when present, are zeroed.
    """
    # calculate_gain is pure, so hoisting it out of the loop is behavior-neutral.
    tanh_gain = nn.init.calculate_gain('tanh')
    for layer in model.modules():
        if not isinstance(layer, nn.Linear):
            continue
        nn.init.xavier_uniform_(layer.weight, gain=tanh_gain)
        if layer.bias is not None:
            nn.init.zeros_(layer.bias)
def init_event_network(model, scale=0.1):
    """Initialize an event-function network with comparatively large variance.

    The larger spread guards against runaway event boundaries: if the event
    function were never triggered there would be no gradients through it and
    the model would degenerate to a vanilla Neural ODE.

    Args:
        model: Event function network.
        scale: Standard deviation for weight initialization (default: 0.1,
            larger than the dynamics network's). Biases use half this spread.
    """
    for layer in model.modules():
        if not isinstance(layer, nn.Linear):
            continue
        nn.init.normal_(layer.weight, mean=0.0, std=scale)
        if layer.bias is not None:
            # Non-zero random biases encourage the event to actually fire.
            nn.init.normal_(layer.bias, mean=0.0, std=scale * 0.5)
def init_reset_network(model, scale=0.05):
    """Initialize a state-reset network with moderate variance.

    Reset networks should produce meaningful state perturbations without
    being too aggressive, hence a spread between the dynamics (0.01) and
    event (0.1) networks.

    Args:
        model: State reset network.
        scale: Standard deviation for weight initialization (default: 0.05).
    """
    linears = [m for m in model.modules() if isinstance(m, nn.Linear)]
    for layer in linears:
        nn.init.normal_(layer.weight, mean=0.0, std=scale)
        if layer.bias is not None:
            nn.init.zeros_(layer.bias)